1 #define JEMALLOC_EXTENT_C_
2 #include "jemalloc/internal/jemalloc_preamble.h"
3 #include "jemalloc/internal/jemalloc_internal_includes.h"
4 
5 #include "jemalloc/internal/assert.h"
6 #include "jemalloc/internal/extent_dss.h"
7 #include "jemalloc/internal/extent_mmap.h"
8 #include "jemalloc/internal/ph.h"
9 #include "jemalloc/internal/rtree.h"
10 #include "jemalloc/internal/mutex.h"
11 #include "jemalloc/internal/mutex_pool.h"
12 
13 /******************************************************************************/
14 /* Data. */
15 
16 rtree_t		extents_rtree;
17 /* Keyed by the address of the extent_t being protected. */
18 mutex_pool_t	extent_mutex_pool;
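/*
 * extent_lock()/extent_unlock() hash the extent pointer into this pool, so two
 * unrelated extents may occasionally share (and contend on) the same mutex;
 * correctness only requires that a given extent always maps to the same mutex.
 */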
19 
20 size_t opt_lg_extent_max_active_fit = LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT;
21 
22 static const bitmap_info_t extents_bitmap_info =
23     BITMAP_INFO_INITIALIZER(NPSIZES+1);
24 
25 static void *extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr,
26     size_t size, size_t alignment, bool *zero, bool *commit,
27     unsigned arena_ind);
28 static bool extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr,
29     size_t size, bool committed, unsigned arena_ind);
30 static void extent_destroy_default(extent_hooks_t *extent_hooks, void *addr,
31     size_t size, bool committed, unsigned arena_ind);
32 static bool extent_commit_default(extent_hooks_t *extent_hooks, void *addr,
33     size_t size, size_t offset, size_t length, unsigned arena_ind);
34 static bool extent_commit_impl(tsdn_t *tsdn, arena_t *arena,
35     extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
36     size_t length, bool growing_retained);
37 static bool extent_decommit_default(extent_hooks_t *extent_hooks,
38     void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
39 #ifdef PAGES_CAN_PURGE_LAZY
40 static bool extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr,
41     size_t size, size_t offset, size_t length, unsigned arena_ind);
42 #endif
43 static bool extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena,
44     extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
45     size_t length, bool growing_retained);
46 #ifdef PAGES_CAN_PURGE_FORCED
47 static bool extent_purge_forced_default(extent_hooks_t *extent_hooks,
48     void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
49 #endif
50 static bool extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena,
51     extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
52     size_t length, bool growing_retained);
53 #ifdef JEMALLOC_MAPS_COALESCE
54 static bool extent_split_default(extent_hooks_t *extent_hooks, void *addr,
55     size_t size, size_t size_a, size_t size_b, bool committed,
56     unsigned arena_ind);
57 #endif
58 static extent_t *extent_split_impl(tsdn_t *tsdn, arena_t *arena,
59     extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
60     szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
61     bool growing_retained);
62 #ifdef JEMALLOC_MAPS_COALESCE
63 static bool extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a,
64     size_t size_a, void *addr_b, size_t size_b, bool committed,
65     unsigned arena_ind);
66 #endif
67 static bool extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
68     extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b,
69     bool growing_retained);
70 
71 const extent_hooks_t	extent_hooks_default = {
72 	extent_alloc_default,
73 	extent_dalloc_default,
74 	extent_destroy_default,
75 	extent_commit_default,
76 	extent_decommit_default
77 #ifdef PAGES_CAN_PURGE_LAZY
78 	,
79 	extent_purge_lazy_default
80 #else
81 	,
82 	NULL
83 #endif
84 #ifdef PAGES_CAN_PURGE_FORCED
85 	,
86 	extent_purge_forced_default
87 #else
88 	,
89 	NULL
90 #endif
91 #ifdef JEMALLOC_MAPS_COALESCE
92 	,
93 	extent_split_default,
94 	extent_merge_default
95 #endif
96 };
97 
98 /* Used exclusively for gdump triggering. */
99 static atomic_zu_t curpages;
100 static atomic_zu_t highpages;
101 
102 /******************************************************************************/
103 /*
104  * Function prototypes for static functions that are referenced prior to
105  * definition.
106  */
107 
108 static void extent_deregister(tsdn_t *tsdn, extent_t *extent);
109 static extent_t *extent_recycle(tsdn_t *tsdn, arena_t *arena,
110     extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr,
111     size_t usize, size_t pad, size_t alignment, bool slab, szind_t szind,
112     bool *zero, bool *commit, bool growing_retained);
113 static extent_t *extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
114     extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
115     extent_t *extent, bool *coalesced, bool growing_retained);
116 static void extent_record(tsdn_t *tsdn, arena_t *arena,
117     extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent,
118     bool growing_retained);
119 
120 /******************************************************************************/
121 
122 ph_gen(UNUSED, extent_avail_, extent_tree_t, extent_t, ph_link,
123     extent_esnead_comp)
124 
125 typedef enum {
126 	lock_result_success,
127 	lock_result_failure,
128 	lock_result_no_extent
129 } lock_result_t;
130 
131 static lock_result_t
132 extent_rtree_leaf_elm_try_lock(tsdn_t *tsdn, rtree_leaf_elm_t *elm,
133     extent_t **result) {
134 	extent_t *extent1 = rtree_leaf_elm_extent_read(tsdn, &extents_rtree,
135 	    elm, true);
136 
137 	if (extent1 == NULL) {
138 		return lock_result_no_extent;
139 	}
140 	/*
141 	 * It's possible that the extent changed out from under us, and with it
142 	 * the leaf->extent mapping.  We have to recheck while holding the lock.
143 	 */
144 	extent_lock(tsdn, extent1);
145 	extent_t *extent2 = rtree_leaf_elm_extent_read(tsdn,
146 	    &extents_rtree, elm, true);
147 
148 	if (extent1 == extent2) {
149 		*result = extent1;
150 		return lock_result_success;
151 	} else {
152 		extent_unlock(tsdn, extent1);
153 		return lock_result_failure;
154 	}
155 }
156 
157 /*
158  * Returns a pool-locked extent_t * if there's one associated with the given
159  * address, and NULL otherwise.
160  */
161 static extent_t *
162 extent_lock_from_addr(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, void *addr) {
163 	extent_t *ret = NULL;
164 	rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &extents_rtree,
165 	    rtree_ctx, (uintptr_t)addr, false, false);
166 	if (elm == NULL) {
167 		return NULL;
168 	}
169 	lock_result_t lock_result;
170 	do {
171 		lock_result = extent_rtree_leaf_elm_try_lock(tsdn, elm, &ret);
172 	} while (lock_result == lock_result_failure);
173 	return ret;
174 }
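/*
 * Typical usage sketch (mirroring the new_addr path of
 * extent_recycle_extract() below): look up and lock the extent owning an
 * address, validate it, then drop the pool lock:
 *
 *	extent_t *e = extent_lock_from_addr(tsdn, rtree_ctx, addr);
 *	if (e != NULL) {
 *		... inspect/validate e ...
 *		extent_unlock(tsdn, e);
 *	}
 */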
175 
176 extent_t *
177 extent_alloc(tsdn_t *tsdn, arena_t *arena) {
178 	malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
179 	extent_t *extent = extent_avail_first(&arena->extent_avail);
180 	if (extent == NULL) {
181 		malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
182 		return base_alloc_extent(tsdn, arena->base);
183 	}
184 	extent_avail_remove(&arena->extent_avail, extent);
185 	malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
186 	return extent;
187 }
188 
189 void
190 extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
191 	malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
192 	extent_avail_insert(&arena->extent_avail, extent);
193 	malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
194 }
195 
196 extent_hooks_t *
197 extent_hooks_get(arena_t *arena) {
198 	return base_extent_hooks_get(arena->base);
199 }
200 
201 extent_hooks_t *
202 extent_hooks_set(tsd_t *tsd, arena_t *arena, extent_hooks_t *extent_hooks) {
203 	background_thread_info_t *info;
204 	if (have_background_thread) {
205 		info = arena_background_thread_info_get(arena);
206 		malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
207 	}
208 	extent_hooks_t *ret = base_extent_hooks_set(arena->base, extent_hooks);
209 	if (have_background_thread) {
210 		malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
211 	}
212 
213 	return ret;
214 }
215 
216 static void
217 extent_hooks_assure_initialized(arena_t *arena,
218     extent_hooks_t **r_extent_hooks) {
219 	if (*r_extent_hooks == EXTENT_HOOKS_INITIALIZER) {
220 		*r_extent_hooks = extent_hooks_get(arena);
221 	}
222 }
223 
224 #ifndef JEMALLOC_JET
225 static
226 #endif
227 size_t
228 extent_size_quantize_floor(size_t size) {
229 	size_t ret;
230 	pszind_t pind;
231 
232 	assert(size > 0);
233 	assert((size & PAGE_MASK) == 0);
234 
235 	pind = sz_psz2ind(size - sz_large_pad + 1);
236 	if (pind == 0) {
237 		/*
238 		 * Avoid underflow.  This short-circuit would also do the right
239 		 * thing for all sizes in the range for which there are
240 		 * PAGE-spaced size classes, but it's simplest to just handle
241 		 * the one case that would cause erroneous results.
242 		 */
243 		return size;
244 	}
245 	ret = sz_pind2sz(pind - 1) + sz_large_pad;
246 	assert(ret <= size);
247 	return ret;
248 }
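/*
 * Example, assuming the default 4 KiB page and sz_large_pad == 0: 32 KiB is
 * itself a page-size class and quantizes to 32 KiB, whereas 36 KiB (which lies
 * between the 32 KiB and 40 KiB classes) also quantizes down to 32 KiB.
 */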
249 
250 #ifndef JEMALLOC_JET
251 static
252 #endif
253 size_t
254 extent_size_quantize_ceil(size_t size) {
255 	size_t ret;
256 
257 	assert(size > 0);
258 	assert(size - sz_large_pad <= LARGE_MAXCLASS);
259 	assert((size & PAGE_MASK) == 0);
260 
261 	ret = extent_size_quantize_floor(size);
262 	if (ret < size) {
263 		/*
264 		 * Skip a quantization that may have an adequately large extent,
265 		 * because under-sized extents may be mixed in.  This only
266 		 * happens when an unusual size is requested, i.e. for aligned
267 		 * allocation, and is just one of several places where linear
268 		 * search would potentially find sufficiently aligned available
269 		 * memory somewhere lower.
270 		 */
271 		ret = sz_pind2sz(sz_psz2ind(ret - sz_large_pad + 1)) +
272 		    sz_large_pad;
273 	}
274 	return ret;
275 }
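/*
 * Continuing the example above (4 KiB pages, sz_large_pad == 0): 36 KiB
 * quantizes up to the next page-size class, 40 KiB, while sizes that already
 * are a class (e.g. 32 KiB) map to themselves.
 */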
276 
277 /* Generate pairing heap functions. */
278 ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_snad_comp)
279 
280 bool
281 extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state,
282     bool delay_coalesce) {
283 	if (malloc_mutex_init(&extents->mtx, "extents", WITNESS_RANK_EXTENTS,
284 	    malloc_mutex_rank_exclusive)) {
285 		return true;
286 	}
287 	for (unsigned i = 0; i < NPSIZES+1; i++) {
288 		extent_heap_new(&extents->heaps[i]);
289 	}
290 	bitmap_init(extents->bitmap, &extents_bitmap_info, true);
291 	extent_list_init(&extents->lru);
292 	atomic_store_zu(&extents->npages, 0, ATOMIC_RELAXED);
293 	extents->state = state;
294 	extents->delay_coalesce = delay_coalesce;
295 	return false;
296 }
297 
298 extent_state_t
299 extents_state_get(const extents_t *extents) {
300 	return extents->state;
301 }
302 
303 size_t
304 extents_npages_get(extents_t *extents) {
305 	return atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
306 }
307 
308 static void
309 extents_insert_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) {
310 	malloc_mutex_assert_owner(tsdn, &extents->mtx);
311 	assert(extent_state_get(extent) == extents->state);
312 
313 	size_t size = extent_size_get(extent);
314 	size_t psz = extent_size_quantize_floor(size);
315 	pszind_t pind = sz_psz2ind(psz);
316 	if (extent_heap_empty(&extents->heaps[pind])) {
317 		bitmap_unset(extents->bitmap, &extents_bitmap_info,
318 		    (size_t)pind);
319 	}
320 	extent_heap_insert(&extents->heaps[pind], extent);
321 	extent_list_append(&extents->lru, extent);
322 	size_t npages = size >> LG_PAGE;
323 	/*
324 	 * All modifications to npages hold the mutex (as asserted above), so we
325 	 * don't need an atomic fetch-add; we can get by with a load followed by
326 	 * a store.
327 	 */
328 	size_t cur_extents_npages =
329 	    atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
330 	atomic_store_zu(&extents->npages, cur_extents_npages + npages,
331 	    ATOMIC_RELAXED);
332 }
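/*
 * The npages counter is atomic only because extents_npages_get() reads it
 * without holding extents->mtx; all writers are serialized by the mutex, so
 * relaxed ordering suffices on both sides.
 */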
333 
334 static void
335 extents_remove_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) {
336 	malloc_mutex_assert_owner(tsdn, &extents->mtx);
337 	assert(extent_state_get(extent) == extents->state);
338 
339 	size_t size = extent_size_get(extent);
340 	size_t psz = extent_size_quantize_floor(size);
341 	pszind_t pind = sz_psz2ind(psz);
342 	extent_heap_remove(&extents->heaps[pind], extent);
343 	if (extent_heap_empty(&extents->heaps[pind])) {
344 		bitmap_set(extents->bitmap, &extents_bitmap_info,
345 		    (size_t)pind);
346 	}
347 	extent_list_remove(&extents->lru, extent);
348 	size_t npages = size >> LG_PAGE;
349 	/*
350 	 * As in extents_insert_locked, we hold extents->mtx and so don't need
351 	 * atomic operations for updating extents->npages.
352 	 */
353 	size_t cur_extents_npages =
354 	    atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
355 	assert(cur_extents_npages >= npages);
356 	atomic_store_zu(&extents->npages,
357 	    cur_extents_npages - (size >> LG_PAGE), ATOMIC_RELAXED);
358 }
359 
360 /*
361  * Find an extent with size [min_size, max_size) to satisfy the alignment
362  * requirement.  For each size, try only the first extent in the heap.
363  */
364 static extent_t *
365 extents_fit_alignment(extents_t *extents, size_t min_size, size_t max_size,
366     size_t alignment) {
367         pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(min_size));
368         pszind_t pind_max = sz_psz2ind(extent_size_quantize_ceil(max_size));
369 
370 	for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap,
371 	    &extents_bitmap_info, (size_t)pind); i < pind_max; i =
372 	    (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
373 	    (size_t)i+1)) {
374 		assert(i < NPSIZES);
375 		assert(!extent_heap_empty(&extents->heaps[i]));
376 		extent_t *extent = extent_heap_first(&extents->heaps[i]);
377 		uintptr_t base = (uintptr_t)extent_base_get(extent);
378 		size_t candidate_size = extent_size_get(extent);
379 		assert(candidate_size >= min_size);
380 
381 		uintptr_t next_align = ALIGNMENT_CEILING((uintptr_t)base,
382 		    PAGE_CEILING(alignment));
383 		if (base > next_align || base + candidate_size <= next_align) {
384 			/* Overflow or not crossing the next alignment. */
385 			continue;
386 		}
387 
388 		size_t leadsize = next_align - base;
389 		if (candidate_size - leadsize >= min_size) {
390 			return extent;
391 		}
392 	}
393 
394 	return NULL;
395 }
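/*
 * E.g. for a one-page request with 64 KiB alignment: an extent whose base is
 * 0x10f000 has next_align 0x110000 and leadsize 0x1000, so it is returned as
 * long as its size is at least leadsize + min_size (two pages here).
 */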
396 
397 // ANDROID
398 // The best-fit selection is reported to possibly cause a memory leak.
399 // This code has been completely removed from 5.2.0, so remove it from
400 // our tree rather than risk a leak.
401 // See https://github.com/jemalloc/jemalloc/issues/1454
402 #if 0
403 /* Do any-best-fit extent selection, i.e. select any extent that best fits. */
404 static extent_t *
405 extents_best_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
406     size_t size) {
407 	pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size));
408 	pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
409 	    (size_t)pind);
410 	if (i < NPSIZES+1) {
411 		/*
412 		 * In order to reduce fragmentation, avoid reusing and splitting
413 		 * large extents for much smaller sizes.
414 		 */
415 		if ((sz_pind2sz(i) >> opt_lg_extent_max_active_fit) > size) {
416 			return NULL;
417 		}
418 		assert(!extent_heap_empty(&extents->heaps[i]));
419 		extent_t *extent = extent_heap_first(&extents->heaps[i]);
420 		assert(extent_size_get(extent) >= size);
421 		return extent;
422 	}
423 
424 	return NULL;
425 }
426 #endif
427 
428 /*
429  * Do first-fit extent selection, i.e. select the oldest/lowest extent that is
430  * large enough.
431  */
432 static extent_t *
433 extents_first_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
434     size_t size) {
435 	extent_t *ret = NULL;
436 
437 	pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size));
438 	for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap,
439 	    &extents_bitmap_info, (size_t)pind); i < NPSIZES+1; i =
440 	    (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
441 	    (size_t)i+1)) {
442 		assert(!extent_heap_empty(&extents->heaps[i]));
443 		extent_t *extent = extent_heap_first(&extents->heaps[i]);
444 		assert(extent_size_get(extent) >= size);
445 		if (ret == NULL || extent_snad_comp(extent, ret) < 0) {
446 			ret = extent;
447 		}
448 		if (i == NPSIZES) {
449 			break;
450 		}
451 		assert(i < NPSIZES);
452 	}
453 
454 	return ret;
455 }
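/*
 * Note that this scans every nonempty size class that could hold the request
 * and keeps the candidate with the lowest (sn, addr) per extent_snad_comp(),
 * i.e. the oldest/lowest-addressed fit overall, not merely the first class hit.
 */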
456 
457 /*
458  * Do first-fit extent selection.  Upstream also offered best-fit selection
459  * (chosen via extents->delay_coalesce), which requires less searching but has
460  * a less stable layout policy and may cause higher virtual memory
461  * fragmentation; that path has been removed here (see the ANDROID note above).
462  */
463 static extent_t *
464 extents_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
465     size_t esize, size_t alignment) {
466 	malloc_mutex_assert_owner(tsdn, &extents->mtx);
467 
468 	size_t max_size = esize + PAGE_CEILING(alignment) - PAGE;
469 	/* Beware size_t wrap-around. */
470 	if (max_size < esize) {
471 		return NULL;
472 	}
473 
474 // ANDROID
475 // The best-fit selection is reported to possibly cause a memory leak.
476 // This code has been completely removed from 5.2.0, so remove it from
477 // our tree rather than risk a leak.
478 // See https://github.com/jemalloc/jemalloc/issues/1454
479 #if 0
480 	extent_t *extent = extents->delay_coalesce ?
481 	    extents_best_fit_locked(tsdn, arena, extents, max_size) :
482 	    extents_first_fit_locked(tsdn, arena, extents, max_size);
483 #endif
484 	extent_t *extent =
485 	    extents_first_fit_locked(tsdn, arena, extents, max_size);
486 
487 	if (alignment > PAGE && extent == NULL) {
488 		/*
489 		 * max_size guarantees the alignment requirement but is rather
490 		 * pessimistic.  Next we try to satisfy the aligned allocation
491 		 * with sizes in [esize, max_size).
492 		 */
493 		extent = extents_fit_alignment(extents, esize, max_size,
494 		    alignment);
495 	}
496 
497 	return extent;
498 }
499 
500 static bool
501 extent_try_delayed_coalesce(tsdn_t *tsdn, arena_t *arena,
502     extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
503     extent_t *extent) {
504 	extent_state_set(extent, extent_state_active);
505 	bool coalesced;
506 	extent = extent_try_coalesce(tsdn, arena, r_extent_hooks, rtree_ctx,
507 	    extents, extent, &coalesced, false);
508 	extent_state_set(extent, extents_state_get(extents));
509 
510 	if (!coalesced) {
511 		return true;
512 	}
513 	extents_insert_locked(tsdn, extents, extent);
514 	return false;
515 }
516 
517 extent_t *
518 extents_alloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
519     extents_t *extents, void *new_addr, size_t size, size_t pad,
520     size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
521 	assert(size + pad != 0);
522 	assert(alignment != 0);
523 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
524 	    WITNESS_RANK_CORE, 0);
525 
526 	extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks, extents,
527 	    new_addr, size, pad, alignment, slab, szind, zero, commit, false);
528 	assert(extent == NULL || extent_dumpable_get(extent));
529 	return extent;
530 }
531 
532 void
533 extents_dalloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
534     extents_t *extents, extent_t *extent) {
535 	assert(extent_base_get(extent) != NULL);
536 	assert(extent_size_get(extent) != 0);
537 	assert(extent_dumpable_get(extent));
538 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
539 	    WITNESS_RANK_CORE, 0);
540 
541 	extent_addr_set(extent, extent_base_get(extent));
542 	extent_zeroed_set(extent, false);
543 
544 	extent_record(tsdn, arena, r_extent_hooks, extents, extent, false);
545 }
546 
547 extent_t *
548 extents_evict(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
549     extents_t *extents, size_t npages_min) {
550 	rtree_ctx_t rtree_ctx_fallback;
551 	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
552 
553 	malloc_mutex_lock(tsdn, &extents->mtx);
554 
555 	/*
556 	 * Get the LRU coalesced extent, if any.  If coalescing was delayed,
557 	 * the loop will iterate until the LRU extent is fully coalesced.
558 	 */
559 	extent_t *extent;
560 	while (true) {
561 		/* Get the LRU extent, if any. */
562 		extent = extent_list_first(&extents->lru);
563 		if (extent == NULL) {
564 			goto label_return;
565 		}
566 		/* Check the eviction limit. */
567 		size_t extents_npages = atomic_load_zu(&extents->npages,
568 		    ATOMIC_RELAXED);
569 		if (extents_npages <= npages_min) {
570 			extent = NULL;
571 			goto label_return;
572 		}
573 		extents_remove_locked(tsdn, extents, extent);
574 		if (!extents->delay_coalesce) {
575 			break;
576 		}
577 		/* Try to coalesce. */
578 		if (extent_try_delayed_coalesce(tsdn, arena, r_extent_hooks,
579 		    rtree_ctx, extents, extent)) {
580 			break;
581 		}
582 		/*
583 		 * The LRU extent was just coalesced and the result placed in
584 		 * the LRU at its neighbor's position.  Start over.
585 		 */
586 	}
587 
588 	/*
589 	 * Either mark the extent active or deregister it to protect against
590 	 * concurrent operations.
591 	 */
592 	switch (extents_state_get(extents)) {
593 	case extent_state_active:
594 		not_reached();
595 	case extent_state_dirty:
596 	case extent_state_muzzy:
597 		extent_state_set(extent, extent_state_active);
598 		break;
599 	case extent_state_retained:
600 		extent_deregister(tsdn, extent);
601 		break;
602 	default:
603 		not_reached();
604 	}
605 
606 label_return:
607 	malloc_mutex_unlock(tsdn, &extents->mtx);
608 	return extent;
609 }
610 
611 static void
612 extents_leak(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
613     extents_t *extents, extent_t *extent, bool growing_retained) {
614 	/*
615 	 * Leak extent after making sure its pages have already been purged, so
616 	 * that this is only a virtual memory leak.
617 	 */
618 	if (extents_state_get(extents) == extent_state_dirty) {
619 		if (extent_purge_lazy_impl(tsdn, arena, r_extent_hooks,
620 		    extent, 0, extent_size_get(extent), growing_retained)) {
621 			extent_purge_forced_impl(tsdn, arena, r_extent_hooks,
622 			    extent, 0, extent_size_get(extent),
623 			    growing_retained);
624 		}
625 	}
626 	extent_dalloc(tsdn, arena, extent);
627 }
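/*
 * "Leaking" abandons only the extent's virtual address range (it is never
 * unmapped); the pages are purged here for dirty extents (and are assumed
 * already purged otherwise), and the extent_t itself is recycled via
 * extent_dalloc() above.
 */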
628 
629 void
630 extents_prefork(tsdn_t *tsdn, extents_t *extents) {
631 	malloc_mutex_prefork(tsdn, &extents->mtx);
632 }
633 
634 void
635 extents_postfork_parent(tsdn_t *tsdn, extents_t *extents) {
636 	malloc_mutex_postfork_parent(tsdn, &extents->mtx);
637 }
638 
639 void
640 extents_postfork_child(tsdn_t *tsdn, extents_t *extents) {
641 	malloc_mutex_postfork_child(tsdn, &extents->mtx);
642 }
643 
644 static void
645 extent_deactivate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
646     extent_t *extent) {
647 	assert(extent_arena_get(extent) == arena);
648 	assert(extent_state_get(extent) == extent_state_active);
649 
650 	extent_state_set(extent, extents_state_get(extents));
651 	extents_insert_locked(tsdn, extents, extent);
652 }
653 
654 static void
655 extent_deactivate(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
656     extent_t *extent) {
657 	malloc_mutex_lock(tsdn, &extents->mtx);
658 	extent_deactivate_locked(tsdn, arena, extents, extent);
659 	malloc_mutex_unlock(tsdn, &extents->mtx);
660 }
661 
662 static void
663 extent_activate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
664     extent_t *extent) {
665 	assert(extent_arena_get(extent) == arena);
666 	assert(extent_state_get(extent) == extents_state_get(extents));
667 
668 	extents_remove_locked(tsdn, extents, extent);
669 	extent_state_set(extent, extent_state_active);
670 }
671 
672 static bool
673 extent_rtree_leaf_elms_lookup(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
674     const extent_t *extent, bool dependent, bool init_missing,
675     rtree_leaf_elm_t **r_elm_a, rtree_leaf_elm_t **r_elm_b) {
676 	*r_elm_a = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx,
677 	    (uintptr_t)extent_base_get(extent), dependent, init_missing);
678 	if (!dependent && *r_elm_a == NULL) {
679 		return true;
680 	}
681 	assert(*r_elm_a != NULL);
682 
683 	*r_elm_b = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx,
684 	    (uintptr_t)extent_last_get(extent), dependent, init_missing);
685 	if (!dependent && *r_elm_b == NULL) {
686 		return true;
687 	}
688 	assert(*r_elm_b != NULL);
689 
690 	return false;
691 }
692 
693 static void
694 extent_rtree_write_acquired(tsdn_t *tsdn, rtree_leaf_elm_t *elm_a,
695     rtree_leaf_elm_t *elm_b, extent_t *extent, szind_t szind, bool slab) {
696 	rtree_leaf_elm_write(tsdn, &extents_rtree, elm_a, extent, szind, slab);
697 	if (elm_b != NULL) {
698 		rtree_leaf_elm_write(tsdn, &extents_rtree, elm_b, extent, szind,
699 		    slab);
700 	}
701 }
702 
703 static void
704 extent_interior_register(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, extent_t *extent,
705     szind_t szind) {
706 	assert(extent_slab_get(extent));
707 
708 	/* Register interior. */
709 	for (size_t i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
710 		rtree_write(tsdn, &extents_rtree, rtree_ctx,
711 		    (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
712 		    LG_PAGE), extent, szind, true);
713 	}
714 }
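/*
 * The loop above intentionally skips the first and last pages: their rtree
 * entries are written by extent_rtree_write_acquired() when the extent is
 * registered, so only the strictly interior pages need entries here.
 */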
715 
716 static void
717 extent_gdump_add(tsdn_t *tsdn, const extent_t *extent) {
718 	cassert(config_prof);
719 	/* prof_gdump() requirement. */
720 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
721 	    WITNESS_RANK_CORE, 0);
722 
723 	if (opt_prof && extent_state_get(extent) == extent_state_active) {
724 		size_t nadd = extent_size_get(extent) >> LG_PAGE;
725 		size_t cur = atomic_fetch_add_zu(&curpages, nadd,
726 		    ATOMIC_RELAXED) + nadd;
727 		size_t high = atomic_load_zu(&highpages, ATOMIC_RELAXED);
728 		while (cur > high && !atomic_compare_exchange_weak_zu(
729 		    &highpages, &high, cur, ATOMIC_RELAXED, ATOMIC_RELAXED)) {
730 			/*
731 			 * Don't refresh cur, because it may have decreased
732 			 * since this thread lost the highpages update race.
733 			 * Note that high is updated in case of CAS failure.
734 			 */
735 		}
736 		if (cur > high && prof_gdump_get_unlocked()) {
737 			prof_gdump(tsdn);
738 		}
739 	}
740 }
741 
742 static void
743 extent_gdump_sub(tsdn_t *tsdn, const extent_t *extent) {
744 	cassert(config_prof);
745 
746 	if (opt_prof && extent_state_get(extent) == extent_state_active) {
747 		size_t nsub = extent_size_get(extent) >> LG_PAGE;
748 		assert(atomic_load_zu(&curpages, ATOMIC_RELAXED) >= nsub);
749 		atomic_fetch_sub_zu(&curpages, nsub, ATOMIC_RELAXED);
750 	}
751 }
752 
753 static bool
754 extent_register_impl(tsdn_t *tsdn, extent_t *extent, bool gdump_add) {
755 	rtree_ctx_t rtree_ctx_fallback;
756 	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
757 	rtree_leaf_elm_t *elm_a, *elm_b;
758 
759 	/*
760 	 * We need to hold the lock to protect against a concurrent coalesce
761 	 * operation that sees us in a partial state.
762 	 */
763 	extent_lock(tsdn, extent);
764 
765 	if (extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, false, true,
766 	    &elm_a, &elm_b)) {
767 		return true;
768 	}
769 
770 	szind_t szind = extent_szind_get_maybe_invalid(extent);
771 	bool slab = extent_slab_get(extent);
772 	extent_rtree_write_acquired(tsdn, elm_a, elm_b, extent, szind, slab);
773 	if (slab) {
774 		extent_interior_register(tsdn, rtree_ctx, extent, szind);
775 	}
776 
777 	extent_unlock(tsdn, extent);
778 
779 	if (config_prof && gdump_add) {
780 		extent_gdump_add(tsdn, extent);
781 	}
782 
783 	return false;
784 }
785 
786 static bool
787 extent_register(tsdn_t *tsdn, extent_t *extent) {
788 	return extent_register_impl(tsdn, extent, true);
789 }
790 
791 static bool
792 extent_register_no_gdump_add(tsdn_t *tsdn, extent_t *extent) {
793 	return extent_register_impl(tsdn, extent, false);
794 }
795 
796 static void
797 extent_reregister(tsdn_t *tsdn, extent_t *extent) {
798 	bool err = extent_register(tsdn, extent);
799 	assert(!err);
800 }
801 
802 /*
803  * Removes all pointers to the given extent from the global rtree indices for
804  * its interior.  This is relevant for slab extents, for which we need to do
805  * metadata lookups at places other than the head of the extent.  We deregister
806  * on the interior, then, when an extent moves from being an active slab to an
807  * inactive state.
808  */
809 static void
810 extent_interior_deregister(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
811     extent_t *extent) {
812 	size_t i;
813 
814 	assert(extent_slab_get(extent));
815 
816 	for (i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
817 		rtree_clear(tsdn, &extents_rtree, rtree_ctx,
818 		    (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
819 		    LG_PAGE));
820 	}
821 }
822 
823 /*
824  * Removes all pointers to the given extent from the global rtree.
825  */
826 static void
827 extent_deregister_impl(tsdn_t *tsdn, extent_t *extent, bool gdump) {
828 	rtree_ctx_t rtree_ctx_fallback;
829 	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
830 	rtree_leaf_elm_t *elm_a, *elm_b;
831 	extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, true, false,
832 	    &elm_a, &elm_b);
833 
834 	extent_lock(tsdn, extent);
835 
836 	extent_rtree_write_acquired(tsdn, elm_a, elm_b, NULL, NSIZES, false);
837 	if (extent_slab_get(extent)) {
838 		extent_interior_deregister(tsdn, rtree_ctx, extent);
839 		extent_slab_set(extent, false);
840 	}
841 
842 	extent_unlock(tsdn, extent);
843 
844 	if (config_prof && gdump) {
845 		extent_gdump_sub(tsdn, extent);
846 	}
847 }
848 
849 static void
850 extent_deregister(tsdn_t *tsdn, extent_t *extent) {
851 	extent_deregister_impl(tsdn, extent, true);
852 }
853 
854 static void
855 extent_deregister_no_gdump_sub(tsdn_t *tsdn, extent_t *extent) {
856 	extent_deregister_impl(tsdn, extent, false);
857 }
858 
859 /*
860  * Tries to find and remove an extent from extents that can be used for the
861  * given allocation request.
862  */
863 static extent_t *
864 extent_recycle_extract(tsdn_t *tsdn, arena_t *arena,
865     extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
866     void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
867     bool growing_retained) {
868 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
869 	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
870 	assert(alignment > 0);
871 	if (config_debug && new_addr != NULL) {
872 		/*
873 		 * Non-NULL new_addr has two use cases:
874 		 *
875 		 *   1) Recycle a known-extant extent, e.g. during purging.
876 		 *   2) Perform in-place expanding reallocation.
877 		 *
878 		 * Regardless of use case, new_addr must either refer to a
879 		 * non-existing extent, or to the base of an extant extent,
880 		 * since only active slabs support interior lookups (which of
881 		 * course cannot be recycled).
882 		 */
883 		assert(PAGE_ADDR2BASE(new_addr) == new_addr);
884 		assert(pad == 0);
885 		assert(alignment <= PAGE);
886 	}
887 
888 	size_t esize = size + pad;
889 	malloc_mutex_lock(tsdn, &extents->mtx);
890 	extent_hooks_assure_initialized(arena, r_extent_hooks);
891 	extent_t *extent;
892 	if (new_addr != NULL) {
893 		extent = extent_lock_from_addr(tsdn, rtree_ctx, new_addr);
894 		if (extent != NULL) {
895 			/*
896 			 * We might null-out extent to report an error, but we
897 			 * still need to unlock the associated mutex afterwards.
898 			 */
899 			extent_t *unlock_extent = extent;
900 			assert(extent_base_get(extent) == new_addr);
901 			if (extent_arena_get(extent) != arena ||
902 			    extent_size_get(extent) < esize ||
903 			    extent_state_get(extent) !=
904 			    extents_state_get(extents)) {
905 				extent = NULL;
906 			}
907 			extent_unlock(tsdn, unlock_extent);
908 		}
909 	} else {
910 		extent = extents_fit_locked(tsdn, arena, extents, esize,
911 		    alignment);
912 	}
913 	if (extent == NULL) {
914 		malloc_mutex_unlock(tsdn, &extents->mtx);
915 		return NULL;
916 	}
917 
918 	extent_activate_locked(tsdn, arena, extents, extent);
919 	malloc_mutex_unlock(tsdn, &extents->mtx);
920 
921 	return extent;
922 }
923 
924 /*
925  * Given an allocation request and an extent guaranteed to be able to satisfy
926  * it, this splits off lead and trail extents, leaving extent pointing to an
927  * extent satisfying the allocation.
928  * This function doesn't put lead or trail into any extents_t; it's the caller's
929  * job to ensure that they can be reused.
930  */
931 typedef enum {
932 	/*
933 	 * Split successfully.  lead, extent, and trail are modified to extents
934 	 * describing the ranges before, in, and after the given allocation.
935 	 */
936 	extent_split_interior_ok,
937 	/*
938 	 * The extent can't satisfy the given allocation request.  None of the
939 	 * input extent_t *s are touched.
940 	 */
941 	extent_split_interior_cant_alloc,
942 	/*
943 	 * In a potentially invalid state.  Must leak (if *to_leak is non-NULL),
944 	 * and salvage what's still salvageable (if *to_salvage is non-NULL).
945 	 * None of lead, extent, or trail are valid.
946 	 */
947 	extent_split_interior_error
948 } extent_split_interior_result_t;
949 
950 static extent_split_interior_result_t
951 extent_split_interior(tsdn_t *tsdn, arena_t *arena,
952     extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx,
953     /* The result of splitting, in case of success. */
954     extent_t **extent, extent_t **lead, extent_t **trail,
955     /* The mess to clean up, in case of error. */
956     extent_t **to_leak, extent_t **to_salvage,
957     void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
958     szind_t szind, bool growing_retained) {
959 	size_t esize = size + pad;
960 	size_t leadsize = ALIGNMENT_CEILING((uintptr_t)extent_base_get(*extent),
961 	    PAGE_CEILING(alignment)) - (uintptr_t)extent_base_get(*extent);
962 	assert(new_addr == NULL || leadsize == 0);
963 	if (extent_size_get(*extent) < leadsize + esize) {
964 		return extent_split_interior_cant_alloc;
965 	}
966 	size_t trailsize = extent_size_get(*extent) - leadsize - esize;
967 
968 	*lead = NULL;
969 	*trail = NULL;
970 	*to_leak = NULL;
971 	*to_salvage = NULL;
972 
973 	/* Split the lead. */
974 	if (leadsize != 0) {
975 		*lead = *extent;
976 		*extent = extent_split_impl(tsdn, arena, r_extent_hooks,
977 		    *lead, leadsize, NSIZES, false, esize + trailsize, szind,
978 		    slab, growing_retained);
979 		if (*extent == NULL) {
980 			*to_leak = *lead;
981 			*lead = NULL;
982 			return extent_split_interior_error;
983 		}
984 	}
985 
986 	/* Split the trail. */
987 	if (trailsize != 0) {
988 		*trail = extent_split_impl(tsdn, arena, r_extent_hooks, *extent,
989 		    esize, szind, slab, trailsize, NSIZES, false,
990 		    growing_retained);
991 		if (*trail == NULL) {
992 			*to_leak = *extent;
993 			*to_salvage = *lead;
994 			*lead = NULL;
995 			*extent = NULL;
996 			return extent_split_interior_error;
997 		}
998 	}
999 
1000 	if (leadsize == 0 && trailsize == 0) {
1001 		/*
1002 		 * Splitting causes szind to be set as a side effect, but no
1003 		 * splitting occurred.
1004 		 */
1005 		extent_szind_set(*extent, szind);
1006 		if (szind != NSIZES) {
1007 			rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
1008 			    (uintptr_t)extent_addr_get(*extent), szind, slab);
1009 			if (slab && extent_size_get(*extent) > PAGE) {
1010 				rtree_szind_slab_update(tsdn, &extents_rtree,
1011 				    rtree_ctx,
1012 				    (uintptr_t)extent_past_get(*extent) -
1013 				    (uintptr_t)PAGE, szind, slab);
1014 			}
1015 		}
1016 	}
1017 
1018 	return extent_split_interior_ok;
1019 }
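/*
 * Rough layout of a successful split of the incoming *extent:
 *
 *	|<- leadsize ->|<-- esize = size + pad -->|<- trailsize ->|
 *	     *lead               *extent                *trail
 *
 * leadsize aligns *extent's base to the requested alignment; *lead and *trail
 * are NULL when the corresponding size is zero.
 */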
1020 
1021 /*
1022  * This fulfills the indicated allocation request out of the given extent (which
1023  * the caller should have ensured was big enough).  If there's any unused space
1024  * before or after the resulting allocation, that space is given its own extent
1025  * and put back into extents.
1026  */
1027 static extent_t *
1028 extent_recycle_split(tsdn_t *tsdn, arena_t *arena,
1029     extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
1030     void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
1031     szind_t szind, extent_t *extent, bool growing_retained) {
1032 	extent_t *lead;
1033 	extent_t *trail;
1034 	extent_t *to_leak;
1035 	extent_t *to_salvage;
1036 
1037 	extent_split_interior_result_t result = extent_split_interior(
1038 	    tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail,
1039 	    &to_leak, &to_salvage, new_addr, size, pad, alignment, slab, szind,
1040 	    growing_retained);
1041 
1042 	if (result == extent_split_interior_ok) {
1043 		if (lead != NULL) {
1044 			extent_deactivate(tsdn, arena, extents, lead);
1045 		}
1046 		if (trail != NULL) {
1047 			extent_deactivate(tsdn, arena, extents, trail);
1048 		}
1049 		return extent;
1050 	} else {
1051 		/*
1052 		 * We should have picked an extent that was large enough to
1053 		 * fulfill our allocation request.
1054 		 */
1055 		assert(result == extent_split_interior_error);
1056 		if (to_salvage != NULL) {
1057 			extent_deregister(tsdn, to_salvage);
1058 		}
1059 		if (to_leak != NULL) {
1060 			void *leak = extent_base_get(to_leak);
1061 			extent_deregister_no_gdump_sub(tsdn, to_leak);
1062 			extents_leak(tsdn, arena, r_extent_hooks, extents,
1063 			    to_leak, growing_retained);
1064 			assert(extent_lock_from_addr(tsdn, rtree_ctx, leak)
1065 			    == NULL);
1066 		}
1067 		return NULL;
1068 	}
1069 	unreachable();
1070 }
1071 
1072 /*
1073  * Tries to satisfy the given allocation request by reusing one of the extents
1074  * in the given extents_t.
1075  */
1076 static extent_t *
1077 extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
1078     extents_t *extents, void *new_addr, size_t size, size_t pad,
1079     size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit,
1080     bool growing_retained) {
1081 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1082 	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
1083 	assert(new_addr == NULL || !slab);
1084 	assert(pad == 0 || !slab);
1085 	assert(!*zero || !slab);
1086 
1087 	rtree_ctx_t rtree_ctx_fallback;
1088 	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
1089 
1090 	extent_t *extent = extent_recycle_extract(tsdn, arena, r_extent_hooks,
1091 	    rtree_ctx, extents, new_addr, size, pad, alignment, slab,
1092 	    growing_retained);
1093 	if (extent == NULL) {
1094 		return NULL;
1095 	}
1096 
1097 	extent = extent_recycle_split(tsdn, arena, r_extent_hooks, rtree_ctx,
1098 	    extents, new_addr, size, pad, alignment, slab, szind, extent,
1099 	    growing_retained);
1100 	if (extent == NULL) {
1101 		return NULL;
1102 	}
1103 
1104 	if (*commit && !extent_committed_get(extent)) {
1105 		if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent,
1106 		    0, extent_size_get(extent), growing_retained)) {
1107 			extent_record(tsdn, arena, r_extent_hooks, extents,
1108 			    extent, growing_retained);
1109 			return NULL;
1110 		}
1111 		extent_zeroed_set(extent, true);
1112 	}
1113 
1114 	if (extent_committed_get(extent)) {
1115 		*commit = true;
1116 	}
1117 	if (extent_zeroed_get(extent)) {
1118 		*zero = true;
1119 	}
1120 
1121 	if (pad != 0) {
1122 		extent_addr_randomize(tsdn, extent, alignment);
1123 	}
1124 	assert(extent_state_get(extent) == extent_state_active);
1125 	if (slab) {
1126 		extent_slab_set(extent, slab);
1127 		extent_interior_register(tsdn, rtree_ctx, extent, szind);
1128 	}
1129 
1130 	if (*zero) {
1131 		void *addr = extent_base_get(extent);
1132 		size_t size = extent_size_get(extent);
1133 		if (!extent_zeroed_get(extent)) {
1134 			if (pages_purge_forced(addr, size)) {
1135 				memset(addr, 0, size);
1136 			}
1137 		} else if (config_debug) {
1138 			size_t *p = (size_t *)(uintptr_t)addr;
1139 			for (size_t i = 0; i < size / sizeof(size_t); i++) {
1140 				assert(p[i] == 0);
1141 			}
1142 		}
1143 	}
1144 	return extent;
1145 }
1146 
1147 /*
1148  * If the caller specifies (!*zero), it is still possible to receive zeroed
1149  * memory, in which case *zero is toggled to true.  arena_extent_alloc() takes
1150  * advantage of this to avoid demanding zeroed extents, but taking advantage of
1151  * them if they are returned.
1152  */
1153 static void *
1154 extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
1155     size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec) {
1156 	void *ret;
1157 
1158 	assert(size != 0);
1159 	assert(alignment != 0);
1160 
1161 	/* "primary" dss. */
1162 	if (have_dss && dss_prec == dss_prec_primary && (ret =
1163 	    extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
1164 	    commit)) != NULL) {
1165 		return ret;
1166 	}
1167 	/* mmap. */
1168 	if ((ret = extent_alloc_mmap(new_addr, size, alignment, zero, commit))
1169 	    != NULL) {
1170 		return ret;
1171 	}
1172 	/* "secondary" dss. */
1173 	if (have_dss && dss_prec == dss_prec_secondary && (ret =
1174 	    extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
1175 	    commit)) != NULL) {
1176 		return ret;
1177 	}
1178 
1179 	/* All strategies for allocation failed. */
1180 	return NULL;
1181 }
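/*
 * In other words, dss_prec selects the ordering: dss_prec_primary tries
 * sbrk-backed dss before mmap, dss_prec_secondary tries mmap before dss, and
 * dss_prec_disabled (or !have_dss) falls through to mmap only.
 */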
1182 
1183 static void *
1184 extent_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr,
1185     size_t size, size_t alignment, bool *zero, bool *commit) {
1186 	void *ret = extent_alloc_core(tsdn, arena, new_addr, size, alignment, zero,
1187 	    commit, (dss_prec_t)atomic_load_u(&arena->dss_prec,
1188 	    ATOMIC_RELAXED));
1189 	if (have_madvise_huge && ret) {
1190 		pages_set_thp_state(ret, size);
1191 	}
1192 	return ret;
1193 }
1194 
1195 static void *
1196 extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
1197     size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
1198 	tsdn_t *tsdn;
1199 	arena_t *arena;
1200 
1201 	tsdn = tsdn_fetch();
1202 	arena = arena_get(tsdn, arena_ind, false);
1203 	/*
1204 	 * The arena we're allocating on behalf of must have been initialized
1205 	 * already.
1206 	 */
1207 	assert(arena != NULL);
1208 
1209 	return extent_alloc_default_impl(tsdn, arena, new_addr, size,
1210 	    alignment, zero, commit);
1211 }
1212 
1213 static void
1214 extent_hook_pre_reentrancy(tsdn_t *tsdn, arena_t *arena) {
1215 	tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
1216 	if (arena == arena_get(tsd_tsdn(tsd), 0, false)) {
1217 		/*
1218 		 * The only legitimate case of customized extent hooks for a0 is
1219 		 * hooks with no allocation activities.  One such example is to
1220 		 * place metadata on pre-allocated resources such as huge pages.
1221 		 * In that case, rely on reentrancy_level checks to catch
1222 		 * infinite recursions.
1223 		 */
1224 		pre_reentrancy(tsd, NULL);
1225 	} else {
1226 		pre_reentrancy(tsd, arena);
1227 	}
1228 }
1229 
1230 static void
1231 extent_hook_post_reentrancy(tsdn_t *tsdn) {
1232 	tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
1233 	post_reentrancy(tsd);
1234 }
1235 
1236 /*
1237  * If virtual memory is retained, create increasingly larger extents from which
1238  * to split requested extents in order to limit the total number of disjoint
1239  * virtual memory ranges retained by each arena.
1240  */
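/*
 * extent_grow_next indexes the page-size class series, so successive calls
 * request geometrically growing mappings.  E.g. (hypothetical numbers) if the
 * current class is 256 KiB and the request needs 320 KiB, egn_skip advances to
 * the first class >= 320 KiB, and on success extent_grow_next moves one class
 * beyond that, bounded by retain_grow_limit.
 */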
1241 static extent_t *
1242 extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
1243     extent_hooks_t **r_extent_hooks, size_t size, size_t pad, size_t alignment,
1244     bool slab, szind_t szind, bool *zero, bool *commit) {
1245 	malloc_mutex_assert_owner(tsdn, &arena->extent_grow_mtx);
1246 	assert(pad == 0 || !slab);
1247 	assert(!*zero || !slab);
1248 
1249 	size_t esize = size + pad;
1250 	size_t alloc_size_min = esize + PAGE_CEILING(alignment) - PAGE;
1251 	/* Beware size_t wrap-around. */
1252 	if (alloc_size_min < esize) {
1253 		goto label_err;
1254 	}
1255 	/*
1256 	 * Find the next extent size in the series that would be large enough to
1257 	 * satisfy this request.
1258 	 */
1259 	pszind_t egn_skip = 0;
1260 	size_t alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
1261 	while (alloc_size < alloc_size_min) {
1262 		egn_skip++;
1263 		if (arena->extent_grow_next + egn_skip == NPSIZES) {
1264 			/* Outside legal range. */
1265 			goto label_err;
1266 		}
1267 		assert(arena->extent_grow_next + egn_skip < NPSIZES);
1268 		alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
1269 	}
1270 
1271 	extent_t *extent = extent_alloc(tsdn, arena);
1272 	if (extent == NULL) {
1273 		goto label_err;
1274 	}
1275 	bool zeroed = false;
1276 	bool committed = false;
1277 
1278 	void *ptr;
1279 	if (*r_extent_hooks == &extent_hooks_default) {
1280 		ptr = extent_alloc_default_impl(tsdn, arena, NULL,
1281 		    alloc_size, PAGE, &zeroed, &committed);
1282 	} else {
1283 		extent_hook_pre_reentrancy(tsdn, arena);
1284 		ptr = (*r_extent_hooks)->alloc(*r_extent_hooks, NULL,
1285 		    alloc_size, PAGE, &zeroed, &committed,
1286 		    arena_ind_get(arena));
1287 		extent_hook_post_reentrancy(tsdn);
1288 	}
1289 
1290 	extent_init(extent, arena, ptr, alloc_size, false, NSIZES,
1291 	    arena_extent_sn_next(arena), extent_state_active, zeroed,
1292 	    committed, true);
1293 	if (ptr == NULL) {
1294 		extent_dalloc(tsdn, arena, extent);
1295 		goto label_err;
1296 	}
1297 
1298 	if (extent_register_no_gdump_add(tsdn, extent)) {
1299 		extents_leak(tsdn, arena, r_extent_hooks,
1300 		    &arena->extents_retained, extent, true);
1301 		goto label_err;
1302 	}
1303 
1304 	if (extent_zeroed_get(extent) && extent_committed_get(extent)) {
1305 		*zero = true;
1306 	}
1307 	if (extent_committed_get(extent)) {
1308 		*commit = true;
1309 	}
1310 
1311 	rtree_ctx_t rtree_ctx_fallback;
1312 	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
1313 
1314 	extent_t *lead;
1315 	extent_t *trail;
1316 	extent_t *to_leak;
1317 	extent_t *to_salvage;
1318 	extent_split_interior_result_t result = extent_split_interior(
1319 	    tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail,
1320 	    &to_leak, &to_salvage, NULL, size, pad, alignment, slab, szind,
1321 	    true);
1322 
1323 	if (result == extent_split_interior_ok) {
1324 		if (lead != NULL) {
1325 			extent_record(tsdn, arena, r_extent_hooks,
1326 			    &arena->extents_retained, lead, true);
1327 		}
1328 		if (trail != NULL) {
1329 			extent_record(tsdn, arena, r_extent_hooks,
1330 			    &arena->extents_retained, trail, true);
1331 		}
1332 	} else {
1333 		/*
1334 		 * We should have allocated a sufficiently large extent; the
1335 		 * cant_alloc case should not occur.
1336 		 */
1337 		assert(result == extent_split_interior_error);
1338 		if (to_salvage != NULL) {
1339 			if (config_prof) {
1340 				extent_gdump_add(tsdn, to_salvage);
1341 			}
1342 			extent_record(tsdn, arena, r_extent_hooks,
1343 			    &arena->extents_retained, to_salvage, true);
1344 		}
1345 		if (to_leak != NULL) {
1346 			extent_deregister_no_gdump_sub(tsdn, to_leak);
1347 			extents_leak(tsdn, arena, r_extent_hooks,
1348 			    &arena->extents_retained, to_leak, true);
1349 		}
1350 		goto label_err;
1351 	}
1352 
1353 	if (*commit && !extent_committed_get(extent)) {
1354 		if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent, 0,
1355 		    extent_size_get(extent), true)) {
1356 			extent_record(tsdn, arena, r_extent_hooks,
1357 			    &arena->extents_retained, extent, true);
1358 			goto label_err;
1359 		}
1360 		extent_zeroed_set(extent, true);
1361 	}
1362 
1363 	/*
1364 	 * Increment extent_grow_next if doing so wouldn't exceed the allowed
1365 	 * range.
1366 	 */
1367 	if (arena->extent_grow_next + egn_skip + 1 <=
1368 	    arena->retain_grow_limit) {
1369 		arena->extent_grow_next += egn_skip + 1;
1370 	} else {
1371 		arena->extent_grow_next = arena->retain_grow_limit;
1372 	}
1373 	/* All opportunities for failure are past. */
1374 	malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
1375 
1376 	if (config_prof) {
1377 		/* Adjust gdump stats now that extent is final size. */
1378 		extent_gdump_add(tsdn, extent);
1379 	}
1380 	if (pad != 0) {
1381 		extent_addr_randomize(tsdn, extent, alignment);
1382 	}
1383 	if (slab) {
1384 		rtree_ctx_t rtree_ctx_fallback;
1385 		rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
1386 		    &rtree_ctx_fallback);
1387 
1388 		extent_slab_set(extent, true);
1389 		extent_interior_register(tsdn, rtree_ctx, extent, szind);
1390 	}
1391 	if (*zero && !extent_zeroed_get(extent)) {
1392 		void *addr = extent_base_get(extent);
1393 		size_t size = extent_size_get(extent);
1394 		if (pages_purge_forced(addr, size)) {
1395 			memset(addr, 0, size);
1396 		}
1397 	}
1398 
1399 	return extent;
1400 label_err:
1401 	malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
1402 	return NULL;
1403 }
1404 
1405 static extent_t *
1406 extent_alloc_retained(tsdn_t *tsdn, arena_t *arena,
1407     extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
1408     size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
1409 	assert(size != 0);
1410 	assert(alignment != 0);
1411 
1412 	malloc_mutex_lock(tsdn, &arena->extent_grow_mtx);
1413 
1414 	extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks,
1415 	    &arena->extents_retained, new_addr, size, pad, alignment, slab,
1416 	    szind, zero, commit, true);
1417 	if (extent != NULL) {
1418 		malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
1419 		if (config_prof) {
1420 			extent_gdump_add(tsdn, extent);
1421 		}
1422 	} else if (opt_retain && new_addr == NULL) {
1423 		extent = extent_grow_retained(tsdn, arena, r_extent_hooks, size,
1424 		    pad, alignment, slab, szind, zero, commit);
1425 		/* extent_grow_retained() always releases extent_grow_mtx. */
1426 	} else {
1427 		malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
1428 	}
1429 	malloc_mutex_assert_not_owner(tsdn, &arena->extent_grow_mtx);
1430 
1431 	return extent;
1432 }
1433 
1434 static extent_t *
1435 extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena,
1436     extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
1437     size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
1438 	size_t esize = size + pad;
1439 	extent_t *extent = extent_alloc(tsdn, arena);
1440 	if (extent == NULL) {
1441 		return NULL;
1442 	}
1443 	void *addr;
1444 	if (*r_extent_hooks == &extent_hooks_default) {
1445 		/* Call directly to propagate tsdn. */
1446 		addr = extent_alloc_default_impl(tsdn, arena, new_addr, esize,
1447 		    alignment, zero, commit);
1448 	} else {
1449 		extent_hook_pre_reentrancy(tsdn, arena);
1450 		addr = (*r_extent_hooks)->alloc(*r_extent_hooks, new_addr,
1451 		    esize, alignment, zero, commit, arena_ind_get(arena));
1452 		extent_hook_post_reentrancy(tsdn);
1453 	}
1454 	if (addr == NULL) {
1455 		extent_dalloc(tsdn, arena, extent);
1456 		return NULL;
1457 	}
1458 	extent_init(extent, arena, addr, esize, slab, szind,
1459 	    arena_extent_sn_next(arena), extent_state_active, *zero, *commit,
1460 	    true);
1461 	if (pad != 0) {
1462 		extent_addr_randomize(tsdn, extent, alignment);
1463 	}
1464 	if (extent_register(tsdn, extent)) {
1465 		extents_leak(tsdn, arena, r_extent_hooks,
1466 		    &arena->extents_retained, extent, false);
1467 		return NULL;
1468 	}
1469 
1470 	return extent;
1471 }
1472 
1473 extent_t *
1474 extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
1475     extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
1476     size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
1477 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1478 	    WITNESS_RANK_CORE, 0);
1479 
1480 	extent_hooks_assure_initialized(arena, r_extent_hooks);
1481 
1482 	extent_t *extent = extent_alloc_retained(tsdn, arena, r_extent_hooks,
1483 	    new_addr, size, pad, alignment, slab, szind, zero, commit);
1484 	if (extent == NULL) {
1485 		if (opt_retain && new_addr != NULL) {
1486 			/*
1487 			 * When retain is enabled and new_addr is set, we do
1488 			 * not attempt extent_alloc_wrapper_hard, whose mmap
1489 			 * is very unlikely to succeed (unless it happens to
1490 			 * be at the end).
1491 			 */
1492 			return NULL;
1493 		}
1494 		extent = extent_alloc_wrapper_hard(tsdn, arena, r_extent_hooks,
1495 		    new_addr, size, pad, alignment, slab, szind, zero, commit);
1496 	}
1497 
1498 	assert(extent == NULL || extent_dumpable_get(extent));
1499 	return extent;
1500 }
1501 
1502 static bool
1503 extent_can_coalesce(arena_t *arena, extents_t *extents, const extent_t *inner,
1504     const extent_t *outer) {
1505 	assert(extent_arena_get(inner) == arena);
1506 	if (extent_arena_get(outer) != arena) {
1507 		return false;
1508 	}
1509 
1510 	assert(extent_state_get(inner) == extent_state_active);
1511 	if (extent_state_get(outer) != extents->state) {
1512 		return false;
1513 	}
1514 
1515 	if (extent_committed_get(inner) != extent_committed_get(outer)) {
1516 		return false;
1517 	}
1518 
1519 	return true;
1520 }
1521 
1522 static bool
1523 extent_coalesce(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
1524     extents_t *extents, extent_t *inner, extent_t *outer, bool forward,
1525     bool growing_retained) {
1526 	assert(extent_can_coalesce(arena, extents, inner, outer));
1527 
1528 	extent_activate_locked(tsdn, arena, extents, outer);
1529 
1530 	malloc_mutex_unlock(tsdn, &extents->mtx);
1531 	bool err = extent_merge_impl(tsdn, arena, r_extent_hooks,
1532 	    forward ? inner : outer, forward ? outer : inner, growing_retained);
1533 	malloc_mutex_lock(tsdn, &extents->mtx);
1534 
1535 	if (err) {
1536 		extent_deactivate_locked(tsdn, arena, extents, outer);
1537 	}
1538 
1539 	return err;
1540 }
1541 
1542 static extent_t *
1543 extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
1544     extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
1545     extent_t *extent, bool *coalesced, bool growing_retained) {
1546 	/*
1547 	 * Continue attempting to coalesce until failure, to protect against
1548 	 * races with other threads that are thwarted by this one.
1549 	 */
1550 	bool again;
1551 	do {
1552 		again = false;
1553 
1554 		/* Try to coalesce forward. */
1555 		extent_t *next = extent_lock_from_addr(tsdn, rtree_ctx,
1556 		    extent_past_get(extent));
1557 		if (next != NULL) {
1558 			/*
1559 			 * extents->mtx only protects against races for
1560 			 * like-state extents, so call extent_can_coalesce()
1561 			 * before releasing next's pool lock.
1562 			 */
1563 			bool can_coalesce = extent_can_coalesce(arena, extents,
1564 			    extent, next);
1565 
1566 			extent_unlock(tsdn, next);
1567 
1568 			if (can_coalesce && !extent_coalesce(tsdn, arena,
1569 			    r_extent_hooks, extents, extent, next, true,
1570 			    growing_retained)) {
1571 				if (extents->delay_coalesce) {
1572 					/* Do minimal coalescing. */
1573 					*coalesced = true;
1574 					return extent;
1575 				}
1576 				again = true;
1577 			}
1578 		}
1579 
1580 		/* Try to coalesce backward. */
1581 		extent_t *prev = extent_lock_from_addr(tsdn, rtree_ctx,
1582 		    extent_before_get(extent));
1583 		if (prev != NULL) {
1584 			bool can_coalesce = extent_can_coalesce(arena, extents,
1585 			    extent, prev);
1586 			extent_unlock(tsdn, prev);
1587 
1588 			if (can_coalesce && !extent_coalesce(tsdn, arena,
1589 			    r_extent_hooks, extents, extent, prev, false,
1590 			    growing_retained)) {
1591 				extent = prev;
1592 				if (extents->delay_coalesce) {
1593 					/* Do minimal coalescing. */
1594 					*coalesced = true;
1595 					return extent;
1596 				}
1597 				again = true;
1598 			}
1599 		}
1600 	} while (again);
1601 
1602 	if (extents->delay_coalesce) {
1603 		*coalesced = false;
1604 	}
1605 	return extent;
1606 }
1607 
1608 /*
1609  * Handles the metadata-management portion of putting an unused extent into the
1610  * given extents_t: coalescing, deregistering slab interiors, and the heap operations.
1611  */
1612 static void
1613 extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
1614     extents_t *extents, extent_t *extent, bool growing_retained) {
1615 	rtree_ctx_t rtree_ctx_fallback;
1616 	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
1617 
1618 	assert((extents_state_get(extents) != extent_state_dirty &&
1619 	    extents_state_get(extents) != extent_state_muzzy) ||
1620 	    !extent_zeroed_get(extent));
1621 
1622 	malloc_mutex_lock(tsdn, &extents->mtx);
1623 	extent_hooks_assure_initialized(arena, r_extent_hooks);
1624 
1625 	extent_szind_set(extent, NSIZES);
1626 	if (extent_slab_get(extent)) {
1627 		extent_interior_deregister(tsdn, rtree_ctx, extent);
1628 		extent_slab_set(extent, false);
1629 	}
1630 
1631 	assert(rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
1632 	    (uintptr_t)extent_base_get(extent), true) == extent);
1633 
1634 	if (!extents->delay_coalesce) {
1635 		extent = extent_try_coalesce(tsdn, arena, r_extent_hooks,
1636 		    rtree_ctx, extents, extent, NULL, growing_retained);
1637 	} else if (extent_size_get(extent) >= LARGE_MINCLASS) {
1638 		/* Always coalesce large extents eagerly. */
1639 		bool coalesced;
1640 		size_t prev_size;
1641 		do {
1642 			prev_size = extent_size_get(extent);
1643 			assert(extent_state_get(extent) == extent_state_active);
1644 			extent = extent_try_coalesce(tsdn, arena,
1645 			    r_extent_hooks, rtree_ctx, extents, extent,
1646 			    &coalesced, growing_retained);
1647 		} while (coalesced &&
1648 		    extent_size_get(extent) >= prev_size + LARGE_MINCLASS);
1649 	}
1650 	extent_deactivate_locked(tsdn, arena, extents, extent);
1651 
1652 	malloc_mutex_unlock(tsdn, &extents->mtx);
1653 }
1654 
1655 void
1656 extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
1657 	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
1658 
1659 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1660 	    WITNESS_RANK_CORE, 0);
1661 
1662 	if (extent_register(tsdn, extent)) {
1663 		extents_leak(tsdn, arena, &extent_hooks,
1664 		    &arena->extents_retained, extent, false);
1665 		return;
1666 	}
1667 	extent_dalloc_wrapper(tsdn, arena, &extent_hooks, extent);
1668 }
1669 
1670 static bool
1671 extent_dalloc_default_impl(void *addr, size_t size) {
1672 	if (!have_dss || !extent_in_dss(addr)) {
1673 		return extent_dalloc_mmap(addr, size);
1674 	}
1675 	return true;
1676 }
1677 
1678 static bool
1679 extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
1680     bool committed, unsigned arena_ind) {
1681 	return extent_dalloc_default_impl(addr, size);
1682 }
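
/*
 * Illustrative sketch (not part of jemalloc): a custom dalloc hook may decline
 * by returning true, in which case extent_dalloc_wrapper_try() below reports
 * failure and extent_dalloc_wrapper() falls back to decommitting/purging and
 * retaining the extent.  The name my_extent_dalloc is hypothetical.
 *
 *	static bool
 *	my_extent_dalloc(extent_hooks_t *extent_hooks, void *addr,
 *	    size_t size, bool committed, unsigned arena_ind) {
 *		return true;	// Decline; keep the mapping alive.
 *	}
 */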
1683 
1684 static bool
1685 extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena,
1686     extent_hooks_t **r_extent_hooks, extent_t *extent) {
1687 	bool err;
1688 
1689 	assert(extent_base_get(extent) != NULL);
1690 	assert(extent_size_get(extent) != 0);
1691 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1692 	    WITNESS_RANK_CORE, 0);
1693 
1694 	extent_addr_set(extent, extent_base_get(extent));
1695 
1696 	extent_hooks_assure_initialized(arena, r_extent_hooks);
1697 	/* Try to deallocate. */
1698 	if (*r_extent_hooks == &extent_hooks_default) {
1699 		/* Call directly to propagate tsdn. */
1700 		err = extent_dalloc_default_impl(extent_base_get(extent),
1701 		    extent_size_get(extent));
1702 	} else {
1703 		extent_hook_pre_reentrancy(tsdn, arena);
1704 		err = ((*r_extent_hooks)->dalloc == NULL ||
1705 		    (*r_extent_hooks)->dalloc(*r_extent_hooks,
1706 		    extent_base_get(extent), extent_size_get(extent),
1707 		    extent_committed_get(extent), arena_ind_get(arena)));
1708 		extent_hook_post_reentrancy(tsdn);
1709 	}
1710 
1711 	if (!err) {
1712 		extent_dalloc(tsdn, arena, extent);
1713 	}
1714 
1715 	return err;
1716 }
1717 
1718 void
1719 extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
1720     extent_hooks_t **r_extent_hooks, extent_t *extent) {
1721 	assert(extent_dumpable_get(extent));
1722 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1723 	    WITNESS_RANK_CORE, 0);
1724 
1725 	/*
1726 	 * Deregister first to avoid a race with other allocating threads, and
1727 	 * reregister if deallocation fails.
1728 	 */
1729 	extent_deregister(tsdn, extent);
1730 	if (!extent_dalloc_wrapper_try(tsdn, arena, r_extent_hooks, extent)) {
1731 		return;
1732 	}
1733 
1734 	extent_reregister(tsdn, extent);
1735 	if (*r_extent_hooks != &extent_hooks_default) {
1736 		extent_hook_pre_reentrancy(tsdn, arena);
1737 	}
1738 	/* Try to decommit; purge if that fails. */
1739 	bool zeroed;
1740 	if (!extent_committed_get(extent)) {
1741 		zeroed = true;
1742 	} else if (!extent_decommit_wrapper(tsdn, arena, r_extent_hooks, extent,
1743 	    0, extent_size_get(extent))) {
1744 		zeroed = true;
1745 	} else if ((*r_extent_hooks)->purge_forced != NULL &&
1746 	    !(*r_extent_hooks)->purge_forced(*r_extent_hooks,
1747 	    extent_base_get(extent), extent_size_get(extent), 0,
1748 	    extent_size_get(extent), arena_ind_get(arena))) {
1749 		zeroed = true;
1750 	} else if (extent_state_get(extent) == extent_state_muzzy ||
1751 	    ((*r_extent_hooks)->purge_lazy != NULL &&
1752 	    !(*r_extent_hooks)->purge_lazy(*r_extent_hooks,
1753 	    extent_base_get(extent), extent_size_get(extent), 0,
1754 	    extent_size_get(extent), arena_ind_get(arena)))) {
1755 		zeroed = false;
1756 	} else {
1757 		zeroed = false;
1758 	}
1759 	if (*r_extent_hooks != &extent_hooks_default) {
1760 		extent_hook_post_reentrancy(tsdn);
1761 	}
1762 	extent_zeroed_set(extent, zeroed);
1763 
1764 	if (config_prof) {
1765 		extent_gdump_sub(tsdn, extent);
1766 	}
1767 
1768 	extent_record(tsdn, arena, r_extent_hooks, &arena->extents_retained,
1769 	    extent, false);
1770 }
1771 
1772 static void
1773 extent_destroy_default_impl(void *addr, size_t size) {
1774 	if (!have_dss || !extent_in_dss(addr)) {
1775 		pages_unmap(addr, size);
1776 	}
1777 }
1778 
1779 static void
1780 extent_destroy_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
1781     bool committed, unsigned arena_ind) {
1782 	extent_destroy_default_impl(addr, size);
1783 }
1784 
1785 void
1786 extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena,
1787     extent_hooks_t **r_extent_hooks, extent_t *extent) {
1788 	assert(extent_base_get(extent) != NULL);
1789 	assert(extent_size_get(extent) != 0);
1790 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1791 	    WITNESS_RANK_CORE, 0);
1792 
1793 	/* Deregister first to avoid a race with other allocating threads. */
1794 	extent_deregister(tsdn, extent);
1795 
1796 	extent_addr_set(extent, extent_base_get(extent));
1797 
1798 	extent_hooks_assure_initialized(arena, r_extent_hooks);
1799 	/* Try to destroy; silently fail otherwise. */
1800 	if (*r_extent_hooks == &extent_hooks_default) {
1801 		/* Call directly to propagate tsdn. */
1802 		extent_destroy_default_impl(extent_base_get(extent),
1803 		    extent_size_get(extent));
1804 	} else if ((*r_extent_hooks)->destroy != NULL) {
1805 		extent_hook_pre_reentrancy(tsdn, arena);
1806 		(*r_extent_hooks)->destroy(*r_extent_hooks,
1807 		    extent_base_get(extent), extent_size_get(extent),
1808 		    extent_committed_get(extent), arena_ind_get(arena));
1809 		extent_hook_post_reentrancy(tsdn);
1810 	}
1811 
1812 	extent_dalloc(tsdn, arena, extent);
1813 }
1814 
1815 static bool
1816 extent_commit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
1817     size_t offset, size_t length, unsigned arena_ind) {
1818 	return pages_commit((void *)((uintptr_t)addr + (uintptr_t)offset),
1819 	    length);
1820 }
1821 
1822 static bool
1823 extent_commit_impl(tsdn_t *tsdn, arena_t *arena,
1824     extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
1825     size_t length, bool growing_retained) {
1826 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1827 	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
1828 
1829 	extent_hooks_assure_initialized(arena, r_extent_hooks);
1830 	if (*r_extent_hooks != &extent_hooks_default) {
1831 		extent_hook_pre_reentrancy(tsdn, arena);
1832 	}
1833 	bool err = ((*r_extent_hooks)->commit == NULL ||
1834 	    (*r_extent_hooks)->commit(*r_extent_hooks, extent_base_get(extent),
1835 	    extent_size_get(extent), offset, length, arena_ind_get(arena)));
1836 	if (*r_extent_hooks != &extent_hooks_default) {
1837 		extent_hook_post_reentrancy(tsdn);
1838 	}
1839 	extent_committed_set(extent, extent_committed_get(extent) || !err);
1840 	return err;
1841 }
1842 
1843 bool
1844 extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena,
1845     extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
1846     size_t length) {
1847 	return extent_commit_impl(tsdn, arena, r_extent_hooks, extent, offset,
1848 	    length, false);
1849 }
1850 
1851 static bool
1852 extent_decommit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
1853     size_t offset, size_t length, unsigned arena_ind) {
1854 	return pages_decommit((void *)((uintptr_t)addr + (uintptr_t)offset),
1855 	    length);
1856 }
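
/*
 * Illustrative sketch (not part of jemalloc): commit/decommit hooks built
 * directly on mprotect(2)/mmap(2) instead of the internal pages_* helpers.
 * As with the defaults, returning false indicates success.  The names, and
 * the strategy of remapping PROT_NONE on decommit so the kernel may reclaim
 * the backing pages, are assumptions for illustration.
 *
 *	static bool
 *	my_extent_commit(extent_hooks_t *extent_hooks, void *addr,
 *	    size_t size, size_t offset, size_t length, unsigned arena_ind) {
 *		return mprotect((void *)((uintptr_t)addr + offset), length,
 *		    PROT_READ | PROT_WRITE) != 0;
 *	}
 *
 *	static bool
 *	my_extent_decommit(extent_hooks_t *extent_hooks, void *addr,
 *	    size_t size, size_t offset, size_t length, unsigned arena_ind) {
 *		return mmap((void *)((uintptr_t)addr + offset), length,
 *		    PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS, -1,
 *		    0) == MAP_FAILED;
 *	}
 */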
1857 
1858 bool
1859 extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
1860     extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
1861     size_t length) {
1862 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1863 	    WITNESS_RANK_CORE, 0);
1864 
1865 	extent_hooks_assure_initialized(arena, r_extent_hooks);
1866 
1867 	if (*r_extent_hooks != &extent_hooks_default) {
1868 		extent_hook_pre_reentrancy(tsdn, arena);
1869 	}
1870 	bool err = ((*r_extent_hooks)->decommit == NULL ||
1871 	    (*r_extent_hooks)->decommit(*r_extent_hooks,
1872 	    extent_base_get(extent), extent_size_get(extent), offset, length,
1873 	    arena_ind_get(arena)));
1874 	if (*r_extent_hooks != &extent_hooks_default) {
1875 		extent_hook_post_reentrancy(tsdn);
1876 	}
1877 	extent_committed_set(extent, extent_committed_get(extent) && err);
1878 	return err;
1879 }
1880 
1881 #ifdef PAGES_CAN_PURGE_LAZY
1882 static bool
1883 extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
1884     size_t offset, size_t length, unsigned arena_ind) {
1885 	assert(addr != NULL);
1886 	assert((offset & PAGE_MASK) == 0);
1887 	assert(length != 0);
1888 	assert((length & PAGE_MASK) == 0);
1889 
1890 	return pages_purge_lazy((void *)((uintptr_t)addr + (uintptr_t)offset),
1891 	    length);
1892 }
1893 #endif
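
/*
 * Illustrative sketch (not part of jemalloc): a lazy purge hook built on
 * madvise(2).  MADV_DONTNEED is used here as the widely available choice;
 * where MADV_FREE exists it is the closer match for lazy purging.  The name
 * my_extent_purge_lazy is hypothetical.
 *
 *	static bool
 *	my_extent_purge_lazy(extent_hooks_t *extent_hooks, void *addr,
 *	    size_t size, size_t offset, size_t length, unsigned arena_ind) {
 *		return madvise((void *)((uintptr_t)addr + offset), length,
 *		    MADV_DONTNEED) != 0;
 *	}
 */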
1894 
1895 static bool
1896 extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena,
1897     extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
1898     size_t length, bool growing_retained) {
1899 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1900 	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
1901 
1902 	extent_hooks_assure_initialized(arena, r_extent_hooks);
1903 
1904 	if ((*r_extent_hooks)->purge_lazy == NULL) {
1905 		return true;
1906 	}
1907 	if (*r_extent_hooks != &extent_hooks_default) {
1908 		extent_hook_pre_reentrancy(tsdn, arena);
1909 	}
1910 	bool err = (*r_extent_hooks)->purge_lazy(*r_extent_hooks,
1911 	    extent_base_get(extent), extent_size_get(extent), offset, length,
1912 	    arena_ind_get(arena));
1913 	if (*r_extent_hooks != &extent_hooks_default) {
1914 		extent_hook_post_reentrancy(tsdn);
1915 	}
1916 
1917 	return err;
1918 }
1919 
1920 bool
1921 extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena,
1922     extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
1923     size_t length) {
1924 	return extent_purge_lazy_impl(tsdn, arena, r_extent_hooks, extent,
1925 	    offset, length, false);
1926 }
1927 
1928 #ifdef PAGES_CAN_PURGE_FORCED
1929 static bool
1930 extent_purge_forced_default(extent_hooks_t *extent_hooks, void *addr,
1931     size_t size, size_t offset, size_t length, unsigned arena_ind) {
1932 	assert(addr != NULL);
1933 	assert((offset & PAGE_MASK) == 0);
1934 	assert(length != 0);
1935 	assert((length & PAGE_MASK) == 0);
1936 
1937 	return pages_purge_forced((void *)((uintptr_t)addr +
1938 	    (uintptr_t)offset), length);
1939 }
1940 #endif
1941 
1942 static bool
1943 extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena,
1944     extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
1945     size_t length, bool growing_retained) {
1946 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1947 	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
1948 
1949 	extent_hooks_assure_initialized(arena, r_extent_hooks);
1950 
1951 	if ((*r_extent_hooks)->purge_forced == NULL) {
1952 		return true;
1953 	}
1954 	if (*r_extent_hooks != &extent_hooks_default) {
1955 		extent_hook_pre_reentrancy(tsdn, arena);
1956 	}
1957 	bool err = (*r_extent_hooks)->purge_forced(*r_extent_hooks,
1958 	    extent_base_get(extent), extent_size_get(extent), offset, length,
1959 	    arena_ind_get(arena));
1960 	if (*r_extent_hooks != &extent_hooks_default) {
1961 		extent_hook_post_reentrancy(tsdn);
1962 	}
1963 	return err;
1964 }
1965 
1966 bool
1967 extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena,
1968     extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
1969     size_t length) {
1970 	return extent_purge_forced_impl(tsdn, arena, r_extent_hooks, extent,
1971 	    offset, length, false);
1972 }
1973 
1974 #ifdef JEMALLOC_MAPS_COALESCE
1975 static bool
1976 extent_split_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
1977     size_t size_a, size_t size_b, bool committed, unsigned arena_ind) {
1978 	return !maps_coalesce;
1979 }
1980 #endif
1981 
1982 /*
1983  * Accepts the extent to split, and the characteristics of each side of the
1984  * split.  The 'a' parameters go with the 'lead' of the resulting pair of
1985  * extents (the lower addressed portion of the split), and the 'b' parameters go
1986  * with the trail (the higher addressed portion).  This makes 'extent' the lead,
1987  * and returns the trail (except in case of error).
1988  */
1989 static extent_t *
1990 extent_split_impl(tsdn_t *tsdn, arena_t *arena,
1991     extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
1992     szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
1993     bool growing_retained) {
1994 	assert(extent_size_get(extent) == size_a + size_b);
1995 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1996 	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
1997 
1998 	extent_hooks_assure_initialized(arena, r_extent_hooks);
1999 
2000 	if ((*r_extent_hooks)->split == NULL) {
2001 		return NULL;
2002 	}
2003 
2004 	extent_t *trail = extent_alloc(tsdn, arena);
2005 	if (trail == NULL) {
2006 		goto label_error_a;
2007 	}
2008 
2009 	extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) +
2010 	    size_a), size_b, slab_b, szind_b, extent_sn_get(extent),
2011 	    extent_state_get(extent), extent_zeroed_get(extent),
2012 	    extent_committed_get(extent), extent_dumpable_get(extent));
2013 
2014 	rtree_ctx_t rtree_ctx_fallback;
2015 	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
2016 	rtree_leaf_elm_t *lead_elm_a, *lead_elm_b;
2017 	{
2018 		extent_t lead;
2019 
2020 		extent_init(&lead, arena, extent_addr_get(extent), size_a,
2021 		    slab_a, szind_a, extent_sn_get(extent),
2022 		    extent_state_get(extent), extent_zeroed_get(extent),
2023 		    extent_committed_get(extent), extent_dumpable_get(extent));
2024 
2025 		extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, &lead, false,
2026 		    true, &lead_elm_a, &lead_elm_b);
2027 	}
2028 	rtree_leaf_elm_t *trail_elm_a, *trail_elm_b;
2029 	extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, trail, false, true,
2030 	    &trail_elm_a, &trail_elm_b);
2031 
2032 	if (lead_elm_a == NULL || lead_elm_b == NULL || trail_elm_a == NULL
2033 	    || trail_elm_b == NULL) {
2034 		goto label_error_b;
2035 	}
2036 
2037 	extent_lock2(tsdn, extent, trail);
2038 
2039 	if (*r_extent_hooks != &extent_hooks_default) {
2040 		extent_hook_pre_reentrancy(tsdn, arena);
2041 	}
2042 	bool err = (*r_extent_hooks)->split(*r_extent_hooks, extent_base_get(extent),
2043 	    size_a + size_b, size_a, size_b, extent_committed_get(extent),
2044 	    arena_ind_get(arena));
2045 	if (*r_extent_hooks != &extent_hooks_default) {
2046 		extent_hook_post_reentrancy(tsdn);
2047 	}
2048 	if (err) {
2049 		goto label_error_c;
2050 	}
2051 
2052 	extent_size_set(extent, size_a);
2053 	extent_szind_set(extent, szind_a);
2054 
2055 	extent_rtree_write_acquired(tsdn, lead_elm_a, lead_elm_b, extent,
2056 	    szind_a, slab_a);
2057 	extent_rtree_write_acquired(tsdn, trail_elm_a, trail_elm_b, trail,
2058 	    szind_b, slab_b);
2059 
2060 	extent_unlock2(tsdn, extent, trail);
2061 
2062 	return trail;
2063 label_error_c:
2064 	extent_unlock2(tsdn, extent, trail);
2065 label_error_b:
2066 	extent_dalloc(tsdn, arena, trail);
2067 label_error_a:
2068 	return NULL;
2069 }
2070 
2071 extent_t *
2072 extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
2073     extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
2074     szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b) {
2075 	return extent_split_impl(tsdn, arena, r_extent_hooks, extent, size_a,
2076 	    szind_a, slab_a, size_b, szind_b, slab_b, false);
2077 }
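
/*
 * Usage sketch (hypothetical values, for illustration only): carving one page
 * off the front of an extent.  On success, 'extent' becomes the PAGE-sized
 * lead and the returned trail covers the remainder; on failure NULL is
 * returned and 'extent' is left unchanged.
 *
 *	extent_t *trail = extent_split_wrapper(tsdn, arena, &extent_hooks,
 *	    extent, PAGE, NSIZES, false, extent_size_get(extent) - PAGE,
 *	    NSIZES, false);
 */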
2078 
2079 static bool
2080 extent_merge_default_impl(void *addr_a, void *addr_b) {
2081 	if (!maps_coalesce) {
2082 		return true;
2083 	}
2084 	if (have_dss && !extent_dss_mergeable(addr_a, addr_b)) {
2085 		return true;
2086 	}
2087 
2088 	return false;
2089 }
2090 
2091 #ifdef JEMALLOC_MAPS_COALESCE
2092 static bool
2093 extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
2094     void *addr_b, size_t size_b, bool committed, unsigned arena_ind) {
2095 	return extent_merge_default_impl(addr_a, addr_b);
2096 }
2097 #endif
2098 
2099 static bool
2100 extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
2101     extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b,
2102     bool growing_retained) {
2103 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
2104 	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
2105 
2106 	extent_hooks_assure_initialized(arena, r_extent_hooks);
2107 
2108 	if ((*r_extent_hooks)->merge == NULL) {
2109 		return true;
2110 	}
2111 
2112 	bool err;
2113 	if (*r_extent_hooks == &extent_hooks_default) {
2114 		/* Call directly to propagate tsdn. */
2115 		err = extent_merge_default_impl(extent_base_get(a),
2116 		    extent_base_get(b));
2117 	} else {
2118 		extent_hook_pre_reentrancy(tsdn, arena);
2119 		err = (*r_extent_hooks)->merge(*r_extent_hooks,
2120 		    extent_base_get(a), extent_size_get(a), extent_base_get(b),
2121 		    extent_size_get(b), extent_committed_get(a),
2122 		    arena_ind_get(arena));
2123 		extent_hook_post_reentrancy(tsdn);
2124 	}
2125 
2126 	if (err) {
2127 		return true;
2128 	}
2129 
2130 	/*
2131 	 * The rtree writes must happen while all the relevant elements are
2132 	 * owned, so the following code uses decomposed helper functions rather
2133 	 * than extent_{,de}register() to do things in the right order.
2134 	 */
2135 	rtree_ctx_t rtree_ctx_fallback;
2136 	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
2137 	rtree_leaf_elm_t *a_elm_a, *a_elm_b, *b_elm_a, *b_elm_b;
2138 	extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, a, true, false, &a_elm_a,
2139 	    &a_elm_b);
2140 	extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, b, true, false, &b_elm_a,
2141 	    &b_elm_b);
2142 
2143 	extent_lock2(tsdn, a, b);
2144 
2145 	if (a_elm_b != NULL) {
2146 		rtree_leaf_elm_write(tsdn, &extents_rtree, a_elm_b, NULL,
2147 		    NSIZES, false);
2148 	}
2149 	if (b_elm_b != NULL) {
2150 		rtree_leaf_elm_write(tsdn, &extents_rtree, b_elm_a, NULL,
2151 		    NSIZES, false);
2152 	} else {
2153 		b_elm_b = b_elm_a;
2154 	}
2155 
2156 	extent_size_set(a, extent_size_get(a) + extent_size_get(b));
2157 	extent_szind_set(a, NSIZES);
2158 	extent_sn_set(a, (extent_sn_get(a) < extent_sn_get(b)) ?
2159 	    extent_sn_get(a) : extent_sn_get(b));
2160 	extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b));
2161 
2162 	extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a, NSIZES, false);
2163 
2164 	extent_unlock2(tsdn, a, b);
2165 
2166 	extent_dalloc(tsdn, extent_arena_get(b), b);
2167 
2168 	return false;
2169 }
2170 
2171 bool
2172 extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
2173     extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b) {
2174 	return extent_merge_impl(tsdn, arena, r_extent_hooks, a, b, false);
2175 }
2176 
2177 bool
2178 extent_boot(void) {
2179 	if (rtree_new(&extents_rtree, true)) {
2180 		return true;
2181 	}
2182 
2183 	if (mutex_pool_init(&extent_mutex_pool, "extent_mutex_pool",
2184 	    WITNESS_RANK_EXTENT_POOL)) {
2185 		return true;
2186 	}
2187 
2188 	if (have_dss) {
2189 		extent_dss_boot();
2190 	}
2191 
2192 	return false;
2193 }
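
/*
 * Illustrative sketch (not part of this file): custom hooks such as the ones
 * sketched above are installed from application code through the public
 * "arena.<i>.extent_hooks" mallctl.  The my_hooks table and arena index 0 are
 * assumptions for illustration.
 *
 *	extent_hooks_t *new_hooks = &my_hooks;
 *	extent_hooks_t *old_hooks;
 *	size_t sz = sizeof(old_hooks);
 *	if (mallctl("arena.0.extent_hooks", (void *)&old_hooks, &sz,
 *	    (void *)&new_hooks, sizeof(new_hooks)) != 0) {
 *		// Installation failed; old_hooks is not meaningful.
 *	}
 */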
2194