1 #define	JEMALLOC_C_
2 #include "jemalloc/internal/jemalloc_internal.h"
3 
4 /******************************************************************************/
5 /* Data. */
6 
7 /* Runtime configuration options. */
8 const char	*je_malloc_conf JEMALLOC_ATTR(weak);
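/*
 * Illustrative (assumed) usage: an application can provide its own definition
 * to override the weak symbol above and set compile-time options, e.g.:
 *
 *   const char *malloc_conf = "narenas:2,stats_print:true";
 *
 * The unprefixed name assumes the default public-namespace mapping of
 * je_malloc_conf; see malloc_conf_init() below for how the string is parsed.
 */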
9 bool	opt_abort =
10 #ifdef JEMALLOC_DEBUG
11     true
12 #else
13     false
14 #endif
15     ;
16 const char	*opt_junk =
17 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
18     "true"
19 #else
20     "false"
21 #endif
22     ;
23 bool	opt_junk_alloc =
24 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
25     true
26 #else
27     false
28 #endif
29     ;
30 bool	opt_junk_free =
31 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
32     true
33 #else
34     false
35 #endif
36     ;
37 
38 size_t	opt_quarantine = ZU(0);
39 bool	opt_redzone = false;
40 bool	opt_utrace = false;
41 bool	opt_xmalloc = false;
42 bool	opt_zero = false;
43 size_t	opt_narenas = 0;
44 
45 /* Initialized to true if the process is running inside Valgrind. */
46 bool	in_valgrind;
47 
48 unsigned	ncpus;
49 
50 /* Protects arenas initialization (arenas, narenas_total). */
51 static malloc_mutex_t	arenas_lock;
52 /*
53  * Arenas that are used to service external requests.  Not all elements of the
54  * arenas array are necessarily used; arenas are created lazily as needed.
55  *
56  * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
57  * arenas.  arenas[narenas_auto..narenas_total) are only used if the application
58  * takes some action to create them and allocate from them.
59  */
60 static arena_t		**arenas;
61 static unsigned		narenas_total;
62 static arena_t		*a0; /* arenas[0]; read-only after initialization. */
63 static unsigned		narenas_auto; /* Read-only after initialization. */
64 
65 typedef enum {
66 	malloc_init_uninitialized	= 3,
67 	malloc_init_a0_initialized	= 2,
68 	malloc_init_recursible		= 1,
69 	malloc_init_initialized		= 0 /* Common case --> jnz. */
70 } malloc_init_t;
71 static malloc_init_t	malloc_init_state = malloc_init_uninitialized;
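/*
 * Initialization normally walks the states in descending numeric order:
 *
 *   malloc_init_uninitialized  -> malloc_init_hard_a0_locked()
 *   malloc_init_a0_initialized -> malloc_init_hard_recursible()
 *   malloc_init_recursible     -> malloc_init_hard_finish()
 *   malloc_init_initialized
 *
 * The values are chosen so that the common "already initialized" test is a
 * comparison against zero.
 */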
72 
73 JEMALLOC_ALIGNED(CACHELINE)
74 const size_t	index2size_tab[NSIZES] = {
75 #define	SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
76 	((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)),
77 	SIZE_CLASSES
78 #undef SC
79 };
80 
81 JEMALLOC_ALIGNED(CACHELINE)
82 const uint8_t	size2index_tab[] = {
83 #if LG_TINY_MIN == 0
84 #warning "Dangerous LG_TINY_MIN"
85 #define	S2B_0(i)	i,
86 #elif LG_TINY_MIN == 1
87 #warning "Dangerous LG_TINY_MIN"
88 #define	S2B_1(i)	i,
89 #elif LG_TINY_MIN == 2
90 #warning "Dangerous LG_TINY_MIN"
91 #define	S2B_2(i)	i,
92 #elif LG_TINY_MIN == 3
93 #define	S2B_3(i)	i,
94 #elif LG_TINY_MIN == 4
95 #define	S2B_4(i)	i,
96 #elif LG_TINY_MIN == 5
97 #define	S2B_5(i)	i,
98 #elif LG_TINY_MIN == 6
99 #define	S2B_6(i)	i,
100 #elif LG_TINY_MIN == 7
101 #define	S2B_7(i)	i,
102 #elif LG_TINY_MIN == 8
103 #define	S2B_8(i)	i,
104 #elif LG_TINY_MIN == 9
105 #define	S2B_9(i)	i,
106 #elif LG_TINY_MIN == 10
107 #define	S2B_10(i)	i,
108 #elif LG_TINY_MIN == 11
109 #define	S2B_11(i)	i,
110 #else
111 #error "Unsupported LG_TINY_MIN"
112 #endif
113 #if LG_TINY_MIN < 1
114 #define	S2B_1(i)	S2B_0(i) S2B_0(i)
115 #endif
116 #if LG_TINY_MIN < 2
117 #define	S2B_2(i)	S2B_1(i) S2B_1(i)
118 #endif
119 #if LG_TINY_MIN < 3
120 #define	S2B_3(i)	S2B_2(i) S2B_2(i)
121 #endif
122 #if LG_TINY_MIN < 4
123 #define	S2B_4(i)	S2B_3(i) S2B_3(i)
124 #endif
125 #if LG_TINY_MIN < 5
126 #define	S2B_5(i)	S2B_4(i) S2B_4(i)
127 #endif
128 #if LG_TINY_MIN < 6
129 #define	S2B_6(i)	S2B_5(i) S2B_5(i)
130 #endif
131 #if LG_TINY_MIN < 7
132 #define	S2B_7(i)	S2B_6(i) S2B_6(i)
133 #endif
134 #if LG_TINY_MIN < 8
135 #define	S2B_8(i)	S2B_7(i) S2B_7(i)
136 #endif
137 #if LG_TINY_MIN < 9
138 #define	S2B_9(i)	S2B_8(i) S2B_8(i)
139 #endif
140 #if LG_TINY_MIN < 10
141 #define	S2B_10(i)	S2B_9(i) S2B_9(i)
142 #endif
143 #if LG_TINY_MIN < 11
144 #define	S2B_11(i)	S2B_10(i) S2B_10(i)
145 #endif
146 #define	S2B_no(i)
147 #define	SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
148 	S2B_##lg_delta_lookup(index)
149 	SIZE_CLASSES
150 #undef S2B_3
151 #undef S2B_4
152 #undef S2B_5
153 #undef S2B_6
154 #undef S2B_7
155 #undef S2B_8
156 #undef S2B_9
157 #undef S2B_10
158 #undef S2B_11
159 #undef S2B_no
160 #undef SC
161 };
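/*
 * Sketch of how the two lookup tables relate (the fast path in
 * jemalloc_internal.h indexes approximately as follows; exact code may
 * differ):
 *
 *   index = size2index_tab[(size - 1) >> LG_TINY_MIN];
 *   usize = index2size_tab[index];
 *
 * Each size2index_tab entry therefore covers one (1 << LG_TINY_MIN)-byte
 * granule of request sizes.
 */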
162 
163 #ifdef JEMALLOC_THREADED_INIT
164 /* Used to let the initializing thread recursively allocate. */
165 #  define NO_INITIALIZER	((unsigned long)0)
166 #  define INITIALIZER		pthread_self()
167 #  define IS_INITIALIZER	(malloc_initializer == pthread_self())
168 static pthread_t		malloc_initializer = NO_INITIALIZER;
169 #else
170 #  define NO_INITIALIZER	false
171 #  define INITIALIZER		true
172 #  define IS_INITIALIZER	malloc_initializer
173 static bool			malloc_initializer = NO_INITIALIZER;
174 #endif
175 
176 /* Used to avoid initialization races. */
177 #ifdef _WIN32
178 static malloc_mutex_t	init_lock;
179 
180 JEMALLOC_ATTR(constructor)
181 static void WINAPI
182 _init_init_lock(void)
183 {
184 
185 	malloc_mutex_init(&init_lock);
186 }
187 
188 #ifdef _MSC_VER
189 #  pragma section(".CRT$XCU", read)
190 JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
191 static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
192 #endif
193 
194 #else
195 static malloc_mutex_t	init_lock = MALLOC_MUTEX_INITIALIZER;
196 #endif
197 
198 typedef struct {
199 	void	*p;	/* Input pointer (as in realloc(p, s)). */
200 	size_t	s;	/* Request size. */
201 	void	*r;	/* Result pointer. */
202 } malloc_utrace_t;
203 
204 #ifdef JEMALLOC_UTRACE
205 #  define UTRACE(a, b, c) do {						\
206 	if (unlikely(opt_utrace)) {					\
207 		int utrace_serrno = errno;				\
208 		malloc_utrace_t ut;					\
209 		ut.p = (a);						\
210 		ut.s = (b);						\
211 		ut.r = (c);						\
212 		utrace(&ut, sizeof(ut));				\
213 		errno = utrace_serrno;					\
214 	}								\
215 } while (0)
216 #else
217 #  define UTRACE(a, b, c)
218 #endif
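/*
 * UTRACE() call sites below follow the utrace(2) convention used for the
 * malloc(3) family:
 *
 *   UTRACE(0, size, ret);	(malloc)
 *   UTRACE(ptr, size, ret);	(realloc)
 *   UTRACE(ptr, 0, 0);		(free)
 */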
219 
220 /******************************************************************************/
221 /*
222  * Function prototypes for static functions that are referenced prior to
223  * definition.
224  */
225 
226 static bool	malloc_init_hard_a0(void);
227 static bool	malloc_init_hard(void);
228 
229 /******************************************************************************/
230 /*
231  * Begin miscellaneous support functions.
232  */
233 
234 JEMALLOC_ALWAYS_INLINE_C bool
235 malloc_initialized(void)
236 {
237 
238 	return (malloc_init_state == malloc_init_initialized);
239 }
240 
241 JEMALLOC_ALWAYS_INLINE_C void
242 malloc_thread_init(void)
243 {
244 
245 	/*
246 	 * TSD initialization can't be safely done as a side effect of
247 	 * deallocation, because it is possible for a thread to do nothing but
248 	 * deallocate its TLS data via free(), in which case writing to TLS
249 	 * would cause write-after-free memory corruption.  The quarantine
250 	 * facility *only* gets used as a side effect of deallocation, so make
251 	 * a best effort attempt at initializing its TSD by hooking all
252 	 * allocation events.
253 	 */
254 	if (config_fill && unlikely(opt_quarantine))
255 		quarantine_alloc_hook();
256 }
257 
258 JEMALLOC_ALWAYS_INLINE_C bool
259 malloc_init_a0(void)
260 {
261 
262 	if (unlikely(malloc_init_state == malloc_init_uninitialized))
263 		return (malloc_init_hard_a0());
264 	return (false);
265 }
266 
267 JEMALLOC_ALWAYS_INLINE_C bool
268 malloc_init(void)
269 {
270 
271 	if (unlikely(!malloc_initialized()) && malloc_init_hard())
272 		return (true);
273 	malloc_thread_init();
274 
275 	return (false);
276 }
277 
278 /*
279  * The a0*() functions are used instead of i[mcd]alloc() in situations that
280  * cannot tolerate TLS variable access.
281  */
282 
283 arena_t *
284 a0get(void)
285 {
286 
287 	assert(a0 != NULL);
288 	return (a0);
289 }
290 
291 static void *
292 a0ialloc(size_t size, bool zero, bool is_metadata)
293 {
294 
295 	if (unlikely(malloc_init_a0()))
296 		return (NULL);
297 
298 	return (iallocztm(NULL, size, zero, false, is_metadata, a0get()));
299 }
300 
301 static void
302 a0idalloc(void *ptr, bool is_metadata)
303 {
304 
305 	idalloctm(NULL, ptr, false, is_metadata);
306 }
307 
308 void *
309 a0malloc(size_t size)
310 {
311 
312 	return (a0ialloc(size, false, true));
313 }
314 
315 void
316 a0dalloc(void *ptr)
317 {
318 
319 	a0idalloc(ptr, true);
320 }
321 
322 /*
323  * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-sensitive
324  * situations that cannot tolerate TLS variable access (TLS allocation and very
325  * early internal data structure initialization).
326  */
327 
328 void *
329 bootstrap_malloc(size_t size)
330 {
331 
332 	if (unlikely(size == 0))
333 		size = 1;
334 
335 	return (a0ialloc(size, false, false));
336 }
337 
338 void *
339 bootstrap_calloc(size_t num, size_t size)
340 {
341 	size_t num_size;
342 
343 	num_size = num * size;
344 	if (unlikely(num_size == 0)) {
345 		assert(num == 0 || size == 0);
346 		num_size = 1;
347 	}
348 
349 	return (a0ialloc(num_size, true, false));
350 }
351 
352 void
353 bootstrap_free(void *ptr)
354 {
355 
356 	if (unlikely(ptr == NULL))
357 		return;
358 
359 	a0idalloc(ptr, false);
360 }
361 
362 /* Create a new arena and insert it into the arenas array at index ind. */
363 static arena_t *
364 arena_init_locked(unsigned ind)
365 {
366 	arena_t *arena;
367 
368 	/* Expand arenas if necessary. */
369 	assert(ind <= narenas_total);
370 	if (ind > MALLOCX_ARENA_MAX)
371 		return (NULL);
372 	if (ind == narenas_total) {
373 		unsigned narenas_new = narenas_total + 1;
374 		arena_t **arenas_new =
375 		    (arena_t **)a0malloc(CACHELINE_CEILING(narenas_new *
376 		    sizeof(arena_t *)));
377 		if (arenas_new == NULL)
378 			return (NULL);
379 		memcpy(arenas_new, arenas, narenas_total * sizeof(arena_t *));
380 		arenas_new[ind] = NULL;
381 		/*
382 		 * Deallocate only if arenas came from a0malloc() (not
383 		 * base_alloc()).
384 		 */
385 		if (narenas_total != narenas_auto)
386 			a0dalloc(arenas);
387 		arenas = arenas_new;
388 		narenas_total = narenas_new;
389 	}
390 
391 	/*
392 	 * Another thread may have already initialized arenas[ind] if it's an
393 	 * auto arena.
394 	 */
395 	arena = arenas[ind];
396 	if (arena != NULL) {
397 		assert(ind < narenas_auto);
398 		return (arena);
399 	}
400 
401 	/* Actually initialize the arena. */
402 	arena = arenas[ind] = arena_new(ind);
403 	return (arena);
404 }
405 
406 arena_t *
407 arena_init(unsigned ind)
408 {
409 	arena_t *arena;
410 
411 	malloc_mutex_lock(&arenas_lock);
412 	arena = arena_init_locked(ind);
413 	malloc_mutex_unlock(&arenas_lock);
414 	return (arena);
415 }
416 
417 unsigned
418 narenas_total_get(void)
419 {
420 	unsigned narenas;
421 
422 	malloc_mutex_lock(&arenas_lock);
423 	narenas = narenas_total;
424 	malloc_mutex_unlock(&arenas_lock);
425 
426 	return (narenas);
427 }
428 
429 static void
430 arena_bind_locked(tsd_t *tsd, unsigned ind)
431 {
432 	arena_t *arena;
433 
434 	arena = arenas[ind];
435 	arena->nthreads++;
436 
437 	if (tsd_nominal(tsd))
438 		tsd_arena_set(tsd, arena);
439 }
440 
441 static void
442 arena_bind(tsd_t *tsd, unsigned ind)
443 {
444 
445 	malloc_mutex_lock(&arenas_lock);
446 	arena_bind_locked(tsd, ind);
447 	malloc_mutex_unlock(&arenas_lock);
448 }
449 
450 void
451 arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind)
452 {
453 	arena_t *oldarena, *newarena;
454 
455 	malloc_mutex_lock(&arenas_lock);
456 	oldarena = arenas[oldind];
457 	newarena = arenas[newind];
458 	oldarena->nthreads--;
459 	newarena->nthreads++;
460 	malloc_mutex_unlock(&arenas_lock);
461 	tsd_arena_set(tsd, newarena);
462 }
463 
464 unsigned
465 arena_nbound(unsigned ind)
466 {
467 	unsigned nthreads;
468 
469 	malloc_mutex_lock(&arenas_lock);
470 	nthreads = arenas[ind]->nthreads;
471 	malloc_mutex_unlock(&arenas_lock);
472 	return (nthreads);
473 }
474 
475 static void
476 arena_unbind(tsd_t *tsd, unsigned ind)
477 {
478 	arena_t *arena;
479 
480 	malloc_mutex_lock(&arenas_lock);
481 	arena = arenas[ind];
482 	arena->nthreads--;
483 	malloc_mutex_unlock(&arenas_lock);
484 	tsd_arena_set(tsd, NULL);
485 }
486 
487 arena_t *
488 arena_get_hard(tsd_t *tsd, unsigned ind, bool init_if_missing)
489 {
490 	arena_t *arena;
491 	arena_t **arenas_cache = tsd_arenas_cache_get(tsd);
492 	unsigned narenas_cache = tsd_narenas_cache_get(tsd);
493 	unsigned narenas_actual = narenas_total_get();
494 
495 	/* Deallocate old cache if it's too small. */
496 	if (arenas_cache != NULL && narenas_cache < narenas_actual) {
497 		a0dalloc(arenas_cache);
498 		arenas_cache = NULL;
499 		narenas_cache = 0;
500 		tsd_arenas_cache_set(tsd, arenas_cache);
501 		tsd_narenas_cache_set(tsd, narenas_cache);
502 	}
503 
504 	/* Allocate cache if it's missing. */
505 	if (arenas_cache == NULL) {
506 		bool *arenas_cache_bypassp = tsd_arenas_cache_bypassp_get(tsd);
507 		assert(ind < narenas_actual || !init_if_missing);
508 		narenas_cache = (ind < narenas_actual) ? narenas_actual : ind+1;
509 
510 		if (!*arenas_cache_bypassp) {
511 			*arenas_cache_bypassp = true;
512 			arenas_cache = (arena_t **)a0malloc(sizeof(arena_t *) *
513 			    narenas_cache);
514 			*arenas_cache_bypassp = false;
515 		} else
516 			arenas_cache = NULL;
517 		if (arenas_cache == NULL) {
518 			/*
519 			 * This function must always tell the truth, even if
520 			 * it's slow, so don't let OOM or recursive allocation
521 			 * avoidance (note arenas_cache_bypass check) get in the
522 			 * way.
523 			 */
524 			if (ind >= narenas_actual)
525 				return (NULL);
526 			malloc_mutex_lock(&arenas_lock);
527 			arena = arenas[ind];
528 			malloc_mutex_unlock(&arenas_lock);
529 			return (arena);
530 		}
531 		tsd_arenas_cache_set(tsd, arenas_cache);
532 		tsd_narenas_cache_set(tsd, narenas_cache);
533 	}
534 
535 	/*
536 	 * Copy to cache.  It's possible that the actual number of arenas has
537 	 * increased since narenas_total_get() was called above, but that causes
538 	 * no correctness issues unless two threads concurrently execute the
539 	 * arenas.extend mallctl, which we trust mallctl synchronization to
540 	 * prevent.
541 	 */
542 	malloc_mutex_lock(&arenas_lock);
543 	memcpy(arenas_cache, arenas, sizeof(arena_t *) * narenas_actual);
544 	malloc_mutex_unlock(&arenas_lock);
545 	if (narenas_cache > narenas_actual) {
546 		memset(&arenas_cache[narenas_actual], 0, sizeof(arena_t *) *
547 		    (narenas_cache - narenas_actual));
548 	}
549 
550 	/* Read the refreshed cache, and init the arena if necessary. */
551 	arena = arenas_cache[ind];
552 	if (init_if_missing && arena == NULL)
553 		arena = arenas_cache[ind] = arena_init(ind);
554 	return (arena);
555 }
556 
557 /* Slow path, called only by arena_choose(). */
558 arena_t *
559 arena_choose_hard(tsd_t *tsd)
560 {
561 	arena_t *ret;
562 
563 	if (narenas_auto > 1) {
564 		unsigned i, choose, first_null;
565 
566 		choose = 0;
567 		first_null = narenas_auto;
568 		malloc_mutex_lock(&arenas_lock);
569 		assert(a0get() != NULL);
570 		for (i = 1; i < narenas_auto; i++) {
571 			if (arenas[i] != NULL) {
572 				/*
573 				 * Choose the first arena that has the lowest
574 				 * number of threads assigned to it.
575 				 */
576 				if (arenas[i]->nthreads <
577 				    arenas[choose]->nthreads)
578 					choose = i;
579 			} else if (first_null == narenas_auto) {
580 				/*
581 				 * Record the index of the first uninitialized
582 				 * arena, in case all extant arenas are in use.
583 				 *
584 				 * NB: It is possible for there to be
585 				 * discontinuities in terms of initialized
586 				 * versus uninitialized arenas, due to the
587 				 * "thread.arena" mallctl.
588 				 */
589 				first_null = i;
590 			}
591 		}
592 
593 		if (arenas[choose]->nthreads == 0
594 		    || first_null == narenas_auto) {
595 			/*
596 			 * Use an unloaded arena, or the least loaded arena if
597 			 * all arenas are already initialized.
598 			 */
599 			ret = arenas[choose];
600 		} else {
601 			/* Initialize a new arena. */
602 			choose = first_null;
603 			ret = arena_init_locked(choose);
604 			if (ret == NULL) {
605 				malloc_mutex_unlock(&arenas_lock);
606 				return (NULL);
607 			}
608 		}
609 		arena_bind_locked(tsd, choose);
610 		malloc_mutex_unlock(&arenas_lock);
611 	} else {
612 		ret = a0get();
613 		arena_bind(tsd, 0);
614 	}
615 
616 	return (ret);
617 }
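/*
 * Example: with narenas_auto == 4, arenas == {a0, a1, NULL, NULL} and thread
 * counts {3, 0, -, -}, the calling thread is bound to a1 (initialized and
 * unloaded).  If a1 instead had a nonzero thread count, the first NULL slot
 * (index 2) would be initialized via arena_init_locked() and used.
 */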
618 
619 void
620 thread_allocated_cleanup(tsd_t *tsd)
621 {
622 
623 	/* Do nothing. */
624 }
625 
626 void
627 thread_deallocated_cleanup(tsd_t *tsd)
628 {
629 
630 	/* Do nothing. */
631 }
632 
633 void
634 arena_cleanup(tsd_t *tsd)
635 {
636 	arena_t *arena;
637 
638 	arena = tsd_arena_get(tsd);
639 	if (arena != NULL)
640 		arena_unbind(tsd, arena->ind);
641 }
642 
643 void
644 arenas_cache_cleanup(tsd_t *tsd)
645 {
646 	arena_t **arenas_cache;
647 
648 	arenas_cache = tsd_arenas_cache_get(tsd);
649 	if (arenas_cache != NULL) {
650 		/* ANDROID change */
651 		/* Make sure that the arena cache cannot be reused. */
652 		bool *arenas_cache_bypassp = tsd_arenas_cache_bypassp_get(tsd);
653 		*arenas_cache_bypassp = true;
654 		tsd_arenas_cache_set(tsd, NULL);
655 		/* End ANDROID change */
656 		a0dalloc(arenas_cache);
657 	}
658 }
659 
660 void
661 narenas_cache_cleanup(tsd_t *tsd)
662 {
663 
664 	/* Do nothing. */
665 }
666 
667 void
668 arenas_cache_bypass_cleanup(tsd_t *tsd)
669 {
670 
671 	/* Do nothing. */
672 }
673 
674 static void
675 stats_print_atexit(void)
676 {
677 
678 	if (config_tcache && config_stats) {
679 		unsigned narenas, i;
680 
681 		/*
682 		 * Merge stats from extant threads.  This is racy, since
683 		 * individual threads do not lock when recording tcache stats
684 		 * events.  As a consequence, the final stats may be slightly
685 		 * out of date by the time they are reported, if other threads
686 		 * continue to allocate.
687 		 */
688 		for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
689 			arena_t *arena = arenas[i];
690 			if (arena != NULL) {
691 				tcache_t *tcache;
692 
693 				/*
694 				 * tcache_stats_merge() locks bins, so if any
695 				 * code is introduced that acquires both arena
696 				 * and bin locks in the opposite order,
697 				 * deadlocks may result.
698 				 */
699 				malloc_mutex_lock(&arena->lock);
700 				ql_foreach(tcache, &arena->tcache_ql, link) {
701 					tcache_stats_merge(tcache, arena);
702 				}
703 				malloc_mutex_unlock(&arena->lock);
704 			}
705 		}
706 	}
707 	je_malloc_stats_print(NULL, NULL, NULL);
708 }
709 
710 /*
711  * End miscellaneous support functions.
712  */
713 /******************************************************************************/
714 /*
715  * Begin initialization functions.
716  */
717 
718 #ifndef JEMALLOC_HAVE_SECURE_GETENV
719 static char *
720 secure_getenv(const char *name)
721 {
722 
723 #  ifdef JEMALLOC_HAVE_ISSETUGID
724 	if (issetugid() != 0)
725 		return (NULL);
726 #  endif
727 	return (getenv(name));
728 }
729 #endif
730 
731 static unsigned
732 malloc_ncpus(void)
733 {
734 	long result;
735 
736 #ifdef _WIN32
737 	SYSTEM_INFO si;
738 	GetSystemInfo(&si);
739 	result = si.dwNumberOfProcessors;
740 #else
741 	result = sysconf(_SC_NPROCESSORS_ONLN);
742 #endif
743 	return ((result == -1) ? 1 : (unsigned)result);
744 }
745 
746 static bool
747 malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
748     char const **v_p, size_t *vlen_p)
749 {
750 	bool accept;
751 	const char *opts = *opts_p;
752 
753 	*k_p = opts;
754 
755 	for (accept = false; !accept;) {
756 		switch (*opts) {
757 		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
758 		case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
759 		case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
760 		case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
761 		case 'Y': case 'Z':
762 		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
763 		case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
764 		case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
765 		case 's': case 't': case 'u': case 'v': case 'w': case 'x':
766 		case 'y': case 'z':
767 		case '0': case '1': case '2': case '3': case '4': case '5':
768 		case '6': case '7': case '8': case '9':
769 		case '_':
770 			opts++;
771 			break;
772 		case ':':
773 			opts++;
774 			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
775 			*v_p = opts;
776 			accept = true;
777 			break;
778 		case '\0':
779 			if (opts != *opts_p) {
780 				malloc_write("<jemalloc>: Conf string ends "
781 				    "with key\n");
782 			}
783 			return (true);
784 		default:
785 			malloc_write("<jemalloc>: Malformed conf string\n");
786 			return (true);
787 		}
788 	}
789 
790 	for (accept = false; !accept;) {
791 		switch (*opts) {
792 		case ',':
793 			opts++;
794 			/*
795 			 * Look ahead one character here, because the next time
796 			 * this function is called, it will assume that end of
797 			 * input has been cleanly reached if no input remains,
798 			 * but we have optimistically already consumed the
799 			 * comma if one exists.
800 			 */
801 			if (*opts == '\0') {
802 				malloc_write("<jemalloc>: Conf string ends "
803 				    "with comma\n");
804 			}
805 			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
806 			accept = true;
807 			break;
808 		case '\0':
809 			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
810 			accept = true;
811 			break;
812 		default:
813 			opts++;
814 			break;
815 		}
816 	}
817 
818 	*opts_p = opts;
819 	return (false);
820 }
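/*
 * Example: with *opts_p == "narenas:4,junk:false", the first call yields
 * k/klen == "narenas"/7 and v/vlen == "4"/1 and advances *opts_p past the
 * comma; the second call yields "junk"/4 and "false"/5 and leaves *opts_p at
 * the terminating '\0', so the caller's while loop exits.
 */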
821 
822 static void
823 malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
824     size_t vlen)
825 {
826 
827 	malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
828 	    (int)vlen, v);
829 }
830 
831 static void
832 malloc_conf_init(void)
833 {
834 	unsigned i;
835 	char buf[PATH_MAX + 1];
836 	const char *opts, *k, *v;
837 	size_t klen, vlen;
838 
839 	/*
840 	 * Automatically configure valgrind before processing options.  The
841 	 * valgrind option remains in jemalloc 3.x for compatibility reasons.
842 	 */
843 	if (config_valgrind) {
844 		in_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false;
845 		if (config_fill && unlikely(in_valgrind)) {
846 			opt_junk = "false";
847 			opt_junk_alloc = false;
848 			opt_junk_free = false;
849 			assert(!opt_zero);
850 			opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
851 			opt_redzone = true;
852 		}
853 		if (config_tcache && unlikely(in_valgrind))
854 			opt_tcache = false;
855 	}
856 
857 #if defined(__ANDROID__)
858 	/* Android only supports compiled options. */
859 	for (i = 0; i < 1; i++) {
860 #else
861 	for (i = 0; i < 3; i++) {
862 #endif
863 		/* Get runtime configuration. */
864 		switch (i) {
865 		case 0:
866 			if (je_malloc_conf != NULL) {
867 				/*
868 				 * Use options that were compiled into the
869 				 * program.
870 				 */
871 				opts = je_malloc_conf;
872 			} else {
873 				/* No configuration specified. */
874 				buf[0] = '\0';
875 				opts = buf;
876 			}
877 			break;
878 		case 1: {
879 			int linklen = 0;
880 #ifndef _WIN32
881 			int saved_errno = errno;
882 			const char *linkname =
883 #  ifdef JEMALLOC_PREFIX
884 			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
885 #  else
886 			    "/etc/malloc.conf"
887 #  endif
888 			    ;
889 
890 			/*
891 			 * Try to use the contents of the "/etc/malloc.conf"
892 			 * symbolic link's name.
893 			 */
894 			linklen = readlink(linkname, buf, sizeof(buf) - 1);
895 			if (linklen == -1) {
896 				/* No configuration specified. */
897 				linklen = 0;
898 				/* Restore errno. */
899 				set_errno(saved_errno);
900 			}
901 #endif
902 			buf[linklen] = '\0';
903 			opts = buf;
904 			break;
905 		} case 2: {
906 			const char *envname =
907 #ifdef JEMALLOC_PREFIX
908 			    JEMALLOC_CPREFIX"MALLOC_CONF"
909 #else
910 			    "MALLOC_CONF"
911 #endif
912 			    ;
913 
914 			if ((opts = secure_getenv(envname)) != NULL) {
915 				/*
916 				 * Do nothing; opts is already initialized to
917 				 * the value of the MALLOC_CONF environment
918 				 * variable.
919 				 */
920 			} else {
921 				/* No configuration specified. */
922 				buf[0] = '\0';
923 				opts = buf;
924 			}
925 			break;
926 		} default:
927 			not_reached();
928 			buf[0] = '\0';
929 			opts = buf;
930 		}
931 
932 		while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v,
933 		    &vlen)) {
934 #define	CONF_MATCH(n)							\
935 	(sizeof(n)-1 == klen && strncmp(n, k, klen) == 0)
936 #define	CONF_MATCH_VALUE(n)						\
937 	(sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0)
938 #define	CONF_HANDLE_BOOL(o, n, cont)					\
939 			if (CONF_MATCH(n)) {				\
940 				if (CONF_MATCH_VALUE("true"))		\
941 					o = true;			\
942 				else if (CONF_MATCH_VALUE("false"))	\
943 					o = false;			\
944 				else {					\
945 					malloc_conf_error(		\
946 					    "Invalid conf value",	\
947 					    k, klen, v, vlen);		\
948 				}					\
949 				if (cont)				\
950 					continue;			\
951 			}
952 #define	CONF_HANDLE_SIZE_T(o, n, min, max, clip)			\
953 			if (CONF_MATCH(n)) {				\
954 				uintmax_t um;				\
955 				char *end;				\
956 									\
957 				set_errno(0);				\
958 				um = malloc_strtoumax(v, &end, 0);	\
959 				if (get_errno() != 0 || (uintptr_t)end -\
960 				    (uintptr_t)v != vlen) {		\
961 					malloc_conf_error(		\
962 					    "Invalid conf value",	\
963 					    k, klen, v, vlen);		\
964 				} else if (clip) {			\
965 					if ((min) != 0 && um < (min))	\
966 						o = (min);		\
967 					else if (um > (max))		\
968 						o = (max);		\
969 					else				\
970 						o = um;			\
971 				} else {				\
972 					if (((min) != 0 && um < (min))	\
973 					    || um > (max)) {		\
974 						malloc_conf_error(	\
975 						    "Out-of-range "	\
976 						    "conf value",	\
977 						    k, klen, v, vlen);	\
978 					} else				\
979 						o = um;			\
980 				}					\
981 				continue;				\
982 			}
983 #define	CONF_HANDLE_SSIZE_T(o, n, min, max)				\
984 			if (CONF_MATCH(n)) {				\
985 				long l;					\
986 				char *end;				\
987 									\
988 				set_errno(0);				\
989 				l = strtol(v, &end, 0);			\
990 				if (get_errno() != 0 || (uintptr_t)end -\
991 				    (uintptr_t)v != vlen) {		\
992 					malloc_conf_error(		\
993 					    "Invalid conf value",	\
994 					    k, klen, v, vlen);		\
995 				} else if (l < (ssize_t)(min) || l >	\
996 				    (ssize_t)(max)) {			\
997 					malloc_conf_error(		\
998 					    "Out-of-range conf value",	\
999 					    k, klen, v, vlen);		\
1000 				} else					\
1001 					o = l;				\
1002 				continue;				\
1003 			}
1004 #define	CONF_HANDLE_CHAR_P(o, n, d)					\
1005 			if (CONF_MATCH(n)) {				\
1006 				size_t cpylen = (vlen <=		\
1007 				    sizeof(o)-1) ? vlen :		\
1008 				    sizeof(o)-1;			\
1009 				strncpy(o, v, cpylen);			\
1010 				o[cpylen] = '\0';			\
1011 				continue;				\
1012 			}
1013 
1014 			CONF_HANDLE_BOOL(opt_abort, "abort", true)
1015 			/*
1016 			 * Chunks always require at least one header page,
1017 			 * as many as 2^(LG_SIZE_CLASS_GROUP+1) data pages, and
1018 			 * possibly an additional page in the presence of
1019 			 * redzones.  In order to simplify options processing,
1020 			 * use a conservative bound that accommodates all these
1021 			 * constraints.
1022 			 */
1023 			CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
1024 			    LG_SIZE_CLASS_GROUP + (config_fill ? 2 : 1),
1025 			    (sizeof(size_t) << 3) - 1, true)
1026 			if (strncmp("dss", k, klen) == 0) {
1027 				int i;
1028 				bool match = false;
1029 				for (i = 0; i < dss_prec_limit; i++) {
1030 					if (strncmp(dss_prec_names[i], v, vlen)
1031 					    == 0) {
1032 						if (chunk_dss_prec_set(i)) {
1033 							malloc_conf_error(
1034 							    "Error setting dss",
1035 							    k, klen, v, vlen);
1036 						} else {
1037 							opt_dss =
1038 							    dss_prec_names[i];
1039 							match = true;
1040 							break;
1041 						}
1042 					}
1043 				}
1044 				if (!match) {
1045 					malloc_conf_error("Invalid conf value",
1046 					    k, klen, v, vlen);
1047 				}
1048 				continue;
1049 			}
1050 			CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1,
1051 			    SIZE_T_MAX, false)
1052 			CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
1053 			    -1, (sizeof(size_t) << 3) - 1)
1054 			CONF_HANDLE_BOOL(opt_stats_print, "stats_print", true)
1055 			if (config_fill) {
1056 				if (CONF_MATCH("junk")) {
1057 					if (CONF_MATCH_VALUE("true")) {
1058 						opt_junk = "true";
1059 						opt_junk_alloc = opt_junk_free =
1060 						    true;
1061 					} else if (CONF_MATCH_VALUE("false")) {
1062 						opt_junk = "false";
1063 						opt_junk_alloc = opt_junk_free =
1064 						    false;
1065 					} else if (CONF_MATCH_VALUE("alloc")) {
1066 						opt_junk = "alloc";
1067 						opt_junk_alloc = true;
1068 						opt_junk_free = false;
1069 					} else if (CONF_MATCH_VALUE("free")) {
1070 						opt_junk = "free";
1071 						opt_junk_alloc = false;
1072 						opt_junk_free = true;
1073 					} else {
1074 						malloc_conf_error(
1075 						    "Invalid conf value", k,
1076 						    klen, v, vlen);
1077 					}
1078 					continue;
1079 				}
1080 				CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
1081 				    0, SIZE_T_MAX, false)
1082 				CONF_HANDLE_BOOL(opt_redzone, "redzone", true)
1083 				CONF_HANDLE_BOOL(opt_zero, "zero", true)
1084 			}
1085 			if (config_utrace) {
1086 				CONF_HANDLE_BOOL(opt_utrace, "utrace", true)
1087 			}
1088 			if (config_xmalloc) {
1089 				CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc", true)
1090 			}
1091 			if (config_tcache) {
1092 				CONF_HANDLE_BOOL(opt_tcache, "tcache",
1093 				    !config_valgrind || !in_valgrind)
1094 				if (CONF_MATCH("tcache")) {
1095 					assert(config_valgrind && in_valgrind);
1096 					if (opt_tcache) {
1097 						opt_tcache = false;
1098 						malloc_conf_error(
1099 						"tcache cannot be enabled "
1100 						"while running inside Valgrind",
1101 						k, klen, v, vlen);
1102 					}
1103 					continue;
1104 				}
1105 				CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
1106 				    "lg_tcache_max", -1,
1107 				    (sizeof(size_t) << 3) - 1)
1108 			}
1109 			if (config_prof) {
1110 				CONF_HANDLE_BOOL(opt_prof, "prof", true)
1111 				CONF_HANDLE_CHAR_P(opt_prof_prefix,
1112 				    "prof_prefix", "jeprof")
1113 				CONF_HANDLE_BOOL(opt_prof_active, "prof_active",
1114 				    true)
1115 				CONF_HANDLE_BOOL(opt_prof_thread_active_init,
1116 				    "prof_thread_active_init", true)
1117 				CONF_HANDLE_SIZE_T(opt_lg_prof_sample,
1118 				    "lg_prof_sample", 0,
1119 				    (sizeof(uint64_t) << 3) - 1, true)
1120 				CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum",
1121 				    true)
1122 				CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
1123 				    "lg_prof_interval", -1,
1124 				    (sizeof(uint64_t) << 3) - 1)
1125 				CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump",
1126 				    true)
1127 				CONF_HANDLE_BOOL(opt_prof_final, "prof_final",
1128 				    true)
1129 				CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak",
1130 				    true)
1131 			}
1132 			malloc_conf_error("Invalid conf pair", k, klen, v,
1133 			    vlen);
1134 #undef CONF_MATCH
1135 #undef CONF_HANDLE_BOOL
1136 #undef CONF_HANDLE_SIZE_T
1137 #undef CONF_HANDLE_SSIZE_T
1138 #undef CONF_HANDLE_CHAR_P
1139 		}
1140 	}
1141 }
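/*
 * Example configuration strings accepted by the loop above (via the
 * je_malloc_conf symbol, or additionally the /etc/malloc.conf symlink and the
 * MALLOC_CONF environment variable on non-Android builds):
 *
 *   "narenas:2,lg_chunk:22,stats_print:true"
 *   "junk:alloc,quarantine:16384,redzone:true"	(requires config_fill)
 *
 * Unrecognized pairs are reported via malloc_conf_error() and otherwise
 * ignored.
 */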
1142 
1143 /* init_lock must be held. */
1144 static bool
1145 malloc_init_hard_needed(void)
1146 {
1147 
1148 	if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state ==
1149 	    malloc_init_recursible)) {
1150 		/*
1151 		 * Another thread initialized the allocator before this one
1152 		 * acquired init_lock, or this thread is the initializing
1153 		 * thread, and it is recursively allocating.
1154 		 */
1155 		return (false);
1156 	}
1157 #ifdef JEMALLOC_THREADED_INIT
1158 	if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) {
1159 		/* Busy-wait until the initializing thread completes. */
1160 		do {
1161 			malloc_mutex_unlock(&init_lock);
1162 			CPU_SPINWAIT;
1163 			malloc_mutex_lock(&init_lock);
1164 		} while (!malloc_initialized());
1165 		return (false);
1166 	}
1167 #endif
1168 	return (true);
1169 }
1170 
1171 /* init_lock must be held. */
1172 static bool
1173 malloc_init_hard_a0_locked(void)
1174 {
1175 
1176 	malloc_initializer = INITIALIZER;
1177 
1178 	if (config_prof)
1179 		prof_boot0();
1180 	malloc_conf_init();
1181 	if (opt_stats_print) {
1182 		/* Print statistics at exit. */
1183 		if (atexit(stats_print_atexit) != 0) {
1184 			malloc_write("<jemalloc>: Error in atexit()\n");
1185 			if (opt_abort)
1186 				abort();
1187 		}
1188 	}
1189 	if (base_boot())
1190 		return (true);
1191 	if (chunk_boot())
1192 		return (true);
1193 	if (ctl_boot())
1194 		return (true);
1195 	if (config_prof)
1196 		prof_boot1();
1197 	if (arena_boot())
1198 		return (true);
1199 	if (config_tcache && tcache_boot())
1200 		return (true);
1201 	if (malloc_mutex_init(&arenas_lock))
1202 		return (true);
1203 	/*
1204 	 * Create enough scaffolding to allow recursive allocation in
1205 	 * malloc_ncpus().
1206 	 */
1207 	narenas_total = narenas_auto = 1;
1208 	arenas = &a0;
1209 	memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
1210 	/*
1211 	 * Initialize one arena here.  The rest are lazily created in
1212 	 * arena_choose_hard().
1213 	 */
1214 	if (arena_init(0) == NULL)
1215 		return (true);
1216 	malloc_init_state = malloc_init_a0_initialized;
1217 	return (false);
1218 }
1219 
1220 static bool
1221 malloc_init_hard_a0(void)
1222 {
1223 	bool ret;
1224 
1225 	malloc_mutex_lock(&init_lock);
1226 	ret = malloc_init_hard_a0_locked();
1227 	malloc_mutex_unlock(&init_lock);
1228 	return (ret);
1229 }
1230 
1231 /*
1232  * Initialize data structures which may trigger recursive allocation.
1233  *
1234  * init_lock must be held.
1235  */
1236 static void
1237 malloc_init_hard_recursible(void)
1238 {
1239 
1240 	malloc_init_state = malloc_init_recursible;
1241 	malloc_mutex_unlock(&init_lock);
1242 
1243 	ncpus = malloc_ncpus();
1244 
1245 #if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
1246     && !defined(_WIN32) && !defined(__native_client__))
1247 	/* LinuxThreads's pthread_atfork() allocates. */
1248 	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
1249 	    jemalloc_postfork_child) != 0) {
1250 		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
1251 		if (opt_abort)
1252 			abort();
1253 	}
1254 #endif
1255 	malloc_mutex_lock(&init_lock);
1256 }
1257 
1258 /* init_lock must be held. */
1259 static bool
1260 malloc_init_hard_finish(void)
1261 {
1262 
1263 	if (mutex_boot())
1264 		return (true);
1265 
1266 	if (opt_narenas == 0) {
1267 		/*
1268 		 * For SMP systems, create more than one arena per CPU by
1269 		 * default.
1270 		 */
1271 		if (ncpus > 1)
1272 			opt_narenas = ncpus << 2;
1273 		else
1274 			opt_narenas = 1;
1275 	}
1276 #if defined(ANDROID_MAX_ARENAS)
1277 	/* Never create more than ANDROID_MAX_ARENAS arenas regardless of ncpus.
1278 	 * Extra arenas use more PSS and are not very useful unless
1279 	 * lots of threads are allocating/freeing at the same time.
1280 	 */
1281 	if (opt_narenas > ANDROID_MAX_ARENAS)
1282 		opt_narenas = ANDROID_MAX_ARENAS;
1283 #endif
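	/*
	 * Example: with ncpus == 4 the default above is opt_narenas == 16
	 * (ncpus << 2), which is then clamped to ANDROID_MAX_ARENAS when that
	 * limit is defined and smaller.
	 */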
1284 	narenas_auto = opt_narenas;
1285 	/*
1286 	 * Make sure that the arenas array can be allocated.  In practice, this
1287 	 * limit is enough to allow the allocator to function, but the ctl
1288 	 * machinery will fail to allocate memory at far lower limits.
1289 	 */
1290 	if (narenas_auto > chunksize / sizeof(arena_t *)) {
1291 		narenas_auto = chunksize / sizeof(arena_t *);
1292 		malloc_printf("<jemalloc>: Reducing narenas to limit (%u)\n",
1293 		    narenas_auto);
1294 	}
1295 	narenas_total = narenas_auto;
1296 
1297 	/* Allocate and initialize arenas. */
1298 	arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas_total);
1299 	if (arenas == NULL)
1300 		return (true);
1301 	/*
1302 	 * Zero the array.  In practice, this should always be pre-zeroed,
1303 	 * since it was just mmap()ed, but let's be sure.
1304 	 */
1305 	memset(arenas, 0, sizeof(arena_t *) * narenas_total);
1306 	/* Copy the pointer to the one arena that was already initialized. */
1307 	arenas[0] = a0;
1308 
1309 	malloc_init_state = malloc_init_initialized;
1310 	return (false);
1311 }
1312 
1313 static bool
1314 malloc_init_hard(void)
1315 {
1316 
1317 	malloc_mutex_lock(&init_lock);
1318 	if (!malloc_init_hard_needed()) {
1319 		malloc_mutex_unlock(&init_lock);
1320 		return (false);
1321 	}
1322 
1323 	if (malloc_init_state != malloc_init_a0_initialized &&
1324 	    malloc_init_hard_a0_locked()) {
1325 		malloc_mutex_unlock(&init_lock);
1326 		return (true);
1327 	}
1328 	if (malloc_tsd_boot0()) {
1329 		malloc_mutex_unlock(&init_lock);
1330 		return (true);
1331 	}
1332 	if (config_prof && prof_boot2()) {
1333 		malloc_mutex_unlock(&init_lock);
1334 		return (true);
1335 	}
1336 
1337 	malloc_init_hard_recursible();
1338 
1339 	if (malloc_init_hard_finish()) {
1340 		malloc_mutex_unlock(&init_lock);
1341 		return (true);
1342 	}
1343 
1344 	malloc_mutex_unlock(&init_lock);
1345 	malloc_tsd_boot1();
1346 	return (false);
1347 }
1348 
1349 /*
1350  * End initialization functions.
1351  */
1352 /******************************************************************************/
1353 /*
1354  * Begin malloc(3)-compatible functions.
1355  */
1356 
1357 static void *
1358 imalloc_prof_sample(tsd_t *tsd, size_t usize, prof_tctx_t *tctx)
1359 {
1360 	void *p;
1361 
1362 	if (tctx == NULL)
1363 		return (NULL);
1364 	if (usize <= SMALL_MAXCLASS) {
1365 		p = imalloc(tsd, LARGE_MINCLASS);
1366 		if (p == NULL)
1367 			return (NULL);
1368 		arena_prof_promoted(p, usize);
1369 	} else
1370 		p = imalloc(tsd, usize);
1371 
1372 	return (p);
1373 }
1374 
1375 JEMALLOC_ALWAYS_INLINE_C void *
1376 imalloc_prof(tsd_t *tsd, size_t usize)
1377 {
1378 	void *p;
1379 	prof_tctx_t *tctx;
1380 
1381 	tctx = prof_alloc_prep(tsd, usize, true);
1382 	if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
1383 		p = imalloc_prof_sample(tsd, usize, tctx);
1384 	else
1385 		p = imalloc(tsd, usize);
1386 	if (unlikely(p == NULL)) {
1387 		prof_alloc_rollback(tsd, tctx, true);
1388 		return (NULL);
1389 	}
1390 	prof_malloc(p, usize, tctx);
1391 
1392 	return (p);
1393 }
1394 
1395 JEMALLOC_ALWAYS_INLINE_C void *
1396 imalloc_body(size_t size, tsd_t **tsd, size_t *usize)
1397 {
1398 
1399 	if (unlikely(malloc_init()))
1400 		return (NULL);
1401 	*tsd = tsd_fetch();
1402 
1403 	if (config_prof && opt_prof) {
1404 		*usize = s2u(size);
1405 		return (imalloc_prof(*tsd, *usize));
1406 	}
1407 
1408 	if (config_stats || (config_valgrind && unlikely(in_valgrind)))
1409 		*usize = s2u(size);
1410 	return (imalloc(*tsd, size));
1411 }
1412 
1413 void *
1414 je_malloc(size_t size)
1415 {
1416 	void *ret;
1417 	tsd_t *tsd;
1418 	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
1419 
1420 	if (size == 0)
1421 		size = 1;
1422 
1423 	ret = imalloc_body(size, &tsd, &usize);
1424 	if (unlikely(ret == NULL)) {
1425 		if (config_xmalloc && unlikely(opt_xmalloc)) {
1426 			malloc_write("<jemalloc>: Error in malloc(): "
1427 			    "out of memory\n");
1428 			abort();
1429 		}
1430 		set_errno(ENOMEM);
1431 	}
1432 	if (config_stats && likely(ret != NULL)) {
1433 		assert(usize == isalloc(ret, config_prof));
1434 		*tsd_thread_allocatedp_get(tsd) += usize;
1435 	}
1436 	UTRACE(0, size, ret);
1437 	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
1438 	return (ret);
1439 }
1440 
1441 static void *
1442 imemalign_prof_sample(tsd_t *tsd, size_t alignment, size_t usize,
1443     prof_tctx_t *tctx)
1444 {
1445 	void *p;
1446 
1447 	if (tctx == NULL)
1448 		return (NULL);
1449 	if (usize <= SMALL_MAXCLASS) {
1450 		assert(sa2u(LARGE_MINCLASS, alignment) == LARGE_MINCLASS);
1451 		p = imalloc(tsd, LARGE_MINCLASS);
1452 		if (p == NULL)
1453 			return (NULL);
1454 		arena_prof_promoted(p, usize);
1455 	} else
1456 		p = ipalloc(tsd, usize, alignment, false);
1457 
1458 	return (p);
1459 }
1460 
1461 JEMALLOC_ALWAYS_INLINE_C void *
1462 imemalign_prof(tsd_t *tsd, size_t alignment, size_t usize)
1463 {
1464 	void *p;
1465 	prof_tctx_t *tctx;
1466 
1467 	tctx = prof_alloc_prep(tsd, usize, true);
1468 	if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
1469 		p = imemalign_prof_sample(tsd, alignment, usize, tctx);
1470 	else
1471 		p = ipalloc(tsd, usize, alignment, false);
1472 	if (unlikely(p == NULL)) {
1473 		prof_alloc_rollback(tsd, tctx, true);
1474 		return (NULL);
1475 	}
1476 	prof_malloc(p, usize, tctx);
1477 
1478 	return (p);
1479 }
1480 
1481 JEMALLOC_ATTR(nonnull(1))
1482 static int
1483 imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
1484 {
1485 	int ret;
1486 	tsd_t *tsd;
1487 	size_t usize;
1488 	void *result;
1489 
1490 	assert(min_alignment != 0);
1491 
1492 	if (unlikely(malloc_init())) {
1493 		result = NULL;
1494 		goto label_oom;
1495 	} else {
1496 		tsd = tsd_fetch();
1497 		if (size == 0)
1498 			size = 1;
1499 
1500 		/* Make sure that alignment is a large enough power of 2. */
1501 		if (unlikely(((alignment - 1) & alignment) != 0
1502 		    || (alignment < min_alignment))) {
1503 			if (config_xmalloc && unlikely(opt_xmalloc)) {
1504 				malloc_write("<jemalloc>: Error allocating "
1505 				    "aligned memory: invalid alignment\n");
1506 				abort();
1507 			}
1508 			result = NULL;
1509 			ret = EINVAL;
1510 			goto label_return;
1511 		}
1512 
1513 		usize = sa2u(size, alignment);
1514 		if (unlikely(usize == 0)) {
1515 			result = NULL;
1516 			goto label_oom;
1517 		}
1518 
1519 		if (config_prof && opt_prof)
1520 			result = imemalign_prof(tsd, alignment, usize);
1521 		else
1522 			result = ipalloc(tsd, usize, alignment, false);
1523 		if (unlikely(result == NULL))
1524 			goto label_oom;
1525 	}
1526 
1527 	*memptr = result;
1528 	ret = 0;
1529 label_return:
1530 	if (config_stats && likely(result != NULL)) {
1531 		assert(usize == isalloc(result, config_prof));
1532 		*tsd_thread_allocatedp_get(tsd) += usize;
1533 	}
1534 	UTRACE(0, size, result);
1535 	return (ret);
1536 label_oom:
1537 	assert(result == NULL);
1538 	if (config_xmalloc && unlikely(opt_xmalloc)) {
1539 		malloc_write("<jemalloc>: Error allocating aligned memory: "
1540 		    "out of memory\n");
1541 		abort();
1542 	}
1543 	ret = ENOMEM;
1544 	goto label_return;
1545 }
1546 
1547 int
1548 je_posix_memalign(void **memptr, size_t alignment, size_t size)
1549 {
1550 	int ret = imemalign(memptr, alignment, size, sizeof(void *));
1551 	JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr,
1552 	    config_prof), false);
1553 	return (ret);
1554 }
1555 
1556 void *
1557 je_aligned_alloc(size_t alignment, size_t size)
1558 {
1559 	void *ret;
1560 	int err;
1561 
1562 	if (unlikely((err = imemalign(&ret, alignment, size, 1)) != 0)) {
1563 		ret = NULL;
1564 		set_errno(err);
1565 	}
1566 	JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof),
1567 	    false);
1568 	return (ret);
1569 }
1570 
1571 static void *
1572 icalloc_prof_sample(tsd_t *tsd, size_t usize, prof_tctx_t *tctx)
1573 {
1574 	void *p;
1575 
1576 	if (tctx == NULL)
1577 		return (NULL);
1578 	if (usize <= SMALL_MAXCLASS) {
1579 		p = icalloc(tsd, LARGE_MINCLASS);
1580 		if (p == NULL)
1581 			return (NULL);
1582 		arena_prof_promoted(p, usize);
1583 	} else
1584 		p = icalloc(tsd, usize);
1585 
1586 	return (p);
1587 }
1588 
1589 JEMALLOC_ALWAYS_INLINE_C void *
1590 icalloc_prof(tsd_t *tsd, size_t usize)
1591 {
1592 	void *p;
1593 	prof_tctx_t *tctx;
1594 
1595 	tctx = prof_alloc_prep(tsd, usize, true);
1596 	if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
1597 		p = icalloc_prof_sample(tsd, usize, tctx);
1598 	else
1599 		p = icalloc(tsd, usize);
1600 	if (unlikely(p == NULL)) {
1601 		prof_alloc_rollback(tsd, tctx, true);
1602 		return (NULL);
1603 	}
1604 	prof_malloc(p, usize, tctx);
1605 
1606 	return (p);
1607 }
1608 
1609 void *
1610 je_calloc(size_t num, size_t size)
1611 {
1612 	void *ret;
1613 	tsd_t *tsd;
1614 	size_t num_size;
1615 	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
1616 
1617 	if (unlikely(malloc_init())) {
1618 		num_size = 0;
1619 		ret = NULL;
1620 		goto label_return;
1621 	}
1622 	tsd = tsd_fetch();
1623 
1624 	num_size = num * size;
1625 	if (unlikely(num_size == 0)) {
1626 		if (num == 0 || size == 0)
1627 			num_size = 1;
1628 		else {
1629 			ret = NULL;
1630 			goto label_return;
1631 		}
1632 	/*
1633 	 * Try to avoid division here.  We know that it isn't possible to
1634 	 * overflow during multiplication if neither operand uses any of the
1635 	 * most significant half of the bits in a size_t.
1636 	 */
1637 	} else if (unlikely(((num | size) & (SIZE_T_MAX << (sizeof(size_t) <<
1638 	    2))) && (num_size / size != num))) {
1639 		/* size_t overflow. */
1640 		ret = NULL;
1641 		goto label_return;
1642 	}
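	/*
	 * Example: with an 8-byte size_t the mask above is SIZE_T_MAX << 32;
	 * if neither num nor size has any of its upper 32 bits set, both are
	 * less than 2^32, the product fits in 64 bits, and the division-based
	 * check is skipped.
	 */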
1643 
1644 	if (config_prof && opt_prof) {
1645 		usize = s2u(num_size);
1646 		ret = icalloc_prof(tsd, usize);
1647 	} else {
1648 		if (config_stats || (config_valgrind && unlikely(in_valgrind)))
1649 			usize = s2u(num_size);
1650 		ret = icalloc(tsd, num_size);
1651 	}
1652 
1653 label_return:
1654 	if (unlikely(ret == NULL)) {
1655 		if (config_xmalloc && unlikely(opt_xmalloc)) {
1656 			malloc_write("<jemalloc>: Error in calloc(): out of "
1657 			    "memory\n");
1658 			abort();
1659 		}
1660 		set_errno(ENOMEM);
1661 	}
1662 	if (config_stats && likely(ret != NULL)) {
1663 		assert(usize == isalloc(ret, config_prof));
1664 		*tsd_thread_allocatedp_get(tsd) += usize;
1665 	}
1666 	UTRACE(0, num_size, ret);
1667 	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true);
1668 	return (ret);
1669 }
1670 
1671 static void *
1672 irealloc_prof_sample(tsd_t *tsd, void *oldptr, size_t old_usize, size_t usize,
1673     prof_tctx_t *tctx)
1674 {
1675 	void *p;
1676 
1677 	if (tctx == NULL)
1678 		return (NULL);
1679 	if (usize <= SMALL_MAXCLASS) {
1680 		p = iralloc(tsd, oldptr, old_usize, LARGE_MINCLASS, 0, false);
1681 		if (p == NULL)
1682 			return (NULL);
1683 		arena_prof_promoted(p, usize);
1684 	} else
1685 		p = iralloc(tsd, oldptr, old_usize, usize, 0, false);
1686 
1687 	return (p);
1688 }
1689 
1690 JEMALLOC_ALWAYS_INLINE_C void *
1691 irealloc_prof(tsd_t *tsd, void *oldptr, size_t old_usize, size_t usize)
1692 {
1693 	void *p;
1694 	prof_tctx_t *old_tctx, *tctx;
1695 
1696 	old_tctx = prof_tctx_get(oldptr);
1697 	tctx = prof_alloc_prep(tsd, usize, true);
1698 	if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
1699 		p = irealloc_prof_sample(tsd, oldptr, old_usize, usize, tctx);
1700 	else
1701 		p = iralloc(tsd, oldptr, old_usize, usize, 0, false);
1702 	if (p == NULL)
1703 		return (NULL);
1704 	prof_realloc(tsd, p, usize, tctx, true, old_usize, old_tctx);
1705 
1706 	return (p);
1707 }
1708 
1709 JEMALLOC_INLINE_C void
1710 ifree(tsd_t *tsd, void *ptr, tcache_t *tcache)
1711 {
1712 	size_t usize;
1713 	UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
1714 
1715 	assert(ptr != NULL);
1716 	assert(malloc_initialized() || IS_INITIALIZER);
1717 
1718 	if (config_prof && opt_prof) {
1719 		usize = isalloc(ptr, config_prof);
1720 		prof_free(tsd, ptr, usize);
1721 	} else if (config_stats || config_valgrind)
1722 		usize = isalloc(ptr, config_prof);
1723 	if (config_stats)
1724 		*tsd_thread_deallocatedp_get(tsd) += usize;
1725 	if (config_valgrind && unlikely(in_valgrind))
1726 		rzsize = p2rz(ptr);
1727 	iqalloc(tsd, ptr, tcache);
1728 	JEMALLOC_VALGRIND_FREE(ptr, rzsize);
1729 }
1730 
1731 JEMALLOC_INLINE_C void
1732 isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache)
1733 {
1734 	UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
1735 
1736 	assert(ptr != NULL);
1737 	assert(malloc_initialized() || IS_INITIALIZER);
1738 
1739 	if (config_prof && opt_prof)
1740 		prof_free(tsd, ptr, usize);
1741 	if (config_stats)
1742 		*tsd_thread_deallocatedp_get(tsd) += usize;
1743 	if (config_valgrind && unlikely(in_valgrind))
1744 		rzsize = p2rz(ptr);
1745 	isqalloc(tsd, ptr, usize, tcache);
1746 	JEMALLOC_VALGRIND_FREE(ptr, rzsize);
1747 }
1748 
1749 void *
1750 je_realloc(void *ptr, size_t size)
1751 {
1752 	void *ret;
1753 	tsd_t *tsd JEMALLOC_CC_SILENCE_INIT(NULL);
1754 	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
1755 	size_t old_usize = 0;
1756 	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
1757 
1758 	if (unlikely(size == 0)) {
1759 		if (ptr != NULL) {
1760 			/* realloc(ptr, 0) is equivalent to free(ptr). */
1761 			UTRACE(ptr, 0, 0);
1762 			tsd = tsd_fetch();
1763 			ifree(tsd, ptr, tcache_get(tsd, false));
1764 			return (NULL);
1765 		}
1766 		size = 1;
1767 	}
1768 
1769 	if (likely(ptr != NULL)) {
1770 		assert(malloc_initialized() || IS_INITIALIZER);
1771 		malloc_thread_init();
1772 		tsd = tsd_fetch();
1773 
1774 		old_usize = isalloc(ptr, config_prof);
1775 		if (config_valgrind && unlikely(in_valgrind))
1776 			old_rzsize = config_prof ? p2rz(ptr) : u2rz(old_usize);
1777 
1778 		if (config_prof && opt_prof) {
1779 			usize = s2u(size);
1780 			ret = irealloc_prof(tsd, ptr, old_usize, usize);
1781 		} else {
1782 			if (config_stats || (config_valgrind &&
1783 			    unlikely(in_valgrind)))
1784 				usize = s2u(size);
1785 			ret = iralloc(tsd, ptr, old_usize, size, 0, false);
1786 		}
1787 	} else {
1788 		/* realloc(NULL, size) is equivalent to malloc(size). */
1789 		ret = imalloc_body(size, &tsd, &usize);
1790 	}
1791 
1792 	if (unlikely(ret == NULL)) {
1793 		if (config_xmalloc && unlikely(opt_xmalloc)) {
1794 			malloc_write("<jemalloc>: Error in realloc(): "
1795 			    "out of memory\n");
1796 			abort();
1797 		}
1798 		set_errno(ENOMEM);
1799 	}
1800 	if (config_stats && likely(ret != NULL)) {
1801 		assert(usize == isalloc(ret, config_prof));
1802 		*tsd_thread_allocatedp_get(tsd) += usize;
1803 		*tsd_thread_deallocatedp_get(tsd) += old_usize;
1804 	}
1805 	UTRACE(ptr, size, ret);
1806 	JEMALLOC_VALGRIND_REALLOC(true, ret, usize, true, ptr, old_usize,
1807 	    old_rzsize, true, false);
1808 	return (ret);
1809 }
1810 
1811 void
1812 je_free(void *ptr)
1813 {
1814 
1815 	UTRACE(ptr, 0, 0);
1816 	if (likely(ptr != NULL)) {
1817 		tsd_t *tsd = tsd_fetch();
1818 		ifree(tsd, ptr, tcache_get(tsd, false));
1819 	}
1820 }
1821 
1822 /*
1823  * End malloc(3)-compatible functions.
1824  */
1825 /******************************************************************************/
1826 /*
1827  * Begin non-standard override functions.
1828  */
1829 
1830 #ifdef JEMALLOC_OVERRIDE_MEMALIGN
1831 void *
1832 je_memalign(size_t alignment, size_t size)
1833 {
1834 	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
1835 	if (unlikely(imemalign(&ret, alignment, size, 1) != 0))
1836 		ret = NULL;
1837 	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
1838 	return (ret);
1839 }
1840 #endif
1841 
1842 #ifdef JEMALLOC_OVERRIDE_VALLOC
1843 void *
1844 je_valloc(size_t size)
1845 {
1846 	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
1847 	if (unlikely(imemalign(&ret, PAGE, size, 1) != 0))
1848 		ret = NULL;
1849 	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
1850 	return (ret);
1851 }
1852 #endif
1853 
1854 /*
1855  * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
1856  * #define je_malloc malloc
1857  */
1858 #define	malloc_is_malloc 1
1859 #define	is_malloc_(a) malloc_is_ ## a
1860 #define	is_malloc(a) is_malloc_(a)
1861 
1862 #if ((is_malloc(je_malloc) == 1) && defined(JEMALLOC_GLIBC_MALLOC_HOOK))
1863 /*
1864  * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
1865  * to inconsistently reference libc's malloc(3)-compatible functions
1866  * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
1867  *
1868  * These definitions interpose hooks in glibc.  The functions are actually
1869  * passed an extra argument for the caller return address, which will be
1870  * ignored.
1871  */
1872 JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free;
1873 JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc;
1874 JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc;
1875 # ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK
1876 JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
1877     je_memalign;
1878 # endif
1879 #endif
1880 
1881 /*
1882  * End non-standard override functions.
1883  */
1884 /******************************************************************************/
1885 /*
1886  * Begin non-standard functions.
1887  */
1888 
1889 JEMALLOC_ALWAYS_INLINE_C bool
1890 imallocx_flags_decode_hard(tsd_t *tsd, size_t size, int flags, size_t *usize,
1891     size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena)
1892 {
1893 
1894 	if ((flags & MALLOCX_LG_ALIGN_MASK) == 0) {
1895 		*alignment = 0;
1896 		*usize = s2u(size);
1897 	} else {
1898 		*alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
1899 		*usize = sa2u(size, *alignment);
1900 	}
1901 	*zero = MALLOCX_ZERO_GET(flags);
1902 	if ((flags & MALLOCX_TCACHE_MASK) != 0) {
1903 		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
1904 			*tcache = NULL;
1905 		else
1906 			*tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
1907 	} else
1908 		*tcache = tcache_get(tsd, true);
1909 	if ((flags & MALLOCX_ARENA_MASK) != 0) {
1910 		unsigned arena_ind = MALLOCX_ARENA_GET(flags);
1911 		*arena = arena_get(tsd, arena_ind, true, true);
1912 		if (unlikely(*arena == NULL))
1913 			return (true);
1914 	} else
1915 		*arena = NULL;
1916 	return (false);
1917 }
1918 
1919 JEMALLOC_ALWAYS_INLINE_C bool
1920 imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize,
1921     size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena)
1922 {
1923 
1924 	if (likely(flags == 0)) {
1925 		*usize = s2u(size);
1926 		assert(*usize != 0);
1927 		*alignment = 0;
1928 		*zero = false;
1929 		*tcache = tcache_get(tsd, true);
1930 		*arena = NULL;
1931 		return (false);
1932 	} else {
1933 		return (imallocx_flags_decode_hard(tsd, size, flags, usize,
1934 		    alignment, zero, tcache, arena));
1935 	}
1936 }
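/*
 * Example (using the public flag macros): je_mallocx(4096, MALLOCX_ALIGN(64) |
 * MALLOCX_ZERO) decodes to alignment == 64, usize == sa2u(4096, 64),
 * zero == true, the calling thread's default tcache, and arena == NULL
 * (automatic choice); adding MALLOCX_TCACHE_NONE would select *tcache == NULL
 * instead.
 */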
1937 
1938 JEMALLOC_ALWAYS_INLINE_C void *
1939 imallocx_flags(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
1940     tcache_t *tcache, arena_t *arena)
1941 {
1942 
1943 	if (alignment != 0)
1944 		return (ipalloct(tsd, usize, alignment, zero, tcache, arena));
1945 	if (zero)
1946 		return (icalloct(tsd, usize, tcache, arena));
1947 	return (imalloct(tsd, usize, tcache, arena));
1948 }
1949 
1950 JEMALLOC_ALWAYS_INLINE_C void *
1951 imallocx_maybe_flags(tsd_t *tsd, size_t size, int flags, size_t usize,
1952     size_t alignment, bool zero, tcache_t *tcache, arena_t *arena)
1953 {
1954 
1955 	if (likely(flags == 0))
1956 		return (imalloc(tsd, size));
1957 	return (imallocx_flags(tsd, usize, alignment, zero, tcache, arena));
1958 }
1959 
1960 static void *
1961 imallocx_prof_sample(tsd_t *tsd, size_t size, int flags, size_t usize,
1962     size_t alignment, bool zero, tcache_t *tcache, arena_t *arena)
1963 {
1964 	void *p;
1965 
1966 	if (usize <= SMALL_MAXCLASS) {
1967 		assert(((alignment == 0) ? s2u(LARGE_MINCLASS) :
1968 		    sa2u(LARGE_MINCLASS, alignment)) == LARGE_MINCLASS);
1969 		p = imalloct(tsd, LARGE_MINCLASS, tcache, arena);
1970 		if (p == NULL)
1971 			return (NULL);
1972 		arena_prof_promoted(p, usize);
1973 	} else {
1974 		p = imallocx_maybe_flags(tsd, size, flags, usize, alignment,
1975 		    zero, tcache, arena);
1976 	}
1977 
1978 	return (p);
1979 }
1980 
1981 JEMALLOC_ALWAYS_INLINE_C void *
1982 imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize)
1983 {
1984 	void *p;
1985 	size_t alignment;
1986 	bool zero;
1987 	tcache_t *tcache;
1988 	arena_t *arena;
1989 	prof_tctx_t *tctx;
1990 
1991 	if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment,
1992 	    &zero, &tcache, &arena)))
1993 		return (NULL);
1994 	tctx = prof_alloc_prep(tsd, *usize, true);
1995 	if (likely((uintptr_t)tctx == (uintptr_t)1U)) {
1996 		p = imallocx_maybe_flags(tsd, size, flags, *usize, alignment,
1997 		    zero, tcache, arena);
1998 	} else if ((uintptr_t)tctx > (uintptr_t)1U) {
1999 		p = imallocx_prof_sample(tsd, size, flags, *usize, alignment,
2000 		    zero, tcache, arena);
2001 	} else
2002 		p = NULL;
2003 	if (unlikely(p == NULL)) {
2004 		prof_alloc_rollback(tsd, tctx, true);
2005 		return (NULL);
2006 	}
2007 	prof_malloc(p, *usize, tctx);
2008 
2009 	return (p);
2010 }
2011 
2012 JEMALLOC_ALWAYS_INLINE_C void *
2013 imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize)
2014 {
2015 	size_t alignment;
2016 	bool zero;
2017 	tcache_t *tcache;
2018 	arena_t *arena;
2019 
2020 	if (likely(flags == 0)) {
2021 		if (config_stats || (config_valgrind && unlikely(in_valgrind)))
2022 			*usize = s2u(size);
2023 		return (imalloc(tsd, size));
2024 	}
2025 
2026 	if (unlikely(imallocx_flags_decode_hard(tsd, size, flags, usize,
2027 	    &alignment, &zero, &tcache, &arena)))
2028 		return (NULL);
2029 	return (imallocx_flags(tsd, *usize, alignment, zero, tcache, arena));
2030 }
2031 
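/*
 * Illustrative call (not taken from this file), using the public flag macros:
 *
 *   void *p = mallocx(4096, MALLOCX_ALIGN(64) | MALLOCX_ZERO);
 *
 * requests at least 4096 bytes, 64-byte aligned and zero-filled.  NULL
 * indicates failure; a size of 0 is invalid (asserted below).
 */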
2032 void *
2033 je_mallocx(size_t size, int flags)
2034 {
2035 	tsd_t *tsd;
2036 	void *p;
2037 	size_t usize;
2038 
2039 	assert(size != 0);
2040 
2041 	if (unlikely(malloc_init()))
2042 		goto label_oom;
2043 	tsd = tsd_fetch();
2044 
2045 	if (config_prof && opt_prof)
2046 		p = imallocx_prof(tsd, size, flags, &usize);
2047 	else
2048 		p = imallocx_no_prof(tsd, size, flags, &usize);
2049 	if (unlikely(p == NULL))
2050 		goto label_oom;
2051 
2052 	if (config_stats) {
2053 		assert(usize == isalloc(p, config_prof));
2054 		*tsd_thread_allocatedp_get(tsd) += usize;
2055 	}
2056 	UTRACE(0, size, p);
2057 	JEMALLOC_VALGRIND_MALLOC(true, p, usize, MALLOCX_ZERO_GET(flags));
2058 	return (p);
2059 label_oom:
2060 	if (config_xmalloc && unlikely(opt_xmalloc)) {
2061 		malloc_write("<jemalloc>: Error in mallocx(): out of memory\n");
2062 		abort();
2063 	}
2064 	UTRACE(0, size, 0);
2065 	return (NULL);
2066 }
2067 
2068 static void *
2069 irallocx_prof_sample(tsd_t *tsd, void *oldptr, size_t old_usize, size_t size,
2070     size_t alignment, size_t usize, bool zero, tcache_t *tcache, arena_t *arena,
2071     prof_tctx_t *tctx)
2072 {
2073 	void *p;
2074 
2075 	if (tctx == NULL)
2076 		return (NULL);
2077 	if (usize <= SMALL_MAXCLASS) {
2078 		p = iralloct(tsd, oldptr, old_usize, LARGE_MINCLASS, alignment,
2079 		    zero, tcache, arena);
2080 		if (p == NULL)
2081 			return (NULL);
2082 		arena_prof_promoted(p, usize);
2083 	} else {
2084 		p = iralloct(tsd, oldptr, old_usize, size, alignment, zero,
2085 		    tcache, arena);
2086 	}
2087 
2088 	return (p);
2089 }
2090 
2091 JEMALLOC_ALWAYS_INLINE_C void *
2092 irallocx_prof(tsd_t *tsd, void *oldptr, size_t old_usize, size_t size,
2093     size_t alignment, size_t *usize, bool zero, tcache_t *tcache,
2094     arena_t *arena)
2095 {
2096 	void *p;
2097 	prof_tctx_t *old_tctx, *tctx;
2098 
2099 	old_tctx = prof_tctx_get(oldptr);
2100 	tctx = prof_alloc_prep(tsd, *usize, false);
2101 	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
2102 		p = irallocx_prof_sample(tsd, oldptr, old_usize, size,
2103 		    alignment, *usize, zero, tcache, arena, tctx);
2104 	} else {
2105 		p = iralloct(tsd, oldptr, old_usize, size, alignment, zero,
2106 		    tcache, arena);
2107 	}
2108 	if (unlikely(p == NULL)) {
2109 		prof_alloc_rollback(tsd, tctx, false);
2110 		return (NULL);
2111 	}
2112 
2113 	if (p == oldptr && alignment != 0) {
2114 		/*
2115 		 * The allocation did not move, so it is possible that the size
2116 		 * class is smaller than would guarantee the requested
2117 		 * alignment, and that the alignment constraint was
2118 		 * serendipitously satisfied.  Additionally, old_usize may not
2119 		 * be the same as the current usize because of in-place large
2120 		 * reallocation.  Therefore, query the actual value of usize.
2121 		 */
2122 		*usize = isalloc(p, config_prof);
2123 	}
2124 	prof_realloc(tsd, p, *usize, tctx, false, old_usize, old_tctx);
2125 
2126 	return (p);
2127 }
2128 
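/*
 * Illustrative call (not taken from this file): grow an existing allocation
 * while preserving an alignment constraint:
 *
 *   p = rallocx(p, 8192, MALLOCX_ALIGN(64));
 *
 * On failure NULL is returned and the original allocation is left intact.
 */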
2129 void *
2130 je_rallocx(void *ptr, size_t size, int flags)
2131 {
2132 	void *p;
2133 	tsd_t *tsd;
2134 	size_t usize;
2135 	size_t old_usize;
2136 	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
2137 	size_t alignment = MALLOCX_ALIGN_GET(flags);
2138 	bool zero = flags & MALLOCX_ZERO;
2139 	arena_t *arena;
2140 	tcache_t *tcache;
2141 
2142 	assert(ptr != NULL);
2143 	assert(size != 0);
2144 	assert(malloc_initialized() || IS_INITIALIZER);
2145 	malloc_thread_init();
2146 	tsd = tsd_fetch();
2147 
2148 	if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
2149 		unsigned arena_ind = MALLOCX_ARENA_GET(flags);
2150 		arena = arena_get(tsd, arena_ind, true, true);
2151 		if (unlikely(arena == NULL))
2152 			goto label_oom;
2153 	} else
2154 		arena = NULL;
2155 
2156 	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2157 		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
2158 			tcache = NULL;
2159 		else
2160 			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2161 	} else
2162 		tcache = tcache_get(tsd, true);
2163 
2164 	old_usize = isalloc(ptr, config_prof);
2165 	if (config_valgrind && unlikely(in_valgrind))
2166 		old_rzsize = u2rz(old_usize);
2167 
2168 	if (config_prof && opt_prof) {
2169 		usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
2170 		assert(usize != 0);
2171 		p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize,
2172 		    zero, tcache, arena);
2173 		if (unlikely(p == NULL))
2174 			goto label_oom;
2175 	} else {
2176 		p = iralloct(tsd, ptr, old_usize, size, alignment, zero,
2177 		     tcache, arena);
2178 		if (unlikely(p == NULL))
2179 			goto label_oom;
2180 		if (config_stats || (config_valgrind && unlikely(in_valgrind)))
2181 			usize = isalloc(p, config_prof);
2182 	}
2183 
2184 	if (config_stats) {
2185 		*tsd_thread_allocatedp_get(tsd) += usize;
2186 		*tsd_thread_deallocatedp_get(tsd) += old_usize;
2187 	}
2188 	UTRACE(ptr, size, p);
2189 	JEMALLOC_VALGRIND_REALLOC(true, p, usize, false, ptr, old_usize,
2190 	    old_rzsize, false, zero);
2191 	return (p);
2192 label_oom:
2193 	if (config_xmalloc && unlikely(opt_xmalloc)) {
2194 		malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
2195 		abort();
2196 	}
2197 	UTRACE(ptr, size, 0);
2198 	return (NULL);
2199 }
2200 
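/*
 * Attempt an in-place resize and return the allocation's resulting usable
 * size; a return value equal to old_usize indicates that nothing changed.
 */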
2201 JEMALLOC_ALWAYS_INLINE_C size_t
2202 ixallocx_helper(void *ptr, size_t old_usize, size_t size, size_t extra,
2203     size_t alignment, bool zero)
2204 {
2205 	size_t usize;
2206 
2207 	if (ixalloc(ptr, old_usize, size, extra, alignment, zero))
2208 		return (old_usize);
2209 	usize = isalloc(ptr, config_prof);
2210 
2211 	return (usize);
2212 }
2213 
2214 static size_t
2215 ixallocx_prof_sample(void *ptr, size_t old_usize, size_t size, size_t extra,
2216     size_t alignment, size_t max_usize, bool zero, prof_tctx_t *tctx)
2217 {
2218 	size_t usize;
2219 
2220 	if (tctx == NULL)
2221 		return (old_usize);
2222 	/* Use minimum usize to determine whether promotion may happen. */
2223 	if (((alignment == 0) ? s2u(size) : sa2u(size, alignment)) <=
2224 	    SMALL_MAXCLASS) {
2225 		if (ixalloc(ptr, old_usize, SMALL_MAXCLASS+1,
2226 		    (SMALL_MAXCLASS+1 >= size+extra) ? 0 : size+extra -
2227 		    (SMALL_MAXCLASS+1), alignment, zero))
2228 			return (old_usize);
2229 		usize = isalloc(ptr, config_prof);
2230 		if (max_usize < LARGE_MINCLASS)
2231 			arena_prof_promoted(ptr, usize);
2232 	} else {
2233 		usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
2234 		    zero);
2235 	}
2236 
2237 	return (usize);
2238 }
2239 
2240 JEMALLOC_ALWAYS_INLINE_C size_t
2241 ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
2242     size_t extra, size_t alignment, bool zero)
2243 {
2244 	size_t max_usize, usize;
2245 	prof_tctx_t *old_tctx, *tctx;
2246 
2247 	old_tctx = prof_tctx_get(ptr);
2248 	/*
2249 	 * usize isn't knowable before ixalloc() returns when extra is non-zero.
2250 	 * Therefore, compute its maximum possible value and use that in
2251 	 * prof_alloc_prep() to decide whether to capture a backtrace.
2252 	 * prof_realloc() will use the actual usize to decide whether to sample.
2253 	 */
2254 	max_usize = (alignment == 0) ? s2u(size+extra) : sa2u(size+extra,
2255 	    alignment);
2256 	tctx = prof_alloc_prep(tsd, max_usize, false);
2257 	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
		usize = ixallocx_prof_sample(ptr, old_usize, size, extra,
		    alignment, max_usize, zero, tctx);
2260 	} else {
2261 		usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
2262 		    zero);
2263 	}
2264 	if (unlikely(usize == old_usize)) {
2265 		prof_alloc_rollback(tsd, tctx, false);
2266 		return (usize);
2267 	}
2268 	prof_realloc(tsd, ptr, usize, tctx, false, old_usize, old_tctx);
2269 
2270 	return (usize);
2271 }
2272 
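/*
 * xallocx() resizes strictly in place and returns the allocation's resulting
 * real size; a return value smaller than the requested size means the request
 * could not be satisfied in place.  Illustrative use (not taken from this
 * file): a caller needing at least 8192 bytes might try
 * xallocx(p, 8192, 0, 0) and fall back to rallocx() if the returned size is
 * still less than 8192.
 */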
2273 size_t
2274 je_xallocx(void *ptr, size_t size, size_t extra, int flags)
2275 {
2276 	tsd_t *tsd;
2277 	size_t usize, old_usize;
2278 	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
2279 	size_t alignment = MALLOCX_ALIGN_GET(flags);
2280 	bool zero = flags & MALLOCX_ZERO;
2281 
2282 	assert(ptr != NULL);
2283 	assert(size != 0);
2284 	assert(SIZE_T_MAX - size >= extra);
2285 	assert(malloc_initialized() || IS_INITIALIZER);
2286 	malloc_thread_init();
2287 	tsd = tsd_fetch();
2288 
2289 	old_usize = isalloc(ptr, config_prof);
2290 	if (config_valgrind && unlikely(in_valgrind))
2291 		old_rzsize = u2rz(old_usize);
2292 
2293 	if (config_prof && opt_prof) {
2294 		usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
2295 		    alignment, zero);
2296 	} else {
2297 		usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
2298 		    zero);
2299 	}
2300 	if (unlikely(usize == old_usize))
2301 		goto label_not_resized;
2302 
2303 	if (config_stats) {
2304 		*tsd_thread_allocatedp_get(tsd) += usize;
2305 		*tsd_thread_deallocatedp_get(tsd) += old_usize;
2306 	}
2307 	JEMALLOC_VALGRIND_REALLOC(false, ptr, usize, false, ptr, old_usize,
2308 	    old_rzsize, false, zero);
2309 label_not_resized:
2310 	UTRACE(ptr, size, ptr);
2311 	return (usize);
2312 }
2313 
2314 size_t
2315 je_sallocx(const void *ptr, int flags)
2316 {
2317 	size_t usize;
2318 
2319 	assert(malloc_initialized() || IS_INITIALIZER);
2320 	malloc_thread_init();
2321 
2322 	if (config_ivsalloc)
2323 		usize = ivsalloc(ptr, config_prof);
2324 	else
2325 		usize = isalloc(ptr, config_prof);
2326 
2327 	return (usize);
2328 }
2329 
2330 void
2331 je_dallocx(void *ptr, int flags)
2332 {
2333 	tsd_t *tsd;
2334 	tcache_t *tcache;
2335 
2336 	assert(ptr != NULL);
2337 	assert(malloc_initialized() || IS_INITIALIZER);
2338 
2339 	tsd = tsd_fetch();
2340 	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2341 		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
2342 			tcache = NULL;
2343 		else
2344 			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2345 	} else
2346 		tcache = tcache_get(tsd, false);
2347 
2348 	UTRACE(ptr, 0, 0);
	ifree(tsd, ptr, tcache);
2350 }
2351 
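/*
 * Compute the usable size that an allocation of the given size would have
 * under the given flags, honoring any MALLOCX_LG_ALIGN() request.
 */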
2352 JEMALLOC_ALWAYS_INLINE_C size_t
2353 inallocx(size_t size, int flags)
2354 {
2355 	size_t usize;
2356 
2357 	if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0))
2358 		usize = s2u(size);
2359 	else
2360 		usize = sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
2361 	assert(usize != 0);
2362 	return (usize);
2363 }
2364 
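/*
 * Sized deallocation: size, adjusted for flags by inallocx(), must match the
 * allocation's actual usable size (asserted below), i.e. the caller should
 * pass the size and flags from the original request, or a value obtained via
 * nallocx()/sallocx().
 */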
2365 void
2366 je_sdallocx(void *ptr, size_t size, int flags)
2367 {
2368 	tsd_t *tsd;
2369 	tcache_t *tcache;
2370 	size_t usize;
2371 
2372 	assert(ptr != NULL);
2373 	assert(malloc_initialized() || IS_INITIALIZER);
2374 	usize = inallocx(size, flags);
2375 	assert(usize == isalloc(ptr, config_prof));
2376 
2377 	tsd = tsd_fetch();
2378 	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2379 		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
2380 			tcache = NULL;
2381 		else
2382 			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2383 	} else
2384 		tcache = tcache_get(tsd, false);
2385 
2386 	UTRACE(ptr, 0, 0);
2387 	isfree(tsd, ptr, usize, tcache);
2388 }
2389 
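/*
 * Report the real size that mallocx(size, flags) would return, without
 * allocating; returns 0 if the allocator cannot be initialized.
 */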
2390 size_t
2391 je_nallocx(size_t size, int flags)
2392 {
2393 
2394 	assert(size != 0);
2395 
2396 	if (unlikely(malloc_init()))
2397 		return (0);
2398 
2399 	return (inallocx(size, flags));
2400 }
2401 
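/*
 * The mallctl*() entry points are thin wrappers around the ctl module.
 * Illustrative read of a statistic (not taken from this file):
 *
 *   size_t allocated, sz = sizeof(allocated);
 *   mallctl("stats.allocated", &allocated, &sz, NULL, 0);
 */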
2402 int
2403 je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
2404     size_t newlen)
2405 {
2406 
2407 	if (unlikely(malloc_init()))
2408 		return (EAGAIN);
2409 
2410 	return (ctl_byname(name, oldp, oldlenp, newp, newlen));
2411 }
2412 
2413 int
2414 je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
2415 {
2416 
2417 	if (unlikely(malloc_init()))
2418 		return (EAGAIN);
2419 
2420 	return (ctl_nametomib(name, mibp, miblenp));
2421 }
2422 
2423 int
2424 je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
2425   void *newp, size_t newlen)
2426 {
2427 
2428 	if (unlikely(malloc_init()))
2429 		return (EAGAIN);
2430 
2431 	return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
2432 }
2433 
2434 void
2435 je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
2436     const char *opts)
2437 {
2438 
2439 	stats_print(write_cb, cbopaque, opts);
2440 }
2441 
2442 size_t
2443 je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
2444 {
2445 	size_t ret;
2446 
2447 	assert(malloc_initialized() || IS_INITIALIZER);
2448 	malloc_thread_init();
2449 
2450 	if (config_ivsalloc)
2451 		ret = ivsalloc(ptr, config_prof);
2452 	else
2453 		ret = (ptr == NULL) ? 0 : isalloc(ptr, config_prof);
2454 
2455 	return (ret);
2456 }
2457 
2458 /*
2459  * End non-standard functions.
2460  */
2461 /******************************************************************************/
2462 /*
2463  * The following functions are used by threading libraries for protection of
2464  * malloc during fork().
2465  */
2466 
2467 /*
2468  * If an application creates a thread before doing any allocation in the main
2469  * thread, then calls fork(2) in the main thread followed by memory allocation
2470  * in the child process, a race can occur that results in deadlock within the
2471  * child: the main thread may have forked while the created thread had
2472  * partially initialized the allocator.  Ordinarily jemalloc prevents
2473  * fork/malloc races via the following functions it registers during
2474  * initialization using pthread_atfork(), but of course that does no good if
2475  * the allocator isn't fully initialized at fork time.  The following library
2476  * constructor is a partial solution to this problem.  It may still be possible
2477  * to trigger the deadlock described above, but doing so would involve forking
2478  * via a library constructor that runs before jemalloc's runs.
2479  */
2480 JEMALLOC_ATTR(constructor)
2481 static void
2482 jemalloc_constructor(void)
2483 {
2484 
2485 	malloc_init();
2486 }
2487 
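/*
 * The hooks below are what initialization registers via pthread_atfork()
 * (see the comment above); roughly, in the non-JEMALLOC_MUTEX_INIT_CB
 * configuration:
 *
 *   pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
 *       jemalloc_postfork_child);
 */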
2488 #ifndef JEMALLOC_MUTEX_INIT_CB
2489 void
2490 jemalloc_prefork(void)
2491 #else
2492 JEMALLOC_EXPORT void
2493 _malloc_prefork(void)
2494 #endif
2495 {
2496 	unsigned i;
2497 
2498 #ifdef JEMALLOC_MUTEX_INIT_CB
2499 	if (!malloc_initialized())
2500 		return;
2501 #endif
2502 	assert(malloc_initialized());
2503 
2504 	/* Acquire all mutexes in a safe order. */
2505 	ctl_prefork();
2506 	prof_prefork();
2507 	malloc_mutex_prefork(&arenas_lock);
2508 	for (i = 0; i < narenas_total; i++) {
2509 		if (arenas[i] != NULL)
2510 			arena_prefork(arenas[i]);
2511 	}
2512 	chunk_prefork();
2513 	base_prefork();
2514 }
2515 
2516 #ifndef JEMALLOC_MUTEX_INIT_CB
2517 void
2518 jemalloc_postfork_parent(void)
2519 #else
2520 JEMALLOC_EXPORT void
2521 _malloc_postfork(void)
2522 #endif
2523 {
2524 	unsigned i;
2525 
2526 #ifdef JEMALLOC_MUTEX_INIT_CB
2527 	if (!malloc_initialized())
2528 		return;
2529 #endif
2530 	assert(malloc_initialized());
2531 
2532 	/* Release all mutexes, now that fork() has completed. */
2533 	base_postfork_parent();
2534 	chunk_postfork_parent();
2535 	for (i = 0; i < narenas_total; i++) {
2536 		if (arenas[i] != NULL)
2537 			arena_postfork_parent(arenas[i]);
2538 	}
2539 	malloc_mutex_postfork_parent(&arenas_lock);
2540 	prof_postfork_parent();
2541 	ctl_postfork_parent();
2542 }
2543 
2544 void
2545 jemalloc_postfork_child(void)
2546 {
2547 	unsigned i;
2548 
2549 	assert(malloc_initialized());
2550 
2551 	/* Release all mutexes, now that fork() has completed. */
2552 	base_postfork_child();
2553 	chunk_postfork_child();
2554 	for (i = 0; i < narenas_total; i++) {
2555 		if (arenas[i] != NULL)
2556 			arena_postfork_child(arenas[i]);
2557 	}
2558 	malloc_mutex_postfork_child(&arenas_lock);
2559 	prof_postfork_child();
2560 	ctl_postfork_child();
2561 }
2562 
2563 /******************************************************************************/
2564 
2565 /* ANDROID change */
/* This implementation follows the same arena access pattern as the
 * arena_stats_merge() function in src/arena.c.
 */
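/*
 * Field mapping: hblkhd accumulates each arena's mapped bytes; uordblks
 * accumulates large and huge allocated bytes plus the bytes backing in-use
 * small regions (reg_size * curregs per bin); fordblks is the difference
 * between the two; usmblks mirrors hblkhd.
 */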
2569 struct mallinfo je_mallinfo() {
2570   struct mallinfo mi;
2571   memset(&mi, 0, sizeof(mi));
2572 
2573   malloc_mutex_lock(&arenas_lock);
2574   for (unsigned i = 0; i < narenas_auto; i++) {
2575     if (arenas[i] != NULL) {
2576       malloc_mutex_lock(&arenas[i]->lock);
2577       mi.hblkhd += arenas[i]->stats.mapped;
2578       mi.uordblks += arenas[i]->stats.allocated_large;
2579       mi.uordblks += arenas[i]->stats.allocated_huge;
2580       malloc_mutex_unlock(&arenas[i]->lock);
2581 
2582       for (unsigned j = 0; j < NBINS; j++) {
2583         arena_bin_t* bin = &arenas[i]->bins[j];
2584 
2585         malloc_mutex_lock(&bin->lock);
2586         mi.uordblks += arena_bin_info[j].reg_size * bin->stats.curregs;
2587         malloc_mutex_unlock(&bin->lock);
2588       }
2589     }
2590   }
2591   malloc_mutex_unlock(&arenas_lock);
2592   mi.fordblks = mi.hblkhd - mi.uordblks;
2593   mi.usmblks = mi.hblkhd;
2594   return mi;
2595 }
2596 
2597 size_t __mallinfo_narenas() {
2598   return narenas_auto;
2599 }
2600 
2601 size_t __mallinfo_nbins() {
2602   return NBINS;
2603 }
2604 
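/*
 * Per-arena query.  The mallinfo fields are repurposed: hblkhd is the arena's
 * mapped bytes, ordblks its large allocated bytes, uordblks its huge
 * allocated bytes, and fsmblks the bytes backing its in-use small regions.
 */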
2605 struct mallinfo __mallinfo_arena_info(size_t aidx) {
2606   struct mallinfo mi;
2607   memset(&mi, 0, sizeof(mi));
2608 
2609   malloc_mutex_lock(&arenas_lock);
2610   if (aidx < narenas_auto) {
2611     if (arenas[aidx] != NULL) {
2612       malloc_mutex_lock(&arenas[aidx]->lock);
2613       mi.hblkhd = arenas[aidx]->stats.mapped;
2614       mi.ordblks = arenas[aidx]->stats.allocated_large;
2615       mi.uordblks = arenas[aidx]->stats.allocated_huge;
2616       malloc_mutex_unlock(&arenas[aidx]->lock);
2617 
2618       for (unsigned j = 0; j < NBINS; j++) {
2619         arena_bin_t* bin = &arenas[aidx]->bins[j];
2620 
2621         malloc_mutex_lock(&bin->lock);
2622         mi.fsmblks += arena_bin_info[j].reg_size * bin->stats.curregs;
2623         malloc_mutex_unlock(&bin->lock);
2624       }
2625     }
2626   }
2627   malloc_mutex_unlock(&arenas_lock);
2628   return mi;
2629 }
2630 
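/*
 * Per-bin query.  The fields are again repurposed: ordblks is the bytes
 * currently allocated from the bin, while uordblks and fordblks carry the
 * bin's cumulative nmalloc and ndalloc counts (counts, not bytes).
 */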
2631 struct mallinfo __mallinfo_bin_info(size_t aidx, size_t bidx) {
2632   struct mallinfo mi;
2633   memset(&mi, 0, sizeof(mi));
2634 
2635   malloc_mutex_lock(&arenas_lock);
2636   if (aidx < narenas_auto && bidx < NBINS) {
2637     if (arenas[aidx] != NULL) {
2638       arena_bin_t* bin = &arenas[aidx]->bins[bidx];
2639 
2640       malloc_mutex_lock(&bin->lock);
2641       mi.ordblks = arena_bin_info[bidx].reg_size * bin->stats.curregs;
2642       mi.uordblks = bin->stats.nmalloc;
2643       mi.fordblks = bin->stats.ndalloc;
2644       malloc_mutex_unlock(&bin->lock);
2645     }
2646   }
2647   malloc_mutex_unlock(&arenas_lock);
2648   return mi;
2649 }
2650 /* End ANDROID change */
2651