/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

/* Guards je_malloc_disable()/je_malloc_enable() and the saved tcache state. */
static pthread_mutex_t malloc_disabled_lock = PTHREAD_MUTEX_INITIALIZER;
/* Tcache state saved by je_malloc_disable() and restored by je_malloc_enable(). */
static bool malloc_disabled_tcache;

static void je_iterate_chunk(arena_chunk_t *chunk,
    void (*callback)(uintptr_t ptr, size_t size, void* arg), void* arg);
static void je_iterate_small(arena_run_t *run,
    void (*callback)(uintptr_t ptr, size_t size, void* arg), void* arg);

/* je_iterate calls callback for each allocation found in the memory region
 * [base, base+size).  base is rounded down to a multiple of the jemalloc
 * chunk size, and base+size is rounded up to a multiple of the chunk size.
 * If no memory managed by jemalloc is found in the requested region,
 * je_iterate returns -1 and sets errno to EINVAL.
 *
 * je_iterate must be called when no allocations are in progress, either
 * when single-threaded (for example just after a fork), or between
 * jemalloc_prefork() and jemalloc_postfork_parent().  The callback must
 * not attempt to allocate with jemalloc.
 */
int je_iterate(uintptr_t base, size_t size,
    void (*callback)(uintptr_t ptr, size_t size, void* arg), void* arg) {

  int error = EINVAL;
  uintptr_t ptr = (uintptr_t)CHUNK_ADDR2BASE(base);
  uintptr_t end = CHUNK_CEILING(base + size);

  while (ptr < end) {
    assert(ptr == (uintptr_t)CHUNK_ADDR2BASE(ptr));
    extent_node_t *node;

    node = chunk_lookup((void *)ptr, false);
    if (node == NULL) {
      ptr += chunksize;
      continue;
    }

    assert(extent_node_achunk_get(node) ||
        (uintptr_t)extent_node_addr_get(node) == ptr);

    error = 0;
    if (extent_node_achunk_get(node)) {
      /* Chunk */
      arena_chunk_t *chunk = (arena_chunk_t *)ptr;
      ptr += chunksize;

      if (&chunk->node != node) {
          /* Empty retained chunk */
          continue;
      }

      je_iterate_chunk(chunk, callback, arg);
    } else if ((uintptr_t)extent_node_addr_get(node) == ptr) {
      /* Huge allocation */
      callback(ptr, extent_node_size_get(node), arg);
      ptr = CHUNK_CEILING(ptr + extent_node_size_get(node));
    }
  }

  if (error) {
    set_errno(error);
    return -1;
  }

  return 0;
}
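
/* Illustrative sketch (not part of the original file): a je_iterate callback
 * that tallies live allocations.  live_total and count_cb are hypothetical
 * names.  The caller must already have quiesced the allocator as described
 * above (single-threaded, or between jemalloc_prefork() and
 * jemalloc_postfork_parent()), and the callback must not call into jemalloc.
 *
 *   struct live_total { size_t bytes; size_t count; };
 *
 *   static void count_cb(uintptr_t ptr, size_t size, void* arg) {
 *     struct live_total* t = (struct live_total*)arg;
 *     (void)ptr;
 *     t->bytes += size;   // allocation size reported by je_iterate
 *     t->count += 1;
 *   }
 *
 *   // je_iterate(base, len, count_cb, &t) then returns 0 if any jemalloc
 *   // memory was found in [base, base+len), or -1 with errno == EINVAL.
 */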

/* Iterate over a valid jemalloc chunk, calling callback for each large
 * allocation run, and calling je_iterate_small for each small allocation run */
static void je_iterate_chunk(arena_chunk_t *chunk,
    void (*callback)(uintptr_t ptr, size_t size, void* arg), void* arg) {
  size_t pageind;

  pageind = map_bias;

  while (pageind < chunk_npages) {
    size_t mapbits;
    size_t size;

    mapbits = arena_mapbits_get(chunk, pageind);
    if (!arena_mapbits_allocated_get(chunk, pageind)) {
      /* Unallocated run */
      size = arena_mapbits_unallocated_size_get(chunk, pageind);
    } else if (arena_mapbits_large_get(chunk, pageind)) {
      /* Large allocation run */
      void *rpages;

      size = arena_mapbits_large_size_get(chunk, pageind);
      rpages = arena_miscelm_to_rpages(arena_miscelm_get(chunk, pageind));
      callback((uintptr_t)rpages, size, arg);
    } else {
      /* Run of small allocations */
      szind_t binind;
      arena_run_t *run;

      assert(arena_mapbits_small_runind_get(chunk, pageind) == pageind);
      binind = arena_mapbits_binind_get(chunk, pageind);
      run = &arena_miscelm_get(chunk, pageind)->run;
      assert(run->binind == binind);
      size = arena_bin_info[binind].run_size;

      je_iterate_small(run, callback, arg);
    }
    assert(size == PAGE_CEILING(size));
    assert(size > 0);
    pageind += size >> LG_PAGE;
  }
}

/* Iterate over a valid jemalloc small allocation run, calling callback for each
 * active allocation. */
static void je_iterate_small(arena_run_t *run,
    void (*callback)(uintptr_t ptr, size_t size, void* arg), void* arg) {
  szind_t binind;
  const arena_bin_info_t *bin_info;
  uint32_t regind;
  uintptr_t ptr;
  void *rpages;

  binind = run->binind;
  bin_info = &arena_bin_info[binind];
  rpages = arena_miscelm_to_rpages(arena_run_to_miscelm(run));
  ptr = (uintptr_t)rpages + bin_info->reg0_offset;

  for (regind = 0; regind < bin_info->nregs; regind++) {
    if (bitmap_get(run->bitmap, &bin_info->bitmap_info, regind)) {
      callback(ptr, bin_info->reg_size, arg);
    }
    ptr += bin_info->reg_interval;
  }
}

/* pthread_atfork handlers: hold malloc_disabled_lock across fork so that a
 * forked child does not inherit the lock in a locked state. */
static void je_malloc_disable_prefork() {
  pthread_mutex_lock(&malloc_disabled_lock);
}

static void je_malloc_disable_postfork_parent() {
  pthread_mutex_unlock(&malloc_disabled_lock);
}

static void je_malloc_disable_postfork_child() {
  /* The child is single-threaded; reinitialize rather than unlock. */
  pthread_mutex_init(&malloc_disabled_lock, NULL);
}

/* Registers the fork handlers above via pthread_atfork. */
void je_malloc_disable_init() {
  if (pthread_atfork(je_malloc_disable_prefork,
      je_malloc_disable_postfork_parent, je_malloc_disable_postfork_child) != 0) {
    malloc_write("<jemalloc>: Error in pthread_atfork()\n");
    if (opt_abort)
      abort();
  }
}

void je_malloc_disable() {
  static pthread_once_t once_control = PTHREAD_ONCE_INIT;
  pthread_once(&once_control, je_malloc_disable_init);

  pthread_mutex_lock(&malloc_disabled_lock);
  bool new_tcache = false;
  size_t old_len = sizeof(malloc_disabled_tcache);
  /* Disable the thread cache, remembering its previous state, then acquire
   * all jemalloc mutexes so the heap can be walked safely. */
  je_mallctl("thread.tcache.enabled",
      &malloc_disabled_tcache, &old_len,
      &new_tcache, sizeof(new_tcache));
  jemalloc_prefork();
}

void je_malloc_enable() {
  /* Release the jemalloc mutexes and restore the thread cache state saved by
   * je_malloc_disable(). */
  jemalloc_postfork_parent();
  if (malloc_disabled_tcache) {
    je_mallctl("thread.tcache.enabled", NULL, NULL,
        &malloc_disabled_tcache, sizeof(malloc_disabled_tcache));
  }
  pthread_mutex_unlock(&malloc_disabled_lock);
}
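
/* Illustrative sketch (not part of the original file): je_malloc_disable() and
 * je_malloc_enable() are intended to bracket a heap walk such as je_iterate.
 * walk_range is a hypothetical helper name.
 *
 *   static int walk_range(uintptr_t base, size_t len,
 *       void (*cb)(uintptr_t, size_t, void*), void* arg) {
 *     je_malloc_disable();               // disable tcache, take all mutexes
 *     int rc = je_iterate(base, len, cb, arg);
 *     je_malloc_enable();                // restore tcache, release mutexes
 *     return rc;
 *   }
 */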