/**
 * Copyright (C) 2019 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#define _GNU_SOURCE
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/mman.h>
#include <stdlib.h>
#include <dlfcn.h>
#include <string.h>
#include <unistd.h>
#include <signal.h>
#include "memutils.h"

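/* atexit/SIGSEGV cleanup: re-enables access to every tracked allocation so
 * that process teardown does not fault on pages this tool protected. When a
 * use-after-free window is configured, entries still parked on the free list
 * are unprotected and released as well. */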
void exit_handler(void) {
    size_t page_size = getpagesize();
    for (int i = 0; i < s_mem_map_index; i++) {
        if (NULL != s_mem_map[i].start_ptr) {
            ENABLE_MEM_ACCESS(s_mem_map[i].start_ptr,
                              (s_mem_map[i].num_pages * page_size));
        }
    }
#ifdef CHECK_USE_AFTER_FREE_WITH_WINDOW_SIZE
    for (int i = 0; i < MAX_ENTRIES; i++) {
        if (NULL != s_free_list[i].start_ptr) {
            ENABLE_MEM_ACCESS(s_free_list[i].start_ptr,
                              (s_free_list[i].num_pages * page_size));
            real_free(s_free_list[i].start_ptr);
            memset(&s_free_list[i], 0, sizeof(map_struct_t));
        }
    }
#endif /* CHECK_USE_AFTER_FREE_WITH_WINDOW_SIZE */
}

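/* SIGSEGV handler: undo all page protections, then chain to the handler that
 * was installed before ours so the crash is reported normally. */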
void sigsegv_handler(int signum, siginfo_t *info, void* context) {
    exit_handler();
    (*old_sa.sa_sigaction)(signum, info, context);
}

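/* Installs sigsegv_handler for SIGSEGV and saves the previous disposition in
 * old_sa so it can be chained to. */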
void sighandler_init(void) {
    sigemptyset(&new_sa.sa_mask);
    new_sa.sa_flags = SA_SIGINFO;
    new_sa.sa_sigaction = sigsegv_handler;
    sigaction(SIGSEGV, &new_sa, &old_sa);
}

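/* One-time setup: resolve the real allocator entry points with
 * dlsym(RTLD_NEXT, ...), clear the allocation map, install the SIGSEGV and
 * exit handlers, and mark the tool initialized. If any symbol lookup fails,
 * the function returns without marking the tool initialized. */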
void memutils_init(void) {
    real_memalign = dlsym(RTLD_NEXT, "memalign");
    if (NULL == real_memalign) {
        return;
    }
#ifndef DISABLE_MALLOC_OVERLOADING
    real_calloc = dlsym(RTLD_NEXT, "calloc");
    if (NULL == real_calloc) {
        return;
    }
    real_malloc = dlsym(RTLD_NEXT, "malloc");
    if (NULL == real_malloc) {
        return;
    }
    real_realloc = dlsym(RTLD_NEXT, "realloc");
    if (NULL == real_realloc) {
        return;
    }
#endif /* DISABLE_MALLOC_OVERLOADING */
    real_free = dlsym(RTLD_NEXT, "free");
    if (NULL == real_free) {
        return;
    }
    memset(&s_mem_map, 0, MAX_ENTRIES * sizeof(map_struct_t));
    sighandler_init();
    atexit(exit_handler);
    s_memutils_initialized = 1;
}

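/* Core guarded allocator. Every request is rounded up to whole pages plus one
 * extra page, and the extra page has its access disabled via
 * DISABLE_MEM_ACCESS: it sits just past the end of the allocation under
 * CHECK_OVERFLOW, or just before it under CHECK_UNDERFLOW, so an
 * out-of-bounds access lands in the protected page and raises SIGSEGV.
 * Requests that cannot be tracked (table full, alignment larger than a page,
 * zero sizes) fall through to the real memalign. */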
void *memalign(size_t alignment, size_t size) {
    if (s_memutils_initialized == 0) {
        memutils_init();
    }
#ifdef ENABLE_SELECTIVE_OVERLOADING
    if ((enable_selective_overload & ENABLE_MEMALIGN_CHECK) != ENABLE_MEMALIGN_CHECK) {
        return real_memalign(alignment, size);
    }
#endif /* ENABLE_SELECTIVE_OVERLOADING */
    char* start_ptr;
    char* mem_ptr;
    size_t total_size;
    size_t aligned_size = size;
    size_t num_pages;
    size_t page_size = getpagesize();

    if (s_mem_map_index == MAX_ENTRIES) {
        return real_memalign(alignment, size);
    }

    if (alignment > page_size) {
        return real_memalign(alignment, size);
    }

    if ((0 == page_size) || (0 == alignment) || (0 == size)) {
        return real_memalign(alignment, size);
    }
#ifdef CHECK_OVERFLOW
    /* The user-specified alignment is not respected; it is overridden by
     * MINIMUM_ALIGNMENT. This is required to catch OOB reads whose offset is
     * smaller than the user-specified alignment. MINIMUM_ALIGNMENT helps to
     * avoid bus errors due to misaligned accesses. */
    if (0 != (size % MINIMUM_ALIGNMENT)) {
        aligned_size = size + (MINIMUM_ALIGNMENT - (size % MINIMUM_ALIGNMENT));
    }
#endif

    if (0 != (aligned_size % page_size)) {
        num_pages = (aligned_size / page_size) + 2;
    } else {
        num_pages = (aligned_size / page_size) + 1;
    }

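    /* Resulting layout under CHECK_OVERFLOW (CHECK_UNDERFLOW mirrors it, with
     * the guard page placed first and mem_ptr one page after start_ptr):
     *
     *   start_ptr        mem_ptr                             guard page
     *   |<--- slack ---->|<-------- aligned_size ----------->|<- protected ->|
     *
     * mem_ptr is chosen so the allocation ends exactly at the guard page, so
     * an access past the end lands in the protected page. */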
    total_size = (num_pages * page_size);
    start_ptr = (char *) real_memalign(page_size, total_size);
    /* Bail out if the underlying allocation failed. */
    if (NULL == start_ptr) {
        return NULL;
    }
#ifdef CHECK_OVERFLOW
    mem_ptr = (char *) start_ptr + ((num_pages - 1) * page_size) - aligned_size;
    DISABLE_MEM_ACCESS((start_ptr + ((num_pages - 1) * page_size)), page_size);
#endif /* CHECK_OVERFLOW */
#ifdef CHECK_UNDERFLOW
    mem_ptr = (char *) start_ptr + page_size;
    DISABLE_MEM_ACCESS(start_ptr, page_size);
#endif /* CHECK_UNDERFLOW */
    s_mem_map[s_mem_map_index].start_ptr = start_ptr;
    s_mem_map[s_mem_map_index].mem_ptr = mem_ptr;
    s_mem_map[s_mem_map_index].num_pages = num_pages;
    s_mem_map[s_mem_map_index].mem_size = size;
    s_mem_map_index++;
    memset(mem_ptr, INITIAL_VAL, size);
    return mem_ptr;
}

#ifndef DISABLE_MALLOC_OVERLOADING
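/* malloc is routed through the guarded memalign above, using
 * MINIMUM_ALIGNMENT so small out-of-bounds offsets are still caught. */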
void *malloc(size_t size) {
    if (s_memutils_initialized == 0) {
        memutils_init();
    }
#ifdef ENABLE_SELECTIVE_OVERLOADING
    if ((enable_selective_overload & ENABLE_MALLOC_CHECK) != ENABLE_MALLOC_CHECK) {
        return real_malloc(size);
    }
#endif /* ENABLE_SELECTIVE_OVERLOADING */
    return memalign(MINIMUM_ALIGNMENT, size);
}

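/* calloc allocates through the guarded memalign and then zeroes the block,
 * matching calloc's zero-initialization contract. Note that nitems * size is
 * not checked for multiplication overflow here. */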
void *calloc(size_t nitems, size_t size) {
    if (s_memutils_initialized == 0) {
        memutils_init();
    }
#ifdef ENABLE_SELECTIVE_OVERLOADING
    if ((enable_selective_overload & ENABLE_CALLOC_CHECK) != ENABLE_CALLOC_CHECK) {
        return real_calloc(nitems, size);
    }
#endif /* ENABLE_SELECTIVE_OVERLOADING */
    void *ptr = memalign(sizeof(size_t), (nitems * size));
    if (ptr)
        memset(ptr, 0, (nitems * size));
    return ptr;
}

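/* realloc: if the pointer is one of our tracked allocations, emulate realloc
 * with a fresh guarded allocation, a copy of min(old size, new size) bytes,
 * and a free of the old block; otherwise defer to the real realloc. */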
void *realloc(void *ptr, size_t size) {
    if (s_memutils_initialized == 0) {
        memutils_init();
    }
#ifdef ENABLE_SELECTIVE_OVERLOADING
    if ((enable_selective_overload & ENABLE_REALLOC_CHECK) != ENABLE_REALLOC_CHECK) {
        return real_realloc(ptr, size);
    }
#endif /* ENABLE_SELECTIVE_OVERLOADING */
    if (ptr != NULL) {
        int i = 0;
        for (i = 0; i < s_mem_map_index; i++) {
            if (ptr == s_mem_map[i].mem_ptr) {
                void* temp = malloc(size);
                if (temp == NULL) {
                    return NULL;
                }
                if (s_mem_map[i].mem_size > size) {
                    memcpy(temp, ptr, size);
                } else {
                    memcpy(temp, ptr, s_mem_map[i].mem_size);
                }
                free(s_mem_map[i].mem_ptr);
                return temp;
            }
        }
    }
    return real_realloc(ptr, size);
}
#endif /* DISABLE_MALLOC_OVERLOADING */

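/* free: for tracked allocations, either park the block on a protected free
 * list (CHECK_USE_AFTER_FREE_WITH_WINDOW_SIZE) so later accesses fault, or
 * unprotect and release it immediately. When the free list grows past the
 * configured window size, the oldest entries are unprotected and actually
 * released. Untracked pointers go straight to the real free. */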
void free(void *ptr) {
    if (s_memutils_initialized == 0) {
        memutils_init();
    }
#ifdef ENABLE_SELECTIVE_OVERLOADING
    if ((enable_selective_overload & ENABLE_FREE_CHECK) != ENABLE_FREE_CHECK) {
        return real_free(ptr);
    }
#endif /* ENABLE_SELECTIVE_OVERLOADING */
    if (ptr != NULL) {
        int i = 0;
        size_t page_size = getpagesize();
        for (i = 0; i < s_mem_map_index; i++) {
            if (ptr == s_mem_map[i].mem_ptr) {
#ifdef CHECK_USE_AFTER_FREE_WITH_WINDOW_SIZE
                s_free_list[s_free_write_index].start_ptr =
                        s_mem_map[i].start_ptr;
                s_free_list[s_free_write_index].mem_ptr = s_mem_map[i].mem_ptr;
                s_free_list[s_free_write_index].num_pages =
                        s_mem_map[i].num_pages;
                s_free_list[s_free_write_index].mem_size = s_mem_map[i].mem_size;
                s_free_write_index++;
                s_free_list_size += s_mem_map[i].mem_size;
                DISABLE_MEM_ACCESS(s_mem_map[i].start_ptr,
                                   (s_mem_map[i].num_pages * page_size));
                memset(&s_mem_map[i], 0, sizeof(map_struct_t));
                while (s_free_list_size > CHECK_USE_AFTER_FREE_WITH_WINDOW_SIZE) {
                    ENABLE_MEM_ACCESS(
                            s_free_list[s_free_read_index].start_ptr,
                            (s_free_list[s_free_read_index].num_pages * page_size));
                    real_free(s_free_list[s_free_read_index].start_ptr);
                    s_free_list_size -= s_free_list[s_free_read_index].mem_size;
                    memset(&s_free_list[s_free_read_index], 0,
                           sizeof(map_struct_t));
                    s_free_read_index++;
                    if ((s_free_read_index == MAX_ENTRIES)
                            || (s_free_read_index >= s_free_write_index)) {
                        break;
                    }
                }
                return;
#else
                ENABLE_MEM_ACCESS(s_mem_map[i].start_ptr,
                                  (s_mem_map[i].num_pages * page_size));
                real_free(s_mem_map[i].start_ptr);
                memset(&s_mem_map[i], 0, sizeof(map_struct_t));
                return;
#endif /* CHECK_USE_AFTER_FREE_WITH_WINDOW_SIZE */
            }
        }
    }
    real_free(ptr);
    return;
}