/*
 * Copyright (C) 2013 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <gtest/gtest.h>

#include <elf.h>
#include <limits.h>
#include <malloc.h>
#include <pthread.h>
#include <semaphore.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/auxv.h>
#include <sys/cdefs.h>
#include <sys/prctl.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

#include <algorithm>
#include <atomic>
#include <functional>
#include <string>
#include <thread>
#include <unordered_map>
#include <utility>
#include <vector>

#include <tinyxml2.h>

#include <android-base/file.h>
#include <android-base/test_utils.h>

#include "utils.h"

#if defined(__BIONIC__)

#include "SignalUtils.h"
#include "dlext_private.h"

#include "platform/bionic/malloc.h"
#include "platform/bionic/mte.h"
#include "platform/bionic/reserved_signals.h"
#include "private/bionic_config.h"

#define HAVE_REALLOCARRAY 1

#elif defined(__GLIBC__)

#define HAVE_REALLOCARRAY __GLIBC_PREREQ(2, 26)

#elif defined(ANDROID_HOST_MUSL)

#define HAVE_REALLOCARRAY 1

#endif
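
// On any other platform HAVE_REALLOCARRAY is left undefined, which evaluates
// to 0 in the #if checks below, so the reallocarray tests are skipped there.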

TEST(malloc, malloc_std) {
  // Simple malloc test.
  void *ptr = malloc(100);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(100U, malloc_usable_size(ptr));
  free(ptr);
}

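// A SIZE_MAX allocation can never be satisfied: malloc must return nullptr
// and set errno to ENOMEM rather than overflowing internally.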
TEST(malloc, malloc_overflow) {
  SKIP_WITH_HWASAN;
  errno = 0;
  ASSERT_EQ(nullptr, malloc(SIZE_MAX));
  ASSERT_ERRNO(ENOMEM);
}

TEST(malloc, calloc_std) {
  // Simple calloc test.
  size_t alloc_len = 100;
  char *ptr = (char *)calloc(1, alloc_len);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(alloc_len, malloc_usable_size(ptr));
  for (size_t i = 0; i < alloc_len; i++) {
    ASSERT_EQ(0, ptr[i]);
  }
  free(ptr);
}

TEST(malloc, calloc_mem_init_disabled) {
#if defined(__BIONIC__)
  // calloc should still zero memory even if mem-init is disabled.
  // With jemalloc the mallopt calls will fail, but that shouldn't affect
  // the execution of the test.
  mallopt(M_THREAD_DISABLE_MEM_INIT, 1);
  size_t alloc_len = 100;
  char *ptr = reinterpret_cast<char*>(calloc(1, alloc_len));
  for (size_t i = 0; i < alloc_len; i++) {
    ASSERT_EQ(0, ptr[i]);
  }
  free(ptr);
  mallopt(M_THREAD_DISABLE_MEM_INIT, 0);
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}

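// A count of -1 converts to SIZE_MAX, so the internal count * size
// multiplication overflows and calloc must fail.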
TEST(malloc, calloc_illegal) {
  SKIP_WITH_HWASAN;
  errno = 0;
  ASSERT_EQ(nullptr, calloc(-1, 100));
  ASSERT_ERRNO(ENOMEM);
}

TEST(malloc, calloc_overflow) {
  SKIP_WITH_HWASAN;
  errno = 0;
  ASSERT_EQ(nullptr, calloc(1, SIZE_MAX));
  ASSERT_ERRNO(ENOMEM);
  errno = 0;
  ASSERT_EQ(nullptr, calloc(SIZE_MAX, SIZE_MAX));
  ASSERT_ERRNO(ENOMEM);
  errno = 0;
  ASSERT_EQ(nullptr, calloc(2, SIZE_MAX));
  ASSERT_ERRNO(ENOMEM);
  errno = 0;
  ASSERT_EQ(nullptr, calloc(SIZE_MAX, 2));
  ASSERT_ERRNO(ENOMEM);
}

TEST(malloc, memalign_multiple) {
  SKIP_WITH_HWASAN << "hwasan requires power of 2 alignment";
  // Memalign test where the alignment is any value.
  for (size_t i = 0; i <= 12; i++) {
    for (size_t alignment = 1 << i; alignment < (1U << (i+1)); alignment++) {
      char *ptr = reinterpret_cast<char*>(memalign(alignment, 100));
      ASSERT_TRUE(ptr != nullptr) << "Failed at alignment " << alignment;
      ASSERT_LE(100U, malloc_usable_size(ptr)) << "Failed at alignment " << alignment;
      ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(ptr) % ((1U << i)))
          << "Failed at alignment " << alignment;
      free(ptr);
    }
  }
}

TEST(malloc, memalign_overflow) {
  SKIP_WITH_HWASAN;
  ASSERT_EQ(nullptr, memalign(4096, SIZE_MAX));
}

TEST(malloc, memalign_non_power2) {
  SKIP_WITH_HWASAN;
  void* ptr;
  for (size_t align = 0; align <= 256; align++) {
    ptr = memalign(align, 1024);
    ASSERT_TRUE(ptr != nullptr) << "Failed at align " << align;
    free(ptr);
  }
}

TEST(malloc, memalign_realloc) {
  // Memalign and then realloc the pointer a couple of times.
  for (size_t alignment = 1; alignment <= 4096; alignment <<= 1) {
    char *ptr = (char*)memalign(alignment, 100);
    ASSERT_TRUE(ptr != nullptr);
    ASSERT_LE(100U, malloc_usable_size(ptr));
    ASSERT_EQ(0U, (intptr_t)ptr % alignment);
    memset(ptr, 0x23, 100);

    ptr = (char*)realloc(ptr, 200);
    ASSERT_TRUE(ptr != nullptr);
    ASSERT_LE(200U, malloc_usable_size(ptr));
    for (size_t i = 0; i < 100; i++) {
      ASSERT_EQ(0x23, ptr[i]);
    }
    memset(ptr, 0x45, 200);

    ptr = (char*)realloc(ptr, 300);
    ASSERT_TRUE(ptr != nullptr);
    ASSERT_LE(300U, malloc_usable_size(ptr));
    for (size_t i = 0; i < 200; i++) {
      ASSERT_EQ(0x45, ptr[i]);
    }
    memset(ptr, 0x67, 300);

    ptr = (char*)realloc(ptr, 250);
    ASSERT_TRUE(ptr != nullptr);
    ASSERT_LE(250U, malloc_usable_size(ptr));
    for (size_t i = 0; i < 250; i++) {
      ASSERT_EQ(0x67, ptr[i]);
    }
    free(ptr);
  }
}

TEST(malloc, malloc_realloc_larger) {
  // Realloc to a larger size, malloc is used for the original allocation.
  char *ptr = (char *)malloc(100);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(100U, malloc_usable_size(ptr));
  memset(ptr, 67, 100);

  ptr = (char *)realloc(ptr, 200);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(200U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 100; i++) {
    ASSERT_EQ(67, ptr[i]);
  }
  free(ptr);
}

TEST(malloc, malloc_realloc_smaller) {
  // Realloc to a smaller size, malloc is used for the original allocation.
  char *ptr = (char *)malloc(200);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(200U, malloc_usable_size(ptr));
  memset(ptr, 67, 200);

  ptr = (char *)realloc(ptr, 100);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(100U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 100; i++) {
    ASSERT_EQ(67, ptr[i]);
  }
  free(ptr);
}

TEST(malloc, malloc_multiple_realloc) {
  // Multiple reallocs, malloc is used for the original allocation.
  char *ptr = (char *)malloc(200);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(200U, malloc_usable_size(ptr));
  memset(ptr, 0x23, 200);

  ptr = (char *)realloc(ptr, 100);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(100U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 100; i++) {
    ASSERT_EQ(0x23, ptr[i]);
  }

  ptr = (char*)realloc(ptr, 50);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(50U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 50; i++) {
    ASSERT_EQ(0x23, ptr[i]);
  }

  ptr = (char*)realloc(ptr, 150);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(150U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 50; i++) {
    ASSERT_EQ(0x23, ptr[i]);
  }
  memset(ptr, 0x23, 150);

  ptr = (char*)realloc(ptr, 425);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(425U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 150; i++) {
    ASSERT_EQ(0x23, ptr[i]);
  }
  free(ptr);
}

TEST(malloc, calloc_realloc_larger) {
  // Realloc to a larger size, calloc is used for the original allocation.
  char *ptr = (char *)calloc(1, 100);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(100U, malloc_usable_size(ptr));

  ptr = (char *)realloc(ptr, 200);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(200U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 100; i++) {
    ASSERT_EQ(0, ptr[i]);
  }
  free(ptr);
}

TEST(malloc, calloc_realloc_smaller) {
  // Realloc to a smaller size, calloc is used for the original allocation.
  char *ptr = (char *)calloc(1, 200);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(200U, malloc_usable_size(ptr));

  ptr = (char *)realloc(ptr, 100);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(100U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 100; i++) {
    ASSERT_EQ(0, ptr[i]);
  }
  free(ptr);
}

TEST(malloc, calloc_multiple_realloc) {
  // Multiple reallocs, calloc is used for the original allocation.
  char *ptr = (char *)calloc(1, 200);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(200U, malloc_usable_size(ptr));

  ptr = (char *)realloc(ptr, 100);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(100U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 100; i++) {
    ASSERT_EQ(0, ptr[i]);
  }

  ptr = (char*)realloc(ptr, 50);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(50U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 50; i++) {
    ASSERT_EQ(0, ptr[i]);
  }

  ptr = (char*)realloc(ptr, 150);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(150U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 50; i++) {
    ASSERT_EQ(0, ptr[i]);
  }
  memset(ptr, 0, 150);

  ptr = (char*)realloc(ptr, 425);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_LE(425U, malloc_usable_size(ptr));
  for (size_t i = 0; i < 150; i++) {
    ASSERT_EQ(0, ptr[i]);
  }
  free(ptr);
}

TEST(malloc, realloc_overflow) {
  SKIP_WITH_HWASAN;
  errno = 0;
  ASSERT_EQ(nullptr, realloc(nullptr, SIZE_MAX));
  ASSERT_ERRNO(ENOMEM);
  void* ptr = malloc(100);
  ASSERT_TRUE(ptr != nullptr);
  errno = 0;
  ASSERT_EQ(nullptr, realloc(ptr, SIZE_MAX));
  ASSERT_ERRNO(ENOMEM);
  free(ptr);
}

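// Where HAVE_DEPRECATED_MALLOC_FUNCS is set, the deprecated pvalloc/valloc
// symbols still exist but (presumably) are no longer declared in the headers,
// so declare them manually here.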
#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
extern "C" void* pvalloc(size_t);
extern "C" void* valloc(size_t);
#endif

TEST(malloc, pvalloc_std) {
#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
  size_t pagesize = sysconf(_SC_PAGESIZE);
  void* ptr = pvalloc(100);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_TRUE((reinterpret_cast<uintptr_t>(ptr) & (pagesize-1)) == 0);
  ASSERT_LE(pagesize, malloc_usable_size(ptr));
  free(ptr);
#else
  GTEST_SKIP() << "pvalloc not supported.";
#endif
}

TEST(malloc, pvalloc_overflow) {
#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
  ASSERT_EQ(nullptr, pvalloc(SIZE_MAX));
#else
  GTEST_SKIP() << "pvalloc not supported.";
#endif
}

TEST(malloc, valloc_std) {
#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
  size_t pagesize = sysconf(_SC_PAGESIZE);
  void* ptr = valloc(100);
  ASSERT_TRUE(ptr != nullptr);
  ASSERT_TRUE((reinterpret_cast<uintptr_t>(ptr) & (pagesize-1)) == 0);
  free(ptr);
#else
  GTEST_SKIP() << "valloc not supported.";
#endif
}

TEST(malloc, valloc_overflow) {
#if defined(HAVE_DEPRECATED_MALLOC_FUNCS)
  ASSERT_EQ(nullptr, valloc(SIZE_MAX));
#else
  GTEST_SKIP() << "valloc not supported.";
#endif
}

TEST(malloc, malloc_info) {
#ifdef __BIONIC__
  SKIP_WITH_HWASAN; // hwasan does not implement malloc_info

  TemporaryFile tf;
  ASSERT_TRUE(tf.fd != -1);
  FILE* fp = fdopen(tf.fd, "w+");
  tf.release();
  ASSERT_TRUE(fp != nullptr);
  ASSERT_EQ(0, malloc_info(0, fp));
  ASSERT_EQ(0, fclose(fp));

  std::string contents;
  ASSERT_TRUE(android::base::ReadFileToString(tf.path, &contents));

  tinyxml2::XMLDocument doc;
  ASSERT_EQ(tinyxml2::XML_SUCCESS, doc.Parse(contents.c_str()));

  auto root = doc.FirstChildElement();
  ASSERT_NE(nullptr, root);
  ASSERT_STREQ("malloc", root->Name());
  std::string version(root->Attribute("version"));
  if (version == "jemalloc-1") {
    auto arena = root->FirstChildElement();
    for (; arena != nullptr; arena = arena->NextSiblingElement()) {
      int val;

      ASSERT_STREQ("heap", arena->Name());
      ASSERT_EQ(tinyxml2::XML_SUCCESS, arena->QueryIntAttribute("nr", &val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-large")->QueryIntText(&val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-huge")->QueryIntText(&val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-bins")->QueryIntText(&val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("bins-total")->QueryIntText(&val));

      auto bin = arena->FirstChildElement("bin");
      for (; bin != nullptr; bin = bin->NextSiblingElement()) {
        if (strcmp(bin->Name(), "bin") == 0) {
          ASSERT_EQ(tinyxml2::XML_SUCCESS, bin->QueryIntAttribute("nr", &val));
          ASSERT_EQ(tinyxml2::XML_SUCCESS,
                    bin->FirstChildElement("allocated")->QueryIntText(&val));
          ASSERT_EQ(tinyxml2::XML_SUCCESS,
                    bin->FirstChildElement("nmalloc")->QueryIntText(&val));
          ASSERT_EQ(tinyxml2::XML_SUCCESS,
                    bin->FirstChildElement("ndalloc")->QueryIntText(&val));
        }
      }
    }
  } else if (version == "scudo-1") {
    auto element = root->FirstChildElement();
    for (; element != nullptr; element = element->NextSiblingElement()) {
      int val;

      ASSERT_STREQ("alloc", element->Name());
      ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("size", &val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("count", &val));
    }
  } else {
    // Do not verify output for debug malloc.
    ASSERT_TRUE(version == "debug-malloc-1") << "Unknown version: " << version;
  }
#endif
}

TEST(malloc, malloc_info_matches_mallinfo) {
#ifdef __BIONIC__
  SKIP_WITH_HWASAN; // hwasan does not implement malloc_info

  TemporaryFile tf;
  ASSERT_TRUE(tf.fd != -1);
  FILE* fp = fdopen(tf.fd, "w+");
  tf.release();
  ASSERT_TRUE(fp != nullptr);
  size_t mallinfo_before_allocated_bytes = mallinfo().uordblks;
  ASSERT_EQ(0, malloc_info(0, fp));
  size_t mallinfo_after_allocated_bytes = mallinfo().uordblks;
  ASSERT_EQ(0, fclose(fp));

  std::string contents;
  ASSERT_TRUE(android::base::ReadFileToString(tf.path, &contents));

  tinyxml2::XMLDocument doc;
  ASSERT_EQ(tinyxml2::XML_SUCCESS, doc.Parse(contents.c_str()));

  size_t total_allocated_bytes = 0;
  auto root = doc.FirstChildElement();
  ASSERT_NE(nullptr, root);
  ASSERT_STREQ("malloc", root->Name());
  std::string version(root->Attribute("version"));
  if (version == "jemalloc-1") {
    auto arena = root->FirstChildElement();
    for (; arena != nullptr; arena = arena->NextSiblingElement()) {
      int val;

      ASSERT_STREQ("heap", arena->Name());
      ASSERT_EQ(tinyxml2::XML_SUCCESS, arena->QueryIntAttribute("nr", &val));
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-large")->QueryIntText(&val));
      total_allocated_bytes += val;
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-huge")->QueryIntText(&val));
      total_allocated_bytes += val;
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("allocated-bins")->QueryIntText(&val));
      total_allocated_bytes += val;
      ASSERT_EQ(tinyxml2::XML_SUCCESS,
                arena->FirstChildElement("bins-total")->QueryIntText(&val));
    }
    // The total needs to be between the mallinfo call before and after
    // since malloc_info allocates some memory.
    EXPECT_LE(mallinfo_before_allocated_bytes, total_allocated_bytes);
    EXPECT_GE(mallinfo_after_allocated_bytes, total_allocated_bytes);
  } else if (version == "scudo-1") {
    auto element = root->FirstChildElement();
    for (; element != nullptr; element = element->NextSiblingElement()) {
      ASSERT_STREQ("alloc", element->Name());
      int size;
      ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("size", &size));
      int count;
      ASSERT_EQ(tinyxml2::XML_SUCCESS, element->QueryIntAttribute("count", &count));
      total_allocated_bytes += size * count;
    }
    // Scudo only gives the information on the primary, so simply make
    // sure that the value is non-zero.
    EXPECT_NE(0U, total_allocated_bytes);
  } else {
    // Do not verify output for debug malloc.
    ASSERT_TRUE(version == "debug-malloc-1") << "Unknown version: " << version;
  }
#endif
}

TEST(malloc, calloc_usable_size) {
  for (size_t size = 1; size <= 2048; size++) {
    void* pointer = malloc(size);
    ASSERT_TRUE(pointer != nullptr);
    memset(pointer, 0xeb, malloc_usable_size(pointer));
    free(pointer);

    // calloc may reuse the pointer just freed, whose usable bytes were set
    // to non-zero above. If calloc does not zero out all of the data, this
    // will fail.
    uint8_t* zero_mem = reinterpret_cast<uint8_t*>(calloc(1, size));
    ASSERT_TRUE(zero_mem != nullptr);
    size_t usable_size = malloc_usable_size(zero_mem);
    for (size_t i = 0; i < usable_size; i++) {
      ASSERT_EQ(0, zero_mem[i]) << "Failed at allocation size " << size << " at byte " << i;
    }
    free(zero_mem);
  }
}

TEST(malloc, malloc_0) {
  void* p = malloc(0);
  ASSERT_TRUE(p != nullptr);
  free(p);
}

TEST(malloc, calloc_0_0) {
  void* p = calloc(0, 0);
  ASSERT_TRUE(p != nullptr);
  free(p);
}

TEST(malloc, calloc_0_1) {
  void* p = calloc(0, 1);
  ASSERT_TRUE(p != nullptr);
  free(p);
}

TEST(malloc, calloc_1_0) {
  void* p = calloc(1, 0);
  ASSERT_TRUE(p != nullptr);
  free(p);
}

TEST(malloc, realloc_nullptr_0) {
  // realloc(nullptr, size) is actually malloc(size).
  void* p = realloc(nullptr, 0);
  ASSERT_TRUE(p != nullptr);
  free(p);
}

TEST(malloc, realloc_0) {
  void* p = malloc(1024);
  ASSERT_TRUE(p != nullptr);
  // realloc(p, 0) is actually free(p).
  void* p2 = realloc(p, 0);
  ASSERT_TRUE(p2 == nullptr);
}

constexpr size_t MAX_LOOPS = 200;

// Make sure that memory returned by malloc is aligned to allow these data types.
TEST(malloc, verify_alignment) {
  uint32_t** values_32 = new uint32_t*[MAX_LOOPS];
  uint64_t** values_64 = new uint64_t*[MAX_LOOPS];
  long double** values_ldouble = new long double*[MAX_LOOPS];
  // Use filler to attempt to force the allocator to get potentially bad alignments.
  void** filler = new void*[MAX_LOOPS];

  for (size_t i = 0; i < MAX_LOOPS; i++) {
    // Check uint32_t pointers.
    filler[i] = malloc(1);
    ASSERT_TRUE(filler[i] != nullptr);

    values_32[i] = reinterpret_cast<uint32_t*>(malloc(sizeof(uint32_t)));
    ASSERT_TRUE(values_32[i] != nullptr);
    *values_32[i] = i;
    ASSERT_EQ(*values_32[i], i);
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_32[i]) & (sizeof(uint32_t) - 1));

    free(filler[i]);
  }

  for (size_t i = 0; i < MAX_LOOPS; i++) {
    // Check uint64_t pointers.
    filler[i] = malloc(1);
    ASSERT_TRUE(filler[i] != nullptr);

    values_64[i] = reinterpret_cast<uint64_t*>(malloc(sizeof(uint64_t)));
    ASSERT_TRUE(values_64[i] != nullptr);
    *values_64[i] = 0x1000 + i;
    ASSERT_EQ(*values_64[i], 0x1000 + i);
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_64[i]) & (sizeof(uint64_t) - 1));

    free(filler[i]);
  }

  for (size_t i = 0; i < MAX_LOOPS; i++) {
    // Check long double pointers.
    filler[i] = malloc(1);
    ASSERT_TRUE(filler[i] != nullptr);

    values_ldouble[i] = reinterpret_cast<long double*>(malloc(sizeof(long double)));
    ASSERT_TRUE(values_ldouble[i] != nullptr);
    *values_ldouble[i] = 5.5 + i;
    ASSERT_DOUBLE_EQ(*values_ldouble[i], 5.5 + i);
    // 32 bit glibc has a long double size of 12 bytes, so hardcode the
    // required alignment to 0x7.
#if !defined(__BIONIC__) && !defined(__LP64__)
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_ldouble[i]) & 0x7);
#else
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(values_ldouble[i]) & (sizeof(long double) - 1));
#endif

    free(filler[i]);
  }

  for (size_t i = 0; i < MAX_LOOPS; i++) {
    free(values_32[i]);
    free(values_64[i]);
    free(values_ldouble[i]);
  }

  delete[] filler;
  delete[] values_32;
  delete[] values_64;
  delete[] values_ldouble;
}

TEST(malloc, mallopt_smoke) {
#if defined(__BIONIC__)
  errno = 0;
  ASSERT_EQ(0, mallopt(-1000, 1));
  // mallopt doesn't set errno.
  ASSERT_ERRNO(0);
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}

TEST(malloc, mallopt_decay) {
#if defined(__BIONIC__)
  SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
  ASSERT_EQ(1, mallopt(M_DECAY_TIME, -1));
  ASSERT_EQ(1, mallopt(M_DECAY_TIME, 1));
  ASSERT_EQ(1, mallopt(M_DECAY_TIME, 0));
  ASSERT_EQ(1, mallopt(M_DECAY_TIME, 1));
  ASSERT_EQ(1, mallopt(M_DECAY_TIME, 0));
  ASSERT_EQ(1, mallopt(M_DECAY_TIME, -1));
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}

TEST(malloc, mallopt_purge) {
#if defined(__BIONIC__)
  SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
  ASSERT_EQ(1, mallopt(M_PURGE, 0));
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}

TEST(malloc, mallopt_purge_all) {
#if defined(__BIONIC__)
  SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
  ASSERT_EQ(1, mallopt(M_PURGE_ALL, 0));
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}

TEST(malloc, mallopt_log_stats) {
#if defined(__BIONIC__)
  SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
  ASSERT_EQ(1, mallopt(M_LOG_STATS, 0));
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}

// Verify that all of the mallopt values are unique.
TEST(malloc, mallopt_unique_params) {
#if defined(__BIONIC__)
  std::vector<std::pair<int, std::string>> params{
      std::make_pair(M_DECAY_TIME, "M_DECAY_TIME"),
      std::make_pair(M_PURGE, "M_PURGE"),
      std::make_pair(M_PURGE_ALL, "M_PURGE_ALL"),
      std::make_pair(M_MEMTAG_TUNING, "M_MEMTAG_TUNING"),
      std::make_pair(M_THREAD_DISABLE_MEM_INIT, "M_THREAD_DISABLE_MEM_INIT"),
      std::make_pair(M_CACHE_COUNT_MAX, "M_CACHE_COUNT_MAX"),
      std::make_pair(M_CACHE_SIZE_MAX, "M_CACHE_SIZE_MAX"),
      std::make_pair(M_TSDS_COUNT_MAX, "M_TSDS_COUNT_MAX"),
      std::make_pair(M_BIONIC_ZERO_INIT, "M_BIONIC_ZERO_INIT"),
      std::make_pair(M_BIONIC_SET_HEAP_TAGGING_LEVEL, "M_BIONIC_SET_HEAP_TAGGING_LEVEL"),
      std::make_pair(M_LOG_STATS, "M_LOG_STATS"),
  };

  std::unordered_map<int, std::string> all_params;
  for (const auto& param : params) {
    EXPECT_TRUE(all_params.count(param.first) == 0)
        << "mallopt params " << all_params[param.first] << " and " << param.second
        << " have the same value " << param.first;
    all_params.insert(param);
  }
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}

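// Determine which allocator is in use by parsing the version attribute of
// the malloc_info() output.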
#if defined(__BIONIC__)
static void GetAllocatorVersion(bool* allocator_scudo) {
  TemporaryFile tf;
  ASSERT_TRUE(tf.fd != -1);
  FILE* fp = fdopen(tf.fd, "w+");
  tf.release();
  ASSERT_TRUE(fp != nullptr);
  if (malloc_info(0, fp) != 0) {
    *allocator_scudo = false;
    return;
  }
  ASSERT_EQ(0, fclose(fp));

  std::string contents;
  ASSERT_TRUE(android::base::ReadFileToString(tf.path, &contents));

  tinyxml2::XMLDocument doc;
  ASSERT_EQ(tinyxml2::XML_SUCCESS, doc.Parse(contents.c_str()));

  auto root = doc.FirstChildElement();
  ASSERT_NE(nullptr, root);
  ASSERT_STREQ("malloc", root->Name());
  std::string version(root->Attribute("version"));
  *allocator_scudo = (version == "scudo-1");
}
#endif

TEST(malloc, mallopt_scudo_only_options) {
#if defined(__BIONIC__)
  SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
  bool allocator_scudo;
  GetAllocatorVersion(&allocator_scudo);
  if (!allocator_scudo) {
    GTEST_SKIP() << "scudo allocator only test";
  }
  ASSERT_EQ(1, mallopt(M_CACHE_COUNT_MAX, 100));
  ASSERT_EQ(1, mallopt(M_CACHE_SIZE_MAX, 1024 * 1024 * 2));
  ASSERT_EQ(1, mallopt(M_TSDS_COUNT_MAX, 8));
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}

TEST(malloc, reallocarray_overflow) {
#if HAVE_REALLOCARRAY
  // Values that cause overflow to a result small enough (8 on LP64) that malloc would "succeed".
  size_t a = static_cast<size_t>(INTPTR_MIN + 4);
  size_t b = 2;

  errno = 0;
  ASSERT_TRUE(reallocarray(nullptr, a, b) == nullptr);
  ASSERT_ERRNO(ENOMEM);

  errno = 0;
  ASSERT_TRUE(reallocarray(nullptr, b, a) == nullptr);
  ASSERT_ERRNO(ENOMEM);
#else
  GTEST_SKIP() << "reallocarray not available";
#endif
}

TEST(malloc, reallocarray) {
#if HAVE_REALLOCARRAY
  void* p = reallocarray(nullptr, 2, 32);
  ASSERT_TRUE(p != nullptr);
  ASSERT_GE(malloc_usable_size(p), 64U);
#else
  GTEST_SKIP() << "reallocarray not available";
#endif
}

TEST(malloc, mallinfo) {
#if defined(__BIONIC__) || defined(ANDROID_HOST_MUSL)
  SKIP_WITH_HWASAN << "hwasan does not implement mallinfo";
  static size_t sizes[] = {
    8, 32, 128, 4096, 32768, 131072, 1024000, 10240000, 20480000, 300000000
  };

  static constexpr size_t kMaxAllocs = 50;

  for (size_t size : sizes) {
    // If some of these allocations are stuck in a thread cache, then keep
    // looping until we make an allocation that changes the total size of
    // the memory allocated. jemalloc implementations count thread cache
    // allocations against the total memory allocated.
    void* ptrs[kMaxAllocs] = {};
    bool pass = false;
    for (size_t i = 0; i < kMaxAllocs; i++) {
      size_t allocated = mallinfo().uordblks;
      ptrs[i] = malloc(size);
      ASSERT_TRUE(ptrs[i] != nullptr);
      size_t new_allocated = mallinfo().uordblks;
      if (allocated != new_allocated) {
        size_t usable_size = malloc_usable_size(ptrs[i]);
        // Only check if the total got bigger by at least the allocation size.
        // Sometimes the mallinfo numbers can go backwards due to compaction
        // and/or freeing of cached data.
        if (new_allocated >= allocated + usable_size) {
          pass = true;
          break;
        }
      }
    }
    for (void* ptr : ptrs) {
      free(ptr);
    }
    ASSERT_TRUE(pass)
        << "For size " << size << " allocated bytes did not increase after "
        << kMaxAllocs << " allocations.";
  }
#else
  GTEST_SKIP() << "glibc is broken";
#endif
}

TEST(malloc, mallinfo2) {
#if defined(__BIONIC__) || defined(ANDROID_HOST_MUSL)
  SKIP_WITH_HWASAN << "hwasan does not implement mallinfo2";
  static size_t sizes[] = {8, 32, 128, 4096, 32768, 131072, 1024000, 10240000, 20480000, 300000000};

  static constexpr size_t kMaxAllocs = 50;

  for (size_t size : sizes) {
    // If some of these allocations are stuck in a thread cache, then keep
    // looping until we make an allocation that changes the total size of
    // the memory allocated. jemalloc implementations count thread cache
    // allocations against the total memory allocated.
    void* ptrs[kMaxAllocs] = {};
    bool pass = false;
    for (size_t i = 0; i < kMaxAllocs; i++) {
      struct mallinfo info = mallinfo();
      struct mallinfo2 info2 = mallinfo2();
      // Verify that mallinfo and mallinfo2 are exactly the same.
      ASSERT_EQ(static_cast<size_t>(info.arena), info2.arena);
      ASSERT_EQ(static_cast<size_t>(info.ordblks), info2.ordblks);
      ASSERT_EQ(static_cast<size_t>(info.smblks), info2.smblks);
      ASSERT_EQ(static_cast<size_t>(info.hblks), info2.hblks);
      ASSERT_EQ(static_cast<size_t>(info.hblkhd), info2.hblkhd);
      ASSERT_EQ(static_cast<size_t>(info.usmblks), info2.usmblks);
      ASSERT_EQ(static_cast<size_t>(info.fsmblks), info2.fsmblks);
      ASSERT_EQ(static_cast<size_t>(info.uordblks), info2.uordblks);
      ASSERT_EQ(static_cast<size_t>(info.fordblks), info2.fordblks);
      ASSERT_EQ(static_cast<size_t>(info.keepcost), info2.keepcost);

      size_t allocated = info2.uordblks;
      ptrs[i] = malloc(size);
      ASSERT_TRUE(ptrs[i] != nullptr);

      info = mallinfo();
      info2 = mallinfo2();
      // Verify that mallinfo and mallinfo2 are exactly the same.
      ASSERT_EQ(static_cast<size_t>(info.arena), info2.arena);
      ASSERT_EQ(static_cast<size_t>(info.ordblks), info2.ordblks);
      ASSERT_EQ(static_cast<size_t>(info.smblks), info2.smblks);
      ASSERT_EQ(static_cast<size_t>(info.hblks), info2.hblks);
      ASSERT_EQ(static_cast<size_t>(info.hblkhd), info2.hblkhd);
      ASSERT_EQ(static_cast<size_t>(info.usmblks), info2.usmblks);
      ASSERT_EQ(static_cast<size_t>(info.fsmblks), info2.fsmblks);
      ASSERT_EQ(static_cast<size_t>(info.uordblks), info2.uordblks);
      ASSERT_EQ(static_cast<size_t>(info.fordblks), info2.fordblks);
      ASSERT_EQ(static_cast<size_t>(info.keepcost), info2.keepcost);

      size_t new_allocated = info2.uordblks;
      if (allocated != new_allocated) {
        size_t usable_size = malloc_usable_size(ptrs[i]);
        // Only check if the total got bigger by at least the allocation size.
        // Sometimes the mallinfo2 numbers can go backwards due to compaction
        // and/or freeing of cached data.
        if (new_allocated >= allocated + usable_size) {
          pass = true;
          break;
        }
      }
    }
    for (void* ptr : ptrs) {
      free(ptr);
    }
    ASSERT_TRUE(pass) << "For size " << size << " allocated bytes did not increase after "
                      << kMaxAllocs << " allocations.";
  }
#else
  GTEST_SKIP() << "glibc is broken";
#endif
}

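// The helpers below are marked optnone so the compiler cannot optimize away
// the allocations or the alignment checks.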
template <typename Type>
void __attribute__((optnone)) VerifyAlignment(Type* floating) {
  size_t expected_alignment = alignof(Type);
  if (expected_alignment != 0) {
    ASSERT_EQ(0U, (expected_alignment - 1) & reinterpret_cast<uintptr_t>(floating))
        << "Expected alignment " << expected_alignment << " ptr value "
        << static_cast<void*>(floating);
  }
}

template <typename Type>
void __attribute__((optnone)) TestAllocateType() {
  // The number of allocations to do in a row. This is to attempt to
  // expose the worst case alignment for native allocators that use
  // bins.
  static constexpr size_t kMaxConsecutiveAllocs = 100;

  // Verify using new directly.
  Type* types[kMaxConsecutiveAllocs];
  for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
    types[i] = new Type;
    VerifyAlignment(types[i]);
    if (::testing::Test::HasFatalFailure()) {
      return;
    }
  }
  for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
    delete types[i];
  }

  // Verify using malloc.
  for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
    types[i] = reinterpret_cast<Type*>(malloc(sizeof(Type)));
    ASSERT_TRUE(types[i] != nullptr);
    VerifyAlignment(types[i]);
    if (::testing::Test::HasFatalFailure()) {
      return;
    }
  }
  for (size_t i = 0; i < kMaxConsecutiveAllocs; i++) {
    free(types[i]);
  }

  // Verify using a vector.
  std::vector<Type> type_vector(kMaxConsecutiveAllocs);
  for (size_t i = 0; i < type_vector.size(); i++) {
    VerifyAlignment(&type_vector[i]);
    if (::testing::Test::HasFatalFailure()) {
      return;
    }
  }
}

#if defined(__ANDROID__)
static void __attribute__((optnone)) AndroidVerifyAlignment(size_t alloc_size, size_t aligned_bytes) {
  void* ptrs[100];
  uintptr_t mask = aligned_bytes - 1;
  for (size_t i = 0; i < sizeof(ptrs) / sizeof(void*); i++) {
    ptrs[i] = malloc(alloc_size);
    ASSERT_TRUE(ptrs[i] != nullptr);
    ASSERT_EQ(0U, reinterpret_cast<uintptr_t>(ptrs[i]) & mask)
        << "Expected at least " << aligned_bytes << " byte alignment: size "
        << alloc_size << " actual ptr " << ptrs[i];
  }
}
#endif

void AlignCheck() {
  // See http://www.open-std.org/jtc1/sc22/wg14/www/docs/summary.htm#dr_445
  // for a discussion of type alignment.
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<float>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<double>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<long double>());

  ASSERT_NO_FATAL_FAILURE(TestAllocateType<char>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<char16_t>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<char32_t>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<wchar_t>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<signed char>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<short int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<long int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<long long int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned char>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned short int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned long int>());
  ASSERT_NO_FATAL_FAILURE(TestAllocateType<unsigned long long int>());

#if defined(__ANDROID__)
  // On Android, there is a lot of code that expects certain alignments:
  //  1. Allocations of a size that rounds up to a multiple of 16 bytes
  //     must have at least 16 byte alignment.
  //  2. Allocations of a size that rounds up to a multiple of 8 bytes and
  //     not 16 bytes, are only required to have at least 8 byte alignment.
  // In addition, on Android clang has been configured for 64 bit such that:
  //  3. Allocations <= 8 bytes must be aligned to at least 8 bytes.
  //  4. Allocations > 8 bytes must be aligned to at least 16 bytes.
  // For 32 bit environments, only the first two requirements must be met.

  // See http://www.open-std.org/jtc1/sc22/wg14/www/docs/n2293.htm for
  // a discussion of this alignment mess. The code below is enforcing
  // strong-alignment, since who knows what code depends on this behavior now.
  // As mentioned before, for 64 bit this will enforce the higher
  // requirement since clang expects this behavior on Android now.
  for (size_t i = 1; i <= 128; i++) {
#if defined(__LP64__)
    if (i <= 8) {
      AndroidVerifyAlignment(i, 8);
    } else {
      AndroidVerifyAlignment(i, 16);
    }
#else
    size_t rounded = (i + 7) & ~7;
    if ((rounded % 16) == 0) {
      AndroidVerifyAlignment(i, 16);
    } else {
      AndroidVerifyAlignment(i, 8);
    }
#endif
    if (::testing::Test::HasFatalFailure()) {
      return;
    }
  }
#endif
}

TEST(malloc, align_check) {
  AlignCheck();
}

// Jemalloc doesn't pass this test right now, so leave it as disabled.
TEST(malloc, DISABLED_alloc_after_fork) {
  // Both of these need to be a power of 2.
  static constexpr size_t kMinAllocationSize = 8;
  static constexpr size_t kMaxAllocationSize = 2097152;

  static constexpr size_t kNumAllocatingThreads = 5;
  static constexpr size_t kNumForkLoops = 100;

  std::atomic_bool stop;

  // Create threads that simply allocate and free different sizes.
  std::vector<std::thread*> threads;
  for (size_t i = 0; i < kNumAllocatingThreads; i++) {
    std::thread* t = new std::thread([&stop] {
      while (!stop) {
        for (size_t size = kMinAllocationSize; size <= kMaxAllocationSize; size <<= 1) {
          void* ptr;
          DoNotOptimize(ptr = malloc(size));
          free(ptr);
        }
      }
    });
    threads.push_back(t);
  }

  // Create a thread to fork and allocate.
  for (size_t i = 0; i < kNumForkLoops; i++) {
    pid_t pid;
    if ((pid = fork()) == 0) {
      for (size_t size = kMinAllocationSize; size <= kMaxAllocationSize; size <<= 1) {
        void* ptr;
        DoNotOptimize(ptr = malloc(size));
        ASSERT_TRUE(ptr != nullptr);
        // Make sure we can touch all of the allocation.
        memset(ptr, 0x1, size);
        ASSERT_LE(size, malloc_usable_size(ptr));
        free(ptr);
      }
      _exit(10);
    }
    ASSERT_NE(-1, pid);
    AssertChildExited(pid, 10);
  }

  stop = true;
  for (auto thread : threads) {
    thread->join();
    delete thread;
  }
}

TEST(android_mallopt, error_on_unexpected_option) {
#if defined(__BIONIC__)
  const int unrecognized_option = -1;
  errno = 0;
  EXPECT_EQ(false, android_mallopt(unrecognized_option, nullptr, 0));
  EXPECT_ERRNO(ENOTSUP);
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}

bool IsDynamic() {
#if defined(__LP64__)
  Elf64_Ehdr ehdr;
#else
  Elf32_Ehdr ehdr;
#endif
  std::string path(android::base::GetExecutablePath());

  int fd = open(path.c_str(), O_RDONLY | O_CLOEXEC);
  if (fd == -1) {
    // Assume dynamic on error.
    return true;
  }
  bool read_completed = android::base::ReadFully(fd, &ehdr, sizeof(ehdr));
  close(fd);
  // Assume dynamic in error cases.
  return !read_completed || ehdr.e_type == ET_DYN;
}

TEST(android_mallopt, init_zygote_child_profiling) {
#if defined(__BIONIC__)
  // Successful call.
  errno = 0;
  if (IsDynamic()) {
    EXPECT_EQ(true, android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, nullptr, 0));
    EXPECT_ERRNO(0);
  } else {
    // Not supported in static executables.
    EXPECT_EQ(false, android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, nullptr, 0));
    EXPECT_ERRNO(ENOTSUP);
  }

  // Unexpected arguments rejected.
  errno = 0;
  char unexpected = 0;
  EXPECT_EQ(false, android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, &unexpected, 1));
  if (IsDynamic()) {
    EXPECT_ERRNO(EINVAL);
  } else {
    EXPECT_ERRNO(ENOTSUP);
  }
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}

#if defined(__BIONIC__)
template <typename FuncType>
void CheckAllocationFunction(FuncType func) {
  // Assumes that no more than 108MB of memory is allocated before this.
  size_t limit = 128 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
  if (!func(20 * 1024 * 1024))
    exit(1);
  if (func(128 * 1024 * 1024))
    exit(1);
  exit(0);
}
#endif

TEST(android_mallopt, set_allocation_limit) {
#if defined(__BIONIC__)
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return calloc(bytes, 1) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return calloc(1, bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return malloc(bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction(
                  [](size_t bytes) { return memalign(sizeof(void*), bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) {
                void* ptr;
                return posix_memalign(&ptr, sizeof(void *), bytes) == 0;
              }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction(
                  [](size_t bytes) { return aligned_alloc(sizeof(void*), bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) {
                void* p = malloc(1024 * 1024);
                return realloc(p, bytes) != nullptr;
              }),
              testing::ExitedWithCode(0), "");
#if !defined(__LP64__)
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return pvalloc(bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
  EXPECT_EXIT(CheckAllocationFunction([](size_t bytes) { return valloc(bytes) != nullptr; }),
              testing::ExitedWithCode(0), "");
#endif
#else
  GTEST_SKIP() << "bionic extension";
#endif
}

TEST(android_mallopt, set_allocation_limit_multiple) {
#if defined(__BIONIC__)
  // Only the first set should work.
  size_t limit = 256 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
  limit = 32 * 1024 * 1024;
  ASSERT_FALSE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));
#else
  GTEST_SKIP() << "bionic extension";
#endif
}

#if defined(__BIONIC__)
static constexpr size_t kAllocationSize = 8 * 1024 * 1024;

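// Allocate kAllocationSize blocks until malloc fails due to the allocation
// limit, free everything, and return how many allocations succeeded.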
static size_t GetMaxAllocations() {
  size_t max_pointers = 0;
  void* ptrs[20];
  for (size_t i = 0; i < sizeof(ptrs) / sizeof(void*); i++) {
    ptrs[i] = malloc(kAllocationSize);
    if (ptrs[i] == nullptr) {
      max_pointers = i;
      break;
    }
  }
  for (size_t i = 0; i < max_pointers; i++) {
    free(ptrs[i]);
  }
  return max_pointers;
}

static void VerifyMaxPointers(size_t max_pointers) {
  // Now verify that we can allocate the same number as before.
  void* ptrs[20];
  for (size_t i = 0; i < max_pointers; i++) {
    ptrs[i] = malloc(kAllocationSize);
    ASSERT_TRUE(ptrs[i] != nullptr) << "Failed to allocate on iteration " << i;
  }

  // Make sure the next allocation still fails.
  ASSERT_TRUE(malloc(kAllocationSize) == nullptr);
  for (size_t i = 0; i < max_pointers; i++) {
    free(ptrs[i]);
  }
}
#endif

TEST(android_mallopt, set_allocation_limit_realloc_increase) {
#if defined(__BIONIC__)
  size_t limit = 128 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));

  size_t max_pointers = GetMaxAllocations();
  ASSERT_TRUE(max_pointers != 0) << "Limit never reached.";

  void* memory = malloc(10 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);

  // Increase size.
  memory = realloc(memory, 20 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);
  memory = realloc(memory, 40 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);
  memory = realloc(memory, 60 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);
  memory = realloc(memory, 80 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);
  // Now push past the limit.
  memory = realloc(memory, 130 * 1024 * 1024);
  ASSERT_TRUE(memory == nullptr);

  VerifyMaxPointers(max_pointers);
#else
  GTEST_SKIP() << "bionic extension";
#endif
}

TEST(android_mallopt, set_allocation_limit_realloc_decrease) {
#if defined(__BIONIC__)
  size_t limit = 100 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));

  size_t max_pointers = GetMaxAllocations();
  ASSERT_TRUE(max_pointers != 0) << "Limit never reached.";

  void* memory = malloc(80 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);

  // Decrease size.
  memory = realloc(memory, 60 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);
  memory = realloc(memory, 40 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);
  memory = realloc(memory, 20 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);
  memory = realloc(memory, 10 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);
  free(memory);

  VerifyMaxPointers(max_pointers);
#else
  GTEST_SKIP() << "bionic extension";
#endif
}

TEST(android_mallopt, set_allocation_limit_realloc_free) {
#if defined(__BIONIC__)
  size_t limit = 100 * 1024 * 1024;
  ASSERT_TRUE(android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit)));

  size_t max_pointers = GetMaxAllocations();
  ASSERT_TRUE(max_pointers != 0) << "Limit never reached.";

  void* memory = malloc(60 * 1024 * 1024);
  ASSERT_TRUE(memory != nullptr);

  memory = realloc(memory, 0);
  ASSERT_TRUE(memory == nullptr);

  VerifyMaxPointers(max_pointers);
#else
  GTEST_SKIP() << "bionic extension";
#endif
}

#if defined(__BIONIC__)
static void SetAllocationLimitMultipleThreads() {
  static constexpr size_t kNumThreads = 4;
  std::atomic_bool start_running = false;
  std::atomic<size_t> num_running;
  std::atomic<size_t> num_successful;
  std::unique_ptr<std::thread> threads[kNumThreads];
  for (size_t i = 0; i < kNumThreads; i++) {
    threads[i].reset(new std::thread([&num_running, &start_running, &num_successful] {
      ++num_running;
      while (!start_running) {
      }
      size_t limit = 500 * 1024 * 1024;
      if (android_mallopt(M_SET_ALLOCATION_LIMIT_BYTES, &limit, sizeof(limit))) {
        ++num_successful;
      }
    }));
  }

  // Wait until all of the threads have started.
  while (num_running != kNumThreads)
    ;

  // Now start all of the threads setting the mallopt at once.
  start_running = true;

  // Send the hardcoded signal (BIONIC_SIGNAL_PROFILER with value 0) to
  // trigger the heapprofd handler. This verifies that setting the limit
  // still works while the allocation handlers are being swapped at the same
  // time, and also when the limit handler happens to be installed first.
  union sigval signal_value {};
  ASSERT_EQ(0, sigqueue(getpid(), BIONIC_SIGNAL_PROFILER, signal_value));

  // Wait for all of the threads to finish.
  for (size_t i = 0; i < kNumThreads; i++) {
    threads[i]->join();
  }
  ASSERT_EQ(1U, num_successful) << "Only one thread should be able to set the limit.";
  _exit(0);
}
#endif

TEST(android_mallopt, set_allocation_limit_multiple_threads) {
#if defined(__BIONIC__)
  if (IsDynamic()) {
    ASSERT_TRUE(android_mallopt(M_INIT_ZYGOTE_CHILD_PROFILING, nullptr, 0));
  }

  // Run this a number of times as a stress test.
  for (size_t i = 0; i < 100; i++) {
    // Not using ASSERT_EXIT because error messages are not displayed.
    pid_t pid;
    if ((pid = fork()) == 0) {
      ASSERT_NO_FATAL_FAILURE(SetAllocationLimitMultipleThreads());
    }
    ASSERT_NE(-1, pid);
    int status;
    ASSERT_EQ(pid, wait(&status));
    ASSERT_EQ(0, WEXITSTATUS(status));
  }
#else
  GTEST_SKIP() << "bionic extension";
#endif
}

#if defined(__BIONIC__)
using Mode = android_mallopt_gwp_asan_options_t::Mode;
TEST(android_mallopt, DISABLED_multiple_enable_gwp_asan) {
  android_mallopt_gwp_asan_options_t options;
  options.program_name = "";  // Don't infer GWP-ASan options from sysprops.
  options.mode = Mode::APP_MANIFEST_NEVER;
  // GWP-ASan should already be enabled. Trying to enable or disable it should
  // always pass.
  ASSERT_TRUE(android_mallopt(M_INITIALIZE_GWP_ASAN, &options, sizeof(options)));
  options.mode = Mode::APP_MANIFEST_DEFAULT;
  ASSERT_TRUE(android_mallopt(M_INITIALIZE_GWP_ASAN, &options, sizeof(options)));
}
#endif  // defined(__BIONIC__)

TEST(android_mallopt, multiple_enable_gwp_asan) {
#if defined(__BIONIC__)
  // Always enable GWP-ASan, with default options.
  RunGwpAsanTest("*.DISABLED_multiple_enable_gwp_asan");
#else
  GTEST_SKIP() << "bionic extension";
#endif
}

TEST(android_mallopt, memtag_stack_is_on) {
#if defined(__BIONIC__)
  bool memtag_stack;
  EXPECT_TRUE(android_mallopt(M_MEMTAG_STACK_IS_ON, &memtag_stack, sizeof(memtag_stack)));
#else
  GTEST_SKIP() << "bionic extension";
#endif
}

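// Allocate the given sizes and scribble over the first bytes of each
// allocation, free everything, then allocate again and verify the new
// allocations come back zeroed.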
TestHeapZeroing(int num_iterations,int (* get_alloc_size)(int iteration))1446 void TestHeapZeroing(int num_iterations, int (*get_alloc_size)(int iteration)) {
1447   std::vector<void*> allocs;
1448   constexpr int kMaxBytesToCheckZero = 64;
1449   const char kBlankMemory[kMaxBytesToCheckZero] = {};
1450 
1451   for (int i = 0; i < num_iterations; ++i) {
1452     int size = get_alloc_size(i);
1453     allocs.push_back(malloc(size));
1454     memset(allocs.back(), 'X', std::min(size, kMaxBytesToCheckZero));
1455   }
1456 
1457   for (void* alloc : allocs) {
1458     free(alloc);
1459   }
1460   allocs.clear();
1461 
1462   for (int i = 0; i < num_iterations; ++i) {
1463     int size = get_alloc_size(i);
1464     allocs.push_back(malloc(size));
1465     ASSERT_EQ(0, memcmp(allocs.back(), kBlankMemory, std::min(size, kMaxBytesToCheckZero)));
1466   }
1467 
1468   for (void* alloc : allocs) {
1469     free(alloc);
1470   }
1471 }
1472 
TEST(malloc,zero_init)1473 TEST(malloc, zero_init) {
1474 #if defined(__BIONIC__)
1475   SKIP_WITH_HWASAN << "hwasan does not implement mallopt";
1476   bool allocator_scudo;
1477   GetAllocatorVersion(&allocator_scudo);
1478   if (!allocator_scudo) {
1479     GTEST_SKIP() << "scudo allocator only test";
1480   }
1481 
1482   mallopt(M_BIONIC_ZERO_INIT, 1);
1483 
1484   // Test using a block of 4K small (1-32 byte) allocations.
1485   TestHeapZeroing(/* num_iterations */ 0x1000, [](int iteration) -> int {
1486     return 1 + iteration % 32;
1487   });
1488 
1489   // Also test large allocations that land in the scudo secondary, as this is
1490   // the only part of Scudo that's changed by enabling zero initialization with
1491   // MTE. Uses 32 allocations, totalling 60MiB memory. Decay time (time to
1492   // release secondary allocations back to the OS) was modified to 0ms/1ms by
1493   // mallopt_decay. Ensure that we delay for at least a second before releasing
1494   // pages to the OS in order to avoid implicit zeroing by the kernel.
1495   mallopt(M_DECAY_TIME, 1);
1496   TestHeapZeroing(/* num_iterations */ 32, [](int iteration) -> int {
1497     return 1 << (19 + iteration % 4);
1498   });
1499 
1500 #else
1501   GTEST_SKIP() << "bionic-only test";
1502 #endif
1503 }
1504 
1505 // Note that MTE is enabled on cc_tests on devices that support MTE.
TEST(malloc,disable_mte)1506 TEST(malloc, disable_mte) {
1507 #if defined(__BIONIC__)
1508   if (!mte_supported()) {
1509     GTEST_SKIP() << "This function can only be tested with MTE";
1510   }
1511 
1512   sem_t sem;
1513   ASSERT_EQ(0, sem_init(&sem, 0, 0));
1514 
1515   pthread_t thread;
1516   ASSERT_EQ(0, pthread_create(
1517                    &thread, nullptr,
1518                    [](void* ptr) -> void* {
1519                      auto* sem = reinterpret_cast<sem_t*>(ptr);
1520                      sem_wait(sem);
1521                      return reinterpret_cast<void*>(prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0));
1522                    },
1523                    &sem));
1524 
  ASSERT_EQ(1, mallopt(M_BIONIC_SET_HEAP_TAGGING_LEVEL, M_HEAP_TAGGING_LEVEL_NONE));
  ASSERT_EQ(0, sem_post(&sem));

  int my_tagged_addr_ctrl = prctl(PR_GET_TAGGED_ADDR_CTRL, 0, 0, 0, 0);
  ASSERT_EQ(static_cast<unsigned long>(PR_MTE_TCF_NONE), my_tagged_addr_ctrl & PR_MTE_TCF_MASK);

  void* retval;
  ASSERT_EQ(0, pthread_join(thread, &retval));
  int thread_tagged_addr_ctrl = reinterpret_cast<uintptr_t>(retval);
  ASSERT_EQ(my_tagged_addr_ctrl, thread_tagged_addr_ctrl);
#else
  GTEST_SKIP() << "bionic extension";
#endif
}

TEST(malloc, allocation_slack) {
#if defined(__BIONIC__)
  SKIP_WITH_NATIVE_BRIDGE;  // http://b/189606147

  bool allocator_scudo;
  GetAllocatorVersion(&allocator_scudo);
  if (!allocator_scudo) {
    GTEST_SKIP() << "scudo allocator only test";
  }

  // Test that older target SDK levels let you access a few bytes off the end
  // of a large allocation.
  android_set_application_target_sdk_version(29);
  auto p = std::make_unique<char[]>(131072);
  volatile char *vp = p.get();
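  // Read one byte past the end of the 128KiB allocation. With target SDK <= 29
  // the allocator is expected to tolerate this rather than crash.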
  volatile char oob ATTRIBUTE_UNUSED = vp[131072];
#else
  GTEST_SKIP() << "bionic extension";
#endif
}

// Regression test for b/206701345 -- scudo bug, MTE only.
// Fix: https://reviews.llvm.org/D105261
// Fix: https://android-review.googlesource.com/c/platform/external/scudo/+/1763655
TEST(malloc, realloc_mte_crash_b206701345) {
  // We want to hit in-place realloc at the very end of an mmap-ed region. Not
  // all size classes allow such placement - the mmap size has to be divisible
  // by the block size. At the time of writing this could only be reproduced
  // with the 64 byte size class (i.e. 48 byte allocations), but that may
  // change in the future. Try several different classes at the lower end.
  std::vector<void*> ptrs(10000);
  for (int i = 1; i < 32; ++i) {
    size_t sz = 16 * i - 1;
    for (void*& p : ptrs) {
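      // Grow each allocation by a single byte so that scudo takes the
      // in-place realloc path within the same size class.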
      p = realloc(malloc(sz), sz + 1);
    }

    for (void* p : ptrs) {
      free(p);
    }
  }
}

void VerifyAllocationsAreZero(std::function<void*(size_t)> alloc_func, std::string function_name,
                              std::vector<size_t>& test_sizes, size_t max_allocations) {
  // Vector of zeroed data used for comparisons. Make it twice the largest
  // test size.
  std::vector<char> zero(test_sizes.back() * 2, 0);

  SCOPED_TRACE(testing::Message() << function_name << " failed to zero memory");

  for (size_t test_size : test_sizes) {
    std::vector<void*> ptrs(max_allocations);
    for (size_t i = 0; i < ptrs.size(); i++) {
      SCOPED_TRACE(testing::Message() << "size " << test_size << " at iteration " << i);
      ptrs[i] = alloc_func(test_size);
      ASSERT_TRUE(ptrs[i] != nullptr);
      size_t alloc_size = malloc_usable_size(ptrs[i]);
      ASSERT_LE(alloc_size, zero.size());
      ASSERT_EQ(0, memcmp(ptrs[i], zero.data(), alloc_size));

      // Set the memory to non-zero so that, if this block is reused later,
      // it must be zeroed again for the next check to pass.
      memset(ptrs[i], 0xab, alloc_size);
    }
    // Free the pointers.
    for (size_t i = 0; i < ptrs.size(); i++) {
      free(ptrs[i]);
    }
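    // Allocate the same size again with plain malloc; any block reused from
    // the dirtied batch above must read back as zero.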
    for (size_t i = 0; i < ptrs.size(); i++) {
      SCOPED_TRACE(testing::Message() << "size " << test_size << " at iteration " << i);
      ptrs[i] = malloc(test_size);
      ASSERT_TRUE(ptrs[i] != nullptr);
      size_t alloc_size = malloc_usable_size(ptrs[i]);
      ASSERT_LE(alloc_size, zero.size());
      ASSERT_EQ(0, memcmp(ptrs[i], zero.data(), alloc_size));
    }
    // Only free the pointers after all of the allocations above, to maximize
    // the chance of reusing blocks from the first loop.
    for (size_t i = 0; i < ptrs.size(); i++) {
      free(ptrs[i]);
    }
  }
}

// Verify that small and medium allocations are always zero.
// @CddTest = 9.7/C-4-1
TEST(malloc, zeroed_allocations_small_medium_sizes) {
#if !defined(__BIONIC__)
  GTEST_SKIP() << "Only valid on bionic";
#endif

  if (IsLowRamDevice()) {
    GTEST_SKIP() << "Skipped on low memory devices.";
  }

  constexpr size_t kMaxAllocations = 1024;
  std::vector<size_t> test_sizes = {16, 48, 128, 1024, 4096, 65536};
  VerifyAllocationsAreZero([](size_t size) -> void* { return malloc(size); }, "malloc", test_sizes,
                           kMaxAllocations);

  VerifyAllocationsAreZero([](size_t size) -> void* { return memalign(64, size); }, "memalign",
                           test_sizes, kMaxAllocations);

  VerifyAllocationsAreZero(
      [](size_t size) -> void* {
        void* ptr;
        if (posix_memalign(&ptr, 64, size) == 0) {
          return ptr;
        }
        return nullptr;
      },
      "posix_memalign", test_sizes, kMaxAllocations);
}

// Verify that large allocations are always zero.
// @CddTest = 9.7/C-4-1
TEST(malloc, zeroed_allocations_large_sizes) {
#if !defined(__BIONIC__)
  GTEST_SKIP() << "Only valid on bionic";
#endif

  if (IsLowRamDevice()) {
    GTEST_SKIP() << "Skipped on low memory devices.";
  }

  constexpr size_t kMaxAllocations = 20;
  std::vector<size_t> test_sizes = {1000000, 2000000, 3000000, 4000000};
  VerifyAllocationsAreZero([](size_t size) -> void* { return malloc(size); }, "malloc", test_sizes,
                           kMaxAllocations);

  VerifyAllocationsAreZero([](size_t size) -> void* { return memalign(64, size); }, "memalign",
                           test_sizes, kMaxAllocations);

  VerifyAllocationsAreZero(
      [](size_t size) -> void* {
        void* ptr;
        if (posix_memalign(&ptr, 64, size) == 0) {
          return ptr;
        }
        return nullptr;
      },
      "posix_memalign", test_sizes, kMaxAllocations);
}

// Verify that reallocs are zeroed when expanded.
// @CddTest = 9.7/C-4-1
TEST(malloc, zeroed_allocations_realloc) {
#if !defined(__BIONIC__)
  GTEST_SKIP() << "Only valid on bionic";
#endif

  if (IsLowRamDevice()) {
    GTEST_SKIP() << "Skipped on low memory devices.";
  }

  // Vector of zeroed data used for comparisons.
  constexpr size_t kMaxMemorySize = 131072;
  std::vector<char> zero(kMaxMemorySize, 0);

  constexpr size_t kMaxAllocations = 1024;
  std::vector<size_t> test_sizes = {16, 48, 128, 1024, 4096, 65536};
  // Do a number of allocations and set them to non-zero.
  for (size_t test_size : test_sizes) {
    std::vector<void*> ptrs(kMaxAllocations);
    for (size_t i = 0; i < kMaxAllocations; i++) {
      ptrs[i] = malloc(test_size);
      ASSERT_TRUE(ptrs[i] != nullptr);

      // Set the memory to non-zero so that, if a block is reused by a realloc
      // below, it must be zeroed again.
      memset(ptrs[i], 0xab, malloc_usable_size(ptrs[i]));
    }
    // Free the pointers.
    for (size_t i = 0; i < kMaxAllocations; i++) {
      free(ptrs[i]);
    }
  }

  // Do the reallocs to a larger size and verify the rest of the allocation
  // is zero.
  constexpr size_t kInitialSize = 8;
  for (size_t test_size : test_sizes) {
    std::vector<void*> ptrs(kMaxAllocations);
    for (size_t i = 0; i < kMaxAllocations; i++) {
      ptrs[i] = malloc(kInitialSize);
      ASSERT_TRUE(ptrs[i] != nullptr);
      size_t orig_alloc_size = malloc_usable_size(ptrs[i]);

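      // After growing, all bytes past orig_alloc_size must read as zero.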
      ptrs[i] = realloc(ptrs[i], test_size);
      ASSERT_TRUE(ptrs[i] != nullptr);
      size_t new_alloc_size = malloc_usable_size(ptrs[i]);
      char* ptr = reinterpret_cast<char*>(ptrs[i]);
      ASSERT_EQ(0, memcmp(&ptr[orig_alloc_size], zero.data(), new_alloc_size - orig_alloc_size))
          << "realloc from " << kInitialSize << " to size " << test_size << " at iteration " << i;
    }
    for (size_t i = 0; i < kMaxAllocations; i++) {
      free(ptrs[i]);
    }
  }
}

TEST(android_mallopt, get_decay_time_enabled_errors) {
#if defined(__BIONIC__)
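  // A null value pointer must be rejected.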
  errno = 0;
  EXPECT_FALSE(android_mallopt(M_GET_DECAY_TIME_ENABLED, nullptr, sizeof(bool)));
  EXPECT_ERRNO(EINVAL);

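  // A value size other than sizeof(bool) must also be rejected.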
  errno = 0;
  int value;
  EXPECT_FALSE(android_mallopt(M_GET_DECAY_TIME_ENABLED, &value, sizeof(value)));
  EXPECT_ERRNO(EINVAL);
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}

TEST(android_mallopt, get_decay_time_enabled) {
#if defined(__BIONIC__)
  SKIP_WITH_HWASAN << "hwasan does not implement mallopt";

  EXPECT_EQ(1, mallopt(M_DECAY_TIME, 0));

  bool value;
  EXPECT_TRUE(android_mallopt(M_GET_DECAY_TIME_ENABLED, &value, sizeof(value)));
  EXPECT_FALSE(value);

  EXPECT_EQ(1, mallopt(M_DECAY_TIME, 1));
  EXPECT_TRUE(android_mallopt(M_GET_DECAY_TIME_ENABLED, &value, sizeof(value)));
  EXPECT_TRUE(value);

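  // A negative decay time must also read back as disabled.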
  EXPECT_EQ(1, mallopt(M_DECAY_TIME, -1));
  EXPECT_TRUE(android_mallopt(M_GET_DECAY_TIME_ENABLED, &value, sizeof(value)));
  EXPECT_FALSE(value);
#else
  GTEST_SKIP() << "bionic-only test";
#endif
}