//===-- sanitizer_allocator_test.cc ---------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Tests for sanitizer_allocator.h.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_common.h"

#include "sanitizer_test_utils.h"
#include "sanitizer_pthread_wrappers.h"

#include "gtest/gtest.h"

#include <stdlib.h>
#include <algorithm>
#include <vector>
#include <set>

// Too slow for debug build
#if !SANITIZER_DEBUG

#if SANITIZER_CAN_USE_ALLOCATOR64
static const uptr kAllocatorSpace = 0x700000000000ULL;
static const uptr kAllocatorSize  = 0x010000000000ULL;  // 1T.
static const u64 kAddressSpaceSize = 1ULL << 47;

typedef SizeClassAllocator64<
  kAllocatorSpace, kAllocatorSize, 16, DefaultSizeClassMap> Allocator64;

typedef SizeClassAllocator64<
  kAllocatorSpace, kAllocatorSize, 16, CompactSizeClassMap> Allocator64Compact;
#elif defined(__mips64)
static const u64 kAddressSpaceSize = 1ULL << 40;
#else
static const u64 kAddressSpaceSize = 1ULL << 32;
#endif

static const uptr kRegionSizeLog = FIRST_32_SECOND_64(20, 24);
static const uptr kFlatByteMapSize = kAddressSpaceSize >> kRegionSizeLog;

typedef SizeClassAllocator32<
  0, kAddressSpaceSize,
  /*kMetadataSize*/16,
  CompactSizeClassMap,
  kRegionSizeLog,
  FlatByteMap<kFlatByteMapSize> >
  Allocator32Compact;

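// Sanity-check a size class map via its own Validate() routine.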
template <class SizeClassMap>
void TestSizeClassMap() {
  typedef SizeClassMap SCMap;
  // SCMap::Print();
  SCMap::Validate();
}

TEST(SanitizerCommon, DefaultSizeClassMap) {
  TestSizeClassMap<DefaultSizeClassMap>();
}

TEST(SanitizerCommon, CompactSizeClassMap) {
  TestSizeClassMap<CompactSizeClassMap>();
}

TEST(SanitizerCommon, InternalSizeClassMap) {
  TestSizeClassMap<InternalSizeClassMap>();
}

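// Allocates chunks of many different sizes through a local cache, verifies
// block boundaries, size classes and metadata round-trips, then frees
// everything and checks that total memory usage is stable across rounds.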
template <class Allocator>
void TestSizeClassAllocator() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
    50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000};

  std::vector<void *> allocated;

  uptr last_total_allocated = 0;
  for (int i = 0; i < 3; i++) {
    // Allocate a bunch of chunks.
    for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {
      uptr size = sizes[s];
      if (!a->CanAllocate(size, 1)) continue;
      // printf("s = %ld\n", size);
      uptr n_iter = std::max((uptr)6, 8000000 / size);
      // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter);
      for (uptr i = 0; i < n_iter; i++) {
        uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);
        char *x = (char*)cache.Allocate(a, class_id0);
        x[0] = 0;
        x[size - 1] = 0;
        x[size / 2] = 0;
        allocated.push_back(x);
        CHECK_EQ(x, a->GetBlockBegin(x));
        CHECK_EQ(x, a->GetBlockBegin(x + size - 1));
        CHECK(a->PointerIsMine(x));
        CHECK(a->PointerIsMine(x + size - 1));
        CHECK(a->PointerIsMine(x + size / 2));
        CHECK_GE(a->GetActuallyAllocatedSize(x), size);
        uptr class_id = a->GetSizeClass(x);
        CHECK_EQ(class_id, Allocator::SizeClassMapT::ClassID(size));
        uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
        metadata[0] = reinterpret_cast<uptr>(x) + 1;
        metadata[1] = 0xABCD;
      }
    }
    // Deallocate all.
    for (uptr i = 0; i < allocated.size(); i++) {
      void *x = allocated[i];
      uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_EQ(metadata[0], reinterpret_cast<uptr>(x) + 1);
      CHECK_EQ(metadata[1], 0xABCD);
      cache.Deallocate(a, a->GetSizeClass(x), x);
    }
    allocated.clear();
    uptr total_allocated = a->TotalMemoryUsed();
    if (last_total_allocated == 0)
      last_total_allocated = total_allocated;
    CHECK_EQ(last_total_allocated, total_allocated);
  }

  // Check that GetBlockBegin never crashes.
  for (uptr x = 0, step = kAddressSpaceSize / 100000;
       x < kAddressSpaceSize - step; x += step)
    if (a->PointerIsMine(reinterpret_cast<void *>(x)))
      Ident(a->GetBlockBegin(reinterpret_cast<void *>(x)));

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator64) {
  TestSizeClassAllocator<Allocator64>();
}

TEST(SanitizerCommon, SizeClassAllocator64Compact) {
  TestSizeClassAllocator<Allocator64Compact>();
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32Compact) {
  TestSizeClassAllocator<Allocator32Compact>();
}

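// Repeatedly calls GetMetaData() on live chunks and checks that it always
// returns the same pointer for the same chunk.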
template <class Allocator>
void SizeClassAllocatorMetadataStress() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  const uptr kNumAllocs = 1 << 13;
  void *allocated[kNumAllocs];
  void *meta[kNumAllocs];
  for (uptr i = 0; i < kNumAllocs; i++) {
    void *x = cache.Allocate(a, 1 + i % 50);
    allocated[i] = x;
    meta[i] = a->GetMetaData(x);
  }
  // Get Metadata kNumAllocs^2 times.
  for (uptr i = 0; i < kNumAllocs * kNumAllocs; i++) {
    uptr idx = i % kNumAllocs;
    void *m = a->GetMetaData(allocated[idx]);
    EXPECT_EQ(m, meta[idx]);
  }
  for (uptr i = 0; i < kNumAllocs; i++) {
    cache.Deallocate(a, 1 + i % 50, allocated[i]);
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator64MetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator64>();
}

TEST(SanitizerCommon, SizeClassAllocator64CompactMetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator64Compact>();
}
#endif  // SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator32CompactMetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator32Compact>();
}

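// Allocates more than 8G worth of chunks of the largest size class and checks
// that GetBlockBegin() keeps returning the chunk start, i.e. does not
// overflow.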
template <class Allocator>
void SizeClassAllocatorGetBlockBeginStress() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  uptr max_size_class = Allocator::kNumClasses - 1;
  uptr size = Allocator::SizeClassMapT::Size(max_size_class);
  u64 G8 = 1ULL << 33;
  // Make sure we correctly compute GetBlockBegin() w/o overflow.
  for (size_t i = 0; i <= G8 / size; i++) {
    void *x = cache.Allocate(a, max_size_class);
    void *beg = a->GetBlockBegin(x);
    // if ((i & (i - 1)) == 0)
    //   fprintf(stderr, "[%zd] %p %p\n", i, x, beg);
    EXPECT_EQ(x, beg);
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator64GetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator64>();
}
TEST(SanitizerCommon, SizeClassAllocator64CompactGetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator64Compact>();
}
TEST(SanitizerCommon, SizeClassAllocator32CompactGetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator32Compact>();
}
#endif  // SANITIZER_CAN_USE_ALLOCATOR64

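// Counts map/unmap callbacks so tests can check how many times an allocator
// maps and unmaps memory.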
struct TestMapUnmapCallback {
  static int map_count, unmap_count;
  void OnMap(uptr p, uptr size) const { map_count++; }
  void OnUnmap(uptr p, uptr size) const { unmap_count++; }
};
int TestMapUnmapCallback::map_count;
int TestMapUnmapCallback::unmap_count;

#if SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator64MapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  typedef SizeClassAllocator64<
      kAllocatorSpace, kAllocatorSize, 16, DefaultSizeClassMap,
      TestMapUnmapCallback> Allocator64WithCallBack;
  Allocator64WithCallBack *a = new Allocator64WithCallBack;
  a->Init();
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);  // Allocator state.
  SizeClassAllocatorLocalCache<Allocator64WithCallBack> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();
  a->AllocateBatch(&stats, &cache, 32);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 3);  // State + alloc + metadata.
  a->TestOnlyUnmap();
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);  // The whole thing.
  delete a;
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32MapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  typedef SizeClassAllocator32<
      0, kAddressSpaceSize,
      /*kMetadataSize*/16,
      CompactSizeClassMap,
      kRegionSizeLog,
      FlatByteMap<kFlatByteMapSize>,
      TestMapUnmapCallback>
    Allocator32WithCallBack;
  Allocator32WithCallBack *a = new Allocator32WithCallBack;
  a->Init();
  EXPECT_EQ(TestMapUnmapCallback::map_count, 0);
  SizeClassAllocatorLocalCache<Allocator32WithCallBack> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();
  a->AllocateBatch(&stats, &cache, 32);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);
  a->TestOnlyUnmap();
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);
  delete a;
  // fprintf(stderr, "Map: %d Unmap: %d\n",
  //         TestMapUnmapCallback::map_count,
  //         TestMapUnmapCallback::unmap_count);
}

TEST(SanitizerCommon, LargeMmapAllocatorMapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  LargeMmapAllocator<TestMapUnmapCallback> a;
  a.Init(/* may_return_null */ false);
  AllocatorStats stats;
  stats.Init();
  void *x = a.Allocate(&stats, 1 << 20, 1);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);
  a.Deallocate(&stats, x);
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);
}

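// Requests batches until the allocator exhausts its address range; callers
// expect this to die with an out-of-memory check failure.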
template<class Allocator>
void FailInAssertionOnOOM() {
  Allocator a;
  a.Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();
  for (int i = 0; i < 1000000; i++) {
    a.AllocateBatch(&stats, &cache, 52);
  }

  a.TestOnlyUnmap();
}

#if SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
  EXPECT_DEATH(FailInAssertionOnOOM<Allocator64>(), "Out of memory");
}
#endif

#if !defined(_WIN32)  // FIXME: This currently fails on Windows.
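// Exercises the secondary (mmap-based) allocator: plain and aligned
// allocations, metadata, and the GetBlockBegin() boundary case.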
TEST(SanitizerCommon, LargeMmapAllocator) {
  LargeMmapAllocator<> a;
  a.Init(/* may_return_null */ false);
  AllocatorStats stats;
  stats.Init();

  static const int kNumAllocs = 1000;
  char *allocated[kNumAllocs];
  static const uptr size = 4000;
  // Allocate some.
  for (int i = 0; i < kNumAllocs; i++) {
    allocated[i] = (char *)a.Allocate(&stats, size, 1);
    CHECK(a.PointerIsMine(allocated[i]));
  }
  // Deallocate all.
  CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
  for (int i = 0; i < kNumAllocs; i++) {
    char *p = allocated[i];
    CHECK(a.PointerIsMine(p));
    a.Deallocate(&stats, p);
  }
  // Check that none are left.
  CHECK_EQ(a.TotalMemoryUsed(), 0);

  // Allocate some more, also add metadata.
  for (int i = 0; i < kNumAllocs; i++) {
    char *x = (char *)a.Allocate(&stats, size, 1);
    CHECK_GE(a.GetActuallyAllocatedSize(x), size);
    uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(x));
    *meta = i;
    allocated[i] = x;
  }
  for (int i = 0; i < kNumAllocs * kNumAllocs; i++) {
    char *p = allocated[i % kNumAllocs];
    CHECK(a.PointerIsMine(p));
    CHECK(a.PointerIsMine(p + 2000));
  }
  CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
  // Deallocate all in reverse order.
  for (int i = 0; i < kNumAllocs; i++) {
    int idx = kNumAllocs - i - 1;
    char *p = allocated[idx];
    uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(p));
    CHECK_EQ(*meta, idx);
    CHECK(a.PointerIsMine(p));
    a.Deallocate(&stats, p);
  }
  CHECK_EQ(a.TotalMemoryUsed(), 0);

  // Test alignments.
  uptr max_alignment = SANITIZER_WORDSIZE == 64 ? (1 << 28) : (1 << 24);
  for (uptr alignment = 8; alignment <= max_alignment; alignment *= 2) {
    const uptr kNumAlignedAllocs = 100;
    for (uptr i = 0; i < kNumAlignedAllocs; i++) {
      uptr size = ((i % 10) + 1) * 4096;
      char *p = allocated[i] = (char *)a.Allocate(&stats, size, alignment);
      CHECK_EQ(p, a.GetBlockBegin(p));
      CHECK_EQ(p, a.GetBlockBegin(p + size - 1));
      CHECK_EQ(p, a.GetBlockBegin(p + size / 2));
      CHECK_EQ(0, (uptr)allocated[i] % alignment);
      p[0] = p[size - 1] = 0;
    }
    for (uptr i = 0; i < kNumAlignedAllocs; i++) {
      a.Deallocate(&stats, allocated[i]);
    }
  }

  // Regression test for boundary condition in GetBlockBegin().
  uptr page_size = GetPageSizeCached();
  char *p = (char *)a.Allocate(&stats, page_size, 1);
  CHECK_EQ(p, a.GetBlockBegin(p));
  CHECK_EQ(p, (char *)a.GetBlockBegin(p + page_size - 1));
  CHECK_NE(p, (char *)a.GetBlockBegin(p + page_size));
  a.Deallocate(&stats, p);
}
#endif

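// Drives the combined (primary + secondary) allocator: null returns on
// oversized requests, death when may_return_null is disabled, and metadata
// integrity over many randomized allocation/deallocation rounds.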
template
<class PrimaryAllocator, class SecondaryAllocator, class AllocatorCache>
void TestCombinedAllocator() {
  typedef
      CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator>
      Allocator;
  Allocator *a = new Allocator;
  a->Init(/* may_return_null */ true);

  AllocatorCache cache;
  memset(&cache, 0, sizeof(cache));
  a->InitCache(&cache);

  EXPECT_EQ(a->Allocate(&cache, -1, 1), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, -1, 1024), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1024), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1023, 1024), (void*)0);

  // Now disallow returning null: an oversized request must abort the process.
  a->SetMayReturnNull(false);
  EXPECT_DEATH(a->Allocate(&cache, -1, 1),
               "allocator is terminating the process");

  const uptr kNumAllocs = 100000;
  const uptr kNumIter = 10;
  for (uptr iter = 0; iter < kNumIter; iter++) {
    std::vector<void*> allocated;
    for (uptr i = 0; i < kNumAllocs; i++) {
      uptr size = (i % (1 << 14)) + 1;
      if ((i % 1024) == 0)
        size = 1 << (10 + (i % 14));
      void *x = a->Allocate(&cache, size, 1);
      uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_EQ(*meta, 0);
      *meta = size;
      allocated.push_back(x);
    }

    random_shuffle(allocated.begin(), allocated.end());

    for (uptr i = 0; i < kNumAllocs; i++) {
      void *x = allocated[i];
      uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_NE(*meta, 0);
      CHECK(a->PointerIsMine(x));
      *meta = 0;
      a->Deallocate(&cache, x);
    }
    allocated.clear();
    a->SwallowCache(&cache);
  }
  a->DestroyCache(&cache);
  a->TestOnlyUnmap();
}

#if SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, CombinedAllocator64) {
  TestCombinedAllocator<Allocator64,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator64> > ();
}

TEST(SanitizerCommon, CombinedAllocator64Compact) {
  TestCombinedAllocator<Allocator64Compact,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator64Compact> > ();
}
#endif

#if !defined(_WIN32)  // FIXME: This currently fails on Windows.
TEST(SanitizerCommon, CombinedAllocator32Compact) {
  TestCombinedAllocator<Allocator32Compact,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator32Compact> > ();
}
#endif

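// Checks that Drain() returns all cached chunks to the allocator, so total
// memory usage stays constant across repeated allocate/deallocate rounds.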
template <class AllocatorCache>
void TestSizeClassAllocatorLocalCache() {
  AllocatorCache cache;
  typedef typename AllocatorCache::Allocator Allocator;
  Allocator *a = new Allocator();

  a->Init();
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  const uptr kNumAllocs = 10000;
  const int kNumIter = 100;
  uptr saved_total = 0;
  for (int class_id = 1; class_id <= 5; class_id++) {
    for (int it = 0; it < kNumIter; it++) {
      void *allocated[kNumAllocs];
      for (uptr i = 0; i < kNumAllocs; i++) {
        allocated[i] = cache.Allocate(a, class_id);
      }
      for (uptr i = 0; i < kNumAllocs; i++) {
        cache.Deallocate(a, class_id, allocated[i]);
      }
      cache.Drain(a);
      uptr total_allocated = a->TotalMemoryUsed();
      if (it)
        CHECK_EQ(saved_total, total_allocated);
      saved_total = total_allocated;
    }
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator64LocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator64> >();
}

TEST(SanitizerCommon, SizeClassAllocator64CompactLocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator64Compact> >();
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32CompactLocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator32Compact> >();
}

#if SANITIZER_CAN_USE_ALLOCATOR64
typedef SizeClassAllocatorLocalCache<Allocator64> AllocatorCache;
static AllocatorCache static_allocator_cache;

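// Thread body for AllocatorLeakTest: allocates through the static cache and
// drains it, so repeated threads must not grow the allocator's memory usage.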
void *AllocatorLeakTestWorker(void *arg) {
  typedef AllocatorCache::Allocator Allocator;
  Allocator *a = (Allocator*)(arg);
  static_allocator_cache.Allocate(a, 10);
  static_allocator_cache.Drain(a);
  return 0;
}

TEST(SanitizerCommon, AllocatorLeakTest) {
  typedef AllocatorCache::Allocator Allocator;
  Allocator a;
  a.Init();
  uptr total_used_memory = 0;
  for (int i = 0; i < 100; i++) {
    pthread_t t;
    PTHREAD_CREATE(&t, 0, AllocatorLeakTestWorker, &a);
    PTHREAD_JOIN(t, 0);
    if (i == 0)
      total_used_memory = a.TotalMemoryUsed();
    EXPECT_EQ(a.TotalMemoryUsed(), total_used_memory);
  }

  a.TestOnlyUnmap();
}

// Struct which is allocated to pass info to new threads.  The new thread frees
// it.
struct NewThreadParams {
  AllocatorCache *thread_cache;
  AllocatorCache::Allocator *allocator;
  uptr class_id;
};

// Called in a new thread.  Just frees its argument.
static void *DeallocNewThreadWorker(void *arg) {
  NewThreadParams *params = reinterpret_cast<NewThreadParams*>(arg);
  params->thread_cache->Deallocate(params->allocator, params->class_id, params);
  return NULL;
}

// The allocator cache is supposed to be POD and zero initialized.  We should be
// able to call Deallocate on a zeroed cache, and it will self-initialize.
TEST(Allocator, AllocatorCacheDeallocNewThread) {
  AllocatorCache::Allocator allocator;
  allocator.Init();
  AllocatorCache main_cache;
  AllocatorCache child_cache;
  memset(&main_cache, 0, sizeof(main_cache));
  memset(&child_cache, 0, sizeof(child_cache));

  uptr class_id = DefaultSizeClassMap::ClassID(sizeof(NewThreadParams));
  NewThreadParams *params = reinterpret_cast<NewThreadParams*>(
      main_cache.Allocate(&allocator, class_id));
  params->thread_cache = &child_cache;
  params->allocator = &allocator;
  params->class_id = class_id;
  pthread_t t;
  PTHREAD_CREATE(&t, 0, DeallocNewThreadWorker, params);
  PTHREAD_JOIN(t, 0);
}
#endif

TEST(Allocator, Basic) {
  char *p = (char*)InternalAlloc(10);
  EXPECT_NE(p, (char*)0);
  char *p2 = (char*)InternalAlloc(20);
  EXPECT_NE(p2, (char*)0);
  EXPECT_NE(p2, p);
  InternalFree(p);
  InternalFree(p2);
}

TEST(Allocator, Stress) {
  const int kCount = 1000;
  char *ptrs[kCount];
  unsigned rnd = 42;
  for (int i = 0; i < kCount; i++) {
    uptr sz = my_rand_r(&rnd) % 1000;
    char *p = (char*)InternalAlloc(sz);
    EXPECT_NE(p, (char*)0);
    ptrs[i] = p;
  }
  for (int i = 0; i < kCount; i++) {
    InternalFree(ptrs[i]);
  }
}

TEST(Allocator, LargeAlloc) {
  void *p = InternalAlloc(10 << 20);
  InternalFree(p);
}

TEST(Allocator, ScopedBuffer) {
  const int kSize = 512;
  {
    InternalScopedBuffer<int> int_buf(kSize);
    EXPECT_EQ(sizeof(int) * kSize, int_buf.size());  // NOLINT
  }
  InternalScopedBuffer<char> char_buf(kSize);
  EXPECT_EQ(sizeof(char) * kSize, char_buf.size());  // NOLINT
  internal_memset(char_buf.data(), 'c', kSize);
  for (int i = 0; i < kSize; i++) {
    EXPECT_EQ('c', char_buf[i]);
  }
}

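// ForEachChunk() callback: records every reported chunk address in a set.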
void IterationTestCallback(uptr chunk, void *arg) {
  reinterpret_cast<std::set<uptr> *>(arg)->insert(chunk);
}

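// Allocates chunks of various sizes and checks that ForEachChunk() (called
// under ForceLock/ForceUnlock) reports every live chunk.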
template <class Allocator>
void TestSizeClassAllocatorIteration() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
    50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000};

  std::vector<void *> allocated;

  // Allocate a bunch of chunks.
  for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {
    uptr size = sizes[s];
    if (!a->CanAllocate(size, 1)) continue;
    // printf("s = %ld\n", size);
    uptr n_iter = std::max((uptr)6, 80000 / size);
    // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter);
    for (uptr j = 0; j < n_iter; j++) {
      uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);
      void *x = cache.Allocate(a, class_id0);
      allocated.push_back(x);
    }
  }

  std::set<uptr> reported_chunks;
  a->ForceLock();
  a->ForEachChunk(IterationTestCallback, &reported_chunks);
  a->ForceUnlock();

  for (uptr i = 0; i < allocated.size(); i++) {
    // Don't use EXPECT_NE. Reporting the first mismatch is enough.
    ASSERT_NE(reported_chunks.find(reinterpret_cast<uptr>(allocated[i])),
              reported_chunks.end());
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator64Iteration) {
  TestSizeClassAllocatorIteration<Allocator64>();
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32Iteration) {
  TestSizeClassAllocatorIteration<Allocator32Compact>();
}

TEST(SanitizerCommon, LargeMmapAllocatorIteration) {
  LargeMmapAllocator<> a;
  a.Init(/* may_return_null */ false);
  AllocatorStats stats;
  stats.Init();

  static const uptr kNumAllocs = 1000;
  char *allocated[kNumAllocs];
  static const uptr size = 40;
  // Allocate some.
  for (uptr i = 0; i < kNumAllocs; i++)
    allocated[i] = (char *)a.Allocate(&stats, size, 1);

  std::set<uptr> reported_chunks;
  a.ForceLock();
  a.ForEachChunk(IterationTestCallback, &reported_chunks);
  a.ForceUnlock();

  for (uptr i = 0; i < kNumAllocs; i++) {
    // Don't use EXPECT_NE. Reporting the first mismatch is enough.
    ASSERT_NE(reported_chunks.find(reinterpret_cast<uptr>(allocated[i])),
              reported_chunks.end());
  }
  for (uptr i = 0; i < kNumAllocs; i++)
    a.Deallocate(&stats, allocated[i]);
}

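// Checks GetBlockBeginFastLocked(): pointers inside (or just before) a block
// must resolve to the block start, unrelated addresses must return null.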
TEST(SanitizerCommon, LargeMmapAllocatorBlockBegin) {
  LargeMmapAllocator<> a;
  a.Init(/* may_return_null */ false);
  AllocatorStats stats;
  stats.Init();

  static const uptr kNumAllocs = 1024;
  static const uptr kNumExpectedFalseLookups = 10000000;
  char *allocated[kNumAllocs];
  static const uptr size = 4096;
  // Allocate some.
  for (uptr i = 0; i < kNumAllocs; i++) {
    allocated[i] = (char *)a.Allocate(&stats, size, 1);
  }

  a.ForceLock();
  for (uptr i = 0; i < kNumAllocs * kNumAllocs; i++) {
    // if ((i & (i - 1)) == 0) fprintf(stderr, "[%zd]\n", i);
    char *p1 = allocated[i % kNumAllocs];
    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1));
    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 + size / 2));
    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 + size - 1));
    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 - 100));
  }

  for (uptr i = 0; i < kNumExpectedFalseLookups; i++) {
    void *p = reinterpret_cast<void *>(i % 1024);
    EXPECT_EQ((void *)0, a.GetBlockBeginFastLocked(p));
    p = reinterpret_cast<void *>(~0L - (i % 1024));
    EXPECT_EQ((void *)0, a.GetBlockBeginFastLocked(p));
  }
  a.ForceUnlock();

  for (uptr i = 0; i < kNumAllocs; i++)
    a.Deallocate(&stats, allocated[i]);
}


#if SANITIZER_CAN_USE_ALLOCATOR64
// Regression test for out-of-memory condition in PopulateFreeList().
TEST(SanitizerCommon, SizeClassAllocator64PopulateFreeListOOM) {
  // In a world where regions are small and chunks are huge...
  typedef SizeClassMap<63, 128, 16> SpecialSizeClassMap;
  typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0,
                               SpecialSizeClassMap> SpecialAllocator64;
  const uptr kRegionSize =
      kAllocatorSize / SpecialSizeClassMap::kNumClassesRounded;
  SpecialAllocator64 *a = new SpecialAllocator64;
  a->Init();
  SizeClassAllocatorLocalCache<SpecialAllocator64> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  // ...one man is on a mission to overflow a region with a series of
  // successive allocations.
  const uptr kClassID = 107;
  const uptr kAllocationSize = DefaultSizeClassMap::Size(kClassID);
  ASSERT_LT(2 * kAllocationSize, kRegionSize);
  ASSERT_GT(3 * kAllocationSize, kRegionSize);
  cache.Allocate(a, kClassID);
  EXPECT_DEATH(cache.Allocate(a, kClassID) && cache.Allocate(a, kClassID),
               "The process has exhausted");
  a->TestOnlyUnmap();
  delete a;
}
#endif

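// Sets every 7th entry of a TwoLevelByteMap and verifies that those entries
// read back the stored value while all others read back zero.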
TEST(SanitizerCommon, TwoLevelByteMap) {
  const u64 kSize1 = 1 << 6, kSize2 = 1 << 12;
  const u64 n = kSize1 * kSize2;
  TwoLevelByteMap<kSize1, kSize2> m;
  m.TestOnlyInit();
  for (u64 i = 0; i < n; i += 7) {
    m.set(i, (i % 100) + 1);
  }
  for (u64 j = 0; j < n; j++) {
    if (j % 7)
      EXPECT_EQ(m[j], 0);
    else
      EXPECT_EQ(m[j], (j % 100) + 1);
  }

  m.TestOnlyUnmap();
}


typedef TwoLevelByteMap<1 << 12, 1 << 13, TestMapUnmapCallback> TestByteMap;

struct TestByteMapParam {
  TestByteMap *m;
  size_t shard;
  size_t num_shards;
};

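// Thread body: fills its shard of the byte map and verifies each value right
// after storing it.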
void *TwoLevelByteMapUserThread(void *param) {
  TestByteMapParam *p = (TestByteMapParam*)param;
  for (size_t i = p->shard; i < p->m->size(); i += p->num_shards) {
    size_t val = (i % 100) + 1;
    p->m->set(i, val);
    EXPECT_EQ((*p->m)[i], val);
  }
  return 0;
}

TEST(SanitizerCommon, ThreadedTwoLevelByteMap) {
  TestByteMap m;
  m.TestOnlyInit();
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  static const int kNumThreads = 4;
  pthread_t t[kNumThreads];
  TestByteMapParam p[kNumThreads];
  for (int i = 0; i < kNumThreads; i++) {
    p[i].m = &m;
    p[i].shard = i;
    p[i].num_shards = kNumThreads;
    PTHREAD_CREATE(&t[i], 0, TwoLevelByteMapUserThread, &p[i]);
  }
  for (int i = 0; i < kNumThreads; i++) {
    PTHREAD_JOIN(t[i], 0);
  }
  EXPECT_EQ((uptr)TestMapUnmapCallback::map_count, m.size1());
  EXPECT_EQ((uptr)TestMapUnmapCallback::unmap_count, 0UL);
  m.TestOnlyUnmap();
  EXPECT_EQ((uptr)TestMapUnmapCallback::map_count, m.size1());
  EXPECT_EQ((uptr)TestMapUnmapCallback::unmap_count, m.size1());
}

#endif  // #if !SANITIZER_DEBUG