//===-- sanitizer_allocator_test.cc ---------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Tests for sanitizer_allocator.h.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_common.h"

#include "sanitizer_test_utils.h"
#include "sanitizer_pthread_wrappers.h"

#include "gtest/gtest.h"

#include <stdlib.h>
#include <algorithm>
#include <vector>
#include <set>

// Too slow for debug build
#if !SANITIZER_DEBUG

#if SANITIZER_CAN_USE_ALLOCATOR64
static const uptr kAllocatorSpace = 0x700000000000ULL;
static const uptr kAllocatorSize = 0x010000000000ULL;  // 1T.
static const u64 kAddressSpaceSize = 1ULL << 47;

typedef SizeClassAllocator64<
  kAllocatorSpace, kAllocatorSize, 16, DefaultSizeClassMap> Allocator64;

typedef SizeClassAllocator64<
  kAllocatorSpace, kAllocatorSize, 16, CompactSizeClassMap> Allocator64Compact;
#elif defined(__mips64)
static const u64 kAddressSpaceSize = 1ULL << 40;
#elif defined(__aarch64__)
static const u64 kAddressSpaceSize = 1ULL << 39;
#else
static const u64 kAddressSpaceSize = 1ULL << 32;
#endif

static const uptr kRegionSizeLog = FIRST_32_SECOND_64(20, 24);
static const uptr kFlatByteMapSize = kAddressSpaceSize >> kRegionSizeLog;
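// The 32-bit allocator tracks ownership with one byte per 2^kRegionSizeLog
// region of the address space, so the flat byte map needs
// kAddressSpaceSize >> kRegionSizeLog entries.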

typedef SizeClassAllocator32<
  0, kAddressSpaceSize,
  /*kMetadataSize*/16,
  CompactSizeClassMap,
  kRegionSizeLog,
  FlatByteMap<kFlatByteMapSize> >
  Allocator32Compact;

template <class SizeClassMap>
void TestSizeClassMap() {
  typedef SizeClassMap SCMap;
  // SCMap::Print();
  SCMap::Validate();
}

TEST(SanitizerCommon, DefaultSizeClassMap) {
  TestSizeClassMap<DefaultSizeClassMap>();
}

TEST(SanitizerCommon, CompactSizeClassMap) {
  TestSizeClassMap<CompactSizeClassMap>();
}

TEST(SanitizerCommon, InternalSizeClassMap) {
  TestSizeClassMap<InternalSizeClassMap>();
}

template <class Allocator>
void TestSizeClassAllocator() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
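  // The cache is plain-old-data: it is zeroed and Init()-ed explicitly
  // rather than constructed (see AllocatorCacheDeallocNewThread below).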
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
    50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000};

  std::vector<void *> allocated;

  uptr last_total_allocated = 0;
  for (int i = 0; i < 3; i++) {
    // Allocate a bunch of chunks.
    for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {
      uptr size = sizes[s];
      if (!a->CanAllocate(size, 1)) continue;
      // printf("s = %ld\n", size);
      uptr n_iter = std::max((uptr)6, 4000000 / size);
      // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter);
      for (uptr i = 0; i < n_iter; i++) {
        uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);
        char *x = (char*)cache.Allocate(a, class_id0);
        x[0] = 0;
        x[size - 1] = 0;
        x[size / 2] = 0;
        allocated.push_back(x);
        CHECK_EQ(x, a->GetBlockBegin(x));
        CHECK_EQ(x, a->GetBlockBegin(x + size - 1));
        CHECK(a->PointerIsMine(x));
        CHECK(a->PointerIsMine(x + size - 1));
        CHECK(a->PointerIsMine(x + size / 2));
        CHECK_GE(a->GetActuallyAllocatedSize(x), size);
        uptr class_id = a->GetSizeClass(x);
        CHECK_EQ(class_id, Allocator::SizeClassMapT::ClassID(size));
        uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
        metadata[0] = reinterpret_cast<uptr>(x) + 1;
        metadata[1] = 0xABCD;
      }
    }
    // Deallocate all.
    for (uptr i = 0; i < allocated.size(); i++) {
      void *x = allocated[i];
      uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_EQ(metadata[0], reinterpret_cast<uptr>(x) + 1);
      CHECK_EQ(metadata[1], 0xABCD);
      cache.Deallocate(a, a->GetSizeClass(x), x);
    }
    allocated.clear();
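    // The allocator reuses freed chunks rather than returning memory to the
    // OS, so after each allocate/deallocate round the total memory used
    // should settle at the value observed after the first round.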
    uptr total_allocated = a->TotalMemoryUsed();
    if (last_total_allocated == 0)
      last_total_allocated = total_allocated;
    CHECK_EQ(last_total_allocated, total_allocated);
  }

  // Check that GetBlockBegin never crashes.
  for (uptr x = 0, step = kAddressSpaceSize / 100000;
       x < kAddressSpaceSize - step; x += step)
    if (a->PointerIsMine(reinterpret_cast<void *>(x)))
      Ident(a->GetBlockBegin(reinterpret_cast<void *>(x)));

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator64) {
  TestSizeClassAllocator<Allocator64>();
}

TEST(SanitizerCommon, SizeClassAllocator64Compact) {
  TestSizeClassAllocator<Allocator64Compact>();
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32Compact) {
  TestSizeClassAllocator<Allocator32Compact>();
}

template <class Allocator>
void SizeClassAllocatorMetadataStress() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  const uptr kNumAllocs = 1 << 13;
  void *allocated[kNumAllocs];
  void *meta[kNumAllocs];
  for (uptr i = 0; i < kNumAllocs; i++) {
    void *x = cache.Allocate(a, 1 + i % 50);
    allocated[i] = x;
    meta[i] = a->GetMetaData(x);
  }
  // Get Metadata kNumAllocs^2 times.
  for (uptr i = 0; i < kNumAllocs * kNumAllocs; i++) {
    uptr idx = i % kNumAllocs;
    void *m = a->GetMetaData(allocated[idx]);
    EXPECT_EQ(m, meta[idx]);
  }
  for (uptr i = 0; i < kNumAllocs; i++) {
    cache.Deallocate(a, 1 + i % 50, allocated[i]);
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator64MetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator64>();
}

TEST(SanitizerCommon, SizeClassAllocator64CompactMetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator64Compact>();
}
#endif  // SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator32CompactMetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator32Compact>();
}

template <class Allocator>
void SizeClassAllocatorGetBlockBeginStress() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  uptr max_size_class = Allocator::kNumClasses - 1;
  uptr size = Allocator::SizeClassMapT::Size(max_size_class);
  u64 G8 = 1ULL << 33;
  // Make sure we correctly compute GetBlockBegin() w/o overflow.
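  // Allocating roughly 8G worth of the largest size class pushes chunk
  // offsets past what a 32-bit intermediate computation can represent,
  // which is presumably the overflow this regression test guards against.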
  for (size_t i = 0; i <= G8 / size; i++) {
    void *x = cache.Allocate(a, max_size_class);
    void *beg = a->GetBlockBegin(x);
    // if ((i & (i - 1)) == 0)
    //   fprintf(stderr, "[%zd] %p %p\n", i, x, beg);
    EXPECT_EQ(x, beg);
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator64GetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator64>();
}
TEST(SanitizerCommon, SizeClassAllocator64CompactGetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator64Compact>();
}
TEST(SanitizerCommon, SizeClassAllocator32CompactGetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator32Compact>();
}
#endif  // SANITIZER_CAN_USE_ALLOCATOR64

struct TestMapUnmapCallback {
  static int map_count, unmap_count;
  void OnMap(uptr p, uptr size) const { map_count++; }
  void OnUnmap(uptr p, uptr size) const { unmap_count++; }
};
int TestMapUnmapCallback::map_count;
int TestMapUnmapCallback::unmap_count;

#if SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator64MapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  typedef SizeClassAllocator64<
      kAllocatorSpace, kAllocatorSize, 16, DefaultSizeClassMap,
      TestMapUnmapCallback> Allocator64WithCallBack;
  Allocator64WithCallBack *a = new Allocator64WithCallBack;
  a->Init();
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);  // Allocator state.
  SizeClassAllocatorLocalCache<Allocator64WithCallBack> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();
  a->AllocateBatch(&stats, &cache, 32);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 3);  // State + alloc + metadata.
  a->TestOnlyUnmap();
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);  // The whole thing.
  delete a;
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32MapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  typedef SizeClassAllocator32<
      0, kAddressSpaceSize,
      /*kMetadataSize*/16,
      CompactSizeClassMap,
      kRegionSizeLog,
      FlatByteMap<kFlatByteMapSize>,
      TestMapUnmapCallback>
    Allocator32WithCallBack;
  Allocator32WithCallBack *a = new Allocator32WithCallBack;
  a->Init();
  EXPECT_EQ(TestMapUnmapCallback::map_count, 0);
  SizeClassAllocatorLocalCache<Allocator32WithCallBack> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();
  a->AllocateBatch(&stats, &cache, 32);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);
  a->TestOnlyUnmap();
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);
  delete a;
  // fprintf(stderr, "Map: %d Unmap: %d\n",
  //         TestMapUnmapCallback::map_count,
  //         TestMapUnmapCallback::unmap_count);
}

TEST(SanitizerCommon, LargeMmapAllocatorMapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  LargeMmapAllocator<TestMapUnmapCallback> a;
  a.Init(/* may_return_null */ false);
  AllocatorStats stats;
  stats.Init();
  void *x = a.Allocate(&stats, 1 << 20, 1);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);
  a.Deallocate(&stats, x);
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);
}

template<class Allocator>
void FailInAssertionOnOOM() {
  Allocator a;
  a.Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();
  for (int i = 0; i < 1000000; i++) {
    a.AllocateBatch(&stats, &cache, 52);
  }

  a.TestOnlyUnmap();
}

#if SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
  EXPECT_DEATH(FailInAssertionOnOOM<Allocator64>(), "Out of memory");
}
#endif

#if !defined(_WIN32)  // FIXME: This currently fails on Windows.
TEST(SanitizerCommon, LargeMmapAllocator) {
  LargeMmapAllocator<> a;
  a.Init(/* may_return_null */ false);
  AllocatorStats stats;
  stats.Init();

  static const int kNumAllocs = 1000;
  char *allocated[kNumAllocs];
  static const uptr size = 4000;
  // Allocate some.
  for (int i = 0; i < kNumAllocs; i++) {
    allocated[i] = (char *)a.Allocate(&stats, size, 1);
    CHECK(a.PointerIsMine(allocated[i]));
  }
  // Deallocate all.
  CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
  for (int i = 0; i < kNumAllocs; i++) {
    char *p = allocated[i];
    CHECK(a.PointerIsMine(p));
    a.Deallocate(&stats, p);
  }
  // Check that none are left.
  CHECK_EQ(a.TotalMemoryUsed(), 0);

  // Allocate some more, also add metadata.
  for (int i = 0; i < kNumAllocs; i++) {
    char *x = (char *)a.Allocate(&stats, size, 1);
    CHECK_GE(a.GetActuallyAllocatedSize(x), size);
    uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(x));
    *meta = i;
    allocated[i] = x;
  }
  for (int i = 0; i < kNumAllocs * kNumAllocs; i++) {
    char *p = allocated[i % kNumAllocs];
    CHECK(a.PointerIsMine(p));
    CHECK(a.PointerIsMine(p + 2000));
  }
  CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
  // Deallocate all in reverse order.
  for (int i = 0; i < kNumAllocs; i++) {
    int idx = kNumAllocs - i - 1;
    char *p = allocated[idx];
    uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(p));
    CHECK_EQ(*meta, idx);
    CHECK(a.PointerIsMine(p));
    a.Deallocate(&stats, p);
  }
  CHECK_EQ(a.TotalMemoryUsed(), 0);

  // Test alignments.
  uptr max_alignment = SANITIZER_WORDSIZE == 64 ? (1 << 28) : (1 << 24);
  for (uptr alignment = 8; alignment <= max_alignment; alignment *= 2) {
    const uptr kNumAlignedAllocs = 100;
    for (uptr i = 0; i < kNumAlignedAllocs; i++) {
      uptr size = ((i % 10) + 1) * 4096;
      char *p = allocated[i] = (char *)a.Allocate(&stats, size, alignment);
      CHECK_EQ(p, a.GetBlockBegin(p));
      CHECK_EQ(p, a.GetBlockBegin(p + size - 1));
      CHECK_EQ(p, a.GetBlockBegin(p + size / 2));
      CHECK_EQ(0, (uptr)allocated[i] % alignment);
      p[0] = p[size - 1] = 0;
    }
    for (uptr i = 0; i < kNumAlignedAllocs; i++) {
      a.Deallocate(&stats, allocated[i]);
    }
  }

  // Regression test for boundary condition in GetBlockBegin().
  uptr page_size = GetPageSizeCached();
  char *p = (char *)a.Allocate(&stats, page_size, 1);
  CHECK_EQ(p, a.GetBlockBegin(p));
  CHECK_EQ(p, (char *)a.GetBlockBegin(p + page_size - 1));
  CHECK_NE(p, (char *)a.GetBlockBegin(p + page_size));
  a.Deallocate(&stats, p);
}
#endif

template
<class PrimaryAllocator, class SecondaryAllocator, class AllocatorCache>
void TestCombinedAllocator() {
  typedef
      CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator>
      Allocator;
  Allocator *a = new Allocator;
  a->Init(/* may_return_null */ true);

  AllocatorCache cache;
  memset(&cache, 0, sizeof(cache));
  a->InitCache(&cache);

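  // Requests whose size (or size rounded up for alignment) would overflow
  // uptr must fail; with may_return_null == true they should come back as
  // null rather than aborting the process.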
  EXPECT_EQ(a->Allocate(&cache, -1, 1), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, -1, 1024), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1024), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1023, 1024), (void*)0);

  // With may_return_null == false, the same overflowing request must abort.
  a->SetMayReturnNull(false);
  EXPECT_DEATH(a->Allocate(&cache, -1, 1),
               "allocator is terminating the process");

  const uptr kNumAllocs = 100000;
  const uptr kNumIter = 10;
  for (uptr iter = 0; iter < kNumIter; iter++) {
    std::vector<void*> allocated;
    for (uptr i = 0; i < kNumAllocs; i++) {
      uptr size = (i % (1 << 14)) + 1;
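      // Every 1024th allocation asks for a much larger size (up to 8M), so
      // that large requests also exercise the secondary allocator.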
      if ((i % 1024) == 0)
        size = 1 << (10 + (i % 14));
      void *x = a->Allocate(&cache, size, 1);
      uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_EQ(*meta, 0);
      *meta = size;
      allocated.push_back(x);
    }

    random_shuffle(allocated.begin(), allocated.end());

    for (uptr i = 0; i < kNumAllocs; i++) {
      void *x = allocated[i];
      uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_NE(*meta, 0);
      CHECK(a->PointerIsMine(x));
      *meta = 0;
      a->Deallocate(&cache, x);
    }
    allocated.clear();
    a->SwallowCache(&cache);
  }
  a->DestroyCache(&cache);
  a->TestOnlyUnmap();
}

#if SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, CombinedAllocator64) {
  TestCombinedAllocator<Allocator64,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator64> > ();
}

TEST(SanitizerCommon, CombinedAllocator64Compact) {
  TestCombinedAllocator<Allocator64Compact,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator64Compact> > ();
}
#endif

#if !defined(_WIN32)  // FIXME: This currently fails on Windows.
TEST(SanitizerCommon, CombinedAllocator32Compact) {
  TestCombinedAllocator<Allocator32Compact,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator32Compact> > ();
}
#endif

template <class AllocatorCache>
void TestSizeClassAllocatorLocalCache() {
  AllocatorCache cache;
  typedef typename AllocatorCache::Allocator Allocator;
  Allocator *a = new Allocator();

  a->Init();
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  const uptr kNumAllocs = 10000;
  const int kNumIter = 100;
  uptr saved_total = 0;
  for (int class_id = 1; class_id <= 5; class_id++) {
    for (int it = 0; it < kNumIter; it++) {
      void *allocated[kNumAllocs];
      for (uptr i = 0; i < kNumAllocs; i++) {
        allocated[i] = cache.Allocate(a, class_id);
      }
      for (uptr i = 0; i < kNumAllocs; i++) {
        cache.Deallocate(a, class_id, allocated[i]);
      }
      cache.Drain(a);
      uptr total_allocated = a->TotalMemoryUsed();
      if (it)
        CHECK_EQ(saved_total, total_allocated);
      saved_total = total_allocated;
    }
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator64LocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator64> >();
}

TEST(SanitizerCommon, SizeClassAllocator64CompactLocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator64Compact> >();
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32CompactLocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator32Compact> >();
}

#if SANITIZER_CAN_USE_ALLOCATOR64
typedef SizeClassAllocatorLocalCache<Allocator64> AllocatorCache;
static AllocatorCache static_allocator_cache;

void *AllocatorLeakTestWorker(void *arg) {
  typedef AllocatorCache::Allocator Allocator;
  Allocator *a = (Allocator*)(arg);
  static_allocator_cache.Allocate(a, 10);
  static_allocator_cache.Drain(a);
  return 0;
}

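// Each worker thread allocates one chunk through the shared static cache
// and drains the cache back to the allocator. If draining leaked memory,
// TotalMemoryUsed() would keep growing across iterations.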
TEST(SanitizerCommon, AllocatorLeakTest) {
  typedef AllocatorCache::Allocator Allocator;
  Allocator a;
  a.Init();
  uptr total_used_memory = 0;
  for (int i = 0; i < 100; i++) {
    pthread_t t;
    PTHREAD_CREATE(&t, 0, AllocatorLeakTestWorker, &a);
    PTHREAD_JOIN(t, 0);
    if (i == 0)
      total_used_memory = a.TotalMemoryUsed();
    EXPECT_EQ(a.TotalMemoryUsed(), total_used_memory);
  }

  a.TestOnlyUnmap();
}

// Struct which is allocated to pass info to new threads. The new thread frees
// it.
struct NewThreadParams {
  AllocatorCache *thread_cache;
  AllocatorCache::Allocator *allocator;
  uptr class_id;
};

// Called in a new thread. Just frees its argument.
static void *DeallocNewThreadWorker(void *arg) {
  NewThreadParams *params = reinterpret_cast<NewThreadParams*>(arg);
  params->thread_cache->Deallocate(params->allocator, params->class_id, params);
  return NULL;
}

// The allocator cache is supposed to be POD and zero initialized. We should be
// able to call Deallocate on a zeroed cache, and it will self-initialize.
TEST(Allocator, AllocatorCacheDeallocNewThread) {
  AllocatorCache::Allocator allocator;
  allocator.Init();
  AllocatorCache main_cache;
  AllocatorCache child_cache;
  memset(&main_cache, 0, sizeof(main_cache));
  memset(&child_cache, 0, sizeof(child_cache));

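  // Allocate the params from the smallest size class that fits them, so the
  // child thread can free its own argument using the same class_id.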
  uptr class_id = DefaultSizeClassMap::ClassID(sizeof(NewThreadParams));
  NewThreadParams *params = reinterpret_cast<NewThreadParams*>(
      main_cache.Allocate(&allocator, class_id));
  params->thread_cache = &child_cache;
  params->allocator = &allocator;
  params->class_id = class_id;
  pthread_t t;
  PTHREAD_CREATE(&t, 0, DeallocNewThreadWorker, params);
  PTHREAD_JOIN(t, 0);
}
#endif

TEST(Allocator, Basic) {
  char *p = (char*)InternalAlloc(10);
  EXPECT_NE(p, (char*)0);
  char *p2 = (char*)InternalAlloc(20);
  EXPECT_NE(p2, (char*)0);
  EXPECT_NE(p2, p);
  InternalFree(p);
  InternalFree(p2);
}

TEST(Allocator, Stress) {
  const int kCount = 1000;
  char *ptrs[kCount];
  unsigned rnd = 42;
  for (int i = 0; i < kCount; i++) {
    uptr sz = my_rand_r(&rnd) % 1000;
    char *p = (char*)InternalAlloc(sz);
    EXPECT_NE(p, (char*)0);
    ptrs[i] = p;
  }
  for (int i = 0; i < kCount; i++) {
    InternalFree(ptrs[i]);
  }
}

TEST(Allocator, LargeAlloc) {
  void *p = InternalAlloc(10 << 20);
  InternalFree(p);
}

TEST(Allocator, ScopedBuffer) {
  const int kSize = 512;
  {
    InternalScopedBuffer<int> int_buf(kSize);
    EXPECT_EQ(sizeof(int) * kSize, int_buf.size());  // NOLINT
  }
  InternalScopedBuffer<char> char_buf(kSize);
  EXPECT_EQ(sizeof(char) * kSize, char_buf.size());  // NOLINT
  internal_memset(char_buf.data(), 'c', kSize);
  for (int i = 0; i < kSize; i++) {
    EXPECT_EQ('c', char_buf[i]);
  }
}

void IterationTestCallback(uptr chunk, void *arg) {
  reinterpret_cast<std::set<uptr> *>(arg)->insert(chunk);
}

template <class Allocator>
void TestSizeClassAllocatorIteration() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
    50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000};

  std::vector<void *> allocated;

  // Allocate a bunch of chunks.
  for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {
    uptr size = sizes[s];
    if (!a->CanAllocate(size, 1)) continue;
    // printf("s = %ld\n", size);
    uptr n_iter = std::max((uptr)6, 80000 / size);
    // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter);
    for (uptr j = 0; j < n_iter; j++) {
      uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);
      void *x = cache.Allocate(a, class_id0);
      allocated.push_back(x);
    }
  }

  std::set<uptr> reported_chunks;
  a->ForceLock();
  a->ForEachChunk(IterationTestCallback, &reported_chunks);
  a->ForceUnlock();

  for (uptr i = 0; i < allocated.size(); i++) {
    // Don't use EXPECT_NE. Reporting the first mismatch is enough.
    ASSERT_NE(reported_chunks.find(reinterpret_cast<uptr>(allocated[i])),
              reported_chunks.end());
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_CAN_USE_ALLOCATOR64
TEST(SanitizerCommon, SizeClassAllocator64Iteration) {
  TestSizeClassAllocatorIteration<Allocator64>();
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32Iteration) {
  TestSizeClassAllocatorIteration<Allocator32Compact>();
}

TEST(SanitizerCommon, LargeMmapAllocatorIteration) {
  LargeMmapAllocator<> a;
  a.Init(/* may_return_null */ false);
  AllocatorStats stats;
  stats.Init();

  static const uptr kNumAllocs = 1000;
  char *allocated[kNumAllocs];
  static const uptr size = 40;
  // Allocate some.
  for (uptr i = 0; i < kNumAllocs; i++)
    allocated[i] = (char *)a.Allocate(&stats, size, 1);

  std::set<uptr> reported_chunks;
  a.ForceLock();
  a.ForEachChunk(IterationTestCallback, &reported_chunks);
  a.ForceUnlock();

  for (uptr i = 0; i < kNumAllocs; i++) {
    // Don't use EXPECT_NE. Reporting the first mismatch is enough.
    ASSERT_NE(reported_chunks.find(reinterpret_cast<uptr>(allocated[i])),
              reported_chunks.end());
  }
  for (uptr i = 0; i < kNumAllocs; i++)
    a.Deallocate(&stats, allocated[i]);
}

TEST(SanitizerCommon, LargeMmapAllocatorBlockBegin) {
  LargeMmapAllocator<> a;
  a.Init(/* may_return_null */ false);
  AllocatorStats stats;
  stats.Init();

  static const uptr kNumAllocs = 1024;
  static const uptr kNumExpectedFalseLookups = 10000000;
  char *allocated[kNumAllocs];
  static const uptr size = 4096;
  // Allocate some.
  for (uptr i = 0; i < kNumAllocs; i++) {
    allocated[i] = (char *)a.Allocate(&stats, size, 1);
  }

  a.ForceLock();
  for (uptr i = 0; i < kNumAllocs * kNumAllocs; i++) {
    // if ((i & (i - 1)) == 0) fprintf(stderr, "[%zd]\n", i);
    char *p1 = allocated[i % kNumAllocs];
    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1));
    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 + size / 2));
    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 + size - 1));
    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 - 100));
  }

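  // Pointers near the bottom and top of the address space belong to no
  // chunk and must yield null, even across millions of lookups.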
  for (uptr i = 0; i < kNumExpectedFalseLookups; i++) {
    void *p = reinterpret_cast<void *>(i % 1024);
    EXPECT_EQ((void *)0, a.GetBlockBeginFastLocked(p));
    p = reinterpret_cast<void *>(~0L - (i % 1024));
    EXPECT_EQ((void *)0, a.GetBlockBeginFastLocked(p));
  }
  a.ForceUnlock();

  for (uptr i = 0; i < kNumAllocs; i++)
    a.Deallocate(&stats, allocated[i]);
}


#if SANITIZER_CAN_USE_ALLOCATOR64
// Regression test for out-of-memory condition in PopulateFreeList().
TEST(SanitizerCommon, SizeClassAllocator64PopulateFreeListOOM) {
  // In a world where regions are small and chunks are huge...
  typedef SizeClassMap<63, 128, 16> SpecialSizeClassMap;
  typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0,
                               SpecialSizeClassMap> SpecialAllocator64;
  const uptr kRegionSize =
      kAllocatorSize / SpecialSizeClassMap::kNumClassesRounded;
  SpecialAllocator64 *a = new SpecialAllocator64;
  a->Init();
  SizeClassAllocatorLocalCache<SpecialAllocator64> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  // ...one man is on a mission to overflow a region with a series of
  // successive allocations.
  const uptr kClassID = 107;
  const uptr kAllocationSize = DefaultSizeClassMap::Size(kClassID);
  ASSERT_LT(2 * kAllocationSize, kRegionSize);
  ASSERT_GT(3 * kAllocationSize, kRegionSize);
  cache.Allocate(a, kClassID);
  EXPECT_DEATH(cache.Allocate(a, kClassID) && cache.Allocate(a, kClassID),
               "The process has exhausted");
  a->TestOnlyUnmap();
  delete a;
}
#endif

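// TwoLevelByteMap is logically a flat array of kSize1 * kSize2 bytes, but
// memory for each kSize2-byte second-level table appears to be mapped
// lazily, on the first set() that touches it (see the map_count
// expectations in ThreadedTwoLevelByteMap below).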
TEST(SanitizerCommon, TwoLevelByteMap) {
  const u64 kSize1 = 1 << 6, kSize2 = 1 << 12;
  const u64 n = kSize1 * kSize2;
  TwoLevelByteMap<kSize1, kSize2> m;
  m.TestOnlyInit();
  for (u64 i = 0; i < n; i += 7) {
    m.set(i, (i % 100) + 1);
  }
  for (u64 j = 0; j < n; j++) {
    if (j % 7)
      EXPECT_EQ(m[j], 0);
    else
      EXPECT_EQ(m[j], (j % 100) + 1);
  }

  m.TestOnlyUnmap();
}


typedef TwoLevelByteMap<1 << 12, 1 << 13, TestMapUnmapCallback> TestByteMap;

struct TestByteMapParam {
  TestByteMap *m;
  size_t shard;
  size_t num_shards;
};

void *TwoLevelByteMapUserThread(void *param) {
  TestByteMapParam *p = (TestByteMapParam*)param;
  for (size_t i = p->shard; i < p->m->size(); i += p->num_shards) {
    size_t val = (i % 100) + 1;
    p->m->set(i, val);
    EXPECT_EQ((*p->m)[i], val);
  }
  return 0;
}

TEST(SanitizerCommon, ThreadedTwoLevelByteMap) {
  TestByteMap m;
  m.TestOnlyInit();
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  static const int kNumThreads = 4;
  pthread_t t[kNumThreads];
  TestByteMapParam p[kNumThreads];
  for (int i = 0; i < kNumThreads; i++) {
    p[i].m = &m;
    p[i].shard = i;
    p[i].num_shards = kNumThreads;
    PTHREAD_CREATE(&t[i], 0, TwoLevelByteMapUserThread, &p[i]);
  }
  for (int i = 0; i < kNumThreads; i++) {
    PTHREAD_JOIN(t[i], 0);
  }
  EXPECT_EQ((uptr)TestMapUnmapCallback::map_count, m.size1());
  EXPECT_EQ((uptr)TestMapUnmapCallback::unmap_count, 0UL);
  m.TestOnlyUnmap();
  EXPECT_EQ((uptr)TestMapUnmapCallback::map_count, m.size1());
  EXPECT_EQ((uptr)TestMapUnmapCallback::unmap_count, m.size1());
}

#endif  // #if !SANITIZER_DEBUG
863