//===-- secondary_test.cpp --------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "tests/scudo_unit_test.h"

#include "allocator_config.h"
#include "secondary.h"

#include <stdio.h>

#include <algorithm>
#include <condition_variable>
#include <mutex>
#include <random>
#include <thread>
#include <vector>

template <typename Config> static void testSecondaryBasic(void) {
  using SecondaryT = scudo::MapAllocator<Config>;

  scudo::GlobalStats S;
  S.init();
  std::unique_ptr<SecondaryT> L(new SecondaryT);
  L->init(&S);
  const scudo::uptr Size = 1U << 16;
  void *P = L->allocate(scudo::Options{}, Size);
  EXPECT_NE(P, nullptr);
  memset(P, 'A', Size);
  EXPECT_GE(SecondaryT::getBlockSize(P), Size);
  L->deallocate(scudo::Options{}, P);
  // If the Secondary can't cache that pointer, it will be unmapped.
  if (!L->canCache(Size))
    EXPECT_DEATH(memset(P, 'A', Size), "");

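  // Allocate with an explicit alignment, round the returned pointer up to
  // that alignment, and write to the aligned portion of the block.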
  const scudo::uptr Align = 1U << 16;
  P = L->allocate(scudo::Options{}, Size + Align, Align);
  EXPECT_NE(P, nullptr);
  void *AlignedP = reinterpret_cast<void *>(
      scudo::roundUpTo(reinterpret_cast<scudo::uptr>(P), Align));
  memset(AlignedP, 'A', Size);
  L->deallocate(scudo::Options{}, P);

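  // Allocate a batch of blocks and free them in a random order to exercise
  // out-of-order deallocations (and the cache, when one is enabled).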
  std::vector<void *> V;
  for (scudo::uptr I = 0; I < 32U; I++)
    V.push_back(L->allocate(scudo::Options{}, Size));
  std::shuffle(V.begin(), V.end(), std::mt19937(std::random_device()()));
  while (!V.empty()) {
    L->deallocate(scudo::Options{}, V.back());
    V.pop_back();
  }
  scudo::ScopedString Str(1024);
  L->getStats(&Str);
  Str.output();
}

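// A secondary configuration without a cache: freed blocks are unmapped right
// away.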
struct NoCacheConfig {
  typedef scudo::MapAllocatorNoCache SecondaryCache;
  static const bool MaySupportMemoryTagging = false;
};

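// A cached secondary configuration with custom limits, used to exercise the
// MapAllocatorCache paths.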
struct TestConfig {
  typedef scudo::MapAllocatorCache<TestConfig> SecondaryCache;
  static const bool MaySupportMemoryTagging = false;
  static const scudo::u32 SecondaryCacheEntriesArraySize = 128U;
  static const scudo::u32 SecondaryCacheQuarantineSize = 0U;
  static const scudo::u32 SecondaryCacheDefaultMaxEntriesCount = 64U;
  static const scudo::uptr SecondaryCacheDefaultMaxEntrySize = 1UL << 20;
  static const scudo::s32 SecondaryCacheMinReleaseToOsIntervalMs = INT32_MIN;
  static const scudo::s32 SecondaryCacheMaxReleaseToOsIntervalMs = INT32_MAX;
};

TEST(ScudoSecondaryTest, SecondaryBasic) {
  testSecondaryBasic<NoCacheConfig>();
  testSecondaryBasic<scudo::DefaultConfig>();
  testSecondaryBasic<TestConfig>();
}

using LargeAllocator = scudo::MapAllocator<scudo::DefaultConfig>;

// This exercises a variety of combinations of size and alignment for the
// MapAllocator. The size computation done here mimics the one done by the
// combined allocator.
TEST(ScudoSecondaryTest, SecondaryCombinations) {
  constexpr scudo::uptr MinAlign = FIRST_32_SECOND_64(8, 16);
  constexpr scudo::uptr HeaderSize = scudo::roundUpTo(8, MinAlign);
  std::unique_ptr<LargeAllocator> L(new LargeAllocator);
  L->init(nullptr);
  for (scudo::uptr SizeLog = 0; SizeLog <= 20; SizeLog++) {
    for (scudo::uptr AlignLog = FIRST_32_SECOND_64(3, 4); AlignLog <= 16;
         AlignLog++) {
      const scudo::uptr Align = 1U << AlignLog;
      for (scudo::sptr Delta = -128; Delta <= 128; Delta += 8) {
        if (static_cast<scudo::sptr>(1U << SizeLog) + Delta <= 0)
          continue;
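        // Round the requested size up to the minimum alignment and reserve
        // room for the header, plus extra padding when the requested
        // alignment exceeds the minimum, mirroring the combined allocator.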
        const scudo::uptr UserSize =
            scudo::roundUpTo((1U << SizeLog) + Delta, MinAlign);
        const scudo::uptr Size =
            HeaderSize + UserSize + (Align > MinAlign ? Align - HeaderSize : 0);
        void *P = L->allocate(scudo::Options{}, Size, Align);
        EXPECT_NE(P, nullptr);
        void *AlignedP = reinterpret_cast<void *>(
            scudo::roundUpTo(reinterpret_cast<scudo::uptr>(P), Align));
        memset(AlignedP, 0xff, UserSize);
        L->deallocate(scudo::Options{}, P);
      }
    }
  }
  scudo::ScopedString Str(1024);
  L->getStats(&Str);
  Str.output();
}

TEST(ScudoSecondaryTest, SecondaryIterate) {
  std::unique_ptr<LargeAllocator> L(new LargeAllocator);
  L->init(nullptr);
  std::vector<void *> V;
  const scudo::uptr PageSize = scudo::getPageSizeCached();
  for (scudo::uptr I = 0; I < 32U; I++)
    V.push_back(L->allocate(scudo::Options{}, (std::rand() % 16) * PageSize));
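  // Check that every live block is visited by iterateOverBlocks while the
  // allocator is disabled.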
  auto Lambda = [V](scudo::uptr Block) {
    EXPECT_NE(std::find(V.begin(), V.end(), reinterpret_cast<void *>(Block)),
              V.end());
  };
  L->disable();
  L->iterateOverBlocks(Lambda);
  L->enable();
  while (!V.empty()) {
    L->deallocate(scudo::Options{}, V.back());
    V.pop_back();
  }
  scudo::ScopedString Str(1024);
  L->getStats(&Str);
  Str.output();
}

TEST(ScudoSecondaryTest, SecondaryOptions) {
  std::unique_ptr<LargeAllocator> L(new LargeAllocator);
  L->init(nullptr);
  // Attempt to set a maximum number of entries higher than the array size.
  EXPECT_FALSE(L->setOption(scudo::Option::MaxCacheEntriesCount, 4096U));
  // A negative number will be cast to a scudo::u32, and fail.
  EXPECT_FALSE(L->setOption(scudo::Option::MaxCacheEntriesCount, -1));
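  // The cache-related options below only make sense when the secondary has a
  // cache enabled.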
  if (L->canCache(0U)) {
    // Various valid combinations.
    EXPECT_TRUE(L->setOption(scudo::Option::MaxCacheEntriesCount, 4U));
    EXPECT_TRUE(L->setOption(scudo::Option::MaxCacheEntrySize, 1UL << 20));
    EXPECT_TRUE(L->canCache(1UL << 18));
    EXPECT_TRUE(L->setOption(scudo::Option::MaxCacheEntrySize, 1UL << 17));
    EXPECT_FALSE(L->canCache(1UL << 18));
    EXPECT_TRUE(L->canCache(1UL << 16));
    EXPECT_TRUE(L->setOption(scudo::Option::MaxCacheEntriesCount, 0U));
    EXPECT_FALSE(L->canCache(1UL << 16));
    EXPECT_TRUE(L->setOption(scudo::Option::MaxCacheEntriesCount, 4U));
    EXPECT_TRUE(L->setOption(scudo::Option::MaxCacheEntrySize, 1UL << 20));
    EXPECT_TRUE(L->canCache(1UL << 16));
  }
}

static std::mutex Mutex;
static std::condition_variable Cv;
static bool Ready;

static void performAllocations(LargeAllocator *L) {
  std::vector<void *> V;
  const scudo::uptr PageSize = scudo::getPageSizeCached();
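  // Wait until the main thread signals that all the threads have been
  // created, so that the allocations below race with each other.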
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    while (!Ready)
      Cv.wait(Lock);
  }
  for (scudo::uptr I = 0; I < 128U; I++) {
    // Deallocate 75% of the blocks.
    const bool Deallocate = (rand() & 3) != 0;
    void *P = L->allocate(scudo::Options{}, (std::rand() % 16) * PageSize);
    if (Deallocate)
      L->deallocate(scudo::Options{}, P);
    else
      V.push_back(P);
  }
  while (!V.empty()) {
    L->deallocate(scudo::Options{}, V.back());
    V.pop_back();
  }
}

TEST(ScudoSecondaryTest, SecondaryThreadsRace) {
  Ready = false;
  std::unique_ptr<LargeAllocator> L(new LargeAllocator);
  L->init(nullptr, /*ReleaseToOsInterval=*/0);
  std::thread Threads[16];
  for (scudo::uptr I = 0; I < ARRAY_SIZE(Threads); I++)
    Threads[I] = std::thread(performAllocations, L.get());
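  // Release all the waiting threads at once so their allocations and
  // deallocations overlap.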
  {
    std::unique_lock<std::mutex> Lock(Mutex);
    Ready = true;
    Cv.notify_all();
  }
  for (auto &T : Threads)
    T.join();
  scudo::ScopedString Str(1024);
  L->getStats(&Str);
  Str.output();
}