// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/metrics/persistent_memory_allocator.h"

#include <memory>

#include "base/files/file.h"
#include "base/files/file_util.h"
#include "base/files/memory_mapped_file.h"
#include "base/files/scoped_temp_dir.h"
#include "base/memory/shared_memory.h"
#include "base/metrics/histogram.h"
#include "base/rand_util.h"
#include "base/strings/safe_sprintf.h"
#include "base/synchronization/condition_variable.h"
#include "base/synchronization/lock.h"
#include "base/threading/simple_thread.h"
#include "testing/gmock/include/gmock/gmock.h"

namespace {

const uint32_t TEST_MEMORY_SIZE = 1 << 20;   // 1 MiB
const uint32_t TEST_MEMORY_PAGE = 64 << 10;  // 64 KiB
const uint32_t TEST_ID = 12345;
const char TEST_NAME[] = "TestAllocator";

}  // namespace

namespace base {

typedef PersistentMemoryAllocator::Reference Reference;

class PersistentMemoryAllocatorTest : public testing::Test {
 public:
  // This can't be statically initialized because its value isn't defined
  // in the PersistentMemoryAllocator header file. Instead, it's simply set
  // in the constructor.
  uint32_t kAllocAlignment;

  struct TestObject1 {
    int onething;
    char oranother;
  };

  struct TestObject2 {
    int thiis;
    long that;
    float andthe;
    char other;
    double thing;
  };

  PersistentMemoryAllocatorTest() {
    kAllocAlignment = GetAllocAlignment();
    mem_segment_.reset(new char[TEST_MEMORY_SIZE]);
  }

  void SetUp() override {
    allocator_.reset();
    ::memset(mem_segment_.get(), 0, TEST_MEMORY_SIZE);
    allocator_.reset(new PersistentMemoryAllocator(
        mem_segment_.get(), TEST_MEMORY_SIZE, TEST_MEMORY_PAGE,
        TEST_ID, TEST_NAME, false));
    allocator_->CreateTrackingHistograms(allocator_->Name());
  }

  void TearDown() override {
    allocator_.reset();
  }

  unsigned CountIterables() {
    PersistentMemoryAllocator::Iterator iter(allocator_.get());
    uint32_t type;
    unsigned count = 0;
    while (iter.GetNext(&type) != 0) {
      ++count;
    }
    return count;
  }

  static uint32_t GetAllocAlignment() {
    return PersistentMemoryAllocator::kAllocAlignment;
  }

 protected:
  std::unique_ptr<char[]> mem_segment_;
  std::unique_ptr<PersistentMemoryAllocator> allocator_;
};

TEST_F(PersistentMemoryAllocatorTest, AllocateAndIterate) {
  std::string base_name(TEST_NAME);
  EXPECT_EQ(TEST_ID, allocator_->Id());
  EXPECT_TRUE(allocator_->used_histogram_);
  EXPECT_EQ("UMA.PersistentAllocator." + base_name + ".UsedPct",
            allocator_->used_histogram_->histogram_name());
  EXPECT_TRUE(allocator_->allocs_histogram_);
  EXPECT_EQ("UMA.PersistentAllocator." + base_name + ".Allocs",
            allocator_->allocs_histogram_->histogram_name());

  // Get base memory info for later comparison.
  PersistentMemoryAllocator::MemoryInfo meminfo0;
  allocator_->GetMemoryInfo(&meminfo0);
  EXPECT_EQ(TEST_MEMORY_SIZE, meminfo0.total);
  EXPECT_GT(meminfo0.total, meminfo0.free);

  // Validate allocation of test object and make sure it can be referenced
  // and all metadata looks correct.
  Reference block1 = allocator_->Allocate(sizeof(TestObject1), 1);
  EXPECT_NE(0U, block1);
  EXPECT_NE(nullptr, allocator_->GetAsObject<TestObject1>(block1, 1));
  EXPECT_EQ(nullptr, allocator_->GetAsObject<TestObject2>(block1, 1));
  EXPECT_LE(sizeof(TestObject1), allocator_->GetAllocSize(block1));
  EXPECT_GT(sizeof(TestObject1) + kAllocAlignment,
            allocator_->GetAllocSize(block1));
  PersistentMemoryAllocator::MemoryInfo meminfo1;
  allocator_->GetMemoryInfo(&meminfo1);
  EXPECT_EQ(meminfo0.total, meminfo1.total);
  EXPECT_GT(meminfo0.free, meminfo1.free);

  // Ensure that the test-object can be made iterable.
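  // (Allocations start out invisible to iterators; MakeIterable() is what
  // links a block into the segment's iterable list, as verified below.)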
  PersistentMemoryAllocator::Iterator iter1a(allocator_.get());
  uint32_t type;
  EXPECT_EQ(0U, iter1a.GetNext(&type));
  allocator_->MakeIterable(block1);
  EXPECT_EQ(block1, iter1a.GetNext(&type));
  EXPECT_EQ(1U, type);
  EXPECT_EQ(0U, iter1a.GetNext(&type));

  // Create second test-object and ensure everything is good and it cannot
  // be confused with test-object of another type.
  Reference block2 = allocator_->Allocate(sizeof(TestObject2), 2);
  EXPECT_NE(0U, block2);
  EXPECT_NE(nullptr, allocator_->GetAsObject<TestObject2>(block2, 2));
  EXPECT_EQ(nullptr, allocator_->GetAsObject<TestObject1>(block2, 1));
  EXPECT_LE(sizeof(TestObject2), allocator_->GetAllocSize(block2));
  EXPECT_GT(sizeof(TestObject2) + kAllocAlignment,
            allocator_->GetAllocSize(block2));
  PersistentMemoryAllocator::MemoryInfo meminfo2;
  allocator_->GetMemoryInfo(&meminfo2);
  EXPECT_EQ(meminfo1.total, meminfo2.total);
  EXPECT_GT(meminfo1.free, meminfo2.free);

  // Ensure that second test-object can also be made iterable.
  allocator_->MakeIterable(block2);
  EXPECT_EQ(block2, iter1a.GetNext(&type));
  EXPECT_EQ(2U, type);
  EXPECT_EQ(0U, iter1a.GetNext(&type));

  // Check that iteration can begin after an arbitrary location.
  PersistentMemoryAllocator::Iterator iter1b(allocator_.get(), block1);
  EXPECT_EQ(block2, iter1b.GetNext(&type));
  EXPECT_EQ(0U, iter1b.GetNext(&type));

  // Ensure nothing has gone noticeably wrong.
  EXPECT_FALSE(allocator_->IsFull());
  EXPECT_FALSE(allocator_->IsCorrupt());

  // Check the internal histogram record of used memory.
  allocator_->UpdateTrackingHistograms();
  std::unique_ptr<HistogramSamples> used_samples(
      allocator_->used_histogram_->SnapshotSamples());
  EXPECT_TRUE(used_samples);
  EXPECT_EQ(1, used_samples->TotalCount());

  // Check the internal histogram record of allocation requests.
  std::unique_ptr<HistogramSamples> allocs_samples(
      allocator_->allocs_histogram_->SnapshotSamples());
  EXPECT_TRUE(allocs_samples);
  EXPECT_EQ(2, allocs_samples->TotalCount());
  EXPECT_EQ(0, allocs_samples->GetCount(0));
  EXPECT_EQ(1, allocs_samples->GetCount(sizeof(TestObject1)));
  EXPECT_EQ(1, allocs_samples->GetCount(sizeof(TestObject2)));
#if !DCHECK_IS_ON()  // DCHECK builds will die at a NOTREACHED().
  EXPECT_EQ(0U, allocator_->Allocate(TEST_MEMORY_SIZE + 1, 0));
  allocs_samples = allocator_->allocs_histogram_->SnapshotSamples();
  EXPECT_EQ(3, allocs_samples->TotalCount());
  EXPECT_EQ(1, allocs_samples->GetCount(0));
#endif

  // Check that an object's type can be changed.
  EXPECT_EQ(2U, allocator_->GetType(block2));
  allocator_->ChangeType(block2, 3, 2);
  EXPECT_EQ(3U, allocator_->GetType(block2));
  allocator_->ChangeType(block2, 2, 3);
  EXPECT_EQ(2U, allocator_->GetType(block2));

  // Create second allocator (read/write) using the same memory segment.
  std::unique_ptr<PersistentMemoryAllocator> allocator2(
      new PersistentMemoryAllocator(mem_segment_.get(), TEST_MEMORY_SIZE,
                                    TEST_MEMORY_PAGE, 0, "", false));
  EXPECT_EQ(TEST_ID, allocator2->Id());
  EXPECT_FALSE(allocator2->used_histogram_);
  EXPECT_FALSE(allocator2->allocs_histogram_);
  EXPECT_NE(allocator2->allocs_histogram_, allocator_->allocs_histogram_);

  // Ensure that iteration and access through second allocator works.
  PersistentMemoryAllocator::Iterator iter2(allocator2.get());
  EXPECT_EQ(block1, iter2.GetNext(&type));
  EXPECT_EQ(block2, iter2.GetNext(&type));
  EXPECT_EQ(0U, iter2.GetNext(&type));
  EXPECT_NE(nullptr, allocator2->GetAsObject<TestObject1>(block1, 1));
  EXPECT_NE(nullptr, allocator2->GetAsObject<TestObject2>(block2, 2));

  // Create a third allocator (read-only) using the same memory segment.
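  // Like allocator2, this one gets no tracking histograms because
  // CreateTrackingHistograms() is never called on it; being attached
  // read-only, it also must not modify the segment.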
  std::unique_ptr<PersistentMemoryAllocator> allocator3(
      new PersistentMemoryAllocator(mem_segment_.get(), TEST_MEMORY_SIZE,
                                    TEST_MEMORY_PAGE, 0, "", true));
  EXPECT_EQ(TEST_ID, allocator3->Id());
  EXPECT_FALSE(allocator3->used_histogram_);
  EXPECT_FALSE(allocator3->allocs_histogram_);

  // Ensure that iteration and access through third allocator works.
  PersistentMemoryAllocator::Iterator iter3(allocator3.get());
  EXPECT_EQ(block1, iter3.GetNext(&type));
  EXPECT_EQ(block2, iter3.GetNext(&type));
  EXPECT_EQ(0U, iter3.GetNext(&type));
  EXPECT_NE(nullptr, allocator3->GetAsObject<TestObject1>(block1, 1));
  EXPECT_NE(nullptr, allocator3->GetAsObject<TestObject2>(block2, 2));

  // Ensure that GetNextOfType works.
  PersistentMemoryAllocator::Iterator iter1c(allocator_.get());
  EXPECT_EQ(block2, iter1c.GetNextOfType(2));
  EXPECT_EQ(0U, iter1c.GetNextOfType(2));
}

TEST_F(PersistentMemoryAllocatorTest, PageTest) {
  // This allocation will go into the first memory page.
  Reference block1 = allocator_->Allocate(TEST_MEMORY_PAGE / 2, 1);
  EXPECT_LT(0U, block1);
  EXPECT_GT(TEST_MEMORY_PAGE, block1);

  // This allocation won't fit in same page as previous block.
  Reference block2 =
      allocator_->Allocate(TEST_MEMORY_PAGE - 2 * kAllocAlignment, 2);
  EXPECT_EQ(TEST_MEMORY_PAGE, block2);

  // This allocation will also require a new page.
  Reference block3 = allocator_->Allocate(2 * kAllocAlignment + 99, 3);
  EXPECT_EQ(2U * TEST_MEMORY_PAGE, block3);
}

// A simple thread that takes an allocator and repeatedly allocates random-
// sized chunks from it until no more can be done.
class AllocatorThread : public SimpleThread {
 public:
  AllocatorThread(const std::string& name,
                  void* base,
                  uint32_t size,
                  uint32_t page_size)
      : SimpleThread(name, Options()),
        count_(0),
        iterable_(0),
        allocator_(base, size, page_size, 0, std::string(), false) {}

  void Run() override {
    for (;;) {
      uint32_t size = RandInt(1, 99);
      uint32_t type = RandInt(100, 999);
      Reference block = allocator_.Allocate(size, type);
      if (!block)
        break;

      count_++;
      if (RandInt(0, 1)) {
        allocator_.MakeIterable(block);
        iterable_++;
      }
    }
  }

  unsigned iterable() { return iterable_; }
  unsigned count() { return count_; }

 private:
  unsigned count_;
  unsigned iterable_;
  PersistentMemoryAllocator allocator_;
};

// Test parallel allocation/iteration and ensure consistency across all
// instances.
TEST_F(PersistentMemoryAllocatorTest, ParallelismTest) {
  void* memory = mem_segment_.get();
  AllocatorThread t1("t1", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
  AllocatorThread t2("t2", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
  AllocatorThread t3("t3", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
  AllocatorThread t4("t4", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
  AllocatorThread t5("t5", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);

  t1.Start();
  t2.Start();
  t3.Start();
  t4.Start();
  t5.Start();

  // The number of iterable objects must never decrease while threads run.
  unsigned last_count = 0;
  do {
    unsigned count = CountIterables();
    EXPECT_LE(last_count, count);
    last_count = count;
  } while (!allocator_->IsCorrupt() && !allocator_->IsFull());

  t1.Join();
  t2.Join();
  t3.Join();
  t4.Join();
  t5.Join();

  EXPECT_FALSE(allocator_->IsCorrupt());
  EXPECT_TRUE(allocator_->IsFull());
  EXPECT_EQ(CountIterables(),
            t1.iterable() + t2.iterable() + t3.iterable() + t4.iterable() +
                t5.iterable());
}

// A simple thread that counts objects by iterating through an allocator.
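// Multiple such threads share a single Iterator, so each iterable block
// should be handed to exactly one of them; the per-thread counts are summed
// afterward to verify that no block was skipped or double-counted.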
class CounterThread : public SimpleThread {
 public:
  CounterThread(const std::string& name,
                PersistentMemoryAllocator::Iterator* iterator,
                Lock* lock,
                ConditionVariable* condition,
                bool* wake_up)
      : SimpleThread(name, Options()),
        iterator_(iterator),
        lock_(lock),
        condition_(condition),
        count_(0),
        wake_up_(wake_up) {}

  void Run() override {
    // Wait so all threads can start at approximately the same time.
    // Best performance comes from releasing a single worker which then
    // releases the next, etc., etc.
    {
      AutoLock autolock(*lock_);

      // Before calling Wait(), make sure that the wake up condition
      // has not already passed. Also, since spurious signal events
      // are possible, check the condition in a while loop to make
      // sure that the wake up condition is met when this thread
      // returns from the Wait().
      // See usage comments in src/base/synchronization/condition_variable.h.
      while (!*wake_up_) {
        condition_->Wait();
        condition_->Signal();
      }
    }

    uint32_t type;
    while (iterator_->GetNext(&type) != 0) {
      ++count_;
    }
  }

  unsigned count() { return count_; }

 private:
  PersistentMemoryAllocator::Iterator* iterator_;
  Lock* lock_;
  ConditionVariable* condition_;
  unsigned count_;
  bool* wake_up_;

  DISALLOW_COPY_AND_ASSIGN(CounterThread);
};

// Ensure that parallel iteration returns the same number of objects as
// single-threaded iteration.
TEST_F(PersistentMemoryAllocatorTest, IteratorParallelismTest) {
  // Fill the memory segment with random allocations.
  unsigned iterable_count = 0;
  for (;;) {
    uint32_t size = RandInt(1, 99);
    uint32_t type = RandInt(100, 999);
    Reference block = allocator_->Allocate(size, type);
    if (!block)
      break;
    allocator_->MakeIterable(block);
    ++iterable_count;
  }
  EXPECT_FALSE(allocator_->IsCorrupt());
  EXPECT_TRUE(allocator_->IsFull());
  EXPECT_EQ(iterable_count, CountIterables());

  PersistentMemoryAllocator::Iterator iter(allocator_.get());
  Lock lock;
  ConditionVariable condition(&lock);
  bool wake_up = false;

  CounterThread t1("t1", &iter, &lock, &condition, &wake_up);
  CounterThread t2("t2", &iter, &lock, &condition, &wake_up);
  CounterThread t3("t3", &iter, &lock, &condition, &wake_up);
  CounterThread t4("t4", &iter, &lock, &condition, &wake_up);
  CounterThread t5("t5", &iter, &lock, &condition, &wake_up);

  t1.Start();
  t2.Start();
  t3.Start();
  t4.Start();
  t5.Start();

  // Take the lock and set the wake up condition to true. This helps to
  // avoid a race condition where the Signal() event is called before
  // all the threads have reached the Wait() and thus never get woken up.
  {
    AutoLock autolock(lock);
    wake_up = true;
  }

  // This will release all the waiting threads.
  condition.Signal();

  t1.Join();
  t2.Join();
  t3.Join();
  t4.Join();
  t5.Join();

  EXPECT_EQ(iterable_count,
            t1.count() + t2.count() + t3.count() + t4.count() + t5.count());

#if 0
  // These ensure that the threads don't run sequentially. This block
  // shouldn't be enabled in general because sequential execution can occur
  // simply by chance, making the test flaky, but it is useful during
  // development to ensure that the test is working correctly.
  EXPECT_NE(iterable_count, t1.count());
  EXPECT_NE(iterable_count, t2.count());
  EXPECT_NE(iterable_count, t3.count());
  EXPECT_NE(iterable_count, t4.count());
  EXPECT_NE(iterable_count, t5.count());
#endif
}

// This test doesn't verify anything other than that it doesn't crash. Its
// goal is to find coding errors that aren't otherwise tested for, much like
// a "fuzzer" would.
// This test is supposed to fail on TSAN bot (crbug.com/579867).
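// The raw byte-stomping below races with the allocator threads by design;
// ThreadSanitizer reports that as a data race, hence the suppression here.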
#if defined(THREAD_SANITIZER)
#define MAYBE_CorruptionTest DISABLED_CorruptionTest
#else
#define MAYBE_CorruptionTest CorruptionTest
#endif
TEST_F(PersistentMemoryAllocatorTest, MAYBE_CorruptionTest) {
  char* memory = mem_segment_.get();
  AllocatorThread t1("t1", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
  AllocatorThread t2("t2", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
  AllocatorThread t3("t3", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
  AllocatorThread t4("t4", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
  AllocatorThread t5("t5", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);

  t1.Start();
  t2.Start();
  t3.Start();
  t4.Start();
  t5.Start();

  do {
    size_t offset = RandInt(0, TEST_MEMORY_SIZE - 1);
    char value = RandInt(0, 255);
    memory[offset] = value;
  } while (!allocator_->IsCorrupt() && !allocator_->IsFull());

  t1.Join();
  t2.Join();
  t3.Join();
  t4.Join();
  t5.Join();

  CountIterables();
}

// Attempt to cause crashes or loops by expressly creating dangerous
// conditions.
TEST_F(PersistentMemoryAllocatorTest, MaliciousTest) {
  Reference block1 = allocator_->Allocate(sizeof(TestObject1), 1);
  Reference block2 = allocator_->Allocate(sizeof(TestObject1), 2);
  Reference block3 = allocator_->Allocate(sizeof(TestObject1), 3);
  Reference block4 = allocator_->Allocate(sizeof(TestObject1), 3);
  Reference block5 = allocator_->Allocate(sizeof(TestObject1), 3);
  allocator_->MakeIterable(block1);
  allocator_->MakeIterable(block2);
  allocator_->MakeIterable(block3);
  allocator_->MakeIterable(block4);
  allocator_->MakeIterable(block5);
  EXPECT_EQ(5U, CountIterables());
  EXPECT_FALSE(allocator_->IsCorrupt());

  // Create a loop in the iterable list and ensure it doesn't hang. The
  // return value from CountIterables() in these cases is unpredictable. If
  // there is a failure, the call will hang and the test will be killed for
  // taking too long.
  uint32_t* header4 = (uint32_t*)(mem_segment_.get() + block4);
  EXPECT_EQ(block5, header4[3]);
  header4[3] = block4;
  CountIterables();  // loop: 1-2-3-4-4
  EXPECT_TRUE(allocator_->IsCorrupt());

  // Test where loop goes back to previous block.
  header4[3] = block3;
  CountIterables();  // loop: 1-2-3-4-3

  // Test where loop goes back to the beginning.
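  // (As established by the EXPECT_EQ(block5, header4[3]) check above, index
  // 3 of the block's 32-bit header words holds its "next" reference.)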
  header4[3] = block1;
  CountIterables();  // loop: 1-2-3-4-1
}


//----- LocalPersistentMemoryAllocator -----------------------------------------

TEST(LocalPersistentMemoryAllocatorTest, CreationTest) {
  LocalPersistentMemoryAllocator allocator(TEST_MEMORY_SIZE, 42, "");
  EXPECT_EQ(42U, allocator.Id());
  EXPECT_NE(0U, allocator.Allocate(24, 1));
  EXPECT_FALSE(allocator.IsFull());
  EXPECT_FALSE(allocator.IsCorrupt());
}


//----- SharedPersistentMemoryAllocator ----------------------------------------

TEST(SharedPersistentMemoryAllocatorTest, CreationTest) {
  SharedMemoryHandle shared_handle_1;
  SharedMemoryHandle shared_handle_2;

  PersistentMemoryAllocator::MemoryInfo meminfo1;
  Reference r123, r456, r789;
  {
    std::unique_ptr<SharedMemory> shmem1(new SharedMemory());
    ASSERT_TRUE(shmem1->CreateAndMapAnonymous(TEST_MEMORY_SIZE));
    SharedPersistentMemoryAllocator local(std::move(shmem1), TEST_ID, "",
                                          false);
    EXPECT_FALSE(local.IsReadonly());
    r123 = local.Allocate(123, 123);
    r456 = local.Allocate(456, 456);
    r789 = local.Allocate(789, 789);
    local.MakeIterable(r123);
    local.ChangeType(r456, 654, 456);
    local.MakeIterable(r789);
    local.GetMemoryInfo(&meminfo1);
    EXPECT_FALSE(local.IsFull());
    EXPECT_FALSE(local.IsCorrupt());

    ASSERT_TRUE(local.shared_memory()->ShareToProcess(
        GetCurrentProcessHandle(), &shared_handle_1));
    ASSERT_TRUE(local.shared_memory()->ShareToProcess(
        GetCurrentProcessHandle(), &shared_handle_2));
  }

  // Read-only test.
  std::unique_ptr<SharedMemory> shmem2(
      new SharedMemory(shared_handle_1, /*readonly=*/true));
  ASSERT_TRUE(shmem2->Map(TEST_MEMORY_SIZE));

  SharedPersistentMemoryAllocator shalloc2(std::move(shmem2), 0, "", true);
  EXPECT_TRUE(shalloc2.IsReadonly());
  EXPECT_EQ(TEST_ID, shalloc2.Id());
  EXPECT_FALSE(shalloc2.IsFull());
  EXPECT_FALSE(shalloc2.IsCorrupt());

  PersistentMemoryAllocator::Iterator iter2(&shalloc2);
  uint32_t type;
  EXPECT_EQ(r123, iter2.GetNext(&type));
  EXPECT_EQ(r789, iter2.GetNext(&type));
  EXPECT_EQ(0U, iter2.GetNext(&type));

  EXPECT_EQ(123U, shalloc2.GetType(r123));
  EXPECT_EQ(654U, shalloc2.GetType(r456));
  EXPECT_EQ(789U, shalloc2.GetType(r789));

  PersistentMemoryAllocator::MemoryInfo meminfo2;
  shalloc2.GetMemoryInfo(&meminfo2);
  EXPECT_EQ(meminfo1.total, meminfo2.total);
  EXPECT_EQ(meminfo1.free, meminfo2.free);

  // Read/write test.
  std::unique_ptr<SharedMemory> shmem3(
      new SharedMemory(shared_handle_2, /*readonly=*/false));
  ASSERT_TRUE(shmem3->Map(TEST_MEMORY_SIZE));

  SharedPersistentMemoryAllocator shalloc3(std::move(shmem3), 0, "", false);
  EXPECT_FALSE(shalloc3.IsReadonly());
  EXPECT_EQ(TEST_ID, shalloc3.Id());
  EXPECT_FALSE(shalloc3.IsFull());
  EXPECT_FALSE(shalloc3.IsCorrupt());

  PersistentMemoryAllocator::Iterator iter3(&shalloc3);
  EXPECT_EQ(r123, iter3.GetNext(&type));
  EXPECT_EQ(r789, iter3.GetNext(&type));
  EXPECT_EQ(0U, iter3.GetNext(&type));

  EXPECT_EQ(123U, shalloc3.GetType(r123));
  EXPECT_EQ(654U, shalloc3.GetType(r456));
  EXPECT_EQ(789U, shalloc3.GetType(r789));

  PersistentMemoryAllocator::MemoryInfo meminfo3;
  shalloc3.GetMemoryInfo(&meminfo3);
  EXPECT_EQ(meminfo1.total, meminfo3.total);
  EXPECT_EQ(meminfo1.free, meminfo3.free);

  // Interconnectivity test.
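  // An object allocated and made iterable through the read/write mapping
  // should immediately become visible to the iterator over the read-only
  // mapping of the same memory.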
  Reference obj = shalloc3.Allocate(42, 42);
  ASSERT_TRUE(obj);
  shalloc3.MakeIterable(obj);
  EXPECT_EQ(obj, iter2.GetNext(&type));
  EXPECT_EQ(42U, type);
}


#if !defined(OS_NACL)
//----- FilePersistentMemoryAllocator ------------------------------------------

TEST(FilePersistentMemoryAllocatorTest, CreationTest) {
  ScopedTempDir temp_dir;
  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
  FilePath file_path = temp_dir.path().AppendASCII("persistent_memory");

  PersistentMemoryAllocator::MemoryInfo meminfo1;
  Reference r123, r456, r789;
  {
    LocalPersistentMemoryAllocator local(TEST_MEMORY_SIZE, TEST_ID, "");
    EXPECT_FALSE(local.IsReadonly());
    r123 = local.Allocate(123, 123);
    r456 = local.Allocate(456, 456);
    r789 = local.Allocate(789, 789);
    local.MakeIterable(r123);
    local.ChangeType(r456, 654, 456);
    local.MakeIterable(r789);
    local.GetMemoryInfo(&meminfo1);
    EXPECT_FALSE(local.IsFull());
    EXPECT_FALSE(local.IsCorrupt());

    File writer(file_path, File::FLAG_CREATE | File::FLAG_WRITE);
    ASSERT_TRUE(writer.IsValid());
    writer.Write(0, (const char*)local.data(), local.used());
  }

  std::unique_ptr<MemoryMappedFile> mmfile(new MemoryMappedFile());
  mmfile->Initialize(file_path);
  EXPECT_TRUE(mmfile->IsValid());
  const size_t mmlength = mmfile->length();
  EXPECT_GE(meminfo1.total, mmlength);

  FilePersistentMemoryAllocator file(std::move(mmfile), 0, 0, "", true);
  EXPECT_TRUE(file.IsReadonly());
  EXPECT_EQ(TEST_ID, file.Id());
  EXPECT_FALSE(file.IsFull());
  EXPECT_FALSE(file.IsCorrupt());

  PersistentMemoryAllocator::Iterator iter(&file);
  uint32_t type;
  EXPECT_EQ(r123, iter.GetNext(&type));
  EXPECT_EQ(r789, iter.GetNext(&type));
  EXPECT_EQ(0U, iter.GetNext(&type));

  EXPECT_EQ(123U, file.GetType(r123));
  EXPECT_EQ(654U, file.GetType(r456));
  EXPECT_EQ(789U, file.GetType(r789));

  PersistentMemoryAllocator::MemoryInfo meminfo2;
  file.GetMemoryInfo(&meminfo2);
  EXPECT_GE(meminfo1.total, meminfo2.total);
  EXPECT_GE(meminfo1.free, meminfo2.free);
  EXPECT_EQ(mmlength, meminfo2.total);
  EXPECT_EQ(0U, meminfo2.free);
}

TEST(FilePersistentMemoryAllocatorTest, ExtendTest) {
  ScopedTempDir temp_dir;
  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
  FilePath file_path = temp_dir.path().AppendASCII("extend_test");
  MemoryMappedFile::Region region = {0, 16 << 10};  // 16KiB maximum size.

  // Start with a small but valid file of persistent data.
  ASSERT_FALSE(PathExists(file_path));
  {
    LocalPersistentMemoryAllocator local(TEST_MEMORY_SIZE, TEST_ID, "");
    local.Allocate(1, 1);
    local.Allocate(11, 11);

    File writer(file_path, File::FLAG_CREATE | File::FLAG_WRITE);
    ASSERT_TRUE(writer.IsValid());
    writer.Write(0, (const char*)local.data(), local.used());
  }
  ASSERT_TRUE(PathExists(file_path));
  int64_t before_size;
  ASSERT_TRUE(GetFileSize(file_path, &before_size));

  // Map it as an extendable read/write file and append to it.
  {
    std::unique_ptr<MemoryMappedFile> mmfile(new MemoryMappedFile());
    mmfile->Initialize(
        File(file_path, File::FLAG_OPEN | File::FLAG_READ | File::FLAG_WRITE),
        region, MemoryMappedFile::READ_WRITE_EXTEND);
    FilePersistentMemoryAllocator allocator(std::move(mmfile), region.size, 0,
                                            "", false);
    EXPECT_EQ(static_cast<size_t>(before_size), allocator.used());

    allocator.Allocate(111, 111);
    EXPECT_LT(static_cast<size_t>(before_size), allocator.used());
  }

  // Validate that append worked.
  int64_t after_size;
  ASSERT_TRUE(GetFileSize(file_path, &after_size));
  EXPECT_LT(before_size, after_size);

  // Verify that it's still an acceptable file.
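  // Re-map the extended file and check that it still passes the
  // acceptability test, both for read-only and read/write use.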
  {
    std::unique_ptr<MemoryMappedFile> mmfile(new MemoryMappedFile());
    mmfile->Initialize(
        File(file_path, File::FLAG_OPEN | File::FLAG_READ | File::FLAG_WRITE),
        region, MemoryMappedFile::READ_WRITE_EXTEND);
    EXPECT_TRUE(FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, true));
    EXPECT_TRUE(
        FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, false));
  }
}

TEST(FilePersistentMemoryAllocatorTest, AcceptableTest) {
  const uint32_t kAllocAlignment =
      PersistentMemoryAllocatorTest::GetAllocAlignment();
  ScopedTempDir temp_dir;
  ASSERT_TRUE(temp_dir.CreateUniqueTempDir());

  LocalPersistentMemoryAllocator local(TEST_MEMORY_SIZE, TEST_ID, "");
  local.MakeIterable(local.Allocate(1, 1));
  local.MakeIterable(local.Allocate(11, 11));
  const size_t minsize = local.used();
  std::unique_ptr<char[]> garbage(new char[minsize]);
  RandBytes(garbage.get(), minsize);

  std::unique_ptr<MemoryMappedFile> mmfile;
  char filename[100];
  for (size_t filesize = minsize; filesize > 0; --filesize) {
    strings::SafeSPrintf(filename, "memory_%d_A", filesize);
    FilePath file_path = temp_dir.path().AppendASCII(filename);
    ASSERT_FALSE(PathExists(file_path));
    {
      File writer(file_path, File::FLAG_CREATE | File::FLAG_WRITE);
      ASSERT_TRUE(writer.IsValid());
      writer.Write(0, (const char*)local.data(), filesize);
    }
    ASSERT_TRUE(PathExists(file_path));

    // Request read/write access for some sizes that are a multiple of the
    // allocator's alignment size. The allocator is strict about file size
    // being a multiple of its internal alignment when doing read/write access.
    const bool read_only = (filesize % (2 * kAllocAlignment)) != 0;
    const uint32_t file_flags =
        File::FLAG_OPEN | File::FLAG_READ | (read_only ? 0 : File::FLAG_WRITE);
    const MemoryMappedFile::Access map_access =
        read_only ? MemoryMappedFile::READ_ONLY : MemoryMappedFile::READ_WRITE;

    mmfile.reset(new MemoryMappedFile());
    mmfile->Initialize(File(file_path, file_flags), map_access);
    EXPECT_EQ(filesize, mmfile->length());
    if (FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, read_only)) {
      // Make sure construction doesn't crash. It will, however, cause
      // error messages warning about a corrupted memory segment.
      FilePersistentMemoryAllocator allocator(std::move(mmfile), 0, 0, "",
                                              read_only);
      // Also make sure that iteration doesn't crash.
      PersistentMemoryAllocator::Iterator iter(&allocator);
      uint32_t type_id;
      Reference ref;
      while ((ref = iter.GetNext(&type_id)) != 0) {
        const char* data = allocator.GetAsObject<char>(ref, 0);
        uint32_t type = allocator.GetType(ref);
        size_t size = allocator.GetAllocSize(ref);
        // Ensure the compiler can't optimize-out the above variables.
        (void)data;
        (void)type;
        (void)size;
      }

      // Ensure that short files are detected as corrupt and full files are
      // not.
      EXPECT_EQ(filesize != minsize, allocator.IsCorrupt());
    } else {
      // For filesize >= minsize, the file must be acceptable. This
      // else clause (file-not-acceptable) should be reached only if
      // filesize < minsize.
      EXPECT_LT(filesize, minsize);
    }

    strings::SafeSPrintf(filename, "memory_%d_B", filesize);
    file_path = temp_dir.path().AppendASCII(filename);
    ASSERT_FALSE(PathExists(file_path));
    {
      File writer(file_path, File::FLAG_CREATE | File::FLAG_WRITE);
      ASSERT_TRUE(writer.IsValid());
      writer.Write(0, (const char*)garbage.get(), filesize);
    }
    ASSERT_TRUE(PathExists(file_path));

    mmfile.reset(new MemoryMappedFile());
    mmfile->Initialize(File(file_path, file_flags), map_access);
    EXPECT_EQ(filesize, mmfile->length());
    if (FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, read_only)) {
      // Make sure construction doesn't crash.
      // It will, however, cause error messages warning about a corrupted
      // memory segment.
      FilePersistentMemoryAllocator allocator(std::move(mmfile), 0, 0, "",
                                              read_only);
      EXPECT_TRUE(allocator.IsCorrupt());  // Garbage data so it should be.
    } else {
      // For filesize >= minsize, the file must be acceptable. This
      // else clause (file-not-acceptable) should be reached only if
      // filesize < minsize.
      EXPECT_GT(minsize, filesize);
    }
  }
}
#endif  // !defined(OS_NACL)

}  // namespace base