1 /*-------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
4 *
5 * Copyright (c) 2015 Google Inc.
6 *
7 * Licensed under the Apache License, Version 2.0 (the "License");
8 * you may not use this file except in compliance with the License.
9 * You may obtain a copy of the License at
10 *
11 * http://www.apache.org/licenses/LICENSE-2.0
12 *
13 * Unless required by applicable law or agreed to in writing, software
14 * distributed under the License is distributed on an "AS IS" BASIS,
15 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
16 * See the License for the specific language governing permissions and
17 * limitations under the License.
18 *
19 *//*!
20 * \file
21 * \brief Simple memory allocation tests.
22 *//*--------------------------------------------------------------------*/
23
24 #include "vktMemoryAllocationTests.hpp"
25
26 #include "vktTestCaseUtil.hpp"
27
28 #include "tcuMaybe.hpp"
29 #include "tcuResultCollector.hpp"
30 #include "tcuTestLog.hpp"
31 #include "tcuPlatform.hpp"
32 #include "tcuCommandLine.hpp"
33
34 #include "vkPlatform.hpp"
35 #include "vkStrUtil.hpp"
36 #include "vkRef.hpp"
37 #include "vkDeviceUtil.hpp"
38 #include "vkQueryUtil.hpp"
39 #include "vkRefUtil.hpp"
40 #include "vkAllocationCallbackUtil.hpp"
41
42 #include "deUniquePtr.hpp"
43 #include "deStringUtil.hpp"
44 #include "deRandom.hpp"
45
46 using tcu::Maybe;
47 using tcu::TestLog;
48
49 using std::string;
50 using std::vector;
51
52 using namespace vk;
53
54 namespace vkt
55 {
56 namespace memory
57 {
58 namespace
59 {
60
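// E.g. roundUpToMultiple(10, 4) == 12 and roundUpToMultiple(12, 4) == 12; values that are
// already a multiple of b are returned unchanged.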
61 template<typename T>
62 T roundUpToMultiple(const T& a, const T& b)
63 {
64 return b * (a / b + (a % b != 0 ? 1 : 0));
65 }
66
67 enum
68 {
69 // The minimum required limit for maxMemoryAllocationCount is 4096. Use 4000 to leave
70 // headroom for possible memory allocations made by layers etc.
71 MAX_ALLOCATION_COUNT = 4000
72 };
73
74 struct TestConfig
75 {
76 enum Order
77 {
78 ALLOC_FREE,
79 ALLOC_REVERSE_FREE,
80 MIXED_ALLOC_FREE,
81 ORDER_LAST
82 };
83
84 Maybe<VkDeviceSize> memorySize;
85 Maybe<float> memoryPercentage;
86 deUint32 memoryAllocationCount;
87 Order order;
88 bool useDeviceGroups;
89
90 TestConfig (void)
91 : memoryAllocationCount ((deUint32)-1)
92 , order (ORDER_LAST)
93 , useDeviceGroups (false)
94 {
95 }
96 };
97
98 struct TestConfigRandom
99 {
100 const deUint32 seed;
101 const bool useDeviceGroups;
102
103 TestConfigRandom (const deUint32 _seed, const bool _useDeviceGroups)
104 : seed (_seed)
105 , useDeviceGroups (_useDeviceGroups)
106 {
107 }
108 };
109
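// Same rounding as roundUpToMultiple() above; used further down to round allocation sizes up
// to the platform's deviceMemoryAllocationGranularity, e.g. a 100 B request with a 64 B
// granularity is budgeted as 128 B.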
110 template<typename T>
111 T roundUpToNextMultiple (T value, T multiple)
112 {
113 if (value % multiple == 0)
114 return value;
115 else
116 return value + multiple - (value % multiple);
117 }
118
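// Common base for the allocation tests. When device groups are requested the instance creates
// its own VkInstance and logical device spanning the whole group (see createDeviceGroup()),
// and getDevice()/getDeviceInterface() transparently return either that device or the default
// one owned by the test context.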
119 class BaseAllocateTestInstance : public TestInstance
120 {
121 public:
122 BaseAllocateTestInstance (Context& context, bool useDeviceGroups)
123 : TestInstance (context)
124 , m_useDeviceGroups (useDeviceGroups)
125 , m_subsetAllocationAllowed (false)
126 , m_numPhysDevices (1)
127 , m_memoryProperties (getPhysicalDeviceMemoryProperties(context.getInstanceInterface(), context.getPhysicalDevice()))
128 {
129 if (m_useDeviceGroups)
130 createDeviceGroup();
131 m_allocFlagsInfo.sType = VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO_KHR;
132 m_allocFlagsInfo.pNext = DE_NULL;
133 m_allocFlagsInfo.flags = VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT;
134 m_allocFlagsInfo.deviceMask = 0;
135 }
136
137 void createDeviceGroup (void);
138 const vk::DeviceInterface& getDeviceInterface (void) { return m_useDeviceGroups ? *m_deviceDriver : m_context.getDeviceInterface(); }
139 vk::VkDevice getDevice (void) { return m_useDeviceGroups ? m_logicalDevice.get() : m_context.getDevice(); }
140
141 protected:
142 bool m_useDeviceGroups;
143 bool m_subsetAllocationAllowed;
144 VkMemoryAllocateFlagsInfo m_allocFlagsInfo;
145 deUint32 m_numPhysDevices;
146 VkPhysicalDeviceMemoryProperties m_memoryProperties;
147
148 private:
149 vk::Move<vk::VkInstance> m_deviceGroupInstance;
150 vk::Move<vk::VkDevice> m_logicalDevice;
151 de::MovePtr<vk::DeviceDriver> m_deviceDriver;
152 };
153
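// Builds the device-group path: create an instance with VK_KHR_device_group_creation, select
// the group given on the command line (at least two physical devices are required), pick a
// compute-capable queue family and create a logical device whose pNext chain carries
// VkDeviceGroupDeviceCreateInfo, so that allocations can later target a per-allocation device
// mask. Memory properties are re-queried from the selected physical device of the group.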
154 void BaseAllocateTestInstance::createDeviceGroup (void)
155 {
156 const tcu::CommandLine& cmdLine = m_context.getTestContext().getCommandLine();
157 const deUint32 devGroupIdx = cmdLine.getVKDeviceGroupId() - 1;
158 const deUint32 physDeviceIdx = cmdLine.getVKDeviceId() - 1;
159 const float queuePriority = 1.0f;
160 deUint32 queueFamilyIndex = 0;
161 const std::vector<std::string> requiredExtensions (1, "VK_KHR_device_group_creation");
162 m_deviceGroupInstance = createInstanceWithExtensions(m_context.getPlatformInterface(), m_context.getUsedApiVersion(), requiredExtensions);
163 std::vector<VkPhysicalDeviceGroupProperties> devGroupProperties = enumeratePhysicalDeviceGroups(m_context.getInstanceInterface(), m_deviceGroupInstance.get());
164 m_numPhysDevices = devGroupProperties[devGroupIdx].physicalDeviceCount;
165 m_subsetAllocationAllowed = devGroupProperties[devGroupIdx].subsetAllocation;
166 if (m_numPhysDevices < 2)
167 TCU_THROW(NotSupportedError, "Device group allocation tests not supported with 1 physical device");
168 std::vector<const char*> deviceExtensions;
169
170 if (!isCoreDeviceExtension(m_context.getUsedApiVersion(), "VK_KHR_device_group"))
171 deviceExtensions.push_back("VK_KHR_device_group");
172
173 VkDeviceGroupDeviceCreateInfo deviceGroupInfo =
174 {
175 VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO_KHR, //stype
176 DE_NULL, //pNext
177 devGroupProperties[devGroupIdx].physicalDeviceCount, //physicalDeviceCount
178 devGroupProperties[devGroupIdx].physicalDevices //physicalDevices
179 };
180 VkInstance instance (m_useDeviceGroups ? m_deviceGroupInstance.get() : m_context.getInstance());
181 InstanceDriver instanceDriver (m_context.getPlatformInterface(), instance);
182 const VkPhysicalDeviceFeatures deviceFeatures = getPhysicalDeviceFeatures(instanceDriver, deviceGroupInfo.pPhysicalDevices[physDeviceIdx]);
183
184 const std::vector<VkQueueFamilyProperties> queueProps = getPhysicalDeviceQueueFamilyProperties(instanceDriver, devGroupProperties[devGroupIdx].physicalDevices[physDeviceIdx]);
185 for (size_t queueNdx = 0; queueNdx < queueProps.size(); queueNdx++)
186 {
187 if (queueProps[queueNdx].queueFlags & VK_QUEUE_COMPUTE_BIT)
188 queueFamilyIndex = (deUint32)queueNdx;
189 }
190
191 VkDeviceQueueCreateInfo queueInfo =
192 {
193 VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO, // VkStructureType sType;
194 DE_NULL, // const void* pNext;
195 (VkDeviceQueueCreateFlags)0u, // VkDeviceQueueCreateFlags flags;
196 queueFamilyIndex, // deUint32 queueFamilyIndex;
197 1u, // deUint32 queueCount;
198 &queuePriority // const float* pQueuePriorities;
199 };
200
201 const VkDeviceCreateInfo deviceInfo =
202 {
203 VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO, // VkStructureType sType;
204 m_useDeviceGroups ? &deviceGroupInfo : DE_NULL, // const void* pNext;
205 (VkDeviceCreateFlags)0, // VkDeviceCreateFlags flags;
206 1u, // uint32_t queueCreateInfoCount;
207 &queueInfo, // const VkDeviceQueueCreateInfo* pQueueCreateInfos;
208 0u, // uint32_t enabledLayerCount;
209 DE_NULL, // const char* const* ppEnabledLayerNames;
210 deUint32(deviceExtensions.size()), // uint32_t enabledExtensionCount;
211 deviceExtensions.empty() ? DE_NULL : &deviceExtensions[0], // const char* const* ppEnabledExtensionNames;
212 &deviceFeatures, // const VkPhysicalDeviceFeatures* pEnabledFeatures;
213 };
214 m_logicalDevice = createDevice(m_context.getPlatformInterface(), instance, instanceDriver, deviceGroupInfo.pPhysicalDevices[physDeviceIdx], &deviceInfo);
215 m_deviceDriver = de::MovePtr<DeviceDriver>(new DeviceDriver(m_context.getPlatformInterface(), instance, *m_logicalDevice));
216 m_memoryProperties = getPhysicalDeviceMemoryProperties(instanceDriver, deviceGroupInfo.pPhysicalDevices[physDeviceIdx]);
217 }
218
219 class AllocateFreeTestInstance : public BaseAllocateTestInstance
220 {
221 public:
222 AllocateFreeTestInstance (Context& context, const TestConfig config)
223 : BaseAllocateTestInstance (context, config.useDeviceGroups)
224 , m_config (config)
225 , m_result (m_context.getTestContext().getLog())
226 , m_memoryTypeIndex (0)
227 , m_memoryLimits (getMemoryLimits(context.getTestContext().getPlatform().getVulkanPlatform()))
228 {
229 DE_ASSERT(!!m_config.memorySize != !!m_config.memoryPercentage);
230 }
231
232 tcu::TestStatus iterate (void);
233
234 private:
235 const TestConfig m_config;
236 tcu::ResultCollector m_result;
237 deUint32 m_memoryTypeIndex;
238 const PlatformMemoryLimits m_memoryLimits;
239 };
240
241
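// iterate() is invoked once per memory type: it keeps returning "incomplete" and advancing
// m_memoryTypeIndex until every memory type reported by the implementation has been exercised
// with the configured allocation pattern, and only then reports the collected result.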
242 tcu::TestStatus AllocateFreeTestInstance::iterate (void)
243 {
244 TestLog& log = m_context.getTestContext().getLog();
245 const VkDevice device = getDevice();
246 const DeviceInterface& vkd = getDeviceInterface();
247 VkMemoryRequirements memReqs;
248 const deUint32 queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();
249 VkBufferCreateFlags createFlags = (vk::VkBufferCreateFlagBits)0u;
250 VkBufferUsageFlags usageFlags = vk::VK_BUFFER_USAGE_TRANSFER_SRC_BIT|VK_BUFFER_USAGE_TRANSFER_DST_BIT;
251 VkSharingMode sharingMode = vk::VK_SHARING_MODE_EXCLUSIVE;
252 Move<VkBuffer> buffer;
253
254 if ((m_memoryProperties.memoryTypes[m_memoryTypeIndex].propertyFlags & vk::VK_MEMORY_PROPERTY_PROTECTED_BIT) == vk::VK_MEMORY_PROPERTY_PROTECTED_BIT)
255 {
256 createFlags |= vk::VK_BUFFER_CREATE_PROTECTED_BIT;
257 }
258
259 // Create a minimal buffer first to get the supported memory types
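// The buffer is never bound to memory; it is only used to query VkMemoryRequirements, whose
// size member becomes the per-allocation size when an explicit memory size is configured.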
260 VkBufferCreateInfo bufferParams =
261 {
262 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
263 DE_NULL, // const void* pNext;
264 createFlags, // VkBufferCreateFlags flags;
265 1u, // VkDeviceSize size;
266 usageFlags, // VkBufferUsageFlags usage;
267 sharingMode, // VkSharingMode sharingMode;
268 1u, // uint32_t queueFamilyIndexCount;
269 &queueFamilyIndex, // const uint32_t* pQueueFamilyIndices;
270 };
271
272 buffer = createBuffer(vkd, device, &bufferParams);
273 vkd.getBufferMemoryRequirements(device, *buffer, &memReqs);
274
275 DE_ASSERT(m_config.memoryAllocationCount <= MAX_ALLOCATION_COUNT);
276
277 if (m_memoryTypeIndex == 0)
278 {
279 log << TestLog::Message << "Memory allocation count: " << m_config.memoryAllocationCount << TestLog::EndMessage;
280 log << TestLog::Message << "Single allocation size: " << (m_config.memorySize ? de::toString(*m_config.memorySize) : de::toString(100.0f * (*m_config.memoryPercentage)) + " percent of the heap size.") << TestLog::EndMessage;
281
282 if (m_config.order == TestConfig::ALLOC_REVERSE_FREE)
283 log << TestLog::Message << "Memory is freed in reversed order. " << TestLog::EndMessage;
284 else if (m_config.order == TestConfig::ALLOC_FREE)
285 log << TestLog::Message << "Memory is freed in same order as allocated. " << TestLog::EndMessage;
286 else if (m_config.order == TestConfig::MIXED_ALLOC_FREE)
287 log << TestLog::Message << "Memory is freed right after allocation. " << TestLog::EndMessage;
288 else
289 DE_FATAL("Unknown allocation order");
290 }
291
292 try
293 {
294 const VkMemoryType memoryType = m_memoryProperties.memoryTypes[m_memoryTypeIndex];
295 const VkMemoryHeap memoryHeap = m_memoryProperties.memoryHeaps[memoryType.heapIndex];
296
297 const VkDeviceSize allocationSize = (m_config.memorySize ? memReqs.size : (VkDeviceSize)(*m_config.memoryPercentage * (float)memoryHeap.size));
298 const VkDeviceSize roundedUpAllocationSize = roundUpToNextMultiple(allocationSize, m_memoryLimits.deviceMemoryAllocationGranularity);
299 vector<VkDeviceMemory> memoryObjects (m_config.memoryAllocationCount, (VkDeviceMemory)0);
300
301 log << TestLog::Message << "Memory type index: " << m_memoryTypeIndex << TestLog::EndMessage;
302
303 if (memoryType.heapIndex >= m_memoryProperties.memoryHeapCount)
304 m_result.fail("Invalid heap index defined for memory type.");
305
306 {
307 log << TestLog::Message << "Memory type: " << memoryType << TestLog::EndMessage;
308 log << TestLog::Message << "Memory heap: " << memoryHeap << TestLog::EndMessage;
309
310 if (roundedUpAllocationSize * m_config.memoryAllocationCount > memoryHeap.size)
311 TCU_THROW(NotSupportedError, "Memory heap doesn't have enough memory.");
312
313 #if (DE_PTR_SIZE == 4)
314 // For 32-bit binaries we cap the total host visible allocations to 1.5GB to
315 // avoid exhausting CPU virtual address space and throwing a false negative result.
316 if ((memoryType.propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) &&
317 allocationSize * m_config.memoryAllocationCount * (m_subsetAllocationAllowed ? 1 : m_numPhysDevices) >= 1610612736)
318
319 log << TestLog::Message << " Skipping: Not enough CPU virtual address space for all host visible allocations." << TestLog::EndMessage;
320 else
321 {
322 #else
323 {
324 #endif
325
326 try
327 {
328 const deUint32 totalDeviceMaskCombinations = m_subsetAllocationAllowed ? (1 << m_numPhysDevices) - 1 : 1;
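// With subset allocation every non-empty subset of the group is exercised, i.e. device masks
// 1 .. (2^N - 1) for N physical devices (1..7 for a three-device group). Without subset
// allocation memory must be allocated on all devices at once, so a single pass with the full
// mask is made.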
329 for (deUint32 deviceMask = 1; deviceMask <= totalDeviceMaskCombinations; deviceMask++)
330 {
331 // If subset allocation is not allowed, allocate on all physical devices in a single pass.
332 if (!m_subsetAllocationAllowed)
333 deviceMask = (1 << m_numPhysDevices) - 1;
334 m_allocFlagsInfo.deviceMask = deviceMask;
335
336 if (m_config.order == TestConfig::ALLOC_FREE || m_config.order == TestConfig::ALLOC_REVERSE_FREE)
337 {
338 for (size_t ndx = 0; ndx < m_config.memoryAllocationCount; ndx++)
339 {
340 VkMemoryAllocateInfo alloc =
341 {
342 VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, // sType
343 m_useDeviceGroups ? &m_allocFlagsInfo : DE_NULL, // pNext
344 allocationSize, // allocationSize
345 m_memoryTypeIndex // memoryTypeIndex;
346 };
347
348 VK_CHECK(vkd.allocateMemory(device, &alloc, (const VkAllocationCallbacks*)DE_NULL, &memoryObjects[ndx]));
349
350 TCU_CHECK(!!memoryObjects[ndx]);
351 }
352
353 if (m_config.order == TestConfig::ALLOC_FREE)
354 {
355 for (size_t ndx = 0; ndx < m_config.memoryAllocationCount; ndx++)
356 {
357 const VkDeviceMemory mem = memoryObjects[memoryObjects.size() - 1 - ndx];
358
359 vkd.freeMemory(device, mem, (const VkAllocationCallbacks*)DE_NULL);
360 memoryObjects[memoryObjects.size() - 1 - ndx] = (VkDeviceMemory)0;
361 }
362 }
363 else
364 {
365 for (size_t ndx = 0; ndx < m_config.memoryAllocationCount; ndx++)
366 {
367 const VkDeviceMemory mem = memoryObjects[ndx];
368
369 vkd.freeMemory(device, mem, (const VkAllocationCallbacks*)DE_NULL);
370 memoryObjects[ndx] = (VkDeviceMemory)0;
371 }
372 }
373 }
374 else
375 {
376 for (size_t ndx = 0; ndx < m_config.memoryAllocationCount; ndx++)
377 {
378 const VkMemoryAllocateInfo alloc =
379 {
380 VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, // sType
381 m_useDeviceGroups ? &m_allocFlagsInfo : DE_NULL, // pNext
382 allocationSize, // allocationSize
383 m_memoryTypeIndex // memoryTypeIndex;
384 };
385
386 VK_CHECK(vkd.allocateMemory(device, &alloc, (const VkAllocationCallbacks*)DE_NULL, &memoryObjects[ndx]));
387 TCU_CHECK(!!memoryObjects[ndx]);
388
389 vkd.freeMemory(device, memoryObjects[ndx], (const VkAllocationCallbacks*)DE_NULL);
390 memoryObjects[ndx] = (VkDeviceMemory)0;
391 }
392 }
393 }
394 }
395 catch (...)
396 {
397 for (size_t ndx = 0; ndx < m_config.memoryAllocationCount; ndx++)
398 {
399 const VkDeviceMemory mem = memoryObjects[ndx];
400
401 if (!!mem)
402 {
403 vkd.freeMemory(device, mem, (const VkAllocationCallbacks*)DE_NULL);
404 memoryObjects[ndx] = (VkDeviceMemory)0;
405 }
406 }
407
408 throw;
409 }
410 }
411 }
412 }
413 catch (const tcu::TestError& error)
414 {
415 m_result.fail(error.getMessage());
416 }
417
418 m_memoryTypeIndex++;
419
420 if (m_memoryTypeIndex < m_memoryProperties.memoryTypeCount)
421 return tcu::TestStatus::incomplete();
422 else
423 return tcu::TestStatus(m_result.getResult(), m_result.getMessage());
424 }
425
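// Estimates the host-side memory cost of one vkAllocateMemory() call by routing its
// VkAllocationCallbacks through a recorder wrapped around the system allocator and summing
// the allocations that are still live. The random test below adds this per-object footprint
// to its system-memory budget.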
426 size_t computeDeviceMemorySystemMemFootprint (const DeviceInterface& vk, VkDevice device)
427 {
428 AllocationCallbackRecorder callbackRecorder (getSystemAllocator());
429
430 {
431 // 1 B allocation from memory type 0
432 const VkMemoryAllocateInfo allocInfo =
433 {
434 VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
435 DE_NULL,
436 1u,
437 0u,
438 };
439 const Unique<VkDeviceMemory> memory (allocateMemory(vk, device, &allocInfo, callbackRecorder.getCallbacks()));
440 AllocationCallbackValidationResults validateRes;
441
442 validateAllocationCallbacks(callbackRecorder, &validateRes);
443
444 TCU_CHECK(validateRes.violations.empty());
445
446 return getLiveSystemAllocationTotal(validateRes)
447 + sizeof(void*)*validateRes.liveAllocations.size(); // allocation overhead
448 }
449 }
450
451 struct MemoryType
452 {
453 deUint32 index;
454 VkMemoryType type;
455 };
456
457 struct MemoryObject
458 {
459 VkDeviceMemory memory;
460 VkDeviceSize size;
461 };
462
463 struct Heap
464 {
465 VkMemoryHeap heap;
466 VkDeviceSize memoryUsage;
467 VkDeviceSize maxMemoryUsage;
468 vector<MemoryType> types;
469 vector<MemoryObject> objects;
470 };
471
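// Performs a pseudo-random sequence of vkAllocateMemory() / vkFreeMemory() calls while
// tracking per-heap usage, so that no heap is asked for more than 1/8 of its size and the
// host and device memory limits reported by the platform are respected.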
472 class RandomAllocFreeTestInstance : public BaseAllocateTestInstance
473 {
474 public:
475 RandomAllocFreeTestInstance (Context& context, TestConfigRandom config);
476 ~RandomAllocFreeTestInstance (void);
477
478 tcu::TestStatus iterate (void);
479
480 private:
481 const size_t m_opCount;
482 const size_t m_allocSysMemSize;
483 const PlatformMemoryLimits m_memoryLimits;
484 const deUint32 m_totalDeviceMaskCombinations;
485
486 deUint32 m_memoryObjectCount;
487 deUint32 m_currentDeviceMask;
488 size_t m_opNdx;
489 de::Random m_rng;
490 vector<Heap> m_heaps;
491 VkDeviceSize m_totalSystemMem;
492 VkDeviceSize m_totalDeviceMem;
493 };
494
495 RandomAllocFreeTestInstance::RandomAllocFreeTestInstance (Context& context, TestConfigRandom config)
496 : BaseAllocateTestInstance (context, config.useDeviceGroups)
497 , m_opCount (128)
498 , m_allocSysMemSize (computeDeviceMemorySystemMemFootprint(getDeviceInterface(), context.getDevice())
499 + sizeof(MemoryObject))
500 , m_memoryLimits (getMemoryLimits(context.getTestContext().getPlatform().getVulkanPlatform()))
501 , m_totalDeviceMaskCombinations (m_subsetAllocationAllowed ? (1 << m_numPhysDevices) - 1 : 1)
502 , m_memoryObjectCount (0)
503 , m_currentDeviceMask (m_subsetAllocationAllowed ? 1 : (1 << m_numPhysDevices) - 1)
504 , m_opNdx (0)
505 , m_rng (config.seed)
506 , m_totalSystemMem (0)
507 , m_totalDeviceMem (0)
508 {
509 TCU_CHECK(m_memoryProperties.memoryHeapCount <= 32);
510 TCU_CHECK(m_memoryProperties.memoryTypeCount <= 32);
511
512 m_heaps.resize(m_memoryProperties.memoryHeapCount);
513
514 for (deUint32 heapNdx = 0; heapNdx < m_memoryProperties.memoryHeapCount; heapNdx++)
515 {
516 m_heaps[heapNdx].heap = m_memoryProperties.memoryHeaps[heapNdx];
517 m_heaps[heapNdx].memoryUsage = 0;
518 m_heaps[heapNdx].maxMemoryUsage = m_heaps[heapNdx].heap.size / 8; /* Use at maximum 12.5% of heap */
519
520 m_heaps[heapNdx].objects.reserve(100);
521 }
522
523 for (deUint32 memoryTypeNdx = 0; memoryTypeNdx < m_memoryProperties.memoryTypeCount; memoryTypeNdx++)
524 {
525 const MemoryType type =
526 {
527 memoryTypeNdx,
528 m_memoryProperties.memoryTypes[memoryTypeNdx]
529 };
530
531 TCU_CHECK(type.type.heapIndex < m_memoryProperties.memoryHeapCount);
532
533 m_heaps[type.type.heapIndex].types.push_back(type);
534 }
535 }
536
537 RandomAllocFreeTestInstance::~RandomAllocFreeTestInstance (void)
538 {
539 const VkDevice device = getDevice();
540 const DeviceInterface& vkd = getDeviceInterface();
541
542 for (deUint32 heapNdx = 0; heapNdx < (deUint32)m_heaps.size(); heapNdx++)
543 {
544 const Heap& heap = m_heaps[heapNdx];
545
546 for (size_t objectNdx = 0; objectNdx < heap.objects.size(); objectNdx++)
547 {
548 if (!!heap.objects[objectNdx].memory)
549 vkd.freeMemory(device, heap.objects[objectNdx].memory, (const VkAllocationCallbacks*)DE_NULL);
550 }
551 }
552 }
553
554 tcu::TestStatus RandomAllocFreeTestInstance::iterate (void)
555 {
556 const VkDevice device = getDevice();
557 const DeviceInterface& vkd = getDeviceInterface();
558 TestLog& log = m_context.getTestContext().getLog();
559 const bool isUMA = m_memoryLimits.totalDeviceLocalMemory == 0;
560 const VkDeviceSize usedSysMem = isUMA ? (m_totalDeviceMem+m_totalSystemMem) : m_totalSystemMem;
561 const bool canAllocateSys = usedSysMem + m_allocSysMemSize + 1024 < m_memoryLimits.totalSystemMemory; // \note Always leave room for 1 KiB sys mem alloc
562 const bool canAllocateDev = isUMA ? canAllocateSys : (m_totalDeviceMem + 16 < m_memoryLimits.totalDeviceLocalMemory);
563 vector<size_t> nonFullHeaps;
564 vector<size_t> nonEmptyHeaps;
565 bool allocateMore;
566
567 if (m_opNdx == 0)
568 {
569 log << TestLog::Message << "Performing " << m_opCount << " random VkAllocMemory() / VkFreeMemory() calls before freeing all memory." << TestLog::EndMessage;
570 log << TestLog::Message << "Using max 1/8 of the memory in each memory heap." << TestLog::EndMessage;
571 }
572
573 // Sort heaps based on whether allocations or frees are possible
574 for (size_t heapNdx = 0; heapNdx < m_heaps.size(); ++heapNdx)
575 {
576 const bool isDeviceLocal = (m_heaps[heapNdx].heap.flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0;
577 const bool isHeapFull = m_heaps[heapNdx].memoryUsage >= m_heaps[heapNdx].maxMemoryUsage;
578 const bool isHeapEmpty = m_heaps[heapNdx].memoryUsage == 0;
579
580 if (!isHeapEmpty)
581 nonEmptyHeaps.push_back(heapNdx);
582
583 if (!isHeapFull && ((isUMA && canAllocateSys) ||
584 (!isUMA && isDeviceLocal && canAllocateDev) ||
585 (!isUMA && !isDeviceLocal && canAllocateSys)))
586 nonFullHeaps.push_back(heapNdx);
587 }
588
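// Decide whether this iteration allocates or frees: once m_opCount operations have been done
// only frees remain (and the next device mask, if any, starts after everything is freed);
// otherwise the direction is chosen at random whenever both are possible.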
589 if (m_opNdx >= m_opCount)
590 {
591 if (nonEmptyHeaps.empty())
592 {
593 m_currentDeviceMask++;
594 if (m_currentDeviceMask > m_totalDeviceMaskCombinations)
595 return tcu::TestStatus::pass("Pass");
596 else
597 {
598 m_opNdx = 0;
599 return tcu::TestStatus::incomplete();
600 }
601 }
602 else
603 allocateMore = false;
604 }
605 else if (!nonEmptyHeaps.empty() &&
606 !nonFullHeaps.empty() &&
607 (m_memoryObjectCount < MAX_ALLOCATION_COUNT) &&
608 canAllocateSys)
609 allocateMore = m_rng.getBool(); // Randomize if both operations are doable.
610 else if (nonEmptyHeaps.empty())
611 {
612 DE_ASSERT(canAllocateSys);
613 allocateMore = true; // Allocate more if there are no objects to free.
614 }
615 else if (nonFullHeaps.empty() || !canAllocateSys)
616 allocateMore = false; // Free objects if there is no free space for new objects.
617 else
618 {
619 allocateMore = false;
620 DE_FATAL("Fail");
621 }
622
623 if (allocateMore)
624 {
625 const size_t nonFullHeapNdx = (size_t)(m_rng.getUint32() % (deUint32)nonFullHeaps.size());
626 const size_t heapNdx = nonFullHeaps[nonFullHeapNdx];
627 Heap& heap = m_heaps[heapNdx];
628 const MemoryType& memoryType = m_rng.choose<MemoryType>(heap.types.begin(), heap.types.end());
629 const bool isDeviceLocal = (heap.heap.flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0;
630 const VkDeviceSize maxAllocSize = (isDeviceLocal && !isUMA)
631 ? de::min(heap.maxMemoryUsage - heap.memoryUsage, (VkDeviceSize)m_memoryLimits.totalDeviceLocalMemory - m_totalDeviceMem)
632 : de::min(heap.maxMemoryUsage - heap.memoryUsage, (VkDeviceSize)m_memoryLimits.totalSystemMemory - usedSysMem - m_allocSysMemSize);
633 const VkDeviceSize allocationSize = 1 + (m_rng.getUint64() % maxAllocSize);
634
635 if ((allocationSize > (deUint64)(heap.maxMemoryUsage - heap.memoryUsage)) && (allocationSize != 1))
636 TCU_THROW(InternalError, "Test Error: trying to allocate more memory than the available heap space.");
637
638 const MemoryObject object =
639 {
640 (VkDeviceMemory)0,
641 allocationSize
642 };
643
644 heap.objects.push_back(object);
645
646 m_allocFlagsInfo.deviceMask = m_currentDeviceMask;
647 const VkMemoryAllocateInfo alloc =
648 {
649 VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO, // sType
650 m_useDeviceGroups ? &m_allocFlagsInfo : DE_NULL, // pNext
651 object.size, // allocationSize
652 memoryType.index // memoryTypeIndex;
653 };
654
655 VK_CHECK(vkd.allocateMemory(device, &alloc, (const VkAllocationCallbacks*)DE_NULL, &heap.objects.back().memory));
656 TCU_CHECK(!!heap.objects.back().memory);
657 m_memoryObjectCount++;
658
659 heap.memoryUsage += allocationSize;
660 (isDeviceLocal ? m_totalDeviceMem : m_totalSystemMem) += allocationSize;
661 m_totalSystemMem += m_allocSysMemSize;
662 }
663 else
664 {
665 const size_t nonEmptyHeapNdx = (size_t)(m_rng.getUint32() % (deUint32)nonEmptyHeaps.size());
666 const size_t heapNdx = nonEmptyHeaps[nonEmptyHeapNdx];
667 Heap& heap = m_heaps[heapNdx];
668 const size_t memoryObjectNdx = m_rng.getUint32() % heap.objects.size();
669 MemoryObject& memoryObject = heap.objects[memoryObjectNdx];
670 const bool isDeviceLocal = (heap.heap.flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0;
671
672 vkd.freeMemory(device, memoryObject.memory, (const VkAllocationCallbacks*)DE_NULL);
673 memoryObject.memory = (VkDeviceMemory)0;
674 m_memoryObjectCount--;
675
676 heap.memoryUsage -= memoryObject.size;
677 (isDeviceLocal ? m_totalDeviceMem : m_totalSystemMem) -= memoryObject.size;
678 m_totalSystemMem -= m_allocSysMemSize;
679
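// Remove the freed object with a swap-and-pop so the objects vector stays dense.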
680 heap.objects[memoryObjectNdx] = heap.objects.back();
681 heap.objects.pop_back();
682
683 DE_ASSERT(heap.memoryUsage == 0 || !heap.objects.empty());
684 }
685
686 m_opNdx++;
687 return tcu::TestStatus::incomplete();
688 }
689
690
691 } // anonymous
692
693 tcu::TestCaseGroup* createAllocationTestsCommon (tcu::TestContext& testCtx, bool useDeviceGroups)
694 {
695 const char* name = useDeviceGroups ? "device_group_allocation" : "allocation";
696 de::MovePtr<tcu::TestCaseGroup> group (new tcu::TestCaseGroup(testCtx, name, "Memory allocation tests."));
697
698 const VkDeviceSize KiB = 1024;
699 const VkDeviceSize MiB = 1024 * KiB;
700
701 const struct
702 {
703 const char* const str;
704 VkDeviceSize size;
705 } allocationSizes[] =
706 {
707 { "64", 64 },
708 { "128", 128 },
709 { "256", 256 },
710 { "512", 512 },
711 { "1KiB", 1*KiB },
712 { "4KiB", 4*KiB },
713 { "8KiB", 8*KiB },
714 { "1MiB", 1*MiB }
715 };
716
717 const int allocationPercents[] =
718 {
719 1
720 };
721
722 const int allocationCounts[] =
723 {
724 1, 10, 100, 1000, -1
725 };
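// An allocation count of -1 is a sentinel: the actual count is derived below so that the
// total stays within roughly 50 MiB (or 1/8 of the heap for the percentage variants), capped
// at MAX_ALLOCATION_COUNT.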
726
727 const struct
728 {
729 const char* const str;
730 const TestConfig::Order order;
731 } orders[] =
732 {
733 { "forward", TestConfig::ALLOC_FREE },
734 { "reverse", TestConfig::ALLOC_REVERSE_FREE },
735 { "mixed", TestConfig::MIXED_ALLOC_FREE }
736 };
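// The basic group is the cross product of allocation size (or heap percentage), free order
// and allocation count; combinations whose total footprint would be too large (over ~50 MiB,
// or over 1/8 of a heap for the percentage variants), or whose derived count duplicates an
// explicit one, are skipped.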
737
738 {
739 de::MovePtr<tcu::TestCaseGroup> basicGroup(new tcu::TestCaseGroup(testCtx, "basic", "Basic memory allocation and free tests"));
740
741 for (size_t allocationSizeNdx = 0; allocationSizeNdx < DE_LENGTH_OF_ARRAY(allocationSizes); allocationSizeNdx++)
742 {
743 const VkDeviceSize allocationSize = allocationSizes[allocationSizeNdx].size;
744 const char* const allocationSizeName = allocationSizes[allocationSizeNdx].str;
745 de::MovePtr<tcu::TestCaseGroup> sizeGroup (new tcu::TestCaseGroup(testCtx, ("size_" + string(allocationSizeName)).c_str(), ("Test different allocation sizes " + de::toString(allocationSize)).c_str()));
746
747 for (size_t orderNdx = 0; orderNdx < DE_LENGTH_OF_ARRAY(orders); orderNdx++)
748 {
749 const TestConfig::Order order = orders[orderNdx].order;
750 const char* const orderName = orders[orderNdx].str;
751 const char* const orderDescription = orderName;
752 de::MovePtr<tcu::TestCaseGroup> orderGroup (new tcu::TestCaseGroup(testCtx, orderName, orderDescription));
753
754 for (size_t allocationCountNdx = 0; allocationCountNdx < DE_LENGTH_OF_ARRAY(allocationCounts); allocationCountNdx++)
755 {
756 const int allocationCount = allocationCounts[allocationCountNdx];
757
758 if (allocationCount != -1 && allocationCount * allocationSize > 50 * MiB)
759 continue;
760
761 TestConfig config;
762
763 config.memorySize = allocationSize;
764 config.order = order;
765 config.useDeviceGroups = useDeviceGroups;
766 if (allocationCount == -1)
767 {
768 if (allocationSize < 4096)
769 continue;
770
771 config.memoryAllocationCount = de::min((deUint32)(50 * MiB / allocationSize), (deUint32)MAX_ALLOCATION_COUNT);
772
773 if (config.memoryAllocationCount == 0
774 || config.memoryAllocationCount == 1
775 || config.memoryAllocationCount == 10
776 || config.memoryAllocationCount == 100
777 || config.memoryAllocationCount == 1000)
778 continue;
779 }
780 else
781 config.memoryAllocationCount = allocationCount;
782
783 orderGroup->addChild(new InstanceFactory1<AllocateFreeTestInstance, TestConfig>(testCtx, tcu::NODETYPE_SELF_VALIDATE, "count_" + de::toString(config.memoryAllocationCount), "", config));
784 }
785
786 sizeGroup->addChild(orderGroup.release());
787 }
788
789 basicGroup->addChild(sizeGroup.release());
790 }
791
792 for (size_t allocationPercentNdx = 0; allocationPercentNdx < DE_LENGTH_OF_ARRAY(allocationPercents); allocationPercentNdx++)
793 {
794 const int allocationPercent = allocationPercents[allocationPercentNdx];
795 de::MovePtr<tcu::TestCaseGroup> percentGroup (new tcu::TestCaseGroup(testCtx, ("percent_" + de::toString(allocationPercent)).c_str(), ("Test different allocation percents " + de::toString(allocationPercent)).c_str()));
796
797 for (size_t orderNdx = 0; orderNdx < DE_LENGTH_OF_ARRAY(orders); orderNdx++)
798 {
799 const TestConfig::Order order = orders[orderNdx].order;
800 const char* const orderName = orders[orderNdx].str;
801 const char* const orderDescription = orderName;
802 de::MovePtr<tcu::TestCaseGroup> orderGroup (new tcu::TestCaseGroup(testCtx, orderName, orderDescription));
803
804 for (size_t allocationCountNdx = 0; allocationCountNdx < DE_LENGTH_OF_ARRAY(allocationCounts); allocationCountNdx++)
805 {
806 const int allocationCount = allocationCounts[allocationCountNdx];
807
808 if ((allocationCount != -1) && ((float)allocationCount * (float)allocationPercent >= 1.00f / 8.00f))
809 continue;
810
811 TestConfig config;
812
813 config.memoryPercentage = (float)allocationPercent / 100.0f;
814 config.order = order;
815 config.useDeviceGroups = useDeviceGroups;
816
817 if (allocationCount == -1)
818 {
819 config.memoryAllocationCount = de::min((deUint32)((1.00f / 8.00f) / ((float)allocationPercent / 100.0f)), (deUint32)MAX_ALLOCATION_COUNT);
820
821 if (config.memoryAllocationCount == 0
822 || config.memoryAllocationCount == 1
823 || config.memoryAllocationCount == 10
824 || config.memoryAllocationCount == 100
825 || config.memoryAllocationCount == 1000)
826 continue;
827 }
828 else
829 config.memoryAllocationCount = allocationCount;
830
831 orderGroup->addChild(new InstanceFactory1<AllocateFreeTestInstance, TestConfig>(testCtx, tcu::NODETYPE_SELF_VALIDATE, "count_" + de::toString(config.memoryAllocationCount), "", config));
832 }
833
834 percentGroup->addChild(orderGroup.release());
835 }
836
837 basicGroup->addChild(percentGroup.release());
838 }
839
840 group->addChild(basicGroup.release());
841 }
842
843 {
844 const deUint32 caseCount = 100;
845 de::MovePtr<tcu::TestCaseGroup> randomGroup (new tcu::TestCaseGroup(testCtx, "random", "Random memory allocation tests."));
846
847 for (deUint32 caseNdx = 0; caseNdx < caseCount; caseNdx++)
848 {
849 TestConfigRandom config(deInt32Hash(caseNdx ^ 32480), useDeviceGroups);
850
851 randomGroup->addChild(new InstanceFactory1<RandomAllocFreeTestInstance, TestConfigRandom>(testCtx, tcu::NODETYPE_SELF_VALIDATE, de::toString(caseNdx), "Random case", config));
852 }
853
854 group->addChild(randomGroup.release());
855 }
856
857 return group.release();
858 }
859
860 tcu::TestCaseGroup* createAllocationTests (tcu::TestContext& testCtx)
861 {
862 return createAllocationTestsCommon(testCtx, false);
863 }
864
865 tcu::TestCaseGroup* createDeviceGroupAllocationTests (tcu::TestContext& testCtx)
866 {
867 return createAllocationTestsCommon(testCtx, true);
868 }
869
870 } // memory
871 } // vkt
872