/*-------------------------------------------------------------------------
 * Vulkan Conformance Tests
 * ------------------------
 *
 * Copyright (c) 2015 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*!
 * \file
 * \brief Simple memory mapping tests.
 *//*--------------------------------------------------------------------*/
23
24 #include "vktMemoryMappingTests.hpp"
25
26 #include "vktTestCaseUtil.hpp"
27
28 #include "tcuMaybe.hpp"
29 #include "tcuResultCollector.hpp"
30 #include "tcuTestLog.hpp"
31 #include "tcuPlatform.hpp"
32
33 #include "vkDeviceUtil.hpp"
34 #include "vkPlatform.hpp"
35 #include "vkQueryUtil.hpp"
36 #include "vkRef.hpp"
37 #include "vkRefUtil.hpp"
38 #include "vkStrUtil.hpp"
39 #include "vkAllocationCallbackUtil.hpp"
40
41 #include "deRandom.hpp"
42 #include "deSharedPtr.hpp"
43 #include "deStringUtil.hpp"
44 #include "deUniquePtr.hpp"
45 #include "deSTLUtil.hpp"
46 #include "deMath.h"
47
48 #include <string>
49 #include <vector>
50 #include <algorithm>
51
52 using tcu::Maybe;
53 using tcu::TestLog;
54
55 using de::SharedPtr;
56
57 using std::string;
58 using std::vector;
59 using std::pair;
60
61 using namespace vk;
62
63 namespace vkt
64 {
65 namespace memory
66 {
67 namespace
68 {
template<typename T>
T divRoundUp (const T& a, const T& b)
{
    return (a / b) + (a % b == 0 ? 0 : 1);
}

template<typename T>
T roundDownToMultiple (const T& a, const T& b)
{
    return b * (a / b);
}

template<typename T>
T roundUpToMultiple (const T& a, const T& b)
{
    return b * (a / b + (a % b != 0 ? 1 : 0));
}
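
// For reference: divRoundUp(7u, 4u) == 2u, roundDownToMultiple(7u, 4u) == 4u and
// roundUpToMultiple(7u, 4u) == 8u. When a is already a multiple of b, rounding in
// either direction returns a unchanged.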

enum AllocationKind
{
    ALLOCATION_KIND_SUBALLOCATED = 0,
    ALLOCATION_KIND_DEDICATED_BUFFER = 1,
    ALLOCATION_KIND_DEDICATED_IMAGE = 2,
    ALLOCATION_KIND_LAST
};

// \note Bit vector that guarantees that each value takes only one bit.
// std::vector<bool> is often optimized to take only one bit per bool, but
// that is an implementation detail, and in this case we really need to know
// how much memory is used.
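// For example, a BitVector covering 2^20 entries always stores them as 32768 deUint32
// blocks, i.e. exactly 128 KiB; the memory budgeting below relies on this exact footprint.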
class BitVector
{
public:
    enum
    {
        BLOCK_BIT_SIZE = 8 * sizeof(deUint32)
    };

    BitVector (size_t size, bool value = false)
        : m_data(divRoundUp<size_t>(size, (size_t)BLOCK_BIT_SIZE), value ? ~0x0u : 0x0u)
    {
    }

    bool get (size_t ndx) const
    {
        return (m_data[ndx / BLOCK_BIT_SIZE] & (0x1u << (deUint32)(ndx % BLOCK_BIT_SIZE))) != 0;
    }

    void set (size_t ndx, bool value)
    {
        if (value)
            m_data[ndx / BLOCK_BIT_SIZE] |= 0x1u << (deUint32)(ndx % BLOCK_BIT_SIZE);
        else
            m_data[ndx / BLOCK_BIT_SIZE] &= ~(0x1u << (deUint32)(ndx % BLOCK_BIT_SIZE));
    }

    void setRange (size_t offset, size_t count, bool value)
    {
        size_t ndx = offset;

        for (; (ndx < offset + count) && ((ndx % BLOCK_BIT_SIZE) != 0); ndx++)
        {
            DE_ASSERT(ndx >= offset);
            DE_ASSERT(ndx < offset + count);
            set(ndx, value);
        }

        {
            const size_t endOfFullBlockNdx = roundDownToMultiple<size_t>(offset + count, BLOCK_BIT_SIZE);

            if (ndx < endOfFullBlockNdx)
            {
                deMemset(&m_data[ndx / BLOCK_BIT_SIZE], (value ? 0xFF : 0x0), (endOfFullBlockNdx - ndx) / 8);
                ndx = endOfFullBlockNdx;
            }
        }

        for (; ndx < offset + count; ndx++)
        {
            DE_ASSERT(ndx >= offset);
            DE_ASSERT(ndx < offset + count);
            set(ndx, value);
        }
    }

    void vectorAnd (const BitVector& other, size_t offset, size_t count)
    {
        size_t ndx = offset;

        for (; ndx < offset + count && (ndx % BLOCK_BIT_SIZE) != 0; ndx++)
        {
            DE_ASSERT(ndx >= offset);
            DE_ASSERT(ndx < offset + count);
            set(ndx, other.get(ndx) && get(ndx));
        }

        for (; ndx < roundDownToMultiple<size_t>(offset + count, BLOCK_BIT_SIZE); ndx += BLOCK_BIT_SIZE)
        {
            DE_ASSERT(ndx >= offset);
            DE_ASSERT(ndx < offset + count);
            DE_ASSERT(ndx % BLOCK_BIT_SIZE == 0);
            DE_ASSERT(ndx + BLOCK_BIT_SIZE <= offset + count);
            m_data[ndx / BLOCK_BIT_SIZE] &= other.m_data[ndx / BLOCK_BIT_SIZE];
        }

        for (; ndx < offset + count; ndx++)
        {
            DE_ASSERT(ndx >= offset);
            DE_ASSERT(ndx < offset + count);
            set(ndx, other.get(ndx) && get(ndx));
        }
    }

private:
    vector<deUint32> m_data;
};

class ReferenceMemory
{
public:
    ReferenceMemory (size_t size, size_t atomSize)
        : m_atomSize (atomSize)
        , m_bytes    (size, 0xDEu)
        , m_defined  (size, false)
        , m_flushed  (size / atomSize, false)
    {
        DE_ASSERT(size % m_atomSize == 0);
    }

    void write (size_t pos, deUint8 value)
    {
        m_bytes[pos] = value;
        m_defined.set(pos, true);
        m_flushed.set(pos / m_atomSize, false);
    }

    bool read (size_t pos, deUint8 value)
    {
        // Reading an undefined byte is always ok; a defined byte must match the reference.
        const bool isOk = !m_defined.get(pos)
                        || m_bytes[pos] == value;

        m_bytes[pos] = value;
        m_defined.set(pos, true);

        return isOk;
    }

    bool modifyXor (size_t pos, deUint8 value, deUint8 mask)
    {
        const bool isOk = !m_defined.get(pos)
                        || m_bytes[pos] == value;

        m_bytes[pos] = value ^ mask;
        m_defined.set(pos, true);
        m_flushed.set(pos / m_atomSize, false);

        return isOk;
    }

    void flush (size_t offset, size_t size)
    {
        DE_ASSERT((offset % m_atomSize) == 0);
        DE_ASSERT((size % m_atomSize) == 0);

        m_flushed.setRange(offset / m_atomSize, size / m_atomSize, true);
    }

    void invalidate (size_t offset, size_t size)
    {
        DE_ASSERT((offset % m_atomSize) == 0);
        DE_ASSERT((size % m_atomSize) == 0);

        // Invalidating an atom that has not been flushed makes its bytes undefined again.
        if (m_atomSize == 1)
        {
            m_defined.vectorAnd(m_flushed, offset, size);
        }
        else
        {
            for (size_t ndx = 0; ndx < size / m_atomSize; ndx++)
            {
                if (!m_flushed.get((offset / m_atomSize) + ndx))
                    m_defined.setRange(offset + ndx * m_atomSize, m_atomSize, false);
            }
        }
    }

private:
    const size_t    m_atomSize;
    vector<deUint8> m_bytes;
    BitVector       m_defined;
    BitVector       m_flushed;
};

struct MemoryType
{
    MemoryType (deUint32 index_, const VkMemoryType& type_)
        : index (index_)
        , type  (type_)
    {
    }

    MemoryType (void)
        : index (~0u)
    {
    }

    deUint32     index;
    VkMemoryType type;
};

size_t computeDeviceMemorySystemMemFootprint (const DeviceInterface& vk, VkDevice device)
{
    AllocationCallbackRecorder callbackRecorder (getSystemAllocator());

    {
        // 1 B allocation from memory type 0
        const VkMemoryAllocateInfo allocInfo =
        {
            VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
            DE_NULL,
            1u,
            0u,
        };
        const Unique<VkDeviceMemory>        memory      (allocateMemory(vk, device, &allocInfo));
        AllocationCallbackValidationResults validateRes;

        validateAllocationCallbacks(callbackRecorder, &validateRes);

        TCU_CHECK(validateRes.violations.empty());

        return getLiveSystemAllocationTotal(validateRes)
               + sizeof(void*)*validateRes.liveAllocations.size(); // allocation overhead
    }
}

Move<VkImage> makeImage (const DeviceInterface& vk, VkDevice device, VkDeviceSize size, deUint32 queueFamilyIndex)
{
    // Pick the smallest power-of-two-sided square RGBA8 image whose data covers 'size' bytes (4 bytes per texel).
    const VkDeviceSize sizeInPixels   = (size + 3u) / 4u;
    const deUint32     sqrtSize       = static_cast<deUint32>(deFloatCeil(deFloatSqrt(static_cast<float>(sizeInPixels))));
    const deUint32     powerOfTwoSize = deSmallestGreaterOrEquallPowerOfTwoU32(sqrtSize);
    const VkImageCreateInfo colorImageParams =
    {
        VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,    // VkStructureType          sType;
        DE_NULL,                                // const void*              pNext;
        0u,                                     // VkImageCreateFlags       flags;
        VK_IMAGE_TYPE_2D,                       // VkImageType              imageType;
        VK_FORMAT_R8G8B8A8_UINT,                // VkFormat                 format;
        {
            powerOfTwoSize,
            powerOfTwoSize,
            1u
        },                                      // VkExtent3D               extent;
        1u,                                     // deUint32                 mipLevels;
        1u,                                     // deUint32                 arraySize;
        VK_SAMPLE_COUNT_1_BIT,                  // VkSampleCountFlagBits    samples;
        VK_IMAGE_TILING_LINEAR,                 // VkImageTiling            tiling;
        VK_IMAGE_USAGE_TRANSFER_SRC_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT,  // VkImageUsageFlags usage;
        VK_SHARING_MODE_EXCLUSIVE,              // VkSharingMode            sharingMode;
        1u,                                     // deUint32                 queueFamilyCount;
        &queueFamilyIndex,                      // const deUint32*          pQueueFamilyIndices;
        VK_IMAGE_LAYOUT_UNDEFINED,              // VkImageLayout            initialLayout;
    };

    return createImage(vk, device, &colorImageParams);
}

Move<VkBuffer> makeBuffer (const DeviceInterface& vk, VkDevice device, VkDeviceSize size, deUint32 queueFamilyIndex)
{
    const VkBufferCreateInfo bufferParams =
    {
        VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,   // VkStructureType      sType;
        DE_NULL,                                // const void*          pNext;
        0u,                                     // VkBufferCreateFlags  flags;
        size,                                   // VkDeviceSize         size;
        VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT,    // VkBufferUsageFlags usage;
        VK_SHARING_MODE_EXCLUSIVE,              // VkSharingMode        sharingMode;
        1u,                                     // deUint32             queueFamilyCount;
        &queueFamilyIndex,                      // const deUint32*      pQueueFamilyIndices;
    };
    return vk::createBuffer(vk, device, &bufferParams, (const VkAllocationCallbacks*)DE_NULL);
}

VkMemoryRequirements getImageMemoryRequirements (const DeviceInterface& vk, VkDevice device, Move<VkImage>& image)
{
    VkImageMemoryRequirementsInfo2 info =
    {
        VK_STRUCTURE_TYPE_IMAGE_MEMORY_REQUIREMENTS_INFO_2, // VkStructureType  sType
        DE_NULL,                                            // const void*      pNext
        *image                                              // VkImage          image
    };
    VkMemoryDedicatedRequirements dedicatedRequirements =
    {
        VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS,    // VkStructureType  sType
        DE_NULL,                                            // const void*      pNext
        VK_FALSE,                                           // VkBool32         prefersDedicatedAllocation
        VK_FALSE                                            // VkBool32         requiresDedicatedAllocation
    };
    VkMemoryRequirements2 req2 =
    {
        VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2,            // VkStructureType          sType
        &dedicatedRequirements,                             // void*                    pNext
        {0, 0, 0}                                           // VkMemoryRequirements     memoryRequirements
    };

    vk.getImageMemoryRequirements2(device, &info, &req2);

    return req2.memoryRequirements;
}

VkMemoryRequirements getBufferMemoryRequirements (const DeviceInterface& vk, VkDevice device, Move<VkBuffer>& buffer)
{
    VkBufferMemoryRequirementsInfo2 info =
    {
        VK_STRUCTURE_TYPE_BUFFER_MEMORY_REQUIREMENTS_INFO_2,    // VkStructureType  sType
        DE_NULL,                                                // const void*      pNext
        *buffer                                                 // VkBuffer         buffer
    };
    VkMemoryDedicatedRequirements dedicatedRequirements =
    {
        VK_STRUCTURE_TYPE_MEMORY_DEDICATED_REQUIREMENTS,        // VkStructureType  sType
        DE_NULL,                                                // const void*      pNext
        VK_FALSE,                                               // VkBool32         prefersDedicatedAllocation
        VK_FALSE                                                // VkBool32         requiresDedicatedAllocation
    };
    VkMemoryRequirements2 req2 =
    {
        VK_STRUCTURE_TYPE_MEMORY_REQUIREMENTS_2,                // VkStructureType          sType
        &dedicatedRequirements,                                 // void*                    pNext
        {0, 0, 0}                                               // VkMemoryRequirements     memoryRequirements
    };

    vk.getBufferMemoryRequirements2(device, &info, &req2);

    return req2.memoryRequirements;
}

Move<VkDeviceMemory> allocMemory (const DeviceInterface& vk, VkDevice device, VkDeviceSize pAllocInfo_allocationSize, deUint32 pAllocInfo_memoryTypeIndex)
{
    const VkMemoryAllocateInfo pAllocInfo =
    {
        VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
        DE_NULL,
        pAllocInfo_allocationSize,
        pAllocInfo_memoryTypeIndex,
    };
    return allocateMemory(vk, device, &pAllocInfo);
}

Move<VkDeviceMemory> allocMemory (const DeviceInterface& vk, VkDevice device, VkDeviceSize pAllocInfo_allocationSize, deUint32 pAllocInfo_memoryTypeIndex, Move<VkImage>& image, Move<VkBuffer>& buffer)
{
    DE_ASSERT((!image) || (!buffer));

    const VkMemoryDedicatedAllocateInfo dedicatedAllocateInfo =
    {
        VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO_KHR,   // VkStructureType  sType
        DE_NULL,                                                // const void*      pNext
        *image,                                                 // VkImage          image
        *buffer                                                 // VkBuffer         buffer
    };

    const VkMemoryAllocateInfo pAllocInfo =
    {
        VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
        !image && !buffer ? DE_NULL : &dedicatedAllocateInfo,
        pAllocInfo_allocationSize,
        pAllocInfo_memoryTypeIndex,
    };
    return allocateMemory(vk, device, &pAllocInfo);
}

struct MemoryRange
{
    MemoryRange (VkDeviceSize offset_ = ~(VkDeviceSize)0, VkDeviceSize size_ = ~(VkDeviceSize)0)
        : offset (offset_)
        , size   (size_)
    {
    }

    VkDeviceSize offset;
    VkDeviceSize size;
};

struct TestConfig
{
    TestConfig (void)
        : allocationSize (~(VkDeviceSize)0)
        , allocationKind (ALLOCATION_KIND_SUBALLOCATED)
    {
    }

    VkDeviceSize        allocationSize;
    deUint32            seed;

    MemoryRange         mapping;
    vector<MemoryRange> flushMappings;
    vector<MemoryRange> invalidateMappings;
    bool                remap;
    AllocationKind      allocationKind;
};

bool compareAndLogBuffer (TestLog& log, size_t size, const deUint8* result, const deUint8* reference)
{
    size_t failedBytes = 0;
    size_t firstFailed = (size_t)-1;

    for (size_t ndx = 0; ndx < size; ndx++)
    {
        if (result[ndx] != reference[ndx])
        {
            failedBytes++;

            if (firstFailed == (size_t)-1)
                firstFailed = ndx;
        }
    }

    if (failedBytes > 0)
    {
        log << TestLog::Message << "Comparison failed. Failed bytes " << failedBytes << ". First failed at offset " << firstFailed << "." << TestLog::EndMessage;

        std::ostringstream expectedValues;
        std::ostringstream resultValues;

        for (size_t ndx = firstFailed; ndx < firstFailed + 10 && ndx < size; ndx++)
        {
            if (ndx != firstFailed)
            {
                expectedValues << ", ";
                resultValues << ", ";
            }

            // Widen to deUint32 so the bytes are logged as numbers rather than characters.
            expectedValues << (deUint32)reference[ndx];
            resultValues << (deUint32)result[ndx];
        }

        if (firstFailed + 10 < size)
        {
            expectedValues << "...";
            resultValues << "...";
        }

        log << TestLog::Message << "Expected values at offset: " << firstFailed << ", " << expectedValues.str() << TestLog::EndMessage;
        log << TestLog::Message << "Result values at offset: " << firstFailed << ", " << resultValues.str() << TestLog::EndMessage;

        return false;
    }
    else
        return true;
}

tcu::TestStatus testMemoryMapping (Context& context, const TestConfig config)
{
    TestLog&                               log                     = context.getTestContext().getLog();
    tcu::ResultCollector                   result                  (log);
    bool                                   atLeastOneTestPerformed = false;
    const VkPhysicalDevice                 physicalDevice          = context.getPhysicalDevice();
    const VkDevice                         device                  = context.getDevice();
    const InstanceInterface&               vki                     = context.getInstanceInterface();
    const DeviceInterface&                 vkd                     = context.getDeviceInterface();
    const VkPhysicalDeviceMemoryProperties memoryProperties        = getPhysicalDeviceMemoryProperties(vki, physicalDevice);
    const VkDeviceSize                     nonCoherentAtomSize     = context.getDeviceProperties().limits.nonCoherentAtomSize;
    const deUint32                         queueFamilyIndex        = context.getUniversalQueueFamilyIndex();

    if (config.allocationKind == ALLOCATION_KIND_DEDICATED_IMAGE
        || config.allocationKind == ALLOCATION_KIND_DEDICATED_BUFFER)
    {
        const std::vector<std::string>& extensions  = context.getDeviceExtensions();
        const deBool                    isSupported = isDeviceExtensionSupported(context.getUsedApiVersion(), extensions, "VK_KHR_dedicated_allocation");
        if (!isSupported)
        {
            TCU_THROW(NotSupportedError, "Not supported");
        }
    }

    {
        const tcu::ScopedLogSection section (log, "TestCaseInfo", "TestCaseInfo");

        log << TestLog::Message << "Seed: " << config.seed << TestLog::EndMessage;
        log << TestLog::Message << "Allocation size: " << config.allocationSize << TestLog::EndMessage;
        log << TestLog::Message << "Mapping, offset: " << config.mapping.offset << ", size: " << config.mapping.size << TestLog::EndMessage;

        if (!config.flushMappings.empty())
        {
            log << TestLog::Message << "Flushing following ranges:" << TestLog::EndMessage;

            for (size_t ndx = 0; ndx < config.flushMappings.size(); ndx++)
                log << TestLog::Message << "\tOffset: " << config.flushMappings[ndx].offset << ", Size: " << config.flushMappings[ndx].size << TestLog::EndMessage;
        }

        if (config.remap)
            log << TestLog::Message << "Remapping memory between flush and invalidation." << TestLog::EndMessage;

        if (!config.invalidateMappings.empty())
        {
            log << TestLog::Message << "Invalidating following ranges:" << TestLog::EndMessage;

            for (size_t ndx = 0; ndx < config.invalidateMappings.size(); ndx++)
                log << TestLog::Message << "\tOffset: " << config.invalidateMappings[ndx].offset << ", Size: " << config.invalidateMappings[ndx].size << TestLog::EndMessage;
        }
    }

    for (deUint32 memoryTypeIndex = 0; memoryTypeIndex < memoryProperties.memoryTypeCount; memoryTypeIndex++)
    {
        try
        {
            const tcu::ScopedLogSection section    (log, "MemoryType" + de::toString(memoryTypeIndex), "MemoryType" + de::toString(memoryTypeIndex));
            const vk::VkMemoryType&     memoryType = memoryProperties.memoryTypes[memoryTypeIndex];
            const VkMemoryHeap&         memoryHeap = memoryProperties.memoryHeaps[memoryType.heapIndex];
            const VkDeviceSize          atomSize   = nonCoherentAtomSize;

            VkDeviceSize allocationSize = (config.allocationSize % atomSize == 0) ? config.allocationSize : config.allocationSize + (atomSize - (config.allocationSize % atomSize));
            vk::VkMemoryRequirements req =
            {
                (VkDeviceSize)allocationSize,
                (VkDeviceSize)0,
                ~(deUint32)0u
            };
            Move<VkImage>  image;
            Move<VkBuffer> buffer;

            if (config.allocationKind == ALLOCATION_KIND_DEDICATED_IMAGE)
            {
                image = makeImage(vkd, device, allocationSize, queueFamilyIndex);
                req   = getImageMemoryRequirements(vkd, device, image);
            }
            else if (config.allocationKind == ALLOCATION_KIND_DEDICATED_BUFFER)
            {
                buffer = makeBuffer(vkd, device, allocationSize, queueFamilyIndex);
                req    = getBufferMemoryRequirements(vkd, device, buffer);
            }
            allocationSize = req.size;
            VkDeviceSize mappingSize   = (config.mapping.size % atomSize == 0) ? config.mapping.size : config.mapping.size + (atomSize - (config.mapping.size % atomSize));
            VkDeviceSize mappingOffset = (config.mapping.offset % atomSize == 0) ? config.mapping.offset : config.mapping.offset - (config.mapping.offset % atomSize);
            if (config.mapping.size == config.allocationSize && config.mapping.offset == 0u)
            {
                mappingSize = allocationSize;
            }

            log << TestLog::Message << "MemoryType: " << memoryType << TestLog::EndMessage;
            log << TestLog::Message << "MemoryHeap: " << memoryHeap << TestLog::EndMessage;
            log << TestLog::Message << "AtomSize: " << atomSize << TestLog::EndMessage;
            log << TestLog::Message << "AllocationSize: " << allocationSize << TestLog::EndMessage;
            log << TestLog::Message << "Mapping, offset: " << mappingOffset << ", size: " << mappingSize << TestLog::EndMessage;

            if ((req.memoryTypeBits & (1u << memoryTypeIndex)) == 0)
            {
                static const char* const allocationKindName[] =
                {
                    "suballocation",
                    "dedicated allocation of buffers",
                    "dedicated allocation of images"
                };
                log << TestLog::Message << "Memory type does not support " << allocationKindName[static_cast<deUint32>(config.allocationKind)] << '.' << TestLog::EndMessage;
                continue;
            }

            if (!config.flushMappings.empty())
            {
                log << TestLog::Message << "Flushing following ranges:" << TestLog::EndMessage;

                for (size_t ndx = 0; ndx < config.flushMappings.size(); ndx++)
                {
                    const VkDeviceSize offset = (config.flushMappings[ndx].offset % atomSize == 0) ? config.flushMappings[ndx].offset : config.flushMappings[ndx].offset - (config.flushMappings[ndx].offset % atomSize);
                    const VkDeviceSize size   = (config.flushMappings[ndx].size % atomSize == 0) ? config.flushMappings[ndx].size : config.flushMappings[ndx].size + (atomSize - (config.flushMappings[ndx].size % atomSize));
                    log << TestLog::Message << "\tOffset: " << offset << ", Size: " << size << TestLog::EndMessage;
                }
            }

            if (!config.invalidateMappings.empty())
            {
                log << TestLog::Message << "Invalidating following ranges:" << TestLog::EndMessage;

                for (size_t ndx = 0; ndx < config.invalidateMappings.size(); ndx++)
                {
                    const VkDeviceSize offset = (config.invalidateMappings[ndx].offset % atomSize == 0) ? config.invalidateMappings[ndx].offset : config.invalidateMappings[ndx].offset - (config.invalidateMappings[ndx].offset % atomSize);
                    const VkDeviceSize size   = (config.invalidateMappings[ndx].size % atomSize == 0) ? config.invalidateMappings[ndx].size : config.invalidateMappings[ndx].size + (atomSize - (config.invalidateMappings[ndx].size % atomSize));
                    log << TestLog::Message << "\tOffset: " << offset << ", Size: " << size << TestLog::EndMessage;
                }
            }

            if ((memoryType.propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT) == 0)
            {
                log << TestLog::Message << "Memory type doesn't support mapping." << TestLog::EndMessage;
            }
            else if (memoryHeap.size <= 4 * allocationSize)
            {
                log << TestLog::Message << "Memory type's heap is too small." << TestLog::EndMessage;
            }
            else
            {
                atLeastOneTestPerformed = true;
                const Unique<VkDeviceMemory> memory    (allocMemory(vkd, device, allocationSize, memoryTypeIndex, image, buffer));
                de::Random                   rng       (config.seed);
                vector<deUint8>              reference ((size_t)(allocationSize));
                deUint8*                     mapping   = DE_NULL;

                {
                    void* ptr;
                    VK_CHECK(vkd.mapMemory(device, *memory, mappingOffset, mappingSize, 0u, &ptr));
                    TCU_CHECK(ptr);

                    mapping = (deUint8*)ptr;
                }

                for (VkDeviceSize ndx = 0; ndx < mappingSize; ndx++)
                {
                    const deUint8 val = rng.getUint8();

                    mapping[ndx] = val;
                    reference[(size_t)(mappingOffset + ndx)] = val;
                }

                if (!config.flushMappings.empty())
                {
                    vector<VkMappedMemoryRange> ranges;

                    for (size_t ndx = 0; ndx < config.flushMappings.size(); ndx++)
                    {
                        const VkMappedMemoryRange range =
                        {
                            VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,
                            DE_NULL,

                            *memory,
                            (config.flushMappings[ndx].offset % atomSize == 0) ? config.flushMappings[ndx].offset : config.flushMappings[ndx].offset - (config.flushMappings[ndx].offset % atomSize),
                            (config.flushMappings[ndx].size % atomSize == 0) ? config.flushMappings[ndx].size : config.flushMappings[ndx].size + (atomSize - (config.flushMappings[ndx].size % atomSize)),
                        };

                        ranges.push_back(range);
                    }

                    VK_CHECK(vkd.flushMappedMemoryRanges(device, (deUint32)ranges.size(), &ranges[0]));
                }

                if (config.remap)
                {
                    void* ptr;
                    vkd.unmapMemory(device, *memory);
                    VK_CHECK(vkd.mapMemory(device, *memory, mappingOffset, mappingSize, 0u, &ptr));
                    TCU_CHECK(ptr);

                    mapping = (deUint8*)ptr;
                }

                if (!config.invalidateMappings.empty())
                {
                    vector<VkMappedMemoryRange> ranges;

                    for (size_t ndx = 0; ndx < config.invalidateMappings.size(); ndx++)
                    {
                        const VkMappedMemoryRange range =
                        {
                            VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,
                            DE_NULL,

                            *memory,
                            (config.invalidateMappings[ndx].offset % atomSize == 0) ? config.invalidateMappings[ndx].offset : config.invalidateMappings[ndx].offset - (config.invalidateMappings[ndx].offset % atomSize),
                            (config.invalidateMappings[ndx].size % atomSize == 0) ? config.invalidateMappings[ndx].size : config.invalidateMappings[ndx].size + (atomSize - (config.invalidateMappings[ndx].size % atomSize)),
                        };

                        ranges.push_back(range);
                    }

                    VK_CHECK(vkd.invalidateMappedMemoryRanges(device, static_cast<deUint32>(ranges.size()), &ranges[0]));
                }

                if (!compareAndLogBuffer(log, static_cast<size_t>(mappingSize), mapping, &reference[static_cast<size_t>(mappingOffset)]))
                    result.fail("Unexpected values read from mapped memory.");

                vkd.unmapMemory(device, *memory);
            }
        }
        catch (const tcu::TestError& error)
        {
            result.fail(error.getMessage());
        }
    }

    if (!atLeastOneTestPerformed)
        result.addResult(QP_TEST_RESULT_NOT_SUPPORTED, "No suitable memory kind found to perform test.");

    return tcu::TestStatus(result.getResult(), result.getMessage());
}

class MemoryMapping
{
public:
    MemoryMapping (const MemoryRange& range,
                   void* ptr,
                   ReferenceMemory& reference);

    void randomRead   (de::Random& rng);
    void randomWrite  (de::Random& rng);
    void randomModify (de::Random& rng);

    const MemoryRange& getRange (void) const { return m_range; }

private:
    MemoryRange      m_range;
    void*            m_ptr;
    ReferenceMemory& m_reference;
};

MemoryMapping::MemoryMapping (const MemoryRange& range,
                              void* ptr,
                              ReferenceMemory& reference)
    : m_range     (range)
    , m_ptr       (ptr)
    , m_reference (reference)
{
    DE_ASSERT(range.size > 0);
}

void MemoryMapping::randomRead (de::Random& rng)
{
    const size_t count = (size_t)rng.getInt(0, 100);

    for (size_t ndx = 0; ndx < count; ndx++)
    {
        const size_t  pos = (size_t)(rng.getUint64() % (deUint64)m_range.size);
        const deUint8 val = ((deUint8*)m_ptr)[pos];

        TCU_CHECK(m_reference.read((size_t)(m_range.offset + pos), val));
    }
}

void MemoryMapping::randomWrite (de::Random& rng)
{
    const size_t count = (size_t)rng.getInt(0, 100);

    for (size_t ndx = 0; ndx < count; ndx++)
    {
        const size_t  pos = (size_t)(rng.getUint64() % (deUint64)m_range.size);
        const deUint8 val = rng.getUint8();

        ((deUint8*)m_ptr)[pos] = val;
        m_reference.write((size_t)(m_range.offset + pos), val);
    }
}

void MemoryMapping::randomModify (de::Random& rng)
{
    const size_t count = (size_t)rng.getInt(0, 100);

    for (size_t ndx = 0; ndx < count; ndx++)
    {
        const size_t  pos  = (size_t)(rng.getUint64() % (deUint64)m_range.size);
        const deUint8 val  = ((deUint8*)m_ptr)[pos];
        const deUint8 mask = rng.getUint8();

        ((deUint8*)m_ptr)[pos] = val ^ mask;
        TCU_CHECK(m_reference.modifyXor((size_t)(m_range.offset + pos), val, mask));
    }
}

VkDeviceSize randomSize (de::Random& rng, VkDeviceSize atomSize, VkDeviceSize maxSize)
{
    const VkDeviceSize maxSizeInAtoms = maxSize / atomSize;

    DE_ASSERT(maxSizeInAtoms > 0);

    return maxSizeInAtoms > 1
            ? atomSize * (1 + (VkDeviceSize)(rng.getUint64() % (deUint64)maxSizeInAtoms))
            : atomSize;
}

VkDeviceSize randomOffset (de::Random& rng, VkDeviceSize atomSize, VkDeviceSize maxOffset)
{
    const VkDeviceSize maxOffsetInAtoms = maxOffset / atomSize;

    return maxOffsetInAtoms > 0
            ? atomSize * (VkDeviceSize)(rng.getUint64() % (deUint64)(maxOffsetInAtoms + 1))
            : 0;
}

void randomRanges (de::Random& rng, vector<VkMappedMemoryRange>& ranges, size_t count, VkDeviceMemory memory, VkDeviceSize minOffset, VkDeviceSize maxSize, VkDeviceSize atomSize)
{
    ranges.resize(count);

    for (size_t rangeNdx = 0; rangeNdx < count; rangeNdx++)
    {
        const VkDeviceSize size   = randomSize(rng, atomSize, maxSize);
        const VkDeviceSize offset = minOffset + randomOffset(rng, atomSize, maxSize - size);

        const VkMappedMemoryRange range =
        {
            VK_STRUCTURE_TYPE_MAPPED_MEMORY_RANGE,
            DE_NULL,

            memory,
            offset,
            size
        };
        ranges[rangeNdx] = range;
    }
}

class MemoryObject
{
public:
    MemoryObject (const DeviceInterface& vkd,
                  VkDevice device,
                  VkDeviceSize size,
                  deUint32 memoryTypeIndex,
                  VkDeviceSize atomSize,
                  VkDeviceSize memoryUsage,
                  VkDeviceSize referenceMemoryUsage);

    ~MemoryObject (void);

    MemoryMapping* mapRandom (const DeviceInterface& vkd, VkDevice device, de::Random& rng);
    void unmap (void);

    void randomFlush      (const DeviceInterface& vkd, VkDevice device, de::Random& rng);
    void randomInvalidate (const DeviceInterface& vkd, VkDevice device, de::Random& rng);

    VkDeviceSize   getSize    (void) const { return m_size; }
    MemoryMapping* getMapping (void)       { return m_mapping; }

    VkDeviceSize getMemoryUsage          (void) const { return m_memoryUsage; }
    VkDeviceSize getReferenceMemoryUsage (void) const { return m_referenceMemoryUsage; }
private:
    const DeviceInterface& m_vkd;
    const VkDevice         m_device;

    const deUint32     m_memoryTypeIndex;
    const VkDeviceSize m_size;
    const VkDeviceSize m_atomSize;
    const VkDeviceSize m_memoryUsage;
    const VkDeviceSize m_referenceMemoryUsage;

    Move<VkDeviceMemory> m_memory;

    MemoryMapping*  m_mapping;
    ReferenceMemory m_referenceMemory;
};

MemoryObject::MemoryObject (const DeviceInterface& vkd,
                            VkDevice device,
                            VkDeviceSize size,
                            deUint32 memoryTypeIndex,
                            VkDeviceSize atomSize,
                            VkDeviceSize memoryUsage,
                            VkDeviceSize referenceMemoryUsage)
    : m_vkd                  (vkd)
    , m_device               (device)
    , m_memoryTypeIndex      (memoryTypeIndex)
    , m_size                 (size)
    , m_atomSize             (atomSize)
    , m_memoryUsage          (memoryUsage)
    , m_referenceMemoryUsage (referenceMemoryUsage)
    , m_mapping              (DE_NULL)
    , m_referenceMemory      ((size_t)size, (size_t)m_atomSize)
{
    m_memory = allocMemory(m_vkd, m_device, m_size, m_memoryTypeIndex);
}

MemoryObject::~MemoryObject (void)
{
    delete m_mapping;
}

MemoryMapping* MemoryObject::mapRandom (const DeviceInterface& vkd, VkDevice device, de::Random& rng)
{
    const VkDeviceSize size   = randomSize(rng, m_atomSize, m_size);
    const VkDeviceSize offset = randomOffset(rng, m_atomSize, m_size - size);
    void*              ptr;

    DE_ASSERT(!m_mapping);

    VK_CHECK(vkd.mapMemory(device, *m_memory, offset, size, 0u, &ptr));
    TCU_CHECK(ptr);
    m_mapping = new MemoryMapping(MemoryRange(offset, size), ptr, m_referenceMemory);

    return m_mapping;
}

void MemoryObject::unmap (void)
{
    m_vkd.unmapMemory(m_device, *m_memory);

    delete m_mapping;
    m_mapping = DE_NULL;
}

void MemoryObject::randomFlush (const DeviceInterface& vkd, VkDevice device, de::Random& rng)
{
    const size_t                rangeCount = (size_t)rng.getInt(1, 10);
    vector<VkMappedMemoryRange> ranges     (rangeCount);

    randomRanges(rng, ranges, rangeCount, *m_memory, m_mapping->getRange().offset, m_mapping->getRange().size, m_atomSize);

    for (size_t rangeNdx = 0; rangeNdx < ranges.size(); rangeNdx++)
        m_referenceMemory.flush((size_t)ranges[rangeNdx].offset, (size_t)ranges[rangeNdx].size);

    VK_CHECK(vkd.flushMappedMemoryRanges(device, (deUint32)ranges.size(), ranges.empty() ? DE_NULL : &ranges[0]));
}

void MemoryObject::randomInvalidate (const DeviceInterface& vkd, VkDevice device, de::Random& rng)
{
    const size_t                rangeCount = (size_t)rng.getInt(1, 10);
    vector<VkMappedMemoryRange> ranges     (rangeCount);

    randomRanges(rng, ranges, rangeCount, *m_memory, m_mapping->getRange().offset, m_mapping->getRange().size, m_atomSize);

    for (size_t rangeNdx = 0; rangeNdx < ranges.size(); rangeNdx++)
        m_referenceMemory.invalidate((size_t)ranges[rangeNdx].offset, (size_t)ranges[rangeNdx].size);

    VK_CHECK(vkd.invalidateMappedMemoryRanges(device, (deUint32)ranges.size(), ranges.empty() ? DE_NULL : &ranges[0]));
}

enum
{
    MAX_MEMORY_USAGE_DIV = 2, // Use only 1/2 of each memory heap.
    MAX_MEMORY_ALLOC_DIV = 2, // Do not alloc more than 1/2 of available space.
};
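
// For example, with a 256 MiB heap these limits mean the tests never keep more than
// 128 MiB of it allocated at once, and allocateRandom() below further halves the
// computed maximum size of any single allocation.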

template<typename T>
void removeFirstEqual (vector<T>& vec, const T& val)
{
    for (size_t ndx = 0; ndx < vec.size(); ndx++)
    {
        if (vec[ndx] == val)
        {
            vec[ndx] = vec.back();
            vec.pop_back();
            return;
        }
    }
}

enum MemoryClass
{
    MEMORY_CLASS_SYSTEM = 0,
    MEMORY_CLASS_DEVICE,

    MEMORY_CLASS_LAST
};

// \todo [2016-04-20 pyry] Consider estimating memory fragmentation
class TotalMemoryTracker
{
public:
    TotalMemoryTracker (void)
    {
        std::fill(DE_ARRAY_BEGIN(m_usage), DE_ARRAY_END(m_usage), 0);
    }

    void allocate (MemoryClass memClass, VkDeviceSize size)
    {
        m_usage[memClass] += size;
    }

    void free (MemoryClass memClass, VkDeviceSize size)
    {
        DE_ASSERT(size <= m_usage[memClass]);
        m_usage[memClass] -= size;
    }

    VkDeviceSize getUsage (MemoryClass memClass) const
    {
        return m_usage[memClass];
    }

    VkDeviceSize getTotalUsage (void) const
    {
        VkDeviceSize total = 0;
        for (int ndx = 0; ndx < MEMORY_CLASS_LAST; ++ndx)
            total += getUsage((MemoryClass)ndx);
        return total;
    }

private:
    VkDeviceSize m_usage[MEMORY_CLASS_LAST];
};

VkDeviceSize getHostPageSize (void)
{
    return 4096;
}

class MemoryHeap
{
public:
    MemoryHeap (const VkMemoryHeap& heap,
                const vector<MemoryType>& memoryTypes,
                const PlatformMemoryLimits& memoryLimits,
                const VkDeviceSize nonCoherentAtomSize,
                TotalMemoryTracker& totalMemTracker)
        : m_heap                (heap)
        , m_memoryTypes         (memoryTypes)
        , m_limits              (memoryLimits)
        , m_nonCoherentAtomSize (nonCoherentAtomSize)
        , m_minAtomSize         (nonCoherentAtomSize)
        , m_totalMemTracker     (totalMemTracker)
        , m_usage               (0)
    {
    }

    ~MemoryHeap (void)
    {
        for (vector<MemoryObject*>::iterator iter = m_objects.begin(); iter != m_objects.end(); ++iter)
            delete *iter;
    }

    bool full  (void) const;
    bool empty (void) const
    {
        return m_usage == 0 && !full();
    }

    MemoryObject* allocateRandom (const DeviceInterface& vkd, VkDevice device, de::Random& rng);

    MemoryObject* getRandomObject (de::Random& rng) const
    {
        return rng.choose<MemoryObject*>(m_objects.begin(), m_objects.end());
    }

    void free (MemoryObject* object)
    {
        removeFirstEqual(m_objects, object);
        m_usage -= object->getMemoryUsage();
        m_totalMemTracker.free(MEMORY_CLASS_SYSTEM, object->getReferenceMemoryUsage());
        m_totalMemTracker.free(getMemoryClass(), object->getMemoryUsage());
        delete object;
    }

private:
    MemoryClass getMemoryClass (void) const
    {
        if ((m_heap.flags & VK_MEMORY_HEAP_DEVICE_LOCAL_BIT) != 0)
            return MEMORY_CLASS_DEVICE;
        else
            return MEMORY_CLASS_SYSTEM;
    }

    const VkMemoryHeap          m_heap;
    const vector<MemoryType>    m_memoryTypes;
    const PlatformMemoryLimits& m_limits;
    const VkDeviceSize          m_nonCoherentAtomSize;
    const VkDeviceSize          m_minAtomSize;
    TotalMemoryTracker&         m_totalMemTracker;

    VkDeviceSize          m_usage;
    vector<MemoryObject*> m_objects;
};

// Heap is full if there is not enough memory to allocate a minimal memory object.
bool MemoryHeap::full (void) const
{
    DE_ASSERT(m_usage <= m_heap.size/MAX_MEMORY_USAGE_DIV);

    const VkDeviceSize availableInHeap   = m_heap.size/MAX_MEMORY_USAGE_DIV - m_usage;
    const bool         isUMA             = m_limits.totalDeviceLocalMemory == 0;
    const MemoryClass  memClass          = getMemoryClass();
    const VkDeviceSize minAllocationSize = de::max(m_minAtomSize, memClass == MEMORY_CLASS_DEVICE ? m_limits.devicePageSize : getHostPageSize());
    // Memory required for the reference: one byte of data and one "defined" bit per byte,
    // plus one "flushed" bit per atom of m_minAtomSize bytes.
    const VkDeviceSize minReferenceSize  = minAllocationSize
                                         + divRoundUp<VkDeviceSize>(minAllocationSize, 8)
                                         + divRoundUp<VkDeviceSize>(minAllocationSize, m_minAtomSize * 8);
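    // E.g. with a 4096 B minimal allocation and a 128 B atom size this adds up to
    // 4096 + 512 + 4 = 4612 bytes of host memory for the reference alone.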

    if (isUMA)
    {
        const VkDeviceSize totalUsage  = m_totalMemTracker.getTotalUsage();
        const VkDeviceSize totalSysMem = (VkDeviceSize)m_limits.totalSystemMemory;

        DE_ASSERT(totalUsage <= totalSysMem);

        return (minAllocationSize + minReferenceSize) > (totalSysMem - totalUsage)
                || minAllocationSize > availableInHeap;
    }
    else
    {
        const VkDeviceSize totalUsage  = m_totalMemTracker.getTotalUsage();
        const VkDeviceSize totalSysMem = (VkDeviceSize)m_limits.totalSystemMemory;

        const VkDeviceSize totalMemClass = memClass == MEMORY_CLASS_SYSTEM
                                         ? m_limits.totalSystemMemory
                                         : m_limits.totalDeviceLocalMemory;
        const VkDeviceSize usedMemClass  = m_totalMemTracker.getUsage(memClass);

        DE_ASSERT(usedMemClass <= totalMemClass);

        return minAllocationSize > availableInHeap
                || minAllocationSize > (totalMemClass - usedMemClass)
                || minReferenceSize > (totalSysMem - totalUsage);
    }
}

MemoryObject* MemoryHeap::allocateRandom (const DeviceInterface& vkd, VkDevice device, de::Random& rng)
{
    pair<MemoryType, VkDeviceSize> memoryTypeMaxSizePair;

    // Pick a random memory type
    {
        vector<pair<MemoryType, VkDeviceSize> > memoryTypes;

        const VkDeviceSize availableInHeap = m_heap.size/MAX_MEMORY_USAGE_DIV - m_usage;
        const bool         isUMA           = m_limits.totalDeviceLocalMemory == 0;
        const MemoryClass  memClass        = getMemoryClass();

        // Collect the memory types that can be allocated from, and the maximum allocation size for each.
        // A memory type can only be allocated from if the minimal allocation fits in the available memory.
        for (size_t memoryTypeNdx = 0; memoryTypeNdx < m_memoryTypes.size(); memoryTypeNdx++)
        {
            const MemoryType   type                      = m_memoryTypes[memoryTypeNdx];
            const VkDeviceSize atomSize                  = m_nonCoherentAtomSize;
            const VkDeviceSize allocationSizeGranularity = de::max(atomSize, memClass == MEMORY_CLASS_DEVICE ? m_limits.devicePageSize : getHostPageSize());
            const VkDeviceSize minAllocationSize         = allocationSizeGranularity;
            const VkDeviceSize minReferenceSize          = minAllocationSize
                                                         + divRoundUp<VkDeviceSize>(minAllocationSize, 8)
                                                         + divRoundUp<VkDeviceSize>(minAllocationSize, atomSize * 8);

            if (isUMA)
            {
                // The maximum allocation size calculation is a little tricky, since the reference
                // memory requires a fraction of a bit per byte on top of the data itself.
                const VkDeviceSize totalUsage    = m_totalMemTracker.getTotalUsage();
                const VkDeviceSize totalSysMem   = (VkDeviceSize)m_limits.totalSystemMemory;
                const VkDeviceSize availableBits = (totalSysMem - totalUsage) * 8;
                // availableBits == maxAllocationSizeBits + maxAllocationReferenceSizeBits
                // maxAllocationReferenceSizeBits == maxAllocationSizeBits + (maxAllocationSizeBits / 8) + (maxAllocationSizeBits / atomSizeBits)
                // availableBits == maxAllocationSizeBits + maxAllocationSizeBits + (maxAllocationSizeBits / 8) + (maxAllocationSizeBits / atomSizeBits)
                // availableBits == 2 * maxAllocationSizeBits + (maxAllocationSizeBits / 8) + (maxAllocationSizeBits / atomSizeBits)
                // availableBits == (2 + 1/8 + 1/atomSizeBits) * maxAllocationSizeBits
                // 8 * availableBits == (16 + 1 + 8/atomSizeBits) * maxAllocationSizeBits
                // atomSizeBits * 8 * availableBits == (17 * atomSizeBits + 8) * maxAllocationSizeBits
                // maxAllocationSizeBits == atomSizeBits * 8 * availableBits / (17 * atomSizeBits + 8)
                // maxAllocationSizeBytes == maxAllocationSizeBits / 8
                // maxAllocationSizeBytes == atomSizeBits * availableBits / (17 * atomSizeBits + 8)
                // atomSizeBits = atomSize * 8
                // maxAllocationSizeBytes == atomSize * 8 * availableBits / (17 * atomSize * 8 + 8)
                // maxAllocationSizeBytes == atomSize * availableBits / (17 * atomSize + 1)
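                // Sanity check: with atomSize == 1 the formula reduces to availableBits / 18,
                // i.e. 18 bits of system memory (2 bytes for the data and the reference bytes,
                // plus 2 reference bits) are budgeted per allocated byte.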
                const VkDeviceSize maxAllocationSize = roundDownToMultiple(((atomSize * availableBits) / (17 * atomSize + 1)), allocationSizeGranularity);

                DE_ASSERT(totalUsage <= totalSysMem);
                DE_ASSERT(maxAllocationSize <= totalSysMem);

                if (minAllocationSize + minReferenceSize <= (totalSysMem - totalUsage) && minAllocationSize <= availableInHeap)
                {
                    DE_ASSERT(maxAllocationSize >= minAllocationSize);
                    memoryTypes.push_back(std::make_pair(type, maxAllocationSize));
                }
            }
            else
            {
                // The maximum allocation size calculation is a little tricky, since the reference
                // memory requires a fraction of a bit per byte on top of the data itself.
                const VkDeviceSize totalUsage  = m_totalMemTracker.getTotalUsage();
                const VkDeviceSize totalSysMem = (VkDeviceSize)m_limits.totalSystemMemory;

                const VkDeviceSize totalMemClass = memClass == MEMORY_CLASS_SYSTEM
                                                 ? m_limits.totalSystemMemory
                                                 : m_limits.totalDeviceLocalMemory;
                const VkDeviceSize usedMemClass  = m_totalMemTracker.getUsage(memClass);
                // availableRefBits = maxRefBits + maxRefBits/8 + maxRefBits/atomSizeBits
                // availableRefBits = maxRefBits * (1 + 1/8 + 1/atomSizeBits)
                // 8 * availableRefBits = maxRefBits * (8 + 1 + 8/atomSizeBits)
                // 8 * atomSizeBits * availableRefBits = maxRefBits * (9 * atomSizeBits + 8)
                // maxRefBits = 8 * atomSizeBits * availableRefBits / (9 * atomSizeBits + 8)
                // atomSizeBits = atomSize * 8
                // maxRefBits = 8 * atomSize * 8 * availableRefBits / (9 * atomSize * 8 + 8)
                // maxRefBits = atomSize * 8 * availableRefBits / (9 * atomSize + 1)
                // maxRefBytes = atomSize * availableRefBits / (9 * atomSize + 1)
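                // Sanity check: with atomSize == 1 the formula reduces to availableRefBits / 10,
                // i.e. 10 bits of reference data (1 byte plus 2 bits) per allocated byte.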
                const VkDeviceSize maxAllocationSize = roundDownToMultiple(de::min(totalMemClass - usedMemClass, (atomSize * 8 * (totalSysMem - totalUsage)) / (9 * atomSize + 1)), allocationSizeGranularity);

                DE_ASSERT(usedMemClass <= totalMemClass);

                if (minAllocationSize <= availableInHeap
                    && minAllocationSize <= (totalMemClass - usedMemClass)
                    && minReferenceSize <= (totalSysMem - totalUsage))
                {
                    DE_ASSERT(maxAllocationSize >= minAllocationSize);
                    memoryTypes.push_back(std::make_pair(type, maxAllocationSize));
                }
            }
        }

        memoryTypeMaxSizePair = rng.choose<pair<MemoryType, VkDeviceSize> >(memoryTypes.begin(), memoryTypes.end());
    }

    const MemoryType   type                      = memoryTypeMaxSizePair.first;
    const VkDeviceSize maxAllocationSize         = memoryTypeMaxSizePair.second / MAX_MEMORY_ALLOC_DIV;
    const VkDeviceSize atomSize                  = m_nonCoherentAtomSize;
    const VkDeviceSize allocationSizeGranularity = de::max(atomSize, getMemoryClass() == MEMORY_CLASS_DEVICE ? m_limits.devicePageSize : getHostPageSize());
    const VkDeviceSize size                      = randomSize(rng, atomSize, maxAllocationSize);
    const VkDeviceSize memoryUsage               = roundUpToMultiple(size, allocationSizeGranularity);
    const VkDeviceSize referenceMemoryUsage      = size + divRoundUp<VkDeviceSize>(size, 8) + divRoundUp<VkDeviceSize>(size / atomSize, 8);

    DE_ASSERT(size <= maxAllocationSize);

    MemoryObject* const object = new MemoryObject(vkd, device, size, type.index, atomSize, memoryUsage, referenceMemoryUsage);

    m_usage += memoryUsage;
    m_totalMemTracker.allocate(getMemoryClass(), memoryUsage);
    m_totalMemTracker.allocate(MEMORY_CLASS_SYSTEM, referenceMemoryUsage);
    m_objects.push_back(object);

    return object;
}

size_t getMemoryObjectSystemSize (Context& context)
{
    return computeDeviceMemorySystemMemFootprint(context.getDeviceInterface(), context.getDevice())
           + sizeof(MemoryObject)
           + sizeof(de::SharedPtr<MemoryObject>);
}

size_t getMemoryMappingSystemSize (void)
{
    return sizeof(MemoryMapping) + sizeof(de::SharedPtr<MemoryMapping>);
}

class RandomMemoryMappingInstance : public TestInstance
{
public:
    RandomMemoryMappingInstance (Context& context, deUint32 seed)
        : TestInstance              (context)
        , m_memoryObjectSysMemSize  (getMemoryObjectSystemSize(context))
        , m_memoryMappingSysMemSize (getMemoryMappingSystemSize())
        , m_memoryLimits            (getMemoryLimits(context.getTestContext().getPlatform().getVulkanPlatform()))
        , m_rng                     (seed)
        , m_opNdx                   (0)
    {
        const VkPhysicalDevice                 physicalDevice      = context.getPhysicalDevice();
        const InstanceInterface&               vki                 = context.getInstanceInterface();
        const VkPhysicalDeviceMemoryProperties memoryProperties    = getPhysicalDeviceMemoryProperties(vki, physicalDevice);
        const VkDeviceSize                     nonCoherentAtomSize = context.getDeviceProperties().limits.nonCoherentAtomSize;

        // Initialize heaps
        {
            vector<vector<MemoryType> > memoryTypes (memoryProperties.memoryHeapCount);

            for (deUint32 memoryTypeNdx = 0; memoryTypeNdx < memoryProperties.memoryTypeCount; memoryTypeNdx++)
            {
                if (memoryProperties.memoryTypes[memoryTypeNdx].propertyFlags & VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT)
                    memoryTypes[memoryProperties.memoryTypes[memoryTypeNdx].heapIndex].push_back(MemoryType(memoryTypeNdx, memoryProperties.memoryTypes[memoryTypeNdx]));
            }

            for (deUint32 heapIndex = 0; heapIndex < memoryProperties.memoryHeapCount; heapIndex++)
            {
                const VkMemoryHeap heapInfo = memoryProperties.memoryHeaps[heapIndex];

                if (!memoryTypes[heapIndex].empty())
                {
                    const de::SharedPtr<MemoryHeap> heap (new MemoryHeap(heapInfo, memoryTypes[heapIndex], m_memoryLimits, nonCoherentAtomSize, m_totalMemTracker));

                    TCU_CHECK_INTERNAL(!heap->full());

                    m_memoryHeaps.push_back(heap);
                }
            }
        }
    }

    ~RandomMemoryMappingInstance (void)
    {
    }

    tcu::TestStatus iterate (void)
    {
        const size_t opCount                    = 100;
        const float  memoryOpProbability        = 0.5f;     // 0.50
        const float  flushInvalidateProbability = 0.4f;     // 0.20
        const float  mapProbability             = 0.50f;    // 0.15
        const float  unmapProbability           = 0.25f;    // 0.075

        const float  allocProbability           = 0.75f;    // Versus free

        const VkDevice         device = m_context.getDevice();
        const DeviceInterface& vkd    = m_context.getDeviceInterface();

        const VkDeviceSize sysMemUsage = (m_memoryLimits.totalDeviceLocalMemory == 0)
                                       ? m_totalMemTracker.getTotalUsage()
                                       : m_totalMemTracker.getUsage(MEMORY_CLASS_SYSTEM);

        if (!m_memoryMappings.empty() && m_rng.getFloat() < memoryOpProbability)
        {
            // Perform operations on mapped memory
            MemoryMapping* const mapping = m_rng.choose<MemoryMapping*>(m_memoryMappings.begin(), m_memoryMappings.end());

            enum Op
            {
                OP_READ = 0,
                OP_WRITE,
                OP_MODIFY,
                OP_LAST
            };

            const Op op = (Op)(m_rng.getUint32() % OP_LAST);

            switch (op)
            {
                case OP_READ:
                    mapping->randomRead(m_rng);
                    break;

                case OP_WRITE:
                    mapping->randomWrite(m_rng);
                    break;

                case OP_MODIFY:
                    mapping->randomModify(m_rng);
                    break;

                default:
                    DE_FATAL("Invalid operation");
            }
        }
        else if (!m_mappedMemoryObjects.empty() && m_rng.getFloat() < flushInvalidateProbability)
        {
            MemoryObject* const object = m_rng.choose<MemoryObject*>(m_mappedMemoryObjects.begin(), m_mappedMemoryObjects.end());

            if (m_rng.getBool())
                object->randomFlush(vkd, device, m_rng);
            else
                object->randomInvalidate(vkd, device, m_rng);
        }
        else if (!m_mappedMemoryObjects.empty() && m_rng.getFloat() < unmapProbability)
        {
            // Unmap memory object
            MemoryObject* const object = m_rng.choose<MemoryObject*>(m_mappedMemoryObjects.begin(), m_mappedMemoryObjects.end());

            // Remove mapping
            removeFirstEqual(m_memoryMappings, object->getMapping());

            object->unmap();
            removeFirstEqual(m_mappedMemoryObjects, object);
            m_nonMappedMemoryObjects.push_back(object);

            m_totalMemTracker.free(MEMORY_CLASS_SYSTEM, (VkDeviceSize)m_memoryMappingSysMemSize);
        }
        else if (!m_nonMappedMemoryObjects.empty() &&
                 (m_rng.getFloat() < mapProbability) &&
                 (sysMemUsage+m_memoryMappingSysMemSize <= (VkDeviceSize)m_memoryLimits.totalSystemMemory))
        {
            // Map memory object
            MemoryObject* const object  = m_rng.choose<MemoryObject*>(m_nonMappedMemoryObjects.begin(), m_nonMappedMemoryObjects.end());
            MemoryMapping*      mapping = object->mapRandom(vkd, device, m_rng);

            m_memoryMappings.push_back(mapping);
            m_mappedMemoryObjects.push_back(object);
            removeFirstEqual(m_nonMappedMemoryObjects, object);

            m_totalMemTracker.allocate(MEMORY_CLASS_SYSTEM, (VkDeviceSize)m_memoryMappingSysMemSize);
        }
        else
        {
            // Sort heaps based on capacity (full or not)
            vector<MemoryHeap*> nonFullHeaps;
            vector<MemoryHeap*> nonEmptyHeaps;

            if (sysMemUsage+m_memoryObjectSysMemSize <= (VkDeviceSize)m_memoryLimits.totalSystemMemory)
            {
                // For the duration of the sorting, reserve MemoryObject space from system memory
                m_totalMemTracker.allocate(MEMORY_CLASS_SYSTEM, (VkDeviceSize)m_memoryObjectSysMemSize);

                for (vector<de::SharedPtr<MemoryHeap> >::const_iterator heapIter = m_memoryHeaps.begin();
                     heapIter != m_memoryHeaps.end();
                     ++heapIter)
                {
                    if (!(*heapIter)->full())
                        nonFullHeaps.push_back(heapIter->get());

                    if (!(*heapIter)->empty())
                        nonEmptyHeaps.push_back(heapIter->get());
                }

                m_totalMemTracker.free(MEMORY_CLASS_SYSTEM, (VkDeviceSize)m_memoryObjectSysMemSize);
            }
            else
            {
                // Not possible to even allocate a MemoryObject from system memory; look for non-empty heaps
                for (vector<de::SharedPtr<MemoryHeap> >::const_iterator heapIter = m_memoryHeaps.begin();
                     heapIter != m_memoryHeaps.end();
                     ++heapIter)
                {
                    if (!(*heapIter)->empty())
                        nonEmptyHeaps.push_back(heapIter->get());
                }
            }

            if (!nonFullHeaps.empty() && (nonEmptyHeaps.empty() || m_rng.getFloat() < allocProbability))
            {
                // Reserve MemoryObject from sys mem first
                m_totalMemTracker.allocate(MEMORY_CLASS_SYSTEM, (VkDeviceSize)m_memoryObjectSysMemSize);

                // Allocate more memory objects
                MemoryHeap* const   heap   = m_rng.choose<MemoryHeap*>(nonFullHeaps.begin(), nonFullHeaps.end());
                MemoryObject* const object = heap->allocateRandom(vkd, device, m_rng);

                m_nonMappedMemoryObjects.push_back(object);
            }
            else
            {
                // Free memory objects
                MemoryHeap* const   heap   = m_rng.choose<MemoryHeap*>(nonEmptyHeaps.begin(), nonEmptyHeaps.end());
                MemoryObject* const object = heap->getRandomObject(m_rng);

                // Remove mapping
                if (object->getMapping())
                {
                    removeFirstEqual(m_memoryMappings, object->getMapping());
                    m_totalMemTracker.free(MEMORY_CLASS_SYSTEM, (VkDeviceSize)m_memoryMappingSysMemSize);
                }

                removeFirstEqual(m_mappedMemoryObjects, object);
                removeFirstEqual(m_nonMappedMemoryObjects, object);

                heap->free(object);
                m_totalMemTracker.free(MEMORY_CLASS_SYSTEM, (VkDeviceSize)m_memoryObjectSysMemSize);
            }
        }

        m_opNdx += 1;
        if (m_opNdx == opCount)
            return tcu::TestStatus::pass("Pass");
        else
            return tcu::TestStatus::incomplete();
    }

private:
    const size_t               m_memoryObjectSysMemSize;
    const size_t               m_memoryMappingSysMemSize;
    const PlatformMemoryLimits m_memoryLimits;

    de::Random m_rng;
    size_t     m_opNdx;

    TotalMemoryTracker                 m_totalMemTracker;
    vector<de::SharedPtr<MemoryHeap> > m_memoryHeaps;

    vector<MemoryObject*>  m_mappedMemoryObjects;
    vector<MemoryObject*>  m_nonMappedMemoryObjects;
    vector<MemoryMapping*> m_memoryMappings;
};

enum Op
{
    OP_NONE = 0,

    OP_FLUSH,
    OP_SUB_FLUSH,
    OP_SUB_FLUSH_SEPARATE,
    OP_SUB_FLUSH_OVERLAPPING,

    OP_INVALIDATE,
    OP_SUB_INVALIDATE,
    OP_SUB_INVALIDATE_SEPARATE,
    OP_SUB_INVALIDATE_OVERLAPPING,

    OP_REMAP,

    OP_LAST
};
1525
TestConfig subMappedConfig (VkDeviceSize		allocationSize,
							const MemoryRange&	mapping,
							Op					op,
							deUint32			seed,
							AllocationKind		allocationKind)
{
	TestConfig config;

	config.allocationSize	= allocationSize;
	config.seed				= seed;
	config.mapping			= mapping;
	config.remap			= false;
	config.allocationKind	= allocationKind;

	switch (op)
	{
		case OP_NONE:
			break;

		case OP_REMAP:
			config.remap = true;
			break;

		case OP_FLUSH:
			config.flushMappings = vector<MemoryRange>(1, MemoryRange(mapping.offset, mapping.size));
			break;

		case OP_SUB_FLUSH:
			DE_ASSERT(mapping.size / 4 > 0);

			config.flushMappings = vector<MemoryRange>(1, MemoryRange(mapping.offset + mapping.size / 4, mapping.size / 2));
			break;

		case OP_SUB_FLUSH_SEPARATE:
			DE_ASSERT(mapping.size / 2 > 0);

			config.flushMappings.push_back(MemoryRange(mapping.offset + mapping.size / 2, mapping.size - (mapping.size / 2)));
			config.flushMappings.push_back(MemoryRange(mapping.offset, mapping.size / 2));

			break;

		case OP_SUB_FLUSH_OVERLAPPING:
			DE_ASSERT((mapping.size / 3) > 0);

			config.flushMappings.push_back(MemoryRange(mapping.offset + mapping.size / 3, mapping.size - (mapping.size / 2)));
			config.flushMappings.push_back(MemoryRange(mapping.offset, (2 * mapping.size) / 3));

			break;

		case OP_INVALIDATE:
			config.flushMappings = vector<MemoryRange>(1, MemoryRange(mapping.offset, mapping.size));
			config.invalidateMappings = vector<MemoryRange>(1, MemoryRange(mapping.offset, mapping.size));
			break;

		case OP_SUB_INVALIDATE:
			DE_ASSERT(mapping.size / 4 > 0);

			config.flushMappings = vector<MemoryRange>(1, MemoryRange(mapping.offset + mapping.size / 4, mapping.size / 2));
			config.invalidateMappings = vector<MemoryRange>(1, MemoryRange(mapping.offset + mapping.size / 4, mapping.size / 2));
			break;

		case OP_SUB_INVALIDATE_SEPARATE:
			DE_ASSERT(mapping.size / 2 > 0);

			config.flushMappings.push_back(MemoryRange(mapping.offset + mapping.size / 2, mapping.size - (mapping.size / 2)));
			config.flushMappings.push_back(MemoryRange(mapping.offset, mapping.size / 2));

			config.invalidateMappings.push_back(MemoryRange(mapping.offset + mapping.size / 2, mapping.size - (mapping.size / 2)));
			config.invalidateMappings.push_back(MemoryRange(mapping.offset, mapping.size / 2));

			break;

		case OP_SUB_INVALIDATE_OVERLAPPING:
			DE_ASSERT((mapping.size / 3) > 0);

			config.flushMappings.push_back(MemoryRange(mapping.offset + mapping.size / 3, mapping.size - (mapping.size / 2)));
			config.flushMappings.push_back(MemoryRange(mapping.offset, (2 * mapping.size) / 3));

			config.invalidateMappings.push_back(MemoryRange(mapping.offset + mapping.size / 3, mapping.size - (mapping.size / 2)));
			config.invalidateMappings.push_back(MemoryRange(mapping.offset, (2 * mapping.size) / 3));

			break;

		default:
			DE_FATAL("Unknown Op");
			return TestConfig();
	}
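
	// If a derived range would extend past the end of the mapping,
	// flush/invalidate with VK_WHOLE_SIZE instead, which the Vulkan spec
	// defines as "from offset to the end of the current mapping".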
	for (size_t ndx = 0; ndx < config.flushMappings.size(); ndx++)
	{
		if (config.flushMappings[ndx].offset + config.flushMappings[ndx].size > mapping.size)
			config.flushMappings[ndx].size = VK_WHOLE_SIZE;
	}

	for (size_t ndx = 0; ndx < config.invalidateMappings.size(); ndx++)
	{
		if (config.invalidateMappings[ndx].offset + config.invalidateMappings[ndx].size > mapping.size)
			config.invalidateMappings[ndx].size = VK_WHOLE_SIZE;
	}

	return config;
}

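// A full mapping is just the degenerate sub-mapping that covers the whole
// allocation, i.e. MemoryRange(0, allocationSize).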
TestConfig fullMappedConfig (VkDeviceSize	allocationSize,
							 Op				op,
							 deUint32		seed,
							 AllocationKind	allocationKind)
{
	return subMappedConfig(allocationSize, MemoryRange(0, allocationSize), op, seed, allocationKind);
}

} // anonymous

tcu::TestCaseGroup* createMappingTests (tcu::TestContext& testCtx)
{
	de::MovePtr<tcu::TestCaseGroup>	group		(new tcu::TestCaseGroup(testCtx, "mapping", "Memory mapping tests."));
	de::MovePtr<tcu::TestCaseGroup>	dedicated	(new tcu::TestCaseGroup(testCtx, "dedicated_alloc", "Dedicated memory mapping tests."));
	de::MovePtr<tcu::TestCaseGroup>	sets[]		=
	{
		de::MovePtr<tcu::TestCaseGroup>	(new tcu::TestCaseGroup(testCtx, "suballocation", "Suballocated memory mapping tests.")),
		de::MovePtr<tcu::TestCaseGroup>	(new tcu::TestCaseGroup(testCtx, "buffer", "Buffer dedicated memory mapping tests.")),
		de::MovePtr<tcu::TestCaseGroup>	(new tcu::TestCaseGroup(testCtx, "image", "Image dedicated memory mapping tests."))
	};
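	// \note sets[] is indexed with AllocationKind: [0] suballocation,
	// [1] dedicated buffer, [2] dedicated image.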

	const VkDeviceSize allocationSizes[] =
	{
		33, 257, 4087, 8095, 1*1024*1024 + 1
	};

	const VkDeviceSize offsets[] =
	{
		0, 17, 129, 255, 1025, 32*1024+1
	};

	const VkDeviceSize sizes[] =
	{
		31, 255, 1025, 4085, 1*1024*1024 - 1
	};
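	// \note The values above are deliberately not powers of two, presumably so
	// that allocations and mapped ranges rarely coincide with common alignment
	// boundaries.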

	const struct
	{
		const Op			op;
		const char* const	name;
	} ops[] =
	{
		{ OP_NONE,						"simple"					},
		{ OP_REMAP,						"remap"						},
		{ OP_FLUSH,						"flush"						},
		{ OP_SUB_FLUSH,					"subflush"					},
		{ OP_SUB_FLUSH_SEPARATE,		"subflush_separate"			},
		{ OP_SUB_FLUSH_OVERLAPPING,		"subflush_overlapping"		},

		{ OP_INVALIDATE,				"invalidate"				},
		{ OP_SUB_INVALIDATE,			"subinvalidate"				},
		{ OP_SUB_INVALIDATE_SEPARATE,	"subinvalidate_separate"	},
		{ OP_SUB_INVALIDATE_OVERLAPPING,"subinvalidate_overlapping"	}
	};

	// .full
	for (size_t allocationKindNdx = 0; allocationKindNdx < ALLOCATION_KIND_LAST; allocationKindNdx++)
	{
		de::MovePtr<tcu::TestCaseGroup> fullGroup (new tcu::TestCaseGroup(testCtx, "full", "Map memory completely."));

		for (size_t allocationSizeNdx = 0; allocationSizeNdx < DE_LENGTH_OF_ARRAY(allocationSizes); allocationSizeNdx++)
		{
			const VkDeviceSize				allocationSize		= allocationSizes[allocationSizeNdx];
			de::MovePtr<tcu::TestCaseGroup>	allocationSizeGroup	(new tcu::TestCaseGroup(testCtx, de::toString(allocationSize).c_str(), ""));

			for (size_t opNdx = 0; opNdx < DE_LENGTH_OF_ARRAY(ops); opNdx++)
			{
				const Op			op		= ops[opNdx].op;
				const char* const	name	= ops[opNdx].name;
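				// \note opNdx * allocationSizeNdx is zero whenever either index
				// is zero, so several cases share a seed; the seeds appear to
				// only need to be deterministic, not unique.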
				const deUint32		seed	= (deUint32)(opNdx * allocationSizeNdx);
				const TestConfig	config	= fullMappedConfig(allocationSize, op, seed, static_cast<AllocationKind>(allocationKindNdx));

				addFunctionCase(allocationSizeGroup.get(), name, name, testMemoryMapping, config);
			}

			fullGroup->addChild(allocationSizeGroup.release());
		}

		sets[allocationKindNdx]->addChild(fullGroup.release());
	}

	// .sub
	for (size_t allocationKindNdx = 0; allocationKindNdx < ALLOCATION_KIND_LAST; allocationKindNdx++)
	{
		de::MovePtr<tcu::TestCaseGroup> subGroup (new tcu::TestCaseGroup(testCtx, "sub", "Map part of the memory."));

		for (size_t allocationSizeNdx = 0; allocationSizeNdx < DE_LENGTH_OF_ARRAY(allocationSizes); allocationSizeNdx++)
		{
			const VkDeviceSize				allocationSize		= allocationSizes[allocationSizeNdx];
			de::MovePtr<tcu::TestCaseGroup>	allocationSizeGroup	(new tcu::TestCaseGroup(testCtx, de::toString(allocationSize).c_str(), ""));

			for (size_t offsetNdx = 0; offsetNdx < DE_LENGTH_OF_ARRAY(offsets); offsetNdx++)
			{
				const VkDeviceSize offset = offsets[offsetNdx];

				if (offset >= allocationSize)
					continue;

				de::MovePtr<tcu::TestCaseGroup> offsetGroup (new tcu::TestCaseGroup(testCtx, ("offset_" + de::toString(offset)).c_str(), ""));

				for (size_t sizeNdx = 0; sizeNdx < DE_LENGTH_OF_ARRAY(sizes); sizeNdx++)
				{
					const VkDeviceSize size = sizes[sizeNdx];

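					// Skip ranges that spill past the end of the allocation, and
					// the whole-allocation range already covered by the .full group.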
					if (offset + size > allocationSize)
						continue;

					if (offset == 0 && size == allocationSize)
						continue;

					de::MovePtr<tcu::TestCaseGroup> sizeGroup (new tcu::TestCaseGroup(testCtx, ("size_" + de::toString(size)).c_str(), ""));

					for (size_t opNdx = 0; opNdx < DE_LENGTH_OF_ARRAY(ops); opNdx++)
					{
						const deUint32		seed	= (deUint32)(opNdx * allocationSizeNdx);
						const Op			op		= ops[opNdx].op;
						const char* const	name	= ops[opNdx].name;
						const TestConfig	config	= subMappedConfig(allocationSize, MemoryRange(offset, size), op, seed, static_cast<AllocationKind>(allocationKindNdx));

						addFunctionCase(sizeGroup.get(), name, name, testMemoryMapping, config);
					}

					offsetGroup->addChild(sizeGroup.release());
				}

				allocationSizeGroup->addChild(offsetGroup.release());
			}

			subGroup->addChild(allocationSizeGroup.release());
		}

		sets[allocationKindNdx]->addChild(subGroup.release());
	}

	// .random
	{
		de::MovePtr<tcu::TestCaseGroup>	randomGroup	(new tcu::TestCaseGroup(testCtx, "random", "Random memory mapping tests."));
		de::Random						rng			(3927960301u);

		for (size_t ndx = 0; ndx < 100; ndx++)
		{
			const deUint32		seed	= rng.getUint32();
			const std::string	name	= de::toString(ndx);

			randomGroup->addChild(new InstanceFactory1<RandomMemoryMappingInstance, deUint32>(testCtx, tcu::NODETYPE_SELF_VALIDATE, name, "Random case", seed));
		}

		sets[static_cast<deUint32>(ALLOCATION_KIND_SUBALLOCATED)]->addChild(randomGroup.release());
	}

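	// Final hierarchy: "mapping" holds the suballocation set directly, while
	// the dedicated buffer/image sets are nested under "mapping.dedicated_alloc".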
	group->addChild(sets[0].release());
	dedicated->addChild(sets[1].release());
	dedicated->addChild(sets[2].release());
	group->addChild(dedicated.release());

	return group.release();
}

} // memory
} // vkt