/*------------------------------------------------------------------------
 * Vulkan Conformance Tests
 * ------------------------
 *
 * Copyright (c) 2016 The Khronos Group Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*!
 * \file
 * \brief Synchronization primitive tests with multi queue
 *//*--------------------------------------------------------------------*/

#include "vktSynchronizationOperationMultiQueueTests.hpp"
#include "vkDefs.hpp"
#include "vktTestCase.hpp"
#include "vktTestCaseUtil.hpp"
#include "vkRef.hpp"
#include "vkRefUtil.hpp"
#include "vkMemUtil.hpp"
#include "vkBarrierUtil.hpp"
#include "vkQueryUtil.hpp"
#include "vkTypeUtil.hpp"
#include "vkPlatform.hpp"
#include "vkCmdUtil.hpp"
#include "deUniquePtr.hpp"
#include "tcuTestLog.hpp"
#include "vktSynchronizationUtil.hpp"
#include "vktSynchronizationOperation.hpp"
#include "vktSynchronizationOperationTestData.hpp"
#include "vktSynchronizationOperationResources.hpp"
#include "vktTestGroupUtil.hpp"

namespace vkt
{
namespace synchronization
{
namespace
{
using namespace vk;
using de::MovePtr;
using de::UniquePtr;

enum QueueType
{
    QUEUETYPE_WRITE,
    QUEUETYPE_READ
};

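// One write queue and one read queue, together with the indices of the families they were taken from.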
struct QueuePair
{
    QueuePair (const deUint32 familyWrite, const deUint32 familyRead, const VkQueue write, const VkQueue read)
        : familyIndexWrite (familyWrite)
        , familyIndexRead  (familyRead)
        , queueWrite       (write)
        , queueRead        (read)
    {}

    deUint32 familyIndexWrite;
    deUint32 familyIndexRead;
    VkQueue  queueWrite;
    VkQueue  queueRead;
};

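// Graphics and compute queues implicitly support transfer operations, so count them as
// transfer-capable when matching the flags required by an operation.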
bool checkQueueFlags (VkQueueFlags availableFlags, const VkQueueFlags neededFlags)
{
    if ((availableFlags & (VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT)) != 0)
        availableFlags |= VK_QUEUE_TRANSFER_BIT;

    return (availableFlags & neededFlags) != 0;
}

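// Creates a logical device exposing up to two queues from every available queue family and
// hands out pairs of distinct write/read queues to the test instances.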
class MultiQueues
{
    struct QueueData
    {
        VkQueueFlags         flags;
        std::vector<VkQueue> queue;
    };

public:
    MultiQueues (const Context& context)
    {
        const InstanceInterface&                   instance              = context.getInstanceInterface();
        const VkPhysicalDevice                     physicalDevice        = context.getPhysicalDevice();
        const std::vector<VkQueueFamilyProperties> queueFamilyProperties = getPhysicalDeviceQueueFamilyProperties(instance, physicalDevice);

        for (deUint32 queuePropertiesNdx = 0; queuePropertiesNdx < queueFamilyProperties.size(); ++queuePropertiesNdx)
        {
            addQueueIndex(queuePropertiesNdx,
                          std::min(2u, queueFamilyProperties[queuePropertiesNdx].queueCount),
                          queueFamilyProperties[queuePropertiesNdx].queueFlags);
        }

        std::vector<VkDeviceQueueCreateInfo> queueInfos;
        const float queuePriorities[2] = { 1.0f, 1.0f }; // get max 2 queues from one family

        for (std::map<deUint32, QueueData>::iterator it = m_queues.begin(); it != m_queues.end(); ++it)
        {
            const VkDeviceQueueCreateInfo queueInfo =
            {
                VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,     // VkStructureType             sType;
                DE_NULL,                                        // const void*                 pNext;
                (VkDeviceQueueCreateFlags)0u,                   // VkDeviceQueueCreateFlags    flags;
                it->first,                                      // deUint32                    queueFamilyIndex;
                static_cast<deUint32>(it->second.queue.size()), // deUint32                    queueCount;
                &queuePriorities[0]                             // const float*                pQueuePriorities;
            };
            queueInfos.push_back(queueInfo);
        }

        {
            const VkDeviceCreateInfo deviceInfo =
            {
                VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,           // VkStructureType                    sType;
                DE_NULL,                                        // const void*                        pNext;
                0u,                                             // VkDeviceCreateFlags                flags;
                static_cast<deUint32>(queueInfos.size()),       // deUint32                           queueCreateInfoCount;
                &queueInfos[0],                                 // const VkDeviceQueueCreateInfo*     pQueueCreateInfos;
                0u,                                             // deUint32                           enabledLayerCount;
                DE_NULL,                                        // const char* const*                 ppEnabledLayerNames;
                0u,                                             // deUint32                           enabledExtensionCount;
                DE_NULL,                                        // const char* const*                 ppEnabledExtensionNames;
                &context.getDeviceFeatures()                    // const VkPhysicalDeviceFeatures*    pEnabledFeatures;
            };

            m_logicalDevice = createDevice(context.getPlatformInterface(), context.getInstance(), instance, physicalDevice, &deviceInfo);
            m_deviceDriver  = MovePtr<DeviceDriver>(new DeviceDriver(context.getPlatformInterface(), context.getInstance(), *m_logicalDevice));
            m_allocator     = MovePtr<Allocator>(new SimpleAllocator(*m_deviceDriver, *m_logicalDevice, getPhysicalDeviceMemoryProperties(instance, physicalDevice)));

            for (std::map<deUint32, QueueData>::iterator it = m_queues.begin(); it != m_queues.end(); ++it)
            for (int queueNdx = 0; queueNdx < static_cast<int>(it->second.queue.size()); ++queueNdx)
                m_deviceDriver->getDeviceQueue(*m_logicalDevice, it->first, queueNdx, &it->second.queue[queueNdx]);
        }
    }

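    // Record that up to 'count' queues will be created from the given family, together with its capability flags.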
    void addQueueIndex (const deUint32 queueFamilyIndex, const deUint32 count, const VkQueueFlags flags)
    {
        QueueData dataToPush;
        dataToPush.flags = flags;
        dataToPush.queue.resize(count);
        m_queues[queueFamilyIndex] = dataToPush;
    }

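    // For every combination of a write-capable and a read-capable family, return the first pair of
    // distinct queues. Throws NotSupportedError if no such pair exists.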
    std::vector<QueuePair> getQueuesPairs (const VkQueueFlags flagsWrite, const VkQueueFlags flagsRead)
    {
        std::map<deUint32, QueueData> queuesWrite;
        std::map<deUint32, QueueData> queuesRead;
        std::vector<QueuePair>        queuesPairs;

        for (std::map<deUint32, QueueData>::iterator it = m_queues.begin(); it != m_queues.end(); ++it)
        {
            const bool writeQueue = checkQueueFlags(it->second.flags, flagsWrite);
            const bool readQueue  = checkQueueFlags(it->second.flags, flagsRead);

            if (!(writeQueue || readQueue))
                continue;

            if (writeQueue && readQueue)
            {
                queuesWrite[it->first] = it->second;
                queuesRead[it->first]  = it->second;
            }
            else if (writeQueue)
                queuesWrite[it->first] = it->second;
            else if (readQueue)
                queuesRead[it->first] = it->second;
        }

        for (std::map<deUint32, QueueData>::iterator write = queuesWrite.begin(); write != queuesWrite.end(); ++write)
        for (std::map<deUint32, QueueData>::iterator read  = queuesRead.begin();  read  != queuesRead.end();  ++read)
        {
            const int writeSize = static_cast<int>(write->second.queue.size());
            const int readSize  = static_cast<int>(read->second.queue.size());

            for (int writeNdx = 0; writeNdx < writeSize; ++writeNdx)
            for (int readNdx  = 0; readNdx  < readSize;  ++readNdx)
            {
                if (write->second.queue[writeNdx] != read->second.queue[readNdx])
                {
                    queuesPairs.push_back(QueuePair(write->first, read->first, write->second.queue[writeNdx], read->second.queue[readNdx]));
                    writeNdx = readNdx = std::max(writeSize, readSize); // exit from the loops
                }
            }
        }

        if (queuesPairs.empty())
            TCU_THROW(NotSupportedError, "Queue not found");

        return queuesPairs;
    }

    VkDevice getDevice (void) const
    {
        return *m_logicalDevice;
    }

    const DeviceInterface& getDeviceInterface (void) const
    {
        return *m_deviceDriver;
    }

    Allocator& getAllocator (void)
    {
        return *m_allocator;
    }

private:
    Move<VkDevice>                m_logicalDevice;
    MovePtr<DeviceDriver>         m_deviceDriver;
    MovePtr<Allocator>            m_allocator;
    std::map<deUint32, QueueData> m_queues;
};

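// Records the pipeline barrier that guards the write -> read hazard on the shared resource.
// When the write and read queues come from different families and the resource uses
// VK_SHARING_MODE_EXCLUSIVE, the same barrier (with source/destination family indices filled in)
// is recorded on both queues: the writing queue releases ownership and the reading queue
// (secondQueue == true) acquires it.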
void createBarrierMultiQueue (const DeviceInterface& vk,
                              const VkCommandBuffer& cmdBuffer,
                              const SyncInfo&        writeSync,
                              const SyncInfo&        readSync,
                              const Resource&        resource,
                              const deUint32         writeFamily,
                              const deUint32         readFamily,
                              const VkSharingMode    sharingMode,
                              const bool             secondQueue = false)
{
    if (resource.getType() == RESOURCE_TYPE_IMAGE)
    {
        VkImageMemoryBarrier barrier = makeImageMemoryBarrier(secondQueue ? 0u : writeSync.accessMask, !secondQueue ? 0u : readSync.accessMask,
            writeSync.imageLayout, readSync.imageLayout, resource.getImage().handle, resource.getImage().subresourceRange);

        if (writeFamily != readFamily && VK_SHARING_MODE_EXCLUSIVE == sharingMode)
        {
            barrier.srcQueueFamilyIndex = writeFamily;
            barrier.dstQueueFamilyIndex = readFamily;
            vk.cmdPipelineBarrier(cmdBuffer, secondQueue ? VkPipelineStageFlags(VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT) : writeSync.stageMask,
                !secondQueue ? VkPipelineStageFlags(VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT) : readSync.stageMask, (VkDependencyFlags)0, 0u, (const VkMemoryBarrier*)DE_NULL,
                0u, (const VkBufferMemoryBarrier*)DE_NULL, 1u, &barrier);
        }
        else if (!secondQueue)
        {
            vk.cmdPipelineBarrier(cmdBuffer, secondQueue ? VkPipelineStageFlags(VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT) : writeSync.stageMask,
                !secondQueue ? VkPipelineStageFlags(VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT) : readSync.stageMask, (VkDependencyFlags)0, 0u, (const VkMemoryBarrier*)DE_NULL,
                0u, (const VkBufferMemoryBarrier*)DE_NULL, 1u, &barrier);
        }
    }
    else if (resource.getType() == RESOURCE_TYPE_BUFFER || isIndirectBuffer(resource.getType()))
    {
        VkBufferMemoryBarrier barrier = makeBufferMemoryBarrier(secondQueue ? 0u : writeSync.accessMask, !secondQueue ? 0u : readSync.accessMask,
            resource.getBuffer().handle, resource.getBuffer().offset, resource.getBuffer().size);

        if (writeFamily != readFamily && VK_SHARING_MODE_EXCLUSIVE == sharingMode)
        {
            barrier.srcQueueFamilyIndex = writeFamily;
            barrier.dstQueueFamilyIndex = readFamily;
        }

        vk.cmdPipelineBarrier(cmdBuffer, secondQueue ? VkPipelineStageFlags(VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT) : writeSync.stageMask, !secondQueue ? VkPipelineStageFlags(VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT) : readSync.stageMask, (VkDependencyFlags)0, 0u, (const VkMemoryBarrier*)DE_NULL, 1u, (const VkBufferMemoryBarrier*)&barrier, 0u, (const VkImageMemoryBarrier*)DE_NULL);
    }
}

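// Common state shared by both test variants: the dedicated multi-queue device, the operation
// context built on top of it, and the factories for the write and read operations under test.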
class BaseTestInstance : public TestInstance
{
public:
    BaseTestInstance (Context& context, const ResourceDescription& resourceDesc, const OperationSupport& writeOp, const OperationSupport& readOp, PipelineCacheData& pipelineCacheData)
        : TestInstance   (context)
        , m_queues       (new MultiQueues(context))
        , m_opContext    (new OperationContext(context, pipelineCacheData, m_queues->getDeviceInterface(), m_queues->getDevice(), m_queues->getAllocator()))
        , m_resourceDesc (resourceDesc)
        , m_writeOp      (writeOp)
        , m_readOp       (readOp)
    {
    }

protected:
    const UniquePtr<MultiQueues>      m_queues;
    const UniquePtr<OperationContext> m_opContext;
    const ResourceDescription         m_resourceDesc;
    const OperationSupport&           m_writeOp;
    const OperationSupport&           m_readOp;
};

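// Orders the write and read submissions with a semaphore: the write batch signals it and the
// read batch waits on it (with a top-of-pipe wait stage) before executing the read commands.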
class SemaphoreTestInstance : public BaseTestInstance
{
public:
    SemaphoreTestInstance (Context& context, const ResourceDescription& resourceDesc, const OperationSupport& writeOp, const OperationSupport& readOp, PipelineCacheData& pipelineCacheData, const VkSharingMode sharingMode)
        : BaseTestInstance (context, resourceDesc, writeOp, readOp, pipelineCacheData)
        , m_sharingMode    (sharingMode)
    {
    }

    tcu::TestStatus iterate (void)
    {
        const DeviceInterface&       vk         = m_opContext->getDeviceInterface();
        const VkDevice               device     = m_opContext->getDevice();
        const std::vector<QueuePair> queuePairs = m_queues->getQueuesPairs(m_writeOp.getQueueFlags(*m_opContext), m_readOp.getQueueFlags(*m_opContext));

        for (deUint32 pairNdx = 0; pairNdx < static_cast<deUint32>(queuePairs.size()); ++pairNdx)
        {
            const UniquePtr<Resource>  resource (new Resource(*m_opContext, m_resourceDesc, m_writeOp.getResourceUsageFlags() | m_readOp.getResourceUsageFlags()));
            const UniquePtr<Operation> writeOp  (m_writeOp.build(*m_opContext, *resource));
            const UniquePtr<Operation> readOp   (m_readOp.build (*m_opContext, *resource));

            const Move<VkCommandPool> cmdPool[] =
            {
                createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queuePairs[pairNdx].familyIndexWrite),
                createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queuePairs[pairNdx].familyIndexRead)
            };
            const Move<VkCommandBuffer> ptrCmdBuffer[] =
            {
                makeCommandBuffer(vk, device, *cmdPool[QUEUETYPE_WRITE]),
                makeCommandBuffer(vk, device, *cmdPool[QUEUETYPE_READ])
            };
            const VkCommandBuffer cmdBuffers[] =
            {
                *ptrCmdBuffer[QUEUETYPE_WRITE],
                *ptrCmdBuffer[QUEUETYPE_READ]
            };
            const Unique<VkSemaphore>  semaphore    (createSemaphore(vk, device));
            const VkPipelineStageFlags stageBits[]  = { VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT };
            const VkSubmitInfo         submitInfo[] =
            {
                {
                    VK_STRUCTURE_TYPE_SUBMIT_INFO,        // VkStructureType                sType;
                    DE_NULL,                              // const void*                    pNext;
                    0u,                                   // deUint32                       waitSemaphoreCount;
                    DE_NULL,                              // const VkSemaphore*             pWaitSemaphores;
                    (const VkPipelineStageFlags*)DE_NULL, // const VkPipelineStageFlags*    pWaitDstStageMask;
                    1u,                                   // deUint32                       commandBufferCount;
                    &cmdBuffers[QUEUETYPE_WRITE],         // const VkCommandBuffer*         pCommandBuffers;
                    1u,                                   // deUint32                       signalSemaphoreCount;
                    &semaphore.get(),                     // const VkSemaphore*             pSignalSemaphores;
                },
                {
                    VK_STRUCTURE_TYPE_SUBMIT_INFO,        // VkStructureType                sType;
                    DE_NULL,                              // const void*                    pNext;
                    1u,                                   // deUint32                       waitSemaphoreCount;
                    &semaphore.get(),                     // const VkSemaphore*             pWaitSemaphores;
                    stageBits,                            // const VkPipelineStageFlags*    pWaitDstStageMask;
                    1u,                                   // deUint32                       commandBufferCount;
                    &cmdBuffers[QUEUETYPE_READ],          // const VkCommandBuffer*         pCommandBuffers;
                    0u,                                   // deUint32                       signalSemaphoreCount;
                    DE_NULL,                              // const VkSemaphore*             pSignalSemaphores;
                }
            };
            const SyncInfo writeSync = writeOp->getSyncInfo();
            const SyncInfo readSync  = readOp->getSyncInfo();

            beginCommandBuffer     (vk, cmdBuffers[QUEUETYPE_WRITE]);
            writeOp->recordCommands(cmdBuffers[QUEUETYPE_WRITE]);
            createBarrierMultiQueue(vk, cmdBuffers[QUEUETYPE_WRITE], writeSync, readSync, *resource, queuePairs[pairNdx].familyIndexWrite, queuePairs[pairNdx].familyIndexRead, m_sharingMode);
            endCommandBuffer       (vk, cmdBuffers[QUEUETYPE_WRITE]);

            beginCommandBuffer     (vk, cmdBuffers[QUEUETYPE_READ]);
            createBarrierMultiQueue(vk, cmdBuffers[QUEUETYPE_READ], writeSync, readSync, *resource, queuePairs[pairNdx].familyIndexWrite, queuePairs[pairNdx].familyIndexRead, m_sharingMode, true);
            readOp->recordCommands (cmdBuffers[QUEUETYPE_READ]);
            endCommandBuffer       (vk, cmdBuffers[QUEUETYPE_READ]);

            VK_CHECK(vk.queueSubmit(queuePairs[pairNdx].queueWrite, 1u, &submitInfo[QUEUETYPE_WRITE], DE_NULL));
            VK_CHECK(vk.queueSubmit(queuePairs[pairNdx].queueRead,  1u, &submitInfo[QUEUETYPE_READ],  DE_NULL));
            VK_CHECK(vk.queueWaitIdle(queuePairs[pairNdx].queueWrite));
            VK_CHECK(vk.queueWaitIdle(queuePairs[pairNdx].queueRead));

            {
                const Data expected = writeOp->getData();
                const Data actual   = readOp->getData();

                if (0 != deMemCmp(expected.data, actual.data, expected.size))
                    return tcu::TestStatus::fail("Memory contents don't match");
            }
        }
        return tcu::TestStatus::pass("OK");
    }

private:
    const VkSharingMode m_sharingMode;
};

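// Orders the write and read submissions from the host side: each batch is submitted with
// submitCommandsAndWait, which waits for the submission to complete before the next one is made.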
class FenceTestInstance : public BaseTestInstance
{
public:
    FenceTestInstance (Context& context, const ResourceDescription& resourceDesc, const OperationSupport& writeOp, const OperationSupport& readOp, PipelineCacheData& pipelineCacheData, const VkSharingMode sharingMode)
        : BaseTestInstance (context, resourceDesc, writeOp, readOp, pipelineCacheData)
        , m_sharingMode    (sharingMode)
    {
    }

    tcu::TestStatus iterate (void)
    {
        const DeviceInterface&       vk         = m_opContext->getDeviceInterface();
        const VkDevice               device     = m_opContext->getDevice();
        const std::vector<QueuePair> queuePairs = m_queues->getQueuesPairs(m_writeOp.getQueueFlags(*m_opContext), m_readOp.getQueueFlags(*m_opContext));

        for (deUint32 pairNdx = 0; pairNdx < static_cast<deUint32>(queuePairs.size()); ++pairNdx)
        {
            const UniquePtr<Resource>  resource (new Resource(*m_opContext, m_resourceDesc, m_writeOp.getResourceUsageFlags() | m_readOp.getResourceUsageFlags()));
            const UniquePtr<Operation> writeOp  (m_writeOp.build(*m_opContext, *resource));
            const UniquePtr<Operation> readOp   (m_readOp.build(*m_opContext, *resource));
            const Move<VkCommandPool>  cmdPool[] =
            {
                createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queuePairs[pairNdx].familyIndexWrite),
                createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT, queuePairs[pairNdx].familyIndexRead)
            };
            const Move<VkCommandBuffer> ptrCmdBuffer[] =
            {
                makeCommandBuffer(vk, device, *cmdPool[QUEUETYPE_WRITE]),
                makeCommandBuffer(vk, device, *cmdPool[QUEUETYPE_READ])
            };
            const VkCommandBuffer cmdBuffers[] =
            {
                *ptrCmdBuffer[QUEUETYPE_WRITE],
                *ptrCmdBuffer[QUEUETYPE_READ]
            };
            const SyncInfo writeSync = writeOp->getSyncInfo();
            const SyncInfo readSync  = readOp->getSyncInfo();

            beginCommandBuffer     (vk, cmdBuffers[QUEUETYPE_WRITE]);
            writeOp->recordCommands(cmdBuffers[QUEUETYPE_WRITE]);
            createBarrierMultiQueue(vk, cmdBuffers[QUEUETYPE_WRITE], writeSync, readSync, *resource, queuePairs[pairNdx].familyIndexWrite, queuePairs[pairNdx].familyIndexRead, m_sharingMode);
            endCommandBuffer       (vk, cmdBuffers[QUEUETYPE_WRITE]);

            submitCommandsAndWait  (vk, device, queuePairs[pairNdx].queueWrite, cmdBuffers[QUEUETYPE_WRITE]);

            beginCommandBuffer     (vk, cmdBuffers[QUEUETYPE_READ]);
            createBarrierMultiQueue(vk, cmdBuffers[QUEUETYPE_READ], writeSync, readSync, *resource, queuePairs[pairNdx].familyIndexWrite, queuePairs[pairNdx].familyIndexRead, m_sharingMode, true);
            readOp->recordCommands (cmdBuffers[QUEUETYPE_READ]);
            endCommandBuffer       (vk, cmdBuffers[QUEUETYPE_READ]);

            submitCommandsAndWait  (vk, device, queuePairs[pairNdx].queueRead, cmdBuffers[QUEUETYPE_READ]);

            {
                const Data expected = writeOp->getData();
                const Data actual   = readOp->getData();

                if (0 != deMemCmp(expected.data, actual.data, expected.size))
                    return tcu::TestStatus::fail("Memory contents don't match");
            }
        }
        return tcu::TestStatus::pass("OK");
    }

private:
    const VkSharingMode m_sharingMode;
};

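// Test case wrapper that builds the shader programs for both operations and instantiates either
// the fence or the semaphore variant, depending on the selected synchronization primitive.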
class BaseTestCase : public TestCase
{
public:
    BaseTestCase (tcu::TestContext&         testCtx,
                  const std::string&        name,
                  const std::string&        description,
                  const SyncPrimitive       syncPrimitive,
                  const ResourceDescription resourceDesc,
                  const OperationName       writeOp,
                  const OperationName       readOp,
                  const VkSharingMode       sharingMode,
                  PipelineCacheData&        pipelineCacheData)
        : TestCase            (testCtx, name, description)
        , m_resourceDesc      (resourceDesc)
        , m_writeOp           (makeOperationSupport(writeOp, resourceDesc))
        , m_readOp            (makeOperationSupport(readOp, resourceDesc))
        , m_syncPrimitive     (syncPrimitive)
        , m_sharingMode       (sharingMode)
        , m_pipelineCacheData (pipelineCacheData)
    {
    }

    void initPrograms (SourceCollections& programCollection) const
    {
        m_writeOp->initPrograms(programCollection);
        m_readOp->initPrograms(programCollection);
    }

    TestInstance* createInstance (Context& context) const
    {
        switch (m_syncPrimitive)
        {
            case SYNC_PRIMITIVE_FENCE:
                return new FenceTestInstance(context, m_resourceDesc, *m_writeOp, *m_readOp, m_pipelineCacheData, m_sharingMode);
            case SYNC_PRIMITIVE_SEMAPHORE:
                return new SemaphoreTestInstance(context, m_resourceDesc, *m_writeOp, *m_readOp, m_pipelineCacheData, m_sharingMode);
            default:
                DE_ASSERT(0);
                return DE_NULL;
        }
    }

private:
    const ResourceDescription         m_resourceDesc;
    const UniquePtr<OperationSupport> m_writeOp;
    const UniquePtr<OperationSupport> m_readOp;
    const SyncPrimitive               m_syncPrimitive;
    const VkSharingMode               m_sharingMode;
    PipelineCacheData&                m_pipelineCacheData;
};

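// Builds the test hierarchy: one group per synchronization primitive (fence, semaphore), one
// subgroup per write/read operation pair, and one case per supported resource in both exclusive
// and concurrent sharing modes.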
void createTests (tcu::TestCaseGroup* group, PipelineCacheData* pipelineCacheData)
{
    tcu::TestContext& testCtx = group->getTestContext();

    static const struct
    {
        const char*   name;
        SyncPrimitive syncPrimitive;
        int           numOptions;
    } groups[] =
    {
        { "fence",     SYNC_PRIMITIVE_FENCE,     1 },
        { "semaphore", SYNC_PRIMITIVE_SEMAPHORE, 1 }
    };

    for (int groupNdx = 0; groupNdx < DE_LENGTH_OF_ARRAY(groups); ++groupNdx)
    {
        MovePtr<tcu::TestCaseGroup> synchGroup (new tcu::TestCaseGroup(testCtx, groups[groupNdx].name, ""));

        for (int writeOpNdx = 0; writeOpNdx < DE_LENGTH_OF_ARRAY(s_writeOps); ++writeOpNdx)
        for (int readOpNdx  = 0; readOpNdx  < DE_LENGTH_OF_ARRAY(s_readOps);  ++readOpNdx)
        {
            const OperationName writeOp     = s_writeOps[writeOpNdx];
            const OperationName readOp      = s_readOps[readOpNdx];
            const std::string   opGroupName = getOperationName(writeOp) + "_" + getOperationName(readOp);
            bool                empty       = true;

            MovePtr<tcu::TestCaseGroup> opGroup (new tcu::TestCaseGroup(testCtx, opGroupName.c_str(), ""));

            for (int optionNdx = 0; optionNdx <= groups[groupNdx].numOptions; ++optionNdx)
            for (int resourceNdx = 0; resourceNdx < DE_LENGTH_OF_ARRAY(s_resources); ++resourceNdx)
            {
                const ResourceDescription& resource    = s_resources[resourceNdx];
                std::string                name        = getResourceName(resource);
                VkSharingMode              sharingMode = VK_SHARING_MODE_EXCLUSIVE;

                // queue family sharing mode used for resource
                if (optionNdx)
                {
                    name += "_concurrent";
                    sharingMode = VK_SHARING_MODE_CONCURRENT;
                }
                else
                    name += "_exclusive";

                if (isResourceSupported(writeOp, resource) && isResourceSupported(readOp, resource))
                {
                    opGroup->addChild(new BaseTestCase(testCtx, name, "", groups[groupNdx].syncPrimitive, resource, writeOp, readOp, sharingMode, *pipelineCacheData));
                    empty = false;
                }
            }
            if (!empty)
                synchGroup->addChild(opGroup.release());
        }
        group->addChild(synchGroup.release());
    }
}

} // anonymous

tcu::TestCaseGroup* createSynchronizedOperationMultiQueueTests (tcu::TestContext& testCtx, PipelineCacheData& pipelineCacheData)
{
    return createTestGroup(testCtx, "multi_queue", "Synchronization of a memory-modifying operation", createTests, &pipelineCacheData);
}

} // synchronization
} // vkt