/*------------------------------------------------------------------------
 * Vulkan Conformance Tests
 * ------------------------
 *
 * Copyright (c) 2017 The Khronos Group Inc.
 * Copyright (c) 2017 Nvidia Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*!
 * \file
 * \brief Device Group Tests
 *//*--------------------------------------------------------------------*/

#include "vktDeviceGroupTests.hpp"

#include "vkDefs.hpp"
#include "vkDeviceUtil.hpp"
#include "vkImageUtil.hpp"
#include "vkMemUtil.hpp"
#include "vkPlatform.hpp"
#include "vkPrograms.hpp"
#include "vkQueryUtil.hpp"
#include "vkRef.hpp"
#include "vkRefUtil.hpp"
#include "vkStrUtil.hpp"
#include "vkTypeUtil.hpp"
#include "vkCmdUtil.hpp"
#include "vkObjUtil.hpp"
#include "vktTestCase.hpp"
#include "vktTestCaseUtil.hpp"
#include "vktTestGroupUtil.hpp"

#include "tcuDefs.hpp"
#include "tcuFormatUtil.hpp"
#include "tcuImageCompare.hpp"
#include "tcuResource.hpp"
#include "tcuTestCase.hpp"
#include "tcuTestLog.hpp"
#include "tcuCommandLine.hpp"
#include "tcuTextureUtil.hpp"
#include "tcuImageIO.hpp"

#include "rrRenderer.hpp"

namespace vkt
{
namespace DeviceGroup
{
namespace
{

using namespace vk;
using std::string;
using std::vector;
using tcu::TestLog;
using de::UniquePtr;

// Device group test modes
enum TestModeType
{
    TEST_MODE_SFR          = 1 << 0,  //!< Split frame rendering
    TEST_MODE_AFR          = 1 << 1,  //!< Alternate frame rendering
    TEST_MODE_HOSTMEMORY   = 1 << 2,  //!< Use host memory for the render target
    TEST_MODE_DEDICATED    = 1 << 3,  //!< Use dedicated allocations
    TEST_MODE_PEER_FETCH   = 1 << 4,  //!< Fetch vertex attributes from peer memory
    TEST_MODE_TESSELLATION = 1 << 5,  //!< Generate a tessellated sphere instead of a triangle
    TEST_MODE_LINEFILL     = 1 << 6,  //!< Draw polygon edges as line segments
};
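// The modes are bit flags; a single test case may combine several of them
// (e.g. AFR together with dedicated allocations or peer fetching).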
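// Minimal vertex/fragment shaders for the CPU-side reference rasterizer (rr),
// used by renderReferenceTriangle() below to produce the expected image.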
class RefVertexShader : public rr::VertexShader
{
public:
    RefVertexShader (void)
        : rr::VertexShader(1, 0)
    {
        m_inputs[0].type = rr::GENERICVECTYPE_FLOAT;
    }
    virtual ~RefVertexShader (void) {}

    void shadeVertices (const rr::VertexAttrib* inputs, rr::VertexPacket* const* packets, const int numPackets) const
    {
        for (int packetNdx = 0; packetNdx < numPackets; ++packetNdx)
        {
            packets[packetNdx]->position = rr::readVertexAttribFloat(inputs[0],
                packets[packetNdx]->instanceNdx,
                packets[packetNdx]->vertexNdx);
        }
    }
};

class RefFragmentShader : public rr::FragmentShader
{
public:
    RefFragmentShader (void)
        : rr::FragmentShader(0, 1)
    {
        m_outputs[0].type = rr::GENERICVECTYPE_FLOAT;
    }

    virtual ~RefFragmentShader (void) {}

    void shadeFragments (rr::FragmentPacket*, const int numPackets, const rr::FragmentShadingContext& context) const
    {
        for (int packetNdx = 0; packetNdx < numPackets; ++packetNdx)
        {
            for (int fragNdx = 0; fragNdx < rr::NUM_FRAGMENTS_PER_PACKET; ++fragNdx)
            {
                rr::writeFragmentOutput(context, packetNdx, fragNdx, 0, tcu::Vec4(1.0f, 1.0f, 0.0f, 1.0f));
            }
        }
    }
};

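// Rasterize the reference triangle on the CPU; the result serves as the
// expected image when validating the device-rendered output.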
void renderReferenceTriangle (const tcu::PixelBufferAccess& dst, const tcu::Vec4 (&vertices)[3])
{
    const RefVertexShader                   vertShader;
    const RefFragmentShader                 fragShader;
    const rr::Program                       program      (&vertShader, &fragShader);
    const rr::MultisamplePixelBufferAccess  colorBuffer  = rr::MultisamplePixelBufferAccess::fromSinglesampleAccess(dst);
    const rr::RenderTarget                  renderTarget (colorBuffer);
    const rr::RenderState                   renderState  ((rr::ViewportState(colorBuffer)));
    const rr::Renderer                      renderer;
    const rr::VertexAttrib                  vertexAttribs[] =
    {
        rr::VertexAttrib(rr::VERTEXATTRIBTYPE_FLOAT, 4, sizeof(tcu::Vec4), 0, vertices[0].getPtr())
    };
    renderer.draw(rr::DrawCommand(renderState,
                                  renderTarget,
                                  program,
                                  DE_LENGTH_OF_ARRAY(vertexAttribs),
                                  &vertexAttribs[0],
                                  rr::PrimitiveList(rr::PRIMITIVETYPE_TRIANGLES, DE_LENGTH_OF_ARRAY(vertices), 0)));
}

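// Test instance: creates a logical device over the device group selected on
// the command line and exercises rendering under the requested test mode.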
class DeviceGroupTestInstance : public TestInstance
{
public:
    DeviceGroupTestInstance (Context& context, deUint32 mode);
    ~DeviceGroupTestInstance (void) {}
private:
    void                    init                        (void);
    deUint32                getMemoryIndex              (deUint32 memoryTypeBits, deUint32 memoryPropertyFlag);
    void                    getDeviceLayers             (vector<string>& enabledLayers);
    bool                    isPeerFetchAllowed          (deUint32 memoryTypeIndex, deUint32 firstdeviceID, deUint32 seconddeviceID);
    void                    SubmitBufferAndWaitForIdle  (const DeviceDriver& vk, VkCommandBuffer cmdBuf, deUint32 deviceMask);
    virtual tcu::TestStatus iterate                     (void);

    Move<VkDevice>              m_deviceGroup;
    deUint32                    m_physicalDeviceCount;
    VkQueue                     m_deviceGroupQueue;
    vector<VkPhysicalDevice>    m_physicalDevices;

    deUint32                    m_testMode;
    bool                        m_useHostMemory;
    bool                        m_useDedicated;
    bool                        m_usePeerFetch;
    bool                        m_subsetAllocation;
    bool                        m_fillModeNonSolid;
    bool                        m_drawTessellatedSphere;
};

DeviceGroupTestInstance::DeviceGroupTestInstance (Context& context, const deUint32 mode)
    : TestInstance            (context)
    , m_physicalDeviceCount   (0)
    , m_deviceGroupQueue      (DE_NULL)
    , m_testMode              (mode)
    , m_useHostMemory         (m_testMode & TEST_MODE_HOSTMEMORY)
    , m_useDedicated          (m_testMode & TEST_MODE_DEDICATED)
    , m_usePeerFetch          (m_testMode & TEST_MODE_PEER_FETCH)
    , m_subsetAllocation      (true)
    , m_fillModeNonSolid      (m_testMode & TEST_MODE_LINEFILL)
    , m_drawTessellatedSphere (m_testMode & TEST_MODE_TESSELLATION)
{
    init();
}
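// Return the first memory type allowed by memoryTypeBits that has all of the
// requested property flags set.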
deUint32 DeviceGroupTestInstance::getMemoryIndex (const deUint32 memoryTypeBits, const deUint32 memoryPropertyFlag)
{
    const VkPhysicalDeviceMemoryProperties deviceMemProps = getPhysicalDeviceMemoryProperties(m_context.getInstanceInterface(), m_context.getPhysicalDevice());
    for (deUint32 memoryTypeNdx = 0; memoryTypeNdx < deviceMemProps.memoryTypeCount; memoryTypeNdx++)
    {
        if ((memoryTypeBits & (1u << memoryTypeNdx)) != 0 &&
            (deviceMemProps.memoryTypes[memoryTypeNdx].propertyFlags & memoryPropertyFlag) == memoryPropertyFlag)
            return memoryTypeNdx;
    }
    TCU_THROW(NotSupportedError, "No compatible memory type found");
}
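// Peer fetch is usable only if, for the given memory type's heap, each of the
// two devices advertises GENERIC_SRC peer-memory support towards the other.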
bool DeviceGroupTestInstance::isPeerFetchAllowed (deUint32 memoryTypeIndex, deUint32 firstdeviceID, deUint32 seconddeviceID)
{
    VkPeerMemoryFeatureFlags                peerMemFeatures1;
    VkPeerMemoryFeatureFlags                peerMemFeatures2;
    const DeviceDriver                      vk              (m_context.getPlatformInterface(), m_context.getInstance(), *m_deviceGroup);
    const VkPhysicalDeviceMemoryProperties  deviceMemProps1 = getPhysicalDeviceMemoryProperties(m_context.getInstanceInterface(), m_physicalDevices[firstdeviceID]);
    const VkPhysicalDeviceMemoryProperties  deviceMemProps2 = getPhysicalDeviceMemoryProperties(m_context.getInstanceInterface(), m_physicalDevices[seconddeviceID]);
    vk.getDeviceGroupPeerMemoryFeatures(*m_deviceGroup, deviceMemProps2.memoryTypes[memoryTypeIndex].heapIndex, firstdeviceID, seconddeviceID, &peerMemFeatures1);
    vk.getDeviceGroupPeerMemoryFeatures(*m_deviceGroup, deviceMemProps1.memoryTypes[memoryTypeIndex].heapIndex, seconddeviceID, firstdeviceID, &peerMemFeatures2);
    return (peerMemFeatures1 & VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT) && (peerMemFeatures2 & VK_PEER_MEMORY_FEATURE_GENERIC_SRC_BIT);
}
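// When validation is requested on the command line, prefer the standard
// validation meta-layer and fall back to the individual layers it bundles.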
void DeviceGroupTestInstance::getDeviceLayers (vector<string>& enabledLayers)
{
    const tcu::CommandLine& cmdLine = m_context.getTestContext().getCommandLine();
    if (cmdLine.isValidationEnabled())
    {
        const vector<VkLayerProperties> layerProperties = enumerateDeviceLayerProperties(m_context.getInstanceInterface(), m_context.getPhysicalDevice());

        static const char* s_magicLayer = "VK_LAYER_LUNARG_standard_validation";
        static const char* s_defaultLayers[] =
        {
            "VK_LAYER_GOOGLE_threading",
            "VK_LAYER_LUNARG_parameter_validation",
            "VK_LAYER_LUNARG_device_limits",
            "VK_LAYER_LUNARG_object_tracker",
            "VK_LAYER_LUNARG_image",
            "VK_LAYER_LUNARG_core_validation",
            "VK_LAYER_LUNARG_swapchain",
            "VK_LAYER_GOOGLE_unique_objects",
        };

        if (isLayerSupported(layerProperties, RequiredLayer(s_magicLayer)))
            enabledLayers.push_back(s_magicLayer);
        else
        {
            for (deUint32 ndx = 0; ndx < DE_LENGTH_OF_ARRAY(s_defaultLayers); ++ndx)
            {
                if (isLayerSupported(layerProperties, RequiredLayer(s_defaultLayers[ndx])))
                    enabledLayers.push_back(s_defaultLayers[ndx]);
            }
        }
        if (enabledLayers.empty())
            TCU_THROW(NotSupportedError, "No device validation layers found");
    }
}
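// Create the logical device from the device group selected on the command
// line, enabling the extensions and validation layers the test mode needs.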
void DeviceGroupTestInstance::init (void)
{
    if (!isInstanceExtensionSupported(m_context.getUsedApiVersion(), m_context.getInstanceExtensions(), "VK_KHR_device_group_creation"))
        TCU_THROW(NotSupportedError, "Device Group tests are not supported, no device group extension present.");

    const InstanceInterface&      instanceInterface = m_context.getInstanceInterface();
    const deUint32                queueFamilyIndex  = m_context.getUniversalQueueFamilyIndex();
    const deUint32                queueIndex        = 0;
    const float                   queuePriority     = 1.0f;
    vector<const char*>           extensionPtrs;
    de::MovePtr<vk::DeviceDriver> deviceDriver;
    vector<const char*>           layerPtrs;
    vector<string>                deviceExtensions;
    vector<string>                enabledLayers;

    if (!isDeviceExtensionSupported(m_context.getUsedApiVersion(), m_context.getDeviceExtensions(), "VK_KHR_device_group"))
        TCU_THROW(NotSupportedError, "Missing extension: VK_KHR_device_group");

    if (!isCoreDeviceExtension(m_context.getUsedApiVersion(), "VK_KHR_device_group"))
        deviceExtensions.push_back("VK_KHR_device_group");

    if (m_useDedicated)
    {
        if (!isDeviceExtensionSupported(m_context.getUsedApiVersion(), m_context.getDeviceExtensions(), "VK_KHR_dedicated_allocation"))
            TCU_THROW(NotSupportedError, "Missing extension: VK_KHR_dedicated_allocation");

        if (!isCoreDeviceExtension(m_context.getUsedApiVersion(), "VK_KHR_dedicated_allocation"))
            deviceExtensions.push_back("VK_KHR_dedicated_allocation");
    }

    {
        const tcu::CommandLine& cmdLine = m_context.getTestContext().getCommandLine();
        const vector<VkPhysicalDeviceGroupProperties> properties = enumeratePhysicalDeviceGroups(instanceInterface, m_context.getInstance());
        if ((size_t)cmdLine.getVKDeviceGroupId() > properties.size())
            TCU_THROW(TestError, "Invalid device group index.");

        m_physicalDeviceCount = properties[cmdLine.getVKDeviceGroupId() - 1].physicalDeviceCount;
        for (deUint32 idx = 0; idx < m_physicalDeviceCount; idx++)
        {
            m_physicalDevices.push_back(properties[cmdLine.getVKDeviceGroupId() - 1].physicalDevices[idx]);
        }

        if (m_usePeerFetch && m_physicalDeviceCount < 2)
            TCU_THROW(NotSupportedError, "Peer fetching needs more than 1 physical device.");

        if (!(m_testMode & TEST_MODE_AFR) || (m_physicalDeviceCount > 1))
        {
            if (!de::contains(m_context.getDeviceExtensions().begin(), m_context.getDeviceExtensions().end(), std::string("VK_KHR_bind_memory2")))
                TCU_THROW(NotSupportedError, "Missing extension: VK_KHR_bind_memory2");
            deviceExtensions.push_back("VK_KHR_bind_memory2");
        }

        const VkDeviceQueueCreateInfo deviceQueueCreateInfo =
        {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,  // sType
            DE_NULL,                                     // pNext
            (VkDeviceQueueCreateFlags)0u,                // flags
            queueFamilyIndex,                            // queueFamilyIndex
            1u,                                          // queueCount
            &queuePriority,                              // pQueuePriorities
        };
        const VkDeviceGroupDeviceCreateInfo deviceGroupInfo =
        {
            VK_STRUCTURE_TYPE_DEVICE_GROUP_DEVICE_CREATE_INFO,                 // sType
            DE_NULL,                                                           // pNext
            properties[cmdLine.getVKDeviceGroupId() - 1].physicalDeviceCount,  // physicalDeviceCount
            properties[cmdLine.getVKDeviceGroupId() - 1].physicalDevices       // pPhysicalDevices
        };

        VkPhysicalDevice         physicalDevice        = properties[cmdLine.getVKDeviceGroupId() - 1].physicalDevices[(size_t)(cmdLine.getVKDeviceId() - 1)];
        VkPhysicalDeviceFeatures enabledDeviceFeatures = getPhysicalDeviceFeatures(instanceInterface, physicalDevice);
        m_subsetAllocation = properties[cmdLine.getVKDeviceGroupId() - 1].subsetAllocation;

        if (m_drawTessellatedSphere & static_cast<bool>(!enabledDeviceFeatures.tessellationShader))
            TCU_THROW(NotSupportedError, "Tessellation is not supported.");

        if (m_fillModeNonSolid & static_cast<bool>(!enabledDeviceFeatures.fillModeNonSolid))
            TCU_THROW(NotSupportedError, "Line polygon mode is not supported.");

        extensionPtrs.resize(deviceExtensions.size());
        for (size_t ndx = 0; ndx < deviceExtensions.size(); ++ndx)
            extensionPtrs[ndx] = deviceExtensions[ndx].c_str();

        // Get layers
        getDeviceLayers(enabledLayers);
        layerPtrs.resize(enabledLayers.size());
        for (size_t ndx = 0; ndx < enabledLayers.size(); ++ndx)
            layerPtrs[ndx] = enabledLayers[ndx].c_str();

        const VkDeviceCreateInfo deviceCreateInfo =
        {
            VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,                   // sType
            &deviceGroupInfo,                                       // pNext
            (VkDeviceCreateFlags)0u,                                // flags
            1,                                                      // queueCreateInfoCount
            &deviceQueueCreateInfo,                                 // pQueueCreateInfos
            (deUint32)layerPtrs.size(),                             // enabledLayerCount
            (layerPtrs.empty() ? DE_NULL : &layerPtrs[0]),          // ppEnabledLayerNames
            (deUint32)extensionPtrs.size(),                         // enabledExtensionCount
            (extensionPtrs.empty() ? DE_NULL : &extensionPtrs[0]),  // ppEnabledExtensionNames
            &enabledDeviceFeatures,                                 // pEnabledFeatures
        };
        m_deviceGroup = createDevice(m_context.getPlatformInterface(), m_context.getInstance(), instanceInterface, physicalDevice, &deviceCreateInfo);
    }

    deviceDriver = de::MovePtr<vk::DeviceDriver>(new vk::DeviceDriver(m_context.getPlatformInterface(), m_context.getInstance(), *m_deviceGroup));
    m_deviceGroupQueue = getDeviceQueue(*deviceDriver, *m_deviceGroup, queueFamilyIndex, queueIndex);
}
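// Submit on the device-group queue with the given device mask, then wait for
// the whole logical device to go idle.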
void DeviceGroupTestInstance::SubmitBufferAndWaitForIdle (const DeviceDriver& vk, VkCommandBuffer cmdBuf, deUint32 deviceMask)
{
    submitCommandsAndWait(vk, *m_deviceGroup, m_deviceGroupQueue, cmdBuf, true, deviceMask);
    VK_CHECK(vk.deviceWaitIdle(*m_deviceGroup));
}
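// For each physical device in the group, pair it with the next device in the
// group, build all resources, and render one frame under the selected mode.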
tcu::TestStatus DeviceGroupTestInstance::iterate (void)
{
    const InstanceInterface& vki                  (m_context.getInstanceInterface());
    const DeviceDriver       vk                   (m_context.getPlatformInterface(), m_context.getInstance(), *m_deviceGroup);
    const deUint32           queueFamilyIndex     = m_context.getUniversalQueueFamilyIndex();
    const tcu::UVec2         renderSize           (256, 256);
    const VkFormat           colorFormat          = VK_FORMAT_R8G8B8A8_UNORM;
    const tcu::Vec4          clearColor           (0.125f, 0.25f, 0.75f, 1.0f);
    const tcu::Vec4          drawColor            (1.0f, 1.0f, 0.0f, 1.0f);
    const float              tessLevel            = 16.0f;
    SimpleAllocator          memAlloc             (vk, *m_deviceGroup, getPhysicalDeviceMemoryProperties(m_context.getInstanceInterface(), m_context.getPhysicalDevice()));
    bool                     iterateResultSuccess = false;
    const tcu::Vec4          sphereVertices[]     =
    {
        tcu::Vec4( 0.0f,  0.0f,  1.0f, 1.0f),
        tcu::Vec4( 0.0f,  1.0f,  0.0f, 1.0f),
        tcu::Vec4( 1.0f,  0.0f,  0.0f, 1.0f),
        tcu::Vec4( 0.0f,  0.0f, -1.0f, 1.0f),
        tcu::Vec4( 0.0f, -1.0f,  0.0f, 1.0f),
        tcu::Vec4(-1.0f,  0.0f,  0.0f, 1.0f),
    };
    const deUint32 sphereIndices[] = {0, 1, 2, 2, 1, 3, 3, 1, 5, 5, 1, 0, 0, 2, 4, 2, 3, 4, 3, 5, 4, 5, 0, 4};
    const tcu::Vec4 triVertices[] =
    {
        tcu::Vec4(-0.5f, -0.5f, 0.0f, 1.0f),
        tcu::Vec4(+0.5f, -0.5f, 0.0f, 1.0f),
        tcu::Vec4( 0.0f, +0.5f, 0.0f, 1.0f)
    };
    const deUint32   triIndices[] = {0, 1, 2};
    const tcu::Vec4* vertices     = m_drawTessellatedSphere ? &sphereVertices[0] : &triVertices[0];
    const deUint32*  indices      = m_drawTessellatedSphere ? &sphereIndices[0] : &triIndices[0];
    const deUint32   verticesSize = m_drawTessellatedSphere ? deUint32(sizeof(sphereVertices)) : deUint32(sizeof(triVertices));
    const deUint32   numIndices   = m_drawTessellatedSphere ? deUint32(sizeof(sphereIndices)/sizeof(sphereIndices[0])) : deUint32(sizeof(triIndices)/sizeof(triIndices[0]));
    const deUint32   indicesSize  = m_drawTessellatedSphere ? deUint32(sizeof(sphereIndices)) : deUint32(sizeof(triIndices));

    // Loop through all physical devices in the device group
    for (deUint32 physDevID = 0; physDevID < m_physicalDeviceCount; physDevID++)
    {
        const deUint32   firstDeviceID             = physDevID;
        const deUint32   secondDeviceID            = (firstDeviceID + 1) % m_physicalDeviceCount;
        vector<deUint32> deviceIndices             (m_physicalDeviceCount);
        bool             isPeerMemAsCopySrcAllowed = true;
        // Set broadcast on memory allocation
        const deUint32   allocDeviceMask           = m_subsetAllocation ? (1 << firstDeviceID) | (1 << secondDeviceID) : (1 << m_physicalDeviceCount) - 1;

        for (deUint32 i = 0; i < m_physicalDeviceCount; i++)
            deviceIndices[i] = i;
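        // Swap the two IDs so that, when binding with pDeviceIndices, each of
        // the pair attaches to the memory instance that lives on the other
        // device, i.e. a peer binding.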
        deviceIndices[firstDeviceID]  = secondDeviceID;
        deviceIndices[secondDeviceID] = firstDeviceID;

        VkMemoryRequirements memReqs =
        {
            0,  // VkDeviceSize size
            0,  // VkDeviceSize alignment
            0,  // uint32_t     memoryTypeBits
        };
        deUint32                memoryTypeNdx = 0;
        de::MovePtr<Allocation> stagingVertexBufferMemory;
        de::MovePtr<Allocation> stagingIndexBufferMemory;
        de::MovePtr<Allocation> stagingUniformBufferMemory;
        de::MovePtr<Allocation> stagingSboBufferMemory;

        vk::Move<vk::VkDeviceMemory> vertexBufferMemory;
        vk::Move<vk::VkDeviceMemory> indexBufferMemory;
        vk::Move<vk::VkDeviceMemory> uniformBufferMemory;
        vk::Move<vk::VkDeviceMemory> sboBufferMemory;
        vk::Move<vk::VkDeviceMemory> imageMemory;

        Move<VkRenderPass> renderPass;
        Move<VkImage>      renderImage;
        Move<VkImage>      readImage;

        Move<VkDescriptorSetLayout> descriptorSetLayout;
        Move<VkDescriptorPool>      descriptorPool;
        Move<VkDescriptorSet>       descriptorSet;

        Move<VkBuffer> stagingVertexBuffer;
        Move<VkBuffer> stagingUniformBuffer;
        Move<VkBuffer> stagingIndexBuffer;
        Move<VkBuffer> stagingSboBuffer;

        Move<VkBuffer> vertexBuffer;
        Move<VkBuffer> indexBuffer;
        Move<VkBuffer> uniformBuffer;
        Move<VkBuffer> sboBuffer;

        Move<VkPipeline>       pipeline;
        Move<VkPipelineLayout> pipelineLayout;

        Move<VkImageView>     colorAttView;
        Move<VkFramebuffer>   framebuffer;
        Move<VkCommandPool>   cmdPool;
        Move<VkCommandBuffer> cmdBuffer;

        VkMemoryDedicatedAllocateInfo dedicatedAllocInfo =
        {
            VK_STRUCTURE_TYPE_MEMORY_DEDICATED_ALLOCATE_INFO,  // sType
            DE_NULL,                                           // pNext
            DE_NULL,                                           // image
            DE_NULL                                            // buffer
        };

        VkMemoryAllocateFlagsInfo allocDeviceMaskInfo =
        {
            VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_FLAGS_INFO,    // sType
            m_useDedicated ? &dedicatedAllocInfo : DE_NULL,  // pNext
            VK_MEMORY_ALLOCATE_DEVICE_MASK_BIT,              // flags
            allocDeviceMask,                                 // deviceMask
        };

        VkMemoryAllocateInfo allocInfo =
        {
            VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,  // sType
            &allocDeviceMaskInfo,                    // pNext
            0u,                                      // allocationSize
            0u,                                      // memoryTypeIndex
        };
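        // allocationSize/memoryTypeIndex (and dedicatedAllocInfo.buffer/image
        // when dedicated allocations are used) are patched per resource below,
        // right before each allocateMemory call.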

        // create vertex buffers
        {
            const VkBufferCreateInfo stagingVertexBufferParams =
            {
                VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,  // sType
                DE_NULL,                               // pNext
                0u,                                    // flags
                (VkDeviceSize)verticesSize,            // size
                VK_BUFFER_USAGE_TRANSFER_SRC_BIT,      // usage
                VK_SHARING_MODE_EXCLUSIVE,             // sharingMode
                1u,                                    // queueFamilyIndexCount
                &queueFamilyIndex,                     // pQueueFamilyIndices
            };
            stagingVertexBuffer = createBuffer(vk, *m_deviceGroup, &stagingVertexBufferParams);
            stagingVertexBufferMemory = memAlloc.allocate(getBufferMemoryRequirements(vk, *m_deviceGroup, *stagingVertexBuffer), MemoryRequirement::HostVisible);
            VK_CHECK(vk.bindBufferMemory(*m_deviceGroup, *stagingVertexBuffer, stagingVertexBufferMemory->getMemory(), stagingVertexBufferMemory->getOffset()));

            void* vertexBufPtr = stagingVertexBufferMemory->getHostPtr();
            deMemcpy(vertexBufPtr, &vertices[0], verticesSize);
            flushAlloc(vk, *m_deviceGroup, *stagingVertexBufferMemory);
        }

        {
            const VkBufferCreateInfo vertexBufferParams =
            {
                VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,                                  // sType
                DE_NULL,                                                               // pNext
                0u,                                                                    // flags
                (VkDeviceSize)verticesSize,                                            // size
                VK_BUFFER_USAGE_VERTEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT,  // usage
                VK_SHARING_MODE_EXCLUSIVE,                                             // sharingMode
                1u,                                                                    // queueFamilyIndexCount
                &queueFamilyIndex,                                                     // pQueueFamilyIndices
            };
            vertexBuffer = createBuffer(vk, *m_deviceGroup, &vertexBufferParams);

            memReqs = getBufferMemoryRequirements(vk, *m_deviceGroup, vertexBuffer.get());
            memoryTypeNdx = getMemoryIndex(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);

            dedicatedAllocInfo.buffer = vertexBuffer.get();
            allocInfo.allocationSize  = memReqs.size;
            allocInfo.memoryTypeIndex = memoryTypeNdx;
            vertexBufferMemory = allocateMemory(vk, *m_deviceGroup, &allocInfo);

            if (m_usePeerFetch && !isPeerFetchAllowed(memoryTypeNdx, firstDeviceID, secondDeviceID))
                TCU_THROW(NotSupportedError, "Peer fetch is not supported.");

            // Bind vertex buffer
            if (m_usePeerFetch)
            {
                VkBindBufferMemoryDeviceGroupInfo devGroupBindInfo =
                {
                    VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO,  // sType
                    DE_NULL,                                                 // pNext
                    m_physicalDeviceCount,                                   // deviceIndexCount
                    &deviceIndices[0],                                       // pDeviceIndices
                };

                VkBindBufferMemoryInfo bindInfo =
                {
                    VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO,  // sType
                    &devGroupBindInfo,                          // pNext
                    vertexBuffer.get(),                         // buffer
                    vertexBufferMemory.get(),                   // memory
                    0u,                                         // memoryOffset
                };
                VK_CHECK(vk.bindBufferMemory2(*m_deviceGroup, 1, &bindInfo));
            }
            else
                VK_CHECK(vk.bindBufferMemory(*m_deviceGroup, *vertexBuffer, vertexBufferMemory.get(), 0));
        }

        // create index buffers
        {
            const VkBufferCreateInfo stagingIndexBufferParams =
            {
                VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,  // sType
                DE_NULL,                               // pNext
                0u,                                    // flags
                (VkDeviceSize)indicesSize,             // size
                VK_BUFFER_USAGE_TRANSFER_SRC_BIT,      // usage
                VK_SHARING_MODE_EXCLUSIVE,             // sharingMode
                1u,                                    // queueFamilyIndexCount
                &queueFamilyIndex,                     // pQueueFamilyIndices
            };
            stagingIndexBuffer = createBuffer(vk, *m_deviceGroup, &stagingIndexBufferParams);
            stagingIndexBufferMemory = memAlloc.allocate(getBufferMemoryRequirements(vk, *m_deviceGroup, *stagingIndexBuffer), MemoryRequirement::HostVisible);
            VK_CHECK(vk.bindBufferMemory(*m_deviceGroup, *stagingIndexBuffer, stagingIndexBufferMemory->getMemory(), stagingIndexBufferMemory->getOffset()));

            void* indexBufPtr = stagingIndexBufferMemory->getHostPtr();
            deMemcpy(indexBufPtr, &indices[0], indicesSize);
            flushAlloc(vk, *m_deviceGroup, *stagingIndexBufferMemory);
        }

        {
            const VkBufferCreateInfo indexBufferParams =
            {
                VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,                                 // sType
                DE_NULL,                                                              // pNext
                0u,                                                                   // flags
                (VkDeviceSize)indicesSize,                                            // size
                VK_BUFFER_USAGE_INDEX_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT,  // usage
                VK_SHARING_MODE_EXCLUSIVE,                                            // sharingMode
                1u,                                                                   // queueFamilyIndexCount
                &queueFamilyIndex,                                                    // pQueueFamilyIndices
            };
            indexBuffer = createBuffer(vk, *m_deviceGroup, &indexBufferParams);

            memReqs = getBufferMemoryRequirements(vk, *m_deviceGroup, indexBuffer.get());
            memoryTypeNdx = getMemoryIndex(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);

            dedicatedAllocInfo.buffer = indexBuffer.get();
            allocInfo.allocationSize  = memReqs.size;
            allocInfo.memoryTypeIndex = memoryTypeNdx;
            indexBufferMemory = allocateMemory(vk, *m_deviceGroup, &allocInfo);

            if (m_usePeerFetch && !isPeerFetchAllowed(memoryTypeNdx, firstDeviceID, secondDeviceID))
                TCU_THROW(NotSupportedError, "Peer fetch is not supported.");

            // Bind index buffer
            if (m_usePeerFetch)
            {
                VkBindBufferMemoryDeviceGroupInfo devGroupBindInfo =
                {
                    VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO,  // sType
                    DE_NULL,                                                 // pNext
                    m_physicalDeviceCount,                                   // deviceIndexCount
                    &deviceIndices[0],                                       // pDeviceIndices
                };

                VkBindBufferMemoryInfo bindInfo =
                {
                    VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO,  // sType
                    &devGroupBindInfo,                          // pNext
                    indexBuffer.get(),                          // buffer
                    indexBufferMemory.get(),                    // memory
                    0u,                                         // memoryOffset
                };
                VK_CHECK(vk.bindBufferMemory2(*m_deviceGroup, 1, &bindInfo));
            }
            else
                VK_CHECK(vk.bindBufferMemory(*m_deviceGroup, *indexBuffer, indexBufferMemory.get(), 0));
        }

        // create uniform buffers
        {
            const VkBufferCreateInfo stagingUniformBufferParams =
            {
                VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,  // sType
                DE_NULL,                               // pNext
                0u,                                    // flags
                (VkDeviceSize)sizeof(drawColor),       // size
                VK_BUFFER_USAGE_TRANSFER_SRC_BIT,      // usage
                VK_SHARING_MODE_EXCLUSIVE,             // sharingMode
                1u,                                    // queueFamilyIndexCount
                &queueFamilyIndex,                     // pQueueFamilyIndices
            };
            stagingUniformBuffer = createBuffer(vk, *m_deviceGroup, &stagingUniformBufferParams);
            stagingUniformBufferMemory = memAlloc.allocate(getBufferMemoryRequirements(vk, *m_deviceGroup, *stagingUniformBuffer), MemoryRequirement::HostVisible);
            VK_CHECK(vk.bindBufferMemory(*m_deviceGroup, *stagingUniformBuffer, stagingUniformBufferMemory->getMemory(), stagingUniformBufferMemory->getOffset()));

            void* uniformBufPtr = stagingUniformBufferMemory->getHostPtr();
            deMemcpy(uniformBufPtr, &drawColor[0], sizeof(drawColor));
            flushAlloc(vk, *m_deviceGroup, *stagingUniformBufferMemory);
        }

        {
            const VkBufferCreateInfo uniformBufferParams =
            {
                VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,                                   // sType
                DE_NULL,                                                                // pNext
                0u,                                                                     // flags
                (VkDeviceSize)sizeof(drawColor),                                        // size
                VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT,  // usage
                VK_SHARING_MODE_EXCLUSIVE,                                              // sharingMode
                1u,                                                                     // queueFamilyIndexCount
                &queueFamilyIndex,                                                      // pQueueFamilyIndices
            };
            uniformBuffer = createBuffer(vk, *m_deviceGroup, &uniformBufferParams);

            memReqs = getBufferMemoryRequirements(vk, *m_deviceGroup, uniformBuffer.get());
            memoryTypeNdx = getMemoryIndex(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);

            dedicatedAllocInfo.buffer = uniformBuffer.get();
            allocInfo.allocationSize  = memReqs.size;
            allocInfo.memoryTypeIndex = memoryTypeNdx;
            uniformBufferMemory = allocateMemory(vk, *m_deviceGroup, &allocInfo);

            if (m_usePeerFetch && !isPeerFetchAllowed(memoryTypeNdx, firstDeviceID, secondDeviceID))
                TCU_THROW(NotSupportedError, "Peer fetch is not supported.");

            if (m_usePeerFetch)
            {
                VkBindBufferMemoryDeviceGroupInfo devGroupBindInfo =
                {
                    VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO,  // sType
                    DE_NULL,                                                 // pNext
                    m_physicalDeviceCount,                                   // deviceIndexCount
                    &deviceIndices[0],                                       // pDeviceIndices
                };

                VkBindBufferMemoryInfo bindInfo =
                {
                    VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO,  // sType
                    &devGroupBindInfo,                          // pNext
                    uniformBuffer.get(),                        // buffer
                    uniformBufferMemory.get(),                  // memory
                    0u,                                         // memoryOffset
                };
                VK_CHECK(vk.bindBufferMemory2(*m_deviceGroup, 1, &bindInfo));
            }
            else
                VK_CHECK(vk.bindBufferMemory(*m_deviceGroup, uniformBuffer.get(), uniformBufferMemory.get(), 0));
        }

        // create SBO buffers
        {
            const VkBufferCreateInfo stagingSboBufferParams =
            {
                VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,  // sType
                DE_NULL,                               // pNext
                0u,                                    // flags
                (VkDeviceSize)sizeof(tessLevel),       // size
                VK_BUFFER_USAGE_TRANSFER_SRC_BIT,      // usage
                VK_SHARING_MODE_EXCLUSIVE,             // sharingMode
                1u,                                    // queueFamilyIndexCount
                &queueFamilyIndex,                     // pQueueFamilyIndices
            };
            stagingSboBuffer = createBuffer(vk, *m_deviceGroup, &stagingSboBufferParams);
            stagingSboBufferMemory = memAlloc.allocate(getBufferMemoryRequirements(vk, *m_deviceGroup, *stagingSboBuffer), MemoryRequirement::HostVisible);
            VK_CHECK(vk.bindBufferMemory(*m_deviceGroup, *stagingSboBuffer, stagingSboBufferMemory->getMemory(), stagingSboBufferMemory->getOffset()));

            void* sboBufPtr = stagingSboBufferMemory->getHostPtr();
            deMemcpy(sboBufPtr, &tessLevel, sizeof(tessLevel));
            flushAlloc(vk, *m_deviceGroup, *stagingSboBufferMemory);
        }

        {
            const VkBufferCreateInfo sboBufferParams =
            {
                VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,                                   // sType
                DE_NULL,                                                                // pNext
                0u,                                                                     // flags
                (VkDeviceSize)sizeof(tessLevel),                                        // size
                VK_BUFFER_USAGE_STORAGE_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT,  // usage
                VK_SHARING_MODE_EXCLUSIVE,                                              // sharingMode
                1u,                                                                     // queueFamilyIndexCount
                &queueFamilyIndex,                                                      // pQueueFamilyIndices
            };
            sboBuffer = createBuffer(vk, *m_deviceGroup, &sboBufferParams);

            memReqs = getBufferMemoryRequirements(vk, *m_deviceGroup, sboBuffer.get());
            memoryTypeNdx = getMemoryIndex(memReqs.memoryTypeBits, VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);

            dedicatedAllocInfo.buffer = sboBuffer.get();
            allocInfo.allocationSize  = memReqs.size;
            allocInfo.memoryTypeIndex = memoryTypeNdx;
            sboBufferMemory = allocateMemory(vk, *m_deviceGroup, &allocInfo);

            if (m_usePeerFetch && !isPeerFetchAllowed(memoryTypeNdx, firstDeviceID, secondDeviceID))
                TCU_THROW(NotSupportedError, "Peer fetch is not supported.");

            if (m_usePeerFetch)
            {
                VkBindBufferMemoryDeviceGroupInfo devGroupBindInfo =
                {
                    VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_DEVICE_GROUP_INFO,  // sType
                    DE_NULL,                                                 // pNext
                    m_physicalDeviceCount,                                   // deviceIndexCount
                    &deviceIndices[0],                                       // pDeviceIndices
                };

                VkBindBufferMemoryInfo bindInfo =
                {
                    VK_STRUCTURE_TYPE_BIND_BUFFER_MEMORY_INFO,  // sType
                    &devGroupBindInfo,                          // pNext
                    sboBuffer.get(),                            // buffer
                    sboBufferMemory.get(),                      // memory
                    0u,                                         // memoryOffset
                };
                VK_CHECK(vk.bindBufferMemory2(*m_deviceGroup, 1, &bindInfo));
            }
            else
                VK_CHECK(vk.bindBufferMemory(*m_deviceGroup, sboBuffer.get(), sboBufferMemory.get(), 0));
        }

        // Create image resources
        // Use a consistent usage flag because of memory aliasing
        VkImageUsageFlags imageUsageFlag = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
        {
            // Check for SFR support
            VkImageFormatProperties properties;
            if ((m_testMode & TEST_MODE_SFR) && vki.getPhysicalDeviceImageFormatProperties(m_context.getPhysicalDevice(),
                colorFormat,                                                            // format
                VK_IMAGE_TYPE_2D,                                                       // type
                VK_IMAGE_TILING_OPTIMAL,                                                // tiling
                VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT,  // usage
                VK_IMAGE_CREATE_BIND_SFR_BIT,                                           // flags
                &properties) != VK_SUCCESS)                                             // properties
            {
                TCU_THROW(NotSupportedError, "Format not supported for SFR");
            }

            VkImageCreateFlags imageCreateFlags = VK_IMAGE_CREATE_ALIAS_BIT;  // The image objects alias the same memory
            if ((m_testMode & TEST_MODE_SFR) && (m_physicalDeviceCount > 1))
            {
                imageCreateFlags |= VK_IMAGE_CREATE_BIND_SFR_BIT;
            }

            const VkImageCreateInfo imageParams =
            {
                VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,    // sType
                DE_NULL,                                // pNext
                imageCreateFlags,                       // flags
                VK_IMAGE_TYPE_2D,                       // imageType
                colorFormat,                            // format
                { renderSize.x(), renderSize.y(), 1 },  // extent
                1u,                                     // mipLevels
                1u,                                     // arraySize
                VK_SAMPLE_COUNT_1_BIT,                  // samples
                VK_IMAGE_TILING_OPTIMAL,                // tiling
                imageUsageFlag,                         // usage
                VK_SHARING_MODE_EXCLUSIVE,              // sharingMode
                1u,                                     // queueFamilyIndexCount
                &queueFamilyIndex,                      // pQueueFamilyIndices
                VK_IMAGE_LAYOUT_UNDEFINED,              // initialLayout
            };

            renderImage = createImage(vk, *m_deviceGroup, &imageParams);
            readImage   = createImage(vk, *m_deviceGroup, &imageParams);

            dedicatedAllocInfo.image  = *renderImage;
            dedicatedAllocInfo.buffer = DE_NULL;
            memReqs = getImageMemoryRequirements(vk, *m_deviceGroup, renderImage.get());
            memoryTypeNdx = getMemoryIndex(memReqs.memoryTypeBits, m_useHostMemory ? 0 : VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT);
            allocInfo.allocationSize  = memReqs.size;
            allocInfo.memoryTypeIndex = memoryTypeNdx;
            imageMemory = allocateMemory(vk, *m_deviceGroup, &allocInfo);
        }

        VK_CHECK(vk.bindImageMemory(*m_deviceGroup, *renderImage, imageMemory.get(), 0));
        VK_CHECK(vk.bindImageMemory(*m_deviceGroup, *readImage, imageMemory.get(), 0));

        // Create renderpass
        renderPass = makeRenderPass(vk, *m_deviceGroup, colorFormat);

        // Create descriptors
        {
            vector<VkDescriptorSetLayoutBinding> layoutBindings;
            vector<VkDescriptorPoolSize>         descriptorTypes;
            vector<VkWriteDescriptorSet>         writeDescriptorSets;

            const VkDescriptorSetLayoutBinding layoutBindingUBO =
            {
                0u,                                        // deUint32            binding;
                VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,         // VkDescriptorType    descriptorType;
                1u,                                        // deUint32            descriptorCount;
                VK_SHADER_STAGE_FRAGMENT_BIT,              // VkShaderStageFlags  stageFlags;
                DE_NULL                                    // const VkSampler*    pImmutableSamplers;
            };
            const VkDescriptorSetLayoutBinding layoutBindingSBO =
            {
                1u,                                        // deUint32            binding;
                VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,         // VkDescriptorType    descriptorType;
                1u,                                        // deUint32            descriptorCount;
                VK_SHADER_STAGE_TESSELLATION_CONTROL_BIT,  // VkShaderStageFlags  stageFlags;
                DE_NULL                                    // const VkSampler*    pImmutableSamplers;
            };

            layoutBindings.push_back(layoutBindingUBO);
            if (m_drawTessellatedSphere)
                layoutBindings.push_back(layoutBindingSBO);

            const VkDescriptorSetLayoutCreateInfo descriptorLayoutParams =
            {
                VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,  // VkStructureType                      sType;
                DE_NULL,                                              // const void*                          pNext;
                (VkDescriptorSetLayoutCreateFlags)0,                  // VkDescriptorSetLayoutCreateFlags     flags;
                deUint32(layoutBindings.size()),                      // deUint32                             bindingCount;
                layoutBindings.data()                                 // const VkDescriptorSetLayoutBinding*  pBindings;
            };
            descriptorSetLayout = createDescriptorSetLayout(vk, *m_deviceGroup, &descriptorLayoutParams);

            const VkDescriptorPoolSize descriptorTypeUBO =
            {
                VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,  // VkDescriptorType  type;
                1                                   // deUint32          descriptorCount;
            };
            const VkDescriptorPoolSize descriptorTypeSBO =
            {
                VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,  // VkDescriptorType  type;
                1                                   // deUint32          descriptorCount;
            };
            descriptorTypes.push_back(descriptorTypeUBO);
            if (m_drawTessellatedSphere)
                descriptorTypes.push_back(descriptorTypeSBO);

            const VkDescriptorPoolCreateInfo descriptorPoolParams =
            {
                VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,      // VkStructureType              sType;
                DE_NULL,                                            // const void*                  pNext;
                VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,  // VkDescriptorPoolCreateFlags  flags;
                1u,                                                 // deUint32                     maxSets;
                deUint32(descriptorTypes.size()),                   // deUint32                     poolSizeCount;
                descriptorTypes.data()                              // const VkDescriptorPoolSize*  pPoolSizes;
            };
            descriptorPool = createDescriptorPool(vk, *m_deviceGroup, &descriptorPoolParams);

            const VkDescriptorSetAllocateInfo descriptorSetParams =
            {
                VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
                DE_NULL,
                *descriptorPool,
                1u,
                &descriptorSetLayout.get(),
            };
            descriptorSet = allocateDescriptorSet(vk, *m_deviceGroup, &descriptorSetParams);

            const VkDescriptorBufferInfo uboDescriptorInfo =
            {
                uniformBuffer.get(),
                0,
                (VkDeviceSize)sizeof(drawColor)
            };
            const VkDescriptorBufferInfo sboDescriptorInfo =
            {
                sboBuffer.get(),
                0,
                (VkDeviceSize)sizeof(tessLevel)
            };
            const VkWriteDescriptorSet writeDescriptorSetUBO =
            {
                VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,  // VkStructureType                sType;
                DE_NULL,                                 // const void*                    pNext;
                *descriptorSet,                          // VkDescriptorSet                dstSet;
                0,                                       // deUint32                       dstBinding;
                0,                                       // deUint32                       dstArrayElement;
                1u,                                      // deUint32                       descriptorCount;
                VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,       // VkDescriptorType               descriptorType;
                (const VkDescriptorImageInfo*)DE_NULL,   // const VkDescriptorImageInfo*   pImageInfo;
                &uboDescriptorInfo,                      // const VkDescriptorBufferInfo*  pBufferInfo;
                (const VkBufferView*)DE_NULL             // const VkBufferView*            pTexelBufferView;
            };

            const VkWriteDescriptorSet writeDescriptorSetSBO =
            {
                VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,  // VkStructureType                sType;
                DE_NULL,                                 // const void*                    pNext;
                *descriptorSet,                          // VkDescriptorSet                dstSet;
                1,                                       // deUint32                       dstBinding;
                0,                                       // deUint32                       dstArrayElement;
                1u,                                      // deUint32                       descriptorCount;
                VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,       // VkDescriptorType               descriptorType;
                (const VkDescriptorImageInfo*)DE_NULL,   // const VkDescriptorImageInfo*   pImageInfo;
                &sboDescriptorInfo,                      // const VkDescriptorBufferInfo*  pBufferInfo;
                (const VkBufferView*)DE_NULL             // const VkBufferView*            pTexelBufferView;
            };
            writeDescriptorSets.push_back(writeDescriptorSetUBO);
            if (m_drawTessellatedSphere)
                writeDescriptorSets.push_back(writeDescriptorSetSBO);

            vk.updateDescriptorSets(*m_deviceGroup, deUint32(writeDescriptorSets.size()), writeDescriptorSets.data(), 0u, DE_NULL);
        }

        // Create Pipeline
        {
            Move<VkShaderModule> vertShaderModule;
            Move<VkShaderModule> tcssShaderModule;
            Move<VkShaderModule> tessShaderModule;
            Move<VkShaderModule> fragShaderModule;

            const VkDescriptorSetLayout descset = descriptorSetLayout.get();
            const VkPipelineLayoutCreateInfo pipelineLayoutParams =
            {
                VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,  // sType
                DE_NULL,                                        // pNext
                (vk::VkPipelineLayoutCreateFlags)0,             // flags
                1u,                                             // setLayoutCount
                &descset,                                       // pSetLayouts
                0u,                                             // pushConstantRangeCount
                DE_NULL,                                        // pPushConstantRanges
            };
            pipelineLayout = createPipelineLayout(vk, *m_deviceGroup, &pipelineLayoutParams);

            // Shaders
            vertShaderModule = createShaderModule(vk, *m_deviceGroup, m_context.getBinaryCollection().get("vert"), 0);
            fragShaderModule = createShaderModule(vk, *m_deviceGroup, m_context.getBinaryCollection().get("frag"), 0);

            if (m_drawTessellatedSphere)
            {
                tcssShaderModule = createShaderModule(vk, *m_deviceGroup, m_context.getBinaryCollection().get("tesc"), 0);
                tessShaderModule = createShaderModule(vk, *m_deviceGroup, m_context.getBinaryCollection().get("tese"), 0);
            }

            const std::vector<VkViewport> viewports (1, makeViewport(renderSize));
            const std::vector<VkRect2D>   scissors  (1, makeRect2D(renderSize));

            const VkPipelineRasterizationStateCreateInfo rasterParams =
            {
                VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,        // sType
                DE_NULL,                                                           // pNext
                0u,                                                                // flags
                VK_FALSE,                                                          // depthClampEnable
                VK_FALSE,                                                          // rasterizerDiscardEnable
                m_fillModeNonSolid ? VK_POLYGON_MODE_LINE : VK_POLYGON_MODE_FILL,  // polygonMode
                VK_CULL_MODE_NONE,                                                 // cullMode
                VK_FRONT_FACE_COUNTER_CLOCKWISE,                                   // frontFace
                VK_FALSE,                                                          // depthBiasEnable
                0.0f,                                                              // depthBiasConstantFactor
                0.0f,                                                              // depthBiasClamp
                0.0f,                                                              // depthBiasSlopeFactor
                1.0f,                                                              // lineWidth
            };

            const VkPrimitiveTopology topology = m_drawTessellatedSphere ? VK_PRIMITIVE_TOPOLOGY_PATCH_LIST : VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST;

            pipeline = makeGraphicsPipeline(vk,                        // const DeviceInterface&                         vk
                *m_deviceGroup,                                        // const VkDevice                                 device
                *pipelineLayout,                                       // const VkPipelineLayout                         pipelineLayout
                *vertShaderModule,                                     // const VkShaderModule                           vertexShaderModule
                m_drawTessellatedSphere ? *tcssShaderModule : DE_NULL, // const VkShaderModule                           tessellationControlModule
                m_drawTessellatedSphere ? *tessShaderModule : DE_NULL, // const VkShaderModule                           tessellationEvalModule
                DE_NULL,                                               // const VkShaderModule                           geometryShaderModule
                *fragShaderModule,                                     // const VkShaderModule                           fragmentShaderModule
                *renderPass,                                           // const VkRenderPass                             renderPass
                viewports,                                             // const std::vector<VkViewport>&                 viewports
                scissors,                                              // const std::vector<VkRect2D>&                   scissors
                topology,                                              // const VkPrimitiveTopology                      topology
                0u,                                                    // const deUint32                                 subpass
                3u,                                                    // const deUint32                                 patchControlPoints
                DE_NULL,                                               // const VkPipelineVertexInputStateCreateInfo*    vertexInputStateCreateInfo
                &rasterParams);                                        // const VkPipelineRasterizationStateCreateInfo*  rasterizationStateCreateInfo
        }

        // Create Framebuffer
        {
            const VkImageViewCreateInfo colorAttViewParams =
            {
                VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,  // sType
                DE_NULL,                                   // pNext
                0u,                                        // flags
                *renderImage,                              // image
                VK_IMAGE_VIEW_TYPE_2D,                     // viewType
                colorFormat,                               // format
                {
                    VK_COMPONENT_SWIZZLE_R,
                    VK_COMPONENT_SWIZZLE_G,
                    VK_COMPONENT_SWIZZLE_B,
                    VK_COMPONENT_SWIZZLE_A
                },                                         // components
                {
                    VK_IMAGE_ASPECT_COLOR_BIT,             // aspectMask
                    0u,                                    // baseMipLevel
                    1u,                                    // levelCount
                    0u,                                    // baseArrayLayer
                    1u,                                    // layerCount
                },                                         // subresourceRange
            };
            colorAttView = createImageView(vk, *m_deviceGroup, &colorAttViewParams);

            const VkFramebufferCreateInfo framebufferParams =
            {
                VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,  // sType
                DE_NULL,                                    // pNext
                0u,                                         // flags
                *renderPass,                                // renderPass
                1u,                                         // attachmentCount
                &*colorAttView,                             // pAttachments
                renderSize.x(),                             // width
                renderSize.y(),                             // height
                1u,                                         // layers
            };
            framebuffer = createFramebuffer(vk, *m_deviceGroup, &framebufferParams);
        }

        // Create Command buffer
        {
            const VkCommandPoolCreateInfo cmdPoolParams =
            {
                VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,       // sType
                DE_NULL,                                          // pNext
                VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT,  // flags
                queueFamilyIndex,                                 // queueFamilyIndex
            };
            cmdPool = createCommandPool(vk, *m_deviceGroup, &cmdPoolParams);

            const VkCommandBufferAllocateInfo cmdBufParams =
            {
                VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,  // sType
                DE_NULL,                                         // pNext
                *cmdPool,                                        // pool
                VK_COMMAND_BUFFER_LEVEL_PRIMARY,                 // level
                1u,                                              // bufferCount
            };
            cmdBuffer = allocateCommandBuffer(vk, *m_deviceGroup, &cmdBufParams);
        }

        // Do a layout transition for renderImage
        {
            beginCommandBuffer(vk, *cmdBuffer);
            const VkImageMemoryBarrier colorAttBarrier =
            {
                VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,    // sType
                DE_NULL,                                   // pNext
                0u,                                        // srcAccessMask
                (VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
                 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT),    // dstAccessMask
                VK_IMAGE_LAYOUT_UNDEFINED,                 // oldLayout
                VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,  // newLayout
                queueFamilyIndex,                          // srcQueueFamilyIndex
                queueFamilyIndex,                          // dstQueueFamilyIndex
                *renderImage,                              // image
                {
                    VK_IMAGE_ASPECT_COLOR_BIT,             // aspectMask
                    0u,                                    // baseMipLevel
                    1u,                                    // levelCount
                    0u,                                    // baseArrayLayer
                    1u,                                    // layerCount
                }                                          // subresourceRange
            };
            vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &colorAttBarrier);

            endCommandBuffer(vk, *cmdBuffer);
            const deUint32 deviceMask = (1 << firstDeviceID) | (1 << secondDeviceID);
            SubmitBufferAndWaitForIdle(vk, cmdBuffer.get(), deviceMask);
        }

        // Bind renderImage across devices for SFR
        if ((m_testMode & TEST_MODE_SFR) && (m_physicalDeviceCount > 1))
        {
            if (m_usePeerFetch && !isPeerFetchAllowed(memoryTypeNdx, firstDeviceID, secondDeviceID))
                TCU_THROW(NotSupportedError, "Peer texture reads are not supported.");

            // Check if peer memory can be used as the source of a copy command in case of SFR bindings; always allowed in case of 1 device
            VkPeerMemoryFeatureFlags peerMemFeatures;
            const VkPhysicalDeviceMemoryProperties deviceMemProps = getPhysicalDeviceMemoryProperties(m_context.getInstanceInterface(), m_physicalDevices[secondDeviceID]);
            vk.getDeviceGroupPeerMemoryFeatures(*m_deviceGroup, deviceMemProps.memoryTypes[memoryTypeNdx].heapIndex, firstDeviceID, secondDeviceID, &peerMemFeatures);
            isPeerMemAsCopySrcAllowed = (peerMemFeatures & VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT);

            VkRect2D zeroRect =
            {
                {
                    0,  // VkOffset2D.x
                    0,  // VkOffset2D.y
                },
                {
                    0,  // VkExtent2D.width
                    0,  // VkExtent2D.height
                }
            };
            vector<VkRect2D> sfrRects;
            for (deUint32 i = 0; i < m_physicalDeviceCount*m_physicalDeviceCount; i++)
                sfrRects.push_back(zeroRect);

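            // pSFRRects is an NxN array: roughly, element [i*N + j] is the
            // region of the image that device i uses from memory instance j
            // (per VkBindImageMemoryDeviceGroupInfo in the device-group extension).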
            if (m_physicalDeviceCount == 1u)
            {
                sfrRects[0].extent.width  = (deInt32)renderSize.x();
                sfrRects[0].extent.height = (deInt32)renderSize.y();
            }
            else
            {
                // Split into 2 vertical halves
                sfrRects[firstDeviceID * m_physicalDeviceCount + firstDeviceID].extent.width  = (deInt32)renderSize.x() / 2;
                sfrRects[firstDeviceID * m_physicalDeviceCount + firstDeviceID].extent.height = (deInt32)renderSize.y();
                sfrRects[firstDeviceID * m_physicalDeviceCount + secondDeviceID]              = sfrRects[firstDeviceID * m_physicalDeviceCount + firstDeviceID];
                sfrRects[firstDeviceID * m_physicalDeviceCount + secondDeviceID].offset.x     = (deInt32)renderSize.x() / 2;
                sfrRects[secondDeviceID * m_physicalDeviceCount + firstDeviceID]              = sfrRects[firstDeviceID * m_physicalDeviceCount + firstDeviceID];
                sfrRects[secondDeviceID * m_physicalDeviceCount + secondDeviceID]             = sfrRects[firstDeviceID * m_physicalDeviceCount + secondDeviceID];
            }

            VkBindImageMemoryDeviceGroupInfo devGroupBindInfo =
            {
                VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO,  // sType
                DE_NULL,                                                // pNext
                0u,                                                     // deviceIndexCount
                DE_NULL,                                                // pDeviceIndices
                m_physicalDeviceCount*m_physicalDeviceCount,            // SFRRectCount
                &sfrRects[0],                                           // pSFRRects
            };

            VkBindImageMemoryInfo bindInfo =
            {
                VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO,  // sType
                &devGroupBindInfo,                         // pNext
                *renderImage,                              // image
                imageMemory.get(),                         // memory
                0u,                                        // memoryOffset
            };
            VK_CHECK(vk.bindImageMemory2(*m_deviceGroup, 1, &bindInfo));
        }

        // Begin recording
        beginCommandBuffer(vk, *cmdBuffer);

        // Update buffers
        {
            const VkBufferMemoryBarrier stagingVertexBufferUpdateBarrier =
            {
                VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,  // VkStructureType  sType;
                DE_NULL,                                  // const void*      pNext;
                VK_ACCESS_HOST_WRITE_BIT,                 // VkAccessFlags    srcAccessMask;
                VK_ACCESS_TRANSFER_READ_BIT,              // VkAccessFlags    dstAccessMask;
                VK_QUEUE_FAMILY_IGNORED,                  // deUint32         srcQueueFamilyIndex;
                VK_QUEUE_FAMILY_IGNORED,                  // deUint32         dstQueueFamilyIndex;
                stagingVertexBuffer.get(),                // VkBuffer         buffer;
                0u,                                       // VkDeviceSize     offset;
                verticesSize                              // VkDeviceSize     size;
            };

            const VkBufferMemoryBarrier vertexBufferUpdateBarrier =
            {
                VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,  // VkStructureType  sType;
                DE_NULL,                                  // const void*      pNext;
                VK_ACCESS_TRANSFER_WRITE_BIT,             // VkAccessFlags    srcAccessMask;
                VK_ACCESS_VERTEX_ATTRIBUTE_READ_BIT,      // VkAccessFlags    dstAccessMask;
                VK_QUEUE_FAMILY_IGNORED,                  // deUint32         srcQueueFamilyIndex;
                VK_QUEUE_FAMILY_IGNORED,                  // deUint32         dstQueueFamilyIndex;
                vertexBuffer.get(),                       // VkBuffer         buffer;
                0u,                                       // VkDeviceSize     offset;
                verticesSize                              // VkDeviceSize     size;
            };

            const VkBufferMemoryBarrier stagingIndexBufferUpdateBarrier =
            {
                VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,  // VkStructureType  sType;
                DE_NULL,                                  // const void*      pNext;
                VK_ACCESS_HOST_WRITE_BIT,                 // VkAccessFlags    srcAccessMask;
                VK_ACCESS_TRANSFER_READ_BIT,              // VkAccessFlags    dstAccessMask;
                VK_QUEUE_FAMILY_IGNORED,                  // deUint32         srcQueueFamilyIndex;
                VK_QUEUE_FAMILY_IGNORED,                  // deUint32         dstQueueFamilyIndex;
                stagingIndexBuffer.get(),                 // VkBuffer         buffer;
                0u,                                       // VkDeviceSize     offset;
                indicesSize                               // VkDeviceSize     size;
            };

            const VkBufferMemoryBarrier indexBufferUpdateBarrier =
            {
                VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,  // VkStructureType  sType;
                DE_NULL,                                  // const void*      pNext;
                VK_ACCESS_TRANSFER_WRITE_BIT,             // VkAccessFlags    srcAccessMask;
                VK_ACCESS_INDEX_READ_BIT,                 // VkAccessFlags    dstAccessMask;
                VK_QUEUE_FAMILY_IGNORED,                  // deUint32         srcQueueFamilyIndex;
                VK_QUEUE_FAMILY_IGNORED,                  // deUint32         dstQueueFamilyIndex;
                indexBuffer.get(),                        // VkBuffer         buffer;
                0u,                                       // VkDeviceSize     offset;
                indicesSize                               // VkDeviceSize     size;
            };

            const VkBufferMemoryBarrier stagingUboBufferUpdateBarrier =
            {
                VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,  // VkStructureType  sType;
                DE_NULL,                                  // const void*      pNext;
                VK_ACCESS_HOST_WRITE_BIT,                 // VkAccessFlags    srcAccessMask;
                VK_ACCESS_TRANSFER_READ_BIT,              // VkAccessFlags    dstAccessMask;
                VK_QUEUE_FAMILY_IGNORED,                  // deUint32         srcQueueFamilyIndex;
                VK_QUEUE_FAMILY_IGNORED,                  // deUint32         dstQueueFamilyIndex;
                stagingUniformBuffer.get(),               // VkBuffer         buffer;
                0u,                                       // VkDeviceSize     offset;
                sizeof(drawColor)                         // VkDeviceSize     size;
            };

            const VkBufferMemoryBarrier uboUpdateBarrier =
            {
                VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,  // VkStructureType  sType;
                DE_NULL,                                  // const void*      pNext;
                VK_ACCESS_TRANSFER_WRITE_BIT,             // VkAccessFlags    srcAccessMask;
                VK_ACCESS_UNIFORM_READ_BIT,               // VkAccessFlags    dstAccessMask;
                VK_QUEUE_FAMILY_IGNORED,                  // deUint32         srcQueueFamilyIndex;
                VK_QUEUE_FAMILY_IGNORED,                  // deUint32         dstQueueFamilyIndex;
                uniformBuffer.get(),                      // VkBuffer         buffer;
                0u,                                       // VkDeviceSize     offset;
                sizeof(drawColor)                         // VkDeviceSize     size;
            };

            vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &stagingVertexBufferUpdateBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL);
            VkBufferCopy vertexBufferCopy = { 0u, 0u, verticesSize };
            vk.cmdCopyBuffer(*cmdBuffer, stagingVertexBuffer.get(), vertexBuffer.get(), 1u, &vertexBufferCopy);
            vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &vertexBufferUpdateBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL);

            vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &stagingIndexBufferUpdateBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL);
            VkBufferCopy indexBufferCopy = { 0u, 0u, indicesSize };
            vk.cmdCopyBuffer(*cmdBuffer, stagingIndexBuffer.get(), indexBuffer.get(), 1u, &indexBufferCopy);
            vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_VERTEX_INPUT_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &indexBufferUpdateBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL);

            vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &stagingUboBufferUpdateBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL);
            VkBufferCopy uboBufferCopy = { 0u, 0u, sizeof(drawColor) };
            vk.cmdCopyBuffer(*cmdBuffer, stagingUniformBuffer.get(), uniformBuffer.get(), 1u, &uboBufferCopy);
            vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &uboUpdateBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL);

            if (m_drawTessellatedSphere)
            {
                const VkBufferMemoryBarrier stagingsboUpdateBarrier =
                {
                    VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,  // VkStructureType  sType;
                    DE_NULL,                                  // const void*      pNext;
                    VK_ACCESS_HOST_WRITE_BIT,                 // VkAccessFlags    srcAccessMask;
                    VK_ACCESS_TRANSFER_READ_BIT,              // VkAccessFlags    dstAccessMask;
                    VK_QUEUE_FAMILY_IGNORED,                  // deUint32         srcQueueFamilyIndex;
                    VK_QUEUE_FAMILY_IGNORED,                  // deUint32         dstQueueFamilyIndex;
                    stagingSboBuffer.get(),                   // VkBuffer         buffer;
                    0u,                                       // VkDeviceSize     offset;
                    sizeof(tessLevel)                         // VkDeviceSize     size;
                };

                const VkBufferMemoryBarrier sboUpdateBarrier =
                {
                    VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,  // VkStructureType  sType;
                    DE_NULL,                                  // const void*      pNext;
                    VK_ACCESS_TRANSFER_WRITE_BIT,             // VkAccessFlags    srcAccessMask;
                    VK_ACCESS_SHADER_READ_BIT,                // VkAccessFlags    dstAccessMask;
                    VK_QUEUE_FAMILY_IGNORED,                  // deUint32         srcQueueFamilyIndex;
                    VK_QUEUE_FAMILY_IGNORED,                  // deUint32         dstQueueFamilyIndex;
                    sboBuffer.get(),                          // VkBuffer         buffer;
                    0u,                                       // VkDeviceSize     offset;
                    sizeof(tessLevel)                         // VkDeviceSize     size;
                };

                vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &stagingsboUpdateBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL);
                VkBufferCopy sboBufferCopy = { 0u, 0u, sizeof(tessLevel) };
                vk.cmdCopyBuffer(*cmdBuffer, stagingSboBuffer.get(), sboBuffer.get(), 1u, &sboBufferCopy);
                vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &sboUpdateBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL);
            }

            vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *pipeline);
            vk.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_GRAPHICS, *pipelineLayout, 0u, 1, &*descriptorSet, 0u, DE_NULL);
            {
                const VkDeviceSize bindingOffset = 0;
                vk.cmdBindVertexBuffers(*cmdBuffer, 0u, 1u, &vertexBuffer.get(), &bindingOffset);
                vk.cmdBindIndexBuffer(*cmdBuffer, *indexBuffer, 0, VK_INDEX_TYPE_UINT32);
            }
        }

        // Begin renderpass
        {
            const VkClearValue clearValue = makeClearValueColorF32(
                clearColor[0],
                clearColor[1],
                clearColor[2],
                clearColor[3]);

            VkRect2D zeroRect = { { 0, 0, }, { 0, 0, } };
            vector<VkRect2D> renderAreas;
            for (deUint32 i = 0; i < m_physicalDeviceCount; i++)
                renderAreas.push_back(zeroRect);

            // Render completely if there is only 1 device
            if (m_physicalDeviceCount == 1u)
            {
                renderAreas[0].extent.width  = (deInt32)renderSize.x();
                renderAreas[0].extent.height = (deInt32)renderSize.y();
            }
            else
            {
                // Split into 2 vertical halves
                renderAreas[firstDeviceID].extent.width  = (deInt32)renderSize.x() / 2;
                renderAreas[firstDeviceID].extent.height = (deInt32)renderSize.y();
                renderAreas[secondDeviceID]              = renderAreas[firstDeviceID];
                renderAreas[secondDeviceID].offset.x     = (deInt32)renderSize.x() / 2;
            }

            const VkDeviceGroupRenderPassBeginInfo deviceGroupRPBeginInfo =
            {
                VK_STRUCTURE_TYPE_DEVICE_GROUP_RENDER_PASS_BEGIN_INFO,
                DE_NULL,
                (deUint32)((1 << m_physicalDeviceCount) - 1),
                m_physicalDeviceCount,
                &renderAreas[0]
            };

            const VkRenderPassBeginInfo passBeginParams =
            {
                VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,                          // sType
                (m_testMode & TEST_MODE_SFR) ? &deviceGroupRPBeginInfo : DE_NULL,  // pNext
                *renderPass,                                                       // renderPass
                *framebuffer,                                                      // framebuffer
                {
                    { 0, 0 },
                    { renderSize.x(), renderSize.y() }
                },                                                                 // renderArea
                1u,                                                                // clearValueCount
                &clearValue,                                                       // pClearValues
            };
            vk.cmdBeginRenderPass(*cmdBuffer, &passBeginParams, VK_SUBPASS_CONTENTS_INLINE);
        }
1365 // Draw
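// vkCmdSetDeviceMask restricts subsequent commands to the selected devices:
// AFR records the whole draw on secondDeviceID only, while SFR records it on
// both devices and relies on the per-device render areas to split the work.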
1366 if (m_testMode & TEST_MODE_AFR)
1367 {
1368 vk.cmdSetDeviceMask(*cmdBuffer, 1 << secondDeviceID);
1369 vk.cmdDrawIndexed(*cmdBuffer, numIndices, 1u, 0, 0, 0);
1370
1371 }
1372 else
1373 {
1374 vk.cmdSetDeviceMask(*cmdBuffer, ((1 << firstDeviceID) | (1 << secondDeviceID)));
1375 vk.cmdDrawIndexed(*cmdBuffer, numIndices, 1u, 0, 0, 0);
1376 }
1377 endRenderPass(vk, *cmdBuffer);
1378
1379 // Change image layout for copy
1380 {
1381 const VkImageMemoryBarrier renderFinishBarrier =
1382 {
1383 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // sType
1384 DE_NULL, // pNext
1385 VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, // srcAccessMask
1386 VK_ACCESS_TRANSFER_READ_BIT, // dstAccessMask
1387 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // oldLayout
1388 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, // newLayout
1389 queueFamilyIndex, // srcQueueFamilyIndex
1390 queueFamilyIndex, // dstQueueFamilyIndex
1391 *renderImage, // image
1392 {
1393 VK_IMAGE_ASPECT_COLOR_BIT, // aspectMask
1394 0u, // baseMipLevel
1395 1u, // levelCount
1396 0u, // baseArrayLayer
1397 1u, // layerCount
1398 } // subresourceRange
1399 };
1400 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &renderFinishBarrier);
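// Like any command, this barrier only executes on the devices in the current
// device mask; it transitions each such device's instance of the attachment to
// TRANSFER_SRC_OPTIMAL for the copies below.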
1401 }
1402
1403 endCommandBuffer(vk, *cmdBuffer);
1404
1405 // Submit & wait for completion
1406 {
1407 const deUint32 deviceMask = (1 << firstDeviceID) | (1 << secondDeviceID);
1408 SubmitBufferAndWaitForIdle(vk, cmdBuffer.get(), deviceMask);
1409 }
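// The deviceMask passed here becomes the command buffer's initial device mask
// for the submission (via VkDeviceGroupSubmitInfo); the vkCmdSetDeviceMask
// calls recorded above then narrow execution per command.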
1410
1411 // Copy the image rendered by secondDeviceID: always needed for AFR, and needed for SFR when peer memory is not allowed as a copy source
1412 if ((m_physicalDeviceCount > 1) && ((m_testMode & TEST_MODE_AFR) || (!isPeerMemAsCopySrcAllowed)))
1413 {
1414 Move<VkImage> peerImage;
1415
1416 // Create and bind peer image
1417 {
1418 const VkImageCreateInfo peerImageParams =
1419 {
1420 VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO, // sType
1421 DE_NULL, // pNext
1422 VK_IMAGE_CREATE_ALIAS_BIT, // flags
1423 VK_IMAGE_TYPE_2D, // imageType
1424 colorFormat, // format
1425 { renderSize.x(), renderSize.y(), 1 }, // extent
1426 1u, // mipLevels
1427 1u, // arrayLayers
1428 VK_SAMPLE_COUNT_1_BIT, // samples
1429 VK_IMAGE_TILING_OPTIMAL, // tiling
1430 imageUsageFlag, // usage
1431 VK_SHARING_MODE_EXCLUSIVE, // sharingMode
1432 1u, // queueFamilyIndexCount
1433 &queueFamilyIndex, // pQueueFamilyIndices
1434 VK_IMAGE_LAYOUT_UNDEFINED, // initialLayout
1435 };
1436 peerImage = createImage(vk, *m_deviceGroup, &peerImageParams);
1437
1438 VkBindImageMemoryDeviceGroupInfo devGroupBindInfo =
1439 {
1440 VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_DEVICE_GROUP_INFO, // sType
1441 DE_NULL, // pNext
1442 m_physicalDeviceCount, // deviceIndexCount
1443 &deviceIndices[0], // pDeviceIndices
1444 0u, // splitInstanceBindRegionCount
1445 DE_NULL, // pSplitInstanceBindRegions
1446 };
1447
1448 VkBindImageMemoryInfo bindInfo =
1449 {
1450 VK_STRUCTURE_TYPE_BIND_IMAGE_MEMORY_INFO, // sType
1451 &devGroupBindInfo, // pNext
1452 peerImage.get(), // image
1453 imageMemory.get(), // memory
1454 0u, // memoryOffset
1455 };
1456 VK_CHECK(vk.bindImageMemory2(*m_deviceGroup, 1, &bindInfo));
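// VK_IMAGE_CREATE_ALIAS_BIT lets peerImage share the memory already bound to
// renderImage. With VkBindImageMemoryDeviceGroupInfo, pDeviceIndices selects,
// for each physical device, which device's instance of the allocation gets
// bound, so one device can address another device's copy of the render target
// (peer memory).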
1457 }
1458
1459 // Copy image data from secondDeviceID into firstDeviceID's memory through the peer image
1460 {
1461 // Change layout on firstDeviceID
1462 {
1463 const VkImageMemoryBarrier preCopyBarrier =
1464 {
1465 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
1466 DE_NULL, // const void* pNext;
1467 0, // VkAccessFlags srcAccessMask;
1468 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags dstAccessMask;
1469 VK_IMAGE_LAYOUT_UNDEFINED, // VkImageLayout oldLayout;
1470 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // VkImageLayout newLayout;
1471 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
1472 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
1473 *renderImage, // VkImage image;
1474 { // VkImageSubresourceRange subresourceRange;
1475 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
1476 0u, // deUint32 baseMipLevel;
1477 1u, // deUint32 levelCount;
1478 0u, // deUint32 baseArrayLayer;
1479 1u // deUint32 layerCount;
1480 }
1481 };
1482
1483 beginCommandBuffer(vk, *cmdBuffer);
1484 vk.cmdSetDeviceMask(*cmdBuffer, 1 << firstDeviceID);
1485 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1u, &preCopyBarrier);
1486 endCommandBuffer(vk, *cmdBuffer);
1487
1488 const deUint32 deviceMask = 1 << firstDeviceID;
1489 SubmitBufferAndWaitForIdle(vk, cmdBuffer.get(), deviceMask);
1490 }
1491
1492 // Copy Image from secondDeviceID to firstDeviceID
1493 {
1494 // AFR: Copy entire image from secondDeviceID
1495 // SFR: Copy the right half of image from secondDeviceID to firstDeviceID, so that the copy
1496 // to a buffer below (for checking) does not require VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT
1497 deInt32 imageOffsetX = (m_testMode & TEST_MODE_AFR) ? 0 : renderSize.x() / 2;
1498 deUint32 imageExtentX = (m_testMode & TEST_MODE_AFR) ? (deUint32)renderSize.x() : (deUint32)renderSize.x() / 2;
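// srcOffset and dstOffset match because peerImage aliases renderImage's memory
// layout; only the copied region differs between AFR (full image) and SFR
// (right half).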
1499
1500 const VkImageCopy imageCopy =
1501 {
1502 {
1503 VK_IMAGE_ASPECT_COLOR_BIT,
1504 0, // mipLevel
1505 0, // arrayLayer
1506 1 // layerCount
1507 },
1508 { imageOffsetX, 0, 0 },
1509 {
1510 VK_IMAGE_ASPECT_COLOR_BIT,
1511 0, // mipLevel
1512 0, // arrayLayer
1513 1 // layerCount
1514 },
1515 { imageOffsetX, 0, 0 },
1516 {
1517 imageExtentX,
1518 (deUint32)renderSize.y(),
1519 1u
1520 }
1521 };
1522
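// Writing through a peer binding only needs VK_PEER_MEMORY_FEATURE_COPY_DST_BIT,
// which the spec guarantees for every device pair; reading peer memory as a
// copy source (VK_PEER_MEMORY_FEATURE_COPY_SRC_BIT) is optional, which is why
// this path copies into firstDeviceID's memory instead.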
1523 beginCommandBuffer(vk, *cmdBuffer);
1524 vk.cmdSetDeviceMask(*cmdBuffer, 1 << secondDeviceID);
1525 vk.cmdCopyImage(*cmdBuffer, *renderImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *peerImage, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1, &imageCopy);
1526 endCommandBuffer(vk, *cmdBuffer);
1527
1528 const deUint32 deviceMask = 1 << secondDeviceID;
1529 SubmitBufferAndWaitForIdle(vk, cmdBuffer.get(), deviceMask);
1530 }
1531
1532 // Change layout back on firstDeviceID
1533 {
1534 const VkImageMemoryBarrier postCopyBarrier =
1535 {
1536 VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER, // VkStructureType sType;
1537 DE_NULL, // const void* pNext;
1538 VK_ACCESS_TRANSFER_WRITE_BIT, // VkAccessFlags srcAccessMask;
1539 VK_ACCESS_TRANSFER_READ_BIT, // VkAccessFlags dstAccessMask;
1540 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, // VkImageLayout oldLayout;
1541 VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, // VkImageLayout newLayout;
1542 VK_QUEUE_FAMILY_IGNORED, // deUint32 srcQueueFamilyIndex;
1543 VK_QUEUE_FAMILY_IGNORED, // deUint32 dstQueueFamilyIndex;
1544 *renderImage, // VkImage image;
1545 { // VkImageSubresourceRange subresourceRange;
1546 VK_IMAGE_ASPECT_COLOR_BIT, // VkImageAspectFlags aspectMask;
1547 0u, // deUint32 baseMipLevel;
1548 1u, // deUint32 levelCount;
1549 0u, // deUint32 baseArrayLayer;
1550 1u // deUint32 layerCount;
1551 }
1552 };
1553
1554 beginCommandBuffer(vk, *cmdBuffer);
1555 vk.cmdSetDeviceMask(*cmdBuffer, 1 << firstDeviceID);
1556 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1u, &postCopyBarrier);
1557 endCommandBuffer(vk, *cmdBuffer);
1558
1559 const deUint32 deviceMask = 1 << firstDeviceID;
1560 SubmitBufferAndWaitForIdle(vk, cmdBuffer.get(), deviceMask);
1561 }
1562 }
1563 }
1564
1565 // copy image to read buffer for checking
1566 {
1567 const VkDeviceSize imageSizeBytes = (VkDeviceSize)(sizeof(deUint32) * renderSize.x() * renderSize.y());
1568 const VkBufferCreateInfo readImageBufferParams =
1569 {
1570 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // sType
1571 DE_NULL, // pNext
1572 (VkBufferCreateFlags)0u, // flags
1573 imageSizeBytes, // size
1574 VK_BUFFER_USAGE_TRANSFER_DST_BIT, // usage
1575 VK_SHARING_MODE_EXCLUSIVE, // sharingMode
1576 1u, // queueFamilyIndexCount
1577 &queueFamilyIndex, // pQueueFamilyIndices
1578 };
1579 const Unique<VkBuffer> readImageBuffer(createBuffer(vk, *m_deviceGroup, &readImageBufferParams));
1580 const UniquePtr<Allocation> readImageBufferMemory(memAlloc.allocate(getBufferMemoryRequirements(vk, *m_deviceGroup, *readImageBuffer), MemoryRequirement::HostVisible));
1581 VK_CHECK(vk.bindBufferMemory(*m_deviceGroup, *readImageBuffer, readImageBufferMemory->getMemory(), readImageBufferMemory->getOffset()));
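// The image-to-buffer copy below is submitted to firstDeviceID only, so the
// host reads the result from that device's instance of this host-visible
// allocation.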
1582
1583 beginCommandBuffer(vk, *cmdBuffer);
1584
1585 // Copy image to buffer
1586 {
1587 const VkBufferImageCopy copyParams =
1588 {
1589 (VkDeviceSize)0u, // bufferOffset
1590 renderSize.x(), // bufferRowLength
1591 renderSize.y(), // bufferImageHeight
1592 {
1593 VK_IMAGE_ASPECT_COLOR_BIT, // aspectMask
1594 0u, // mipLevel
1595 0u, // baseArrayLayer
1596 1u, // layerCount
1597 }, // imageSubresource
1598 { 0, 0, 0 }, // imageOffset
1599 {
1600 renderSize.x(),
1601 renderSize.y(),
1602 1u
1603 } // imageExtent
1604 };
1605
1606 // Use a different image binding in SFR when peer memory as copy source is not allowed
1607 vk.cmdCopyImageToBuffer(*cmdBuffer, isPeerMemAsCopySrcAllowed ? *renderImage : *readImage, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *readImageBuffer, 1u, &copyParams);
1608
1609 const VkBufferMemoryBarrier copyFinishBarrier =
1610 {
1611 VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, // sType
1612 DE_NULL, // pNext
1613 VK_ACCESS_TRANSFER_WRITE_BIT, // srcAccessMask
1614 VK_ACCESS_HOST_READ_BIT, // dstAccessMask
1615 queueFamilyIndex, // srcQueueFamilyIndex
1616 queueFamilyIndex, // dstQueueFamilyIndex
1617 *readImageBuffer, // buffer
1618 0u, // offset
1619 imageSizeBytes // size
1620 };
1621 vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &copyFinishBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL);
1622 }
1623 endCommandBuffer(vk, *cmdBuffer);
1624
1625 // Submit & wait for completion
1626 {
1627 const deUint32 deviceMask = 1 << firstDeviceID;
1628 SubmitBufferAndWaitForIdle(vk, cmdBuffer.get(), deviceMask);
1629 }
1630
1631 // Read results and check against reference image
1632 if (m_drawTessellatedSphere)
1633 {
1634 const tcu::TextureFormat tcuFormat = vk::mapVkFormat(colorFormat);
1635 const tcu::ConstPixelBufferAccess resultAccess(tcuFormat, renderSize.x(), renderSize.y(), 1, readImageBufferMemory->getHostPtr());
1636 invalidateAlloc(vk, *m_deviceGroup, *readImageBufferMemory);
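// Invalidating the mapped allocation makes the device writes visible to the
// host before the comparison reads through resultAccess (required for
// non-coherent host-visible memory).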
1637
1638 tcu::TextureLevel referenceImage;
1639 string refImage = m_fillModeNonSolid ? "vulkan/data/device_group/sphere.png" : "vulkan/data/device_group/spherefilled.png";
1640 tcu::ImageIO::loadPNG(referenceImage, m_context.getTestContext().getArchive(), refImage.c_str());
1641 iterateResultSuccess = tcu::fuzzyCompare(m_context.getTestContext().getLog(), "ImageComparison", "Image Comparison",
1642 referenceImage.getAccess(), resultAccess, 0.001f, tcu::COMPARE_LOG_RESULT);
1643 }
1644 else
1645 {
1646 const tcu::TextureFormat tcuFormat = vk::mapVkFormat(colorFormat);
1647 const tcu::ConstPixelBufferAccess resultAccess(tcuFormat, renderSize.x(), renderSize.y(), 1, readImageBufferMemory->getHostPtr());
1648 invalidateAlloc(vk, *m_deviceGroup, *readImageBufferMemory);
1649
1650 // Render reference and compare
1651 {
1652 tcu::TextureLevel refImage(tcuFormat, (deInt32)renderSize.x(), (deInt32)renderSize.y());
1653 const tcu::UVec4 threshold(0u);
1654 const tcu::IVec3 posDeviation(1, 1, 0);
1655
1656 tcu::clear(refImage.getAccess(), clearColor);
1657 renderReferenceTriangle(refImage.getAccess(), triVertices);
1658
1659 iterateResultSuccess = tcu::intThresholdPositionDeviationCompare(m_context.getTestContext().getLog(),
1660 "ComparisonResult",
1661 "Image comparison result",
1662 refImage.getAccess(),
1663 resultAccess,
1664 threshold,
1665 posDeviation,
1666 false,
1667 tcu::COMPARE_LOG_RESULT);
1668 }
1669 }
1670 }
1671
1672 if (!iterateResultSuccess)
1673 return tcu::TestStatus::fail("Image comparison failed");
1674 }
1675
1676 return tcu::TestStatus(QP_TEST_RESULT_PASS, "Device group verification passed");
1677 }
1678
1679 template<class Instance>
1680 class DeviceGroupTestCase : public TestCase
1681 {
1682 public:
1683 DeviceGroupTestCase (tcu::TestContext& context,
1684 const char* name,
1685 const char* description,
1686 deUint32 mode)
1687 : TestCase(context, name, description)
1688 , m_testMode (mode)
1689 {}
1690
1691 private:
1692
1693 deUint32 m_testMode;
1694
1695 TestInstance* createInstance (Context& context) const
1696 {
1697 return new Instance(context, m_testMode);
1698 }
1699
1700 void initPrograms (vk::SourceCollections& programCollection) const
1701 {
1702 programCollection.glslSources.add("vert") << glu::VertexSource("#version 430\n"
1703 "layout(location = 0) in vec4 in_Position;\n"
1704 "out gl_PerVertex { vec4 gl_Position; float gl_PointSize; };\n"
1705 "void main() {\n"
1706 " gl_Position = in_Position;\n"
1707 " gl_PointSize = 1.0;\n"
1708 "}\n");
1709
1710 if (m_testMode & TEST_MODE_TESSELLATION)
1711 {
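// The control shader reads the tessellation level from the SBO that the
// draw-time buffer copy updates; the evaluation shader then redistributes the
// tessellated vertices with a tangent warp and normalizes them onto the unit
// sphere.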
1712 programCollection.glslSources.add("tesc") << glu::TessellationControlSource("#version 450\n"
1713 "#extension GL_EXT_tessellation_shader : require\n"
1714 "layout(vertices=3) out;\n"
1715 "layout(set=0, binding=1) buffer tessLevel { \n"
1716 " float tessLvl;\n"
1717 "};\n"
1718 "void main()\n"
1719 "{\n"
1720 " gl_out[gl_InvocationID].gl_Position = gl_in[gl_InvocationID].gl_Position;\n"
1721 " if (gl_InvocationID == 0) {\n"
1722 " for (int i = 0; i < 4; i++)\n"
1723 " gl_TessLevelOuter[i] = tessLvl;\n"
1724 " for (int i = 0; i < 2; i++)\n"
1725 " gl_TessLevelInner[i] = tessLvl;\n"
1726 " }\n"
1727 "}\n");
1728
1729 programCollection.glslSources.add("tese") << glu::TessellationEvaluationSource("#version 450\n"
1730 "#extension GL_EXT_tessellation_shader : require\n"
1731 "layout(triangles) in;\n"
1732 "layout(equal_spacing) in;\n"
1733 "layout(ccw) in;\n"
1734 "void main()\n"
1735 "{\n"
1736 " vec4 pos = vec4(0, 0, 0, 0);\n"
1737 " vec3 tessCoord = gl_TessCoord.xyz;\n"
1738 " pos += tessCoord.z * gl_in[0].gl_Position;\n"
1739 " pos += tessCoord.x * gl_in[1].gl_Position;\n"
1740 " pos += tessCoord.y * gl_in[2].gl_Position;\n"
1741 " vec3 sign = sign(pos.xyz);\n"
1742 " pos.xyz = 0.785398 - abs(pos.xyz) * 1.5707963;\n"
1743 " pos.xyz = (1 - tan(pos.xyz))/2.0;\n"
1744 " pos.xyz = (sign * pos.xyz) / length(pos.xyz);\n"
1745 " gl_Position = pos;\n"
1746 "}\n");
1747 }
1748
1749 programCollection.glslSources.add("frag") << glu::FragmentSource("#version 430\n"
1750 "layout(location = 0) out vec4 out_FragColor;\n"
1751 "layout(std140, set=0, binding=0) uniform bufferData { \n"
1752 " vec4 color;\n"
1753 "};\n"
1754 "void main()\n"
1755 "{\n"
1756 " out_FragColor = color;\n"
1757 "}\n");
1758 }
1759 };
1760
1761 } // anonymous
1762
1763 class DeviceGroupTestRendering : public tcu::TestCaseGroup
1764 {
1765 public:
1766 DeviceGroupTestRendering (tcu::TestContext& testCtx);
1767 ~DeviceGroupTestRendering (void) {}
1768 void init(void);
1769
1770 private:
1771 DeviceGroupTestRendering (const DeviceGroupTestRendering& other);
1772 DeviceGroupTestRendering& operator= (const DeviceGroupTestRendering& other);
1773 };
1774
1775 DeviceGroupTestRendering::DeviceGroupTestRendering (tcu::TestContext& testCtx)
1776 : TestCaseGroup (testCtx, "device_group", "Device group rendering tests")
1777 {
1778 // Left blank on purpose
1779 }
1780
1781 void DeviceGroupTestRendering::init (void)
1782 {
1783 addChild(new DeviceGroupTestCase<DeviceGroupTestInstance>(m_testCtx, "sfr", "Test split frame rendering", TEST_MODE_SFR));
1784 addChild(new DeviceGroupTestCase<DeviceGroupTestInstance>(m_testCtx, "sfr_sys", "Test split frame rendering with render target in host memory", TEST_MODE_SFR | TEST_MODE_HOSTMEMORY));
1785 addChild(new DeviceGroupTestCase<DeviceGroupTestInstance>(m_testCtx, "sfr_dedicated", "Test split frame rendering with dedicated memory allocations", TEST_MODE_SFR | TEST_MODE_DEDICATED));
1786 addChild(new DeviceGroupTestCase<DeviceGroupTestInstance>(m_testCtx, "sfr_dedicated_peer", "Test split frame rendering with dedicated memory allocations and peer fetching", TEST_MODE_SFR | TEST_MODE_DEDICATED | TEST_MODE_PEER_FETCH));
1787
1788 addChild(new DeviceGroupTestCase<DeviceGroupTestInstance>(m_testCtx, "afr", "Test alternate frame rendering", TEST_MODE_AFR));
1789 addChild(new DeviceGroupTestCase<DeviceGroupTestInstance>(m_testCtx, "afr_sys", "Test alternate frame rendering with render target in host memory", TEST_MODE_AFR | TEST_MODE_HOSTMEMORY));
1790 addChild(new DeviceGroupTestCase<DeviceGroupTestInstance>(m_testCtx, "afr_dedicated", "Test alternate frame rendering with dedicated memory allocations", TEST_MODE_AFR | TEST_MODE_DEDICATED));
1791 addChild(new DeviceGroupTestCase<DeviceGroupTestInstance>(m_testCtx, "afr_dedicated_peer", "Test split frame rendering with dedicated memory allocations and peer fetching", TEST_MODE_AFR | TEST_MODE_DEDICATED | TEST_MODE_PEER_FETCH));
1792
1793 addChild(new DeviceGroupTestCase<DeviceGroupTestInstance>(m_testCtx, "sfr_tessellated", "Test split frame rendering with tessellated sphere", TEST_MODE_SFR | TEST_MODE_TESSELLATION | TEST_MODE_DEDICATED | TEST_MODE_PEER_FETCH));
1794 addChild(new DeviceGroupTestCase<DeviceGroupTestInstance>(m_testCtx, "sfr_tessellated_linefill", "Test split frame rendering with tessellated sphere using line segments", TEST_MODE_SFR | TEST_MODE_TESSELLATION | TEST_MODE_LINEFILL | TEST_MODE_DEDICATED | TEST_MODE_PEER_FETCH));
1795 addChild(new DeviceGroupTestCase<DeviceGroupTestInstance>(m_testCtx, "afr_tessellated", "Test alternate frame rendering with tessellated sphere", TEST_MODE_AFR | TEST_MODE_TESSELLATION | TEST_MODE_DEDICATED | TEST_MODE_PEER_FETCH));
1796 addChild(new DeviceGroupTestCase<DeviceGroupTestInstance>(m_testCtx, "afr_tessellated_linefill", "Test alternate frame rendering with tessellated sphere using line segments", TEST_MODE_AFR | TEST_MODE_TESSELLATION | TEST_MODE_LINEFILL | TEST_MODE_DEDICATED | TEST_MODE_PEER_FETCH));
1797 }
1798
1799 tcu::TestCaseGroup* createTests(tcu::TestContext& testCtx)
1800 {
1801 return new DeviceGroupTestRendering(testCtx);
1802 }
1803 } // DeviceGroup
1804 } // vkt
1805