/*------------------------------------------------------------------------
 * Vulkan Conformance Tests
 * ------------------------
 *
 * Copyright (c) 2017-2019 The Khronos Group Inc.
 * Copyright (c) 2018-2020 NVIDIA Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *	  http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*!
 * \file
 * \brief Vulkan robustness2 tests
 *//*--------------------------------------------------------------------*/

#include "vktRobustnessExtsTests.hpp"

#include "vkBufferWithMemory.hpp"
#include "vkImageWithMemory.hpp"
#include "vkImageUtil.hpp"
#include "vkQueryUtil.hpp"
#include "vkBuilderUtil.hpp"
#include "vkCmdUtil.hpp"
#include "vkTypeUtil.hpp"
#include "vkObjUtil.hpp"
#include "vkBarrierUtil.hpp"
#include "vktRobustnessUtil.hpp"

#include "vktTestGroupUtil.hpp"
#include "vktTestCase.hpp"

#include "deDefs.h"
#include "deMath.h"
#include "deRandom.h"
#include "deSharedPtr.hpp"
#include "deString.h"

#include "tcuVectorType.hpp"
#include "tcuTestCase.hpp"
#include "tcuTestLog.hpp"

#include <string>
#include <sstream>
#include <algorithm>
#include <limits>

namespace vkt
{
namespace robustness
{
namespace
{
using namespace vk;
using namespace std;
using de::SharedPtr;

enum RobustnessFeatureBits
{
	RF_IMG_ROBUSTNESS	= (1		),
	RF_ROBUSTNESS2		= (1 << 1	),
	SIF_INT64ATOMICS	= (1 << 2	),
};

using RobustnessFeatures = deUint32;

// Class to wrap a singleton device with the indicated robustness features.
template <RobustnessFeatures FEATURES>
class SingletonDevice
{
	SingletonDevice	(Context& context)
		: m_logicalDevice ()
	{
		// Note we are already checking the needed features are available in checkSupport().
		VkPhysicalDeviceRobustness2FeaturesEXT				robustness2Features				= initVulkanStructure();
		VkPhysicalDeviceImageRobustnessFeaturesEXT			imageRobustnessFeatures			= initVulkanStructure();
		VkPhysicalDeviceScalarBlockLayoutFeatures			scalarBlockLayoutFeatures		= initVulkanStructure();
		VkPhysicalDeviceShaderImageAtomicInt64FeaturesEXT	shaderImageAtomicInt64Features	= initVulkanStructure();
		VkPhysicalDeviceFeatures2							features2						= initVulkanStructure();

		features2.pNext = &scalarBlockLayoutFeatures;

		if (FEATURES & RF_IMG_ROBUSTNESS)
		{
			DE_ASSERT(context.isDeviceFunctionalitySupported("VK_EXT_image_robustness"));
			imageRobustnessFeatures.pNext = features2.pNext;
			features2.pNext = &imageRobustnessFeatures;
		}

		if (FEATURES & RF_ROBUSTNESS2)
		{
			DE_ASSERT(context.isDeviceFunctionalitySupported("VK_EXT_robustness2"));
			robustness2Features.pNext = features2.pNext;
			features2.pNext = &robustness2Features;
		}

		if (FEATURES & SIF_INT64ATOMICS)
		{
			DE_ASSERT(context.isDeviceFunctionalitySupported("VK_EXT_shader_image_atomic_int64"));
			shaderImageAtomicInt64Features.pNext = features2.pNext;
			features2.pNext = &shaderImageAtomicInt64Features;
		}

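		// Each enabled feature struct is pushed onto the front of the chain, so with
		// all three bits set the final chain is:
		//   features2 -> shaderImageAtomicInt64Features -> robustness2Features
		//             -> imageRobustnessFeatures -> scalarBlockLayoutFeatures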
		context.getInstanceInterface().getPhysicalDeviceFeatures2(context.getPhysicalDevice(), &features2);
		m_logicalDevice = createRobustBufferAccessDevice(context, &features2);
	}

public:
	static VkDevice getDevice(Context& context)
	{
		if (!m_singletonDevice)
			m_singletonDevice = SharedPtr<SingletonDevice>(new SingletonDevice(context));
		DE_ASSERT(m_singletonDevice);
		return m_singletonDevice->m_logicalDevice.get();
	}

	static void destroy()
	{
		m_singletonDevice.clear();
	}

private:
	Move<vk::VkDevice>							m_logicalDevice;
	static SharedPtr<SingletonDevice<FEATURES>>	m_singletonDevice;
};

template <RobustnessFeatures FEATURES>
SharedPtr<SingletonDevice<FEATURES>> SingletonDevice<FEATURES>::m_singletonDevice;

constexpr RobustnessFeatures kImageRobustness			= RF_IMG_ROBUSTNESS;
constexpr RobustnessFeatures kRobustness2				= RF_ROBUSTNESS2;
constexpr RobustnessFeatures kShaderImageInt64Atomics	= SIF_INT64ATOMICS;

using ImageRobustnessSingleton	= SingletonDevice<kImageRobustness>;
using Robustness2Singleton		= SingletonDevice<kRobustness2>;

using ImageRobustnessInt64AtomicsSingleton	= SingletonDevice<kImageRobustness | kShaderImageInt64Atomics>;
using Robustness2Int64AtomicsSingleton		= SingletonDevice<kRobustness2 | kShaderImageInt64Atomics>;

// Render target / compute grid dimensions
static const deUint32 DIM = 8;

// treated as a phony VkDescriptorType value
#define VERTEX_ATTRIBUTE_FETCH 999

typedef enum
{
	STAGE_COMPUTE = 0,
	STAGE_VERTEX,
	STAGE_FRAGMENT,
	STAGE_RAYGEN
} Stage;

struct CaseDef
{
	VkFormat format;
	Stage stage;
	VkFlags allShaderStages;
	VkFlags allPipelineStages;
	int/*VkDescriptorType*/ descriptorType;
	VkImageViewType viewType;
	VkSampleCountFlagBits samples;
	int bufferLen;
	bool unroll;
	bool vol;
	bool nullDescriptor;
	bool useTemplate;
	bool formatQualifier;
	bool pushDescriptor;
	bool testRobustness2;
	deUint32 imageDim[3]; // width, height, depth or layers
};

static bool formatIsR64(const VkFormat& f)
{
	switch (f)
	{
	case VK_FORMAT_R64_SINT:
	case VK_FORMAT_R64_UINT:
		return true;
	default:
		return false;
	}
}

// Returns the appropriate singleton device for the given case.
VkDevice getLogicalDevice (Context& ctx, const CaseDef& caseDef)
{
	if (formatIsR64(caseDef.format))
	{
		if (caseDef.testRobustness2)
			return Robustness2Int64AtomicsSingleton::getDevice(ctx);
		return ImageRobustnessInt64AtomicsSingleton::getDevice(ctx);
	}

	if (caseDef.testRobustness2)
		return Robustness2Singleton::getDevice(ctx);
	return ImageRobustnessSingleton::getDevice(ctx);
}

class Layout
{
public:
	vector<VkDescriptorSetLayoutBinding> layoutBindings;
	vector<deUint8> refData;
};


class RobustnessExtsTestInstance : public TestInstance
{
public:
						RobustnessExtsTestInstance	(Context& context, const CaseDef& data);
						~RobustnessExtsTestInstance	(void);
	tcu::TestStatus		iterate						(void);
private:
	CaseDef				m_data;
};

RobustnessExtsTestInstance::RobustnessExtsTestInstance (Context& context, const CaseDef& data)
	: vkt::TestInstance		(context)
	, m_data				(data)
{
}

RobustnessExtsTestInstance::~RobustnessExtsTestInstance (void)
{
}

class RobustnessExtsTestCase : public TestCase
{
	public:
							RobustnessExtsTestCase	(tcu::TestContext& context, const char* name, const char* desc, const CaseDef data);
							~RobustnessExtsTestCase	(void);
	virtual	void			initPrograms			(SourceCollections& programCollection) const;
	virtual TestInstance*	createInstance			(Context& context) const;
	virtual void			checkSupport			(Context& context) const;

private:
	CaseDef					m_data;
};

RobustnessExtsTestCase::RobustnessExtsTestCase (tcu::TestContext& context, const char* name, const char* desc, const CaseDef data)
	: vkt::TestCase	(context, name, desc)
	, m_data		(data)
{
}

RobustnessExtsTestCase::~RobustnessExtsTestCase	(void)
{
}

static bool formatIsFloat(const VkFormat& f)
{
	switch (f)
	{
	case VK_FORMAT_R32_SFLOAT:
	case VK_FORMAT_R32G32_SFLOAT:
	case VK_FORMAT_R32G32B32A32_SFLOAT:
		return true;
	default:
		return false;
	}
}

static bool formatIsSignedInt(const VkFormat& f)
{
	switch (f)
	{
	case VK_FORMAT_R32_SINT:
	case VK_FORMAT_R64_SINT:
	case VK_FORMAT_R32G32_SINT:
	case VK_FORMAT_R32G32B32A32_SINT:
		return true;
	default:
		return false;
	}
}

static bool supportsStores(int descriptorType)
{
	switch (descriptorType)
	{
	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
	case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
	case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
		return true;
	default:
		return false;
	}
}

Move<VkPipeline> makeComputePipeline (const DeviceInterface&	vk,
									  const VkDevice			device,
									  const VkPipelineLayout	pipelineLayout,
									  const VkShaderModule		shaderModule)
{
	const VkPipelineShaderStageCreateInfo pipelineShaderStageParams =
	{
		VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,	// VkStructureType						sType;
		DE_NULL,												// const void*							pNext;
		(VkPipelineShaderStageCreateFlags)0,					// VkPipelineShaderStageCreateFlags		flags;
		VK_SHADER_STAGE_COMPUTE_BIT,							// VkShaderStageFlagBits				stage;
		shaderModule,											// VkShaderModule						module;
		"main",													// const char*							pName;
		DE_NULL,												// const VkSpecializationInfo*			pSpecializationInfo;
	};

	const VkComputePipelineCreateInfo pipelineCreateInfo =
	{
		VK_STRUCTURE_TYPE_COMPUTE_PIPELINE_CREATE_INFO,		// VkStructureType					sType;
		DE_NULL,											// const void*						pNext;
		0u,													// VkPipelineCreateFlags			flags;
		pipelineShaderStageParams,							// VkPipelineShaderStageCreateInfo	stage;
		pipelineLayout,										// VkPipelineLayout					layout;
		(vk::VkPipeline)0,									// VkPipeline						basePipelineHandle;
		0,													// deInt32							basePipelineIndex;
	};

	return createComputePipeline(vk, device, DE_NULL, &pipelineCreateInfo);
}
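// A usage sketch (the dispatch itself lives in the instance code, not shown here):
// bind the result with vk.cmdBindPipeline(cmd, VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline)
// before dispatching one invocation per output pixel of the DIM x DIM grid.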

void RobustnessExtsTestCase::checkSupport(Context& context) const
{
	const auto&	vki				= context.getInstanceInterface();
	const auto	physicalDevice	= context.getPhysicalDevice();

	// We need to query feature support using the physical device instead of using the reported context features because robustness2
	// and image robustness are always disabled in the default device but they may be available.
	VkPhysicalDeviceRobustness2FeaturesEXT				robustness2Features				= initVulkanStructure();
	VkPhysicalDeviceImageRobustnessFeaturesEXT			imageRobustnessFeatures			= initVulkanStructure();
	VkPhysicalDeviceScalarBlockLayoutFeatures			scalarLayoutFeatures			= initVulkanStructure();
	VkPhysicalDeviceFeatures2KHR						features2						= initVulkanStructure();

	context.requireInstanceFunctionality("VK_KHR_get_physical_device_properties2");

	context.requireDeviceFunctionality("VK_EXT_scalar_block_layout");
	features2.pNext = &scalarLayoutFeatures;

	if (context.isDeviceFunctionalitySupported("VK_EXT_image_robustness"))
	{
		imageRobustnessFeatures.pNext = features2.pNext;
		features2.pNext = &imageRobustnessFeatures;
	}

	if (context.isDeviceFunctionalitySupported("VK_EXT_robustness2"))
	{
		robustness2Features.pNext = features2.pNext;
		features2.pNext = &robustness2Features;
	}

	vki.getPhysicalDeviceFeatures2(physicalDevice, &features2);

	if (formatIsR64(m_data.format))
	{
		context.requireDeviceFunctionality("VK_EXT_shader_image_atomic_int64");

		VkFormatProperties formatProperties;
		vki.getPhysicalDeviceFormatProperties(context.getPhysicalDevice(), m_data.format, &formatProperties);

		switch (m_data.descriptorType)
		{
		case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
			if ((formatProperties.bufferFeatures & VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT) != VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT)
				TCU_THROW(NotSupportedError, "VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT is not supported");
			break;
		case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
			if ((formatProperties.bufferFeatures & VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT) != VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT)
				TCU_THROW(NotSupportedError, "VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT is not supported");
			break;
		case VERTEX_ATTRIBUTE_FETCH:
			if ((formatProperties.bufferFeatures & VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT) != VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT)
				TCU_THROW(NotSupportedError, "VK_FORMAT_FEATURE_VERTEX_BUFFER_BIT is not supported");
			break;
		case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
			if ((formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT) != VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)
				TCU_THROW(NotSupportedError, "VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT is not supported");
			break;
		default: break;
		}

		if (m_data.samples > VK_SAMPLE_COUNT_1_BIT)
		{
			if ((formatProperties.optimalTilingFeatures & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT) != VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)
				TCU_THROW(NotSupportedError, "VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT is not supported");
		}
	}

	// Check needed properties and features
	if (!scalarLayoutFeatures.scalarBlockLayout)
		TCU_THROW(NotSupportedError, "Scalar block layout not supported");

	if (m_data.stage == STAGE_VERTEX && !features2.features.vertexPipelineStoresAndAtomics)
		TCU_THROW(NotSupportedError, "Vertex pipeline stores and atomics not supported");

	if (m_data.stage == STAGE_RAYGEN)
		context.requireDeviceFunctionality("VK_NV_ray_tracing");

	switch (m_data.descriptorType)
	{
	default: DE_ASSERT(0); // Fallthrough
	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
	case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
	case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
	case VERTEX_ATTRIBUTE_FETCH:
		if (m_data.testRobustness2)
		{
			if (!robustness2Features.robustBufferAccess2)
				TCU_THROW(NotSupportedError, "robustBufferAccess2 not supported");
		}
		else
		{
			// This case is not tested here.
			DE_ASSERT(false);
		}
		break;
	case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
	case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
		if (m_data.testRobustness2)
		{
			if (!robustness2Features.robustImageAccess2)
				TCU_THROW(NotSupportedError, "robustImageAccess2 not supported");
		}
		else
		{
			if (!imageRobustnessFeatures.robustImageAccess)
				TCU_THROW(NotSupportedError, "robustImageAccess not supported");
		}
		break;
	}

	if (m_data.nullDescriptor && !robustness2Features.nullDescriptor)
		TCU_THROW(NotSupportedError, "nullDescriptor not supported");

	if ((m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) &&
		m_data.samples != VK_SAMPLE_COUNT_1_BIT &&
		!features2.features.shaderStorageImageMultisample)
		TCU_THROW(NotSupportedError, "shaderStorageImageMultisample not supported");

	if ((m_data.useTemplate || formatIsR64(m_data.format)) && !context.contextSupports(vk::ApiVersion(1, 1, 0)))
		TCU_THROW(NotSupportedError, "Vulkan 1.1 not supported");

	if ((m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER || m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE) &&
		!m_data.formatQualifier &&
		(!features2.features.shaderStorageImageReadWithoutFormat || !features2.features.shaderStorageImageWriteWithoutFormat))
		TCU_THROW(NotSupportedError, "shaderStorageImageReadWithoutFormat or shaderStorageImageWriteWithoutFormat not supported");

	if (m_data.pushDescriptor)
		context.requireDeviceFunctionality("VK_KHR_push_descriptor");
}

void generateLayout(Layout &layout, const CaseDef &caseDef)
{
	vector<VkDescriptorSetLayoutBinding> &bindings = layout.layoutBindings;
	int numBindings = caseDef.descriptorType != VERTEX_ATTRIBUTE_FETCH ? 2 : 1;
	bindings = vector<VkDescriptorSetLayoutBinding>(numBindings);

	for (deUint32 b = 0; b < layout.layoutBindings.size(); ++b)
	{
		VkDescriptorSetLayoutBinding &binding = bindings[b];
		binding.binding = b;
		binding.pImmutableSamplers = NULL;
		binding.stageFlags = caseDef.allShaderStages;
		binding.descriptorCount = 1;

		// Output image
		if (b == 0)
			binding.descriptorType = VK_DESCRIPTOR_TYPE_STORAGE_IMAGE;
		else if (caseDef.descriptorType != VERTEX_ATTRIBUTE_FETCH)
			binding.descriptorType = (VkDescriptorType)caseDef.descriptorType;
	}

	if (caseDef.nullDescriptor)
		return;

	if (caseDef.bufferLen == 0)
	{
		// Clear color values for image tests
		static deUint32 urefData[4]		= { 0x12345678, 0x23456789, 0x34567890, 0x45678901 };
		static deUint64 urefData64[4]	= { 0x1234567887654321, 0x234567899, 0x345678909, 0x456789019 };
		static float frefData[4]		= { 123.f, 234.f, 345.f, 456.f };

		if (formatIsR64(caseDef.format))
		{
			layout.refData.resize(32);
			deUint64 *ptr = (deUint64 *)layout.refData.data();

			for (unsigned int i = 0; i < 4; ++i)
			{
				ptr[i] = urefData64[i];
			}
		}
		else
		{
			layout.refData.resize(16);
			deMemcpy(layout.refData.data(), formatIsFloat(caseDef.format) ? (const void *)frefData : (const void *)urefData, sizeof(frefData));
		}
	}
	else
	{
		layout.refData.resize(caseDef.bufferLen & (formatIsR64(caseDef.format) ? ~7 : ~3));
		for (unsigned int i = 0; i < caseDef.bufferLen / (formatIsR64(caseDef.format) ? sizeof(deUint64) : sizeof(deUint32)); ++i)
		{
			if (formatIsFloat(caseDef.format))
			{
				float *f = (float *)layout.refData.data() + i;
				*f = 2.0f*(float)i + 3.0f;
			}
			else if (formatIsR64(caseDef.format))
			{
				deUint64 *u = (deUint64 *)layout.refData.data() + i;
				*u = 2 * i + 3;
			}
			else
			{
				int *u = (int *)layout.refData.data() + i;
				*u = 2*i + 3;
			}
		}
	}
}
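// The same 2*i+3 fill pattern is mirrored into the shaders as the refData[]
// constant array in initPrograms() below, so a shader can compute the expected
// in-bounds value for any index it reads.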

static string genFetch(const CaseDef &caseDef, int numComponents, const string& vecType, const string& coord, const string& lod)
{
	std::stringstream s;
	// Fetch from the descriptor.
	switch (caseDef.descriptorType)
	{
	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
		s << vecType << "(ubo0_1.val[" << coord << "]";
		for (int i = numComponents; i < 4; ++i) s << ", 0";
		s << ")";
		break;
	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
		s << vecType << "(ssbo0_1.val[" << coord << "]";
		for (int i = numComponents; i < 4; ++i) s << ", 0";
		s << ")";
		break;
	case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
		s << "texelFetch(texbo0_1, " << coord << ")";
		break;
	case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
		s << "imageLoad(image0_1, " << coord << ")";
		break;
	case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
		if (caseDef.samples > VK_SAMPLE_COUNT_1_BIT)
			s << "texelFetch(texture0_1, " << coord << ")";
		else
			s << "texelFetch(texture0_1, " << coord << ", " << lod << ")";
		break;
	case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
		s << "imageLoad(image0_1, " << coord << ")";
		break;
	case VERTEX_ATTRIBUTE_FETCH:
		s << "attr";
		break;
	default: DE_ASSERT(0);
	}
	return s.str();
}

static const int storeValue = 123;

// Get the value stored by genStore.
static string getStoreValue(int descriptorType, int numComponents, const string& vecType, const string& bufType)
{
	std::stringstream s;
	switch (descriptorType)
	{
	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
		s << vecType  << "(" << bufType << "(" << storeValue << ")";
		for (int i = numComponents; i < 4; ++i) s << ", 0";
		s << ")";
		break;
	case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
		s << vecType << "(" << storeValue << ")";
		break;
	case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
		s << vecType << "(" << storeValue << ")";
		break;
	default: DE_ASSERT(0);
	}
	return s.str();
}

static string genStore(int descriptorType, const string& vecType, const string& bufType, const string& coord)
{
	std::stringstream s;
	// Store to the descriptor.
	switch (descriptorType)
	{
	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
		s << "ssbo0_1.val[" << coord << "] = " << bufType << "(" << storeValue << ")";
		break;
	case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
		s << "imageStore(image0_1, " << coord << ", " << vecType << "(" << storeValue << "))";
		break;
	case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
		s << "imageStore(image0_1, " << coord << ", " << vecType << "(" << storeValue << "))";
		break;
	default: DE_ASSERT(0);
	}
	return s.str();
}

static string genAtomic(int descriptorType, const string& bufType, const string& coord)
{
	std::stringstream s;
	// Store to the descriptor. The value doesn't matter, since we only test out of bounds coordinates.
	switch (descriptorType)
	{
	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
		s << "atomicAdd(ssbo0_1.val[" << coord << "], " << bufType << "(10))";
		break;
	case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
		s << "imageAtomicAdd(image0_1, " << coord << ", " << bufType << "(10))";
		break;
	case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
		s << "imageAtomicAdd(image0_1, " << coord << ", " << bufType << "(10))";
		break;
	default: DE_ASSERT(0);
	}
	return s.str();
}

static std::string getShaderImageFormatQualifier (const tcu::TextureFormat& format)
{
	const char* orderPart;
	const char* typePart;

	switch (format.order)
	{
		case tcu::TextureFormat::R:		orderPart = "r";	break;
		case tcu::TextureFormat::RG:	orderPart = "rg";	break;
		case tcu::TextureFormat::RGB:	orderPart = "rgb";	break;
		case tcu::TextureFormat::RGBA:	orderPart = "rgba";	break;

		default:
			DE_FATAL("Impossible");
			orderPart = DE_NULL;
	}

	switch (format.type)
	{
		case tcu::TextureFormat::FLOAT:				typePart = "32f";		break;
		case tcu::TextureFormat::HALF_FLOAT:		typePart = "16f";		break;

		case tcu::TextureFormat::UNSIGNED_INT64:	typePart = "64ui";		break;
		case tcu::TextureFormat::UNSIGNED_INT32:	typePart = "32ui";		break;
		case tcu::TextureFormat::UNSIGNED_INT16:	typePart = "16ui";		break;
		case tcu::TextureFormat::UNSIGNED_INT8:		typePart = "8ui";		break;

		case tcu::TextureFormat::SIGNED_INT64:		typePart = "64i";		break;
		case tcu::TextureFormat::SIGNED_INT32:		typePart = "32i";		break;
		case tcu::TextureFormat::SIGNED_INT16:		typePart = "16i";		break;
		case tcu::TextureFormat::SIGNED_INT8:		typePart = "8i";		break;

		case tcu::TextureFormat::UNORM_INT16:		typePart = "16";		break;
		case tcu::TextureFormat::UNORM_INT8:		typePart = "8";			break;

		case tcu::TextureFormat::SNORM_INT16:		typePart = "16_snorm";	break;
		case tcu::TextureFormat::SNORM_INT8:		typePart = "8_snorm";	break;

		default:
			DE_FATAL("Impossible");
			typePart = DE_NULL;
	}

	return std::string() + orderPart + typePart;
}

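// Builds the coordinate expression that varies component "dim" of an integer-coordinate
// access, e.g. genCoord("c", 3, VK_SAMPLE_COUNT_1_BIT, 1) yields "ivec3(0, c, 0)".
// For multisampled accesses the last coordinate becomes the sample index:
// genCoord("c", 3, VK_SAMPLE_COUNT_4_BIT, 2) yields "ivec2(0, 0), c".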
string genCoord(string c, int numCoords, VkSampleCountFlagBits samples, int dim)
{
	if (numCoords == 1)
		return c;

	if (samples != VK_SAMPLE_COUNT_1_BIT)
		numCoords--;

	string coord = "ivec" + to_string(numCoords) + "(";

	for (int i = 0; i < numCoords; ++i)
	{
		if (i == dim)
			coord += c;
		else
			coord += "0";
		if (i < numCoords - 1)
			coord += ", ";
	}
	coord += ")";

	// Append sample coordinate
	if (samples != VK_SAMPLE_COUNT_1_BIT)
	{
		coord += ", ";
		if (dim == numCoords)
			coord += c;
		else
			coord += "0";
	}
	return coord;
}

// Normalized coordinates. Divide by "imageDim" and add 0.25 so we're not on a pixel boundary.
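// e.g. genCoordNorm(caseDef, "(c+0.25)", 2, 2, 0) with imageDim[0] == 8 yields
// "vec2((c+0.25) / float(8), 0.25 / float(8))" (every normalized component is
// divided by the size of the dimension under test).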
string genCoordNorm(const CaseDef &caseDef, string c, int numCoords, int numNormalizedCoords, int dim)
{
	if (numCoords == 1)
		return c + " / float(" + to_string(caseDef.imageDim[dim]) + ")";

	string coord = "vec" + to_string(numCoords) + "(";

	for (int i = 0; i < numCoords; ++i)
	{
		if (i == dim)
			coord += c;
		else
			coord += "0.25";
		if (i < numNormalizedCoords)
			coord += " / float(" + to_string(caseDef.imageDim[dim]) + ")";
		if (i < numCoords - 1)
			coord += ", ";
	}
	coord += ")";
	return coord;
}

void RobustnessExtsTestCase::initPrograms (SourceCollections& programCollection) const
{
	VkFormat format = m_data.format;

	Layout layout;
	generateLayout(layout, m_data);

	if (layout.layoutBindings.size() > 1 &&
		layout.layoutBindings[1].descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
	{
		if (format == VK_FORMAT_R64_SINT)
			format = VK_FORMAT_R32G32_SINT;

		if (format == VK_FORMAT_R64_UINT)
			format = VK_FORMAT_R32G32_UINT;
	}

	std::stringstream decls, checks;

	const string	r64			= formatIsR64(format) ? "64" : "";
	const string	i64Type		= formatIsR64(format) ? "64_t" : "";
	const string	vecType		= formatIsFloat(format) ? "vec4" : (formatIsSignedInt(format) ? ("i" + r64 + "vec4") : ("u" + r64 + "vec4"));
	const string	qLevelType	= vecType == "vec4" ? "float" : ((vecType == "ivec4") || (vecType == "i64vec4")) ? ("int" + i64Type) : ("uint" + i64Type);

	decls << "uvec4 abs(uvec4 x) { return x; }\n";
	if (formatIsR64(format))
		decls << "u64vec4 abs(u64vec4 x) { return x; }\n";
	decls << "int smod(int a, int b) { if (a < 0) a += b*(abs(a)/b+1); return a%b; }\n";
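	// smod() is a non-negative modulo for wrapping indices into refData,
	// e.g. smod(-3, 16) == 13, whereas the result of a plain (-3 % 16) is
	// not guaranteed for negative operands in GLSL.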


	const int	componentsSize = (formatIsR64(format) ? 8 : 4);
	int			refDataNumElements = deIntRoundToPow2(((int)layout.refData.size() / componentsSize), 4);
	// Pad reference data to include zeros, up to max value of robustUniformBufferAccessSizeAlignment (256).
	// robustStorageBufferAccessSizeAlignment is 4, so no extra padding needed.
	if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
		m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC)
	{
		refDataNumElements = deIntRoundToPow2(refDataNumElements, 256 / (formatIsR64(format) ? 8 : 4));
	}
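	// e.g. 16 bytes of 32-bit data give 4 elements; for a UBO this is padded
	// up to 64 elements (256 / 4) so reads that are OOB but within the access
	// size alignment can still compare against zeros.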
	if (m_data.nullDescriptor)
		refDataNumElements = 4;

	if (formatIsFloat(format))
	{
		decls << "float refData[" << refDataNumElements << "] = {";
		int i;
		for (i = 0; i < (int)layout.refData.size() / 4; ++i)
		{
			if (i != 0)
				decls << ", ";
			decls << ((const float *)layout.refData.data())[i];
		}
		while (i < refDataNumElements)
		{
			if (i != 0)
				decls << ", ";
			decls << "0";
			i++;
		}
	}
	else if (formatIsR64(format))
	{
		decls << "int" << i64Type << " refData[" << refDataNumElements << "] = {";
		int i;
		for (i = 0; i < (int)layout.refData.size() / 8; ++i)
		{
			if (i != 0)
				decls << ", ";
			decls << ((const deUint64 *)layout.refData.data())[i] << "l";
		}
		while (i < refDataNumElements)
		{
			if (i != 0)
				decls << ", ";
			decls << "0l";
			i++;
		}
	}
	else
	{
		decls << "int" << " refData[" << refDataNumElements << "] = {";
		int i;
		for (i = 0; i < (int)layout.refData.size() / 4; ++i)
		{
			if (i != 0)
				decls << ", ";
			decls << ((const int *)layout.refData.data())[i];
		}
		while (i < refDataNumElements)
		{
			if (i != 0)
				decls << ", ";
			decls << "0";
			i++;
		}
	}

	decls << "};\n";
	decls << vecType << " zzzz = " << vecType << "(0);\n";
	decls << vecType << " zzzo = " << vecType << "(0, 0, 0, 1);\n";
	decls << vecType << " expectedIB;\n";

	string imgprefix = (formatIsFloat(format) ? "" : formatIsSignedInt(format) ? "i" : "u") + r64;
	string imgqualif = (m_data.formatQualifier) ? getShaderImageFormatQualifier(mapVkFormat(format)) + ", " : "";
	string outputimgqualif = getShaderImageFormatQualifier(mapVkFormat(format));

	string imageDim = "";
	int numCoords, numNormalizedCoords;
	bool layered = false;
	switch (m_data.viewType)
	{
		default: DE_ASSERT(0); // Fallthrough
		case VK_IMAGE_VIEW_TYPE_1D:			imageDim = "1D";		numCoords = 1;	numNormalizedCoords = 1;	break;
		case VK_IMAGE_VIEW_TYPE_1D_ARRAY:	imageDim = "1DArray";	numCoords = 2;	numNormalizedCoords = 1;	layered = true;	break;
		case VK_IMAGE_VIEW_TYPE_2D:			imageDim = "2D";		numCoords = 2;	numNormalizedCoords = 2;	break;
		case VK_IMAGE_VIEW_TYPE_2D_ARRAY:	imageDim = "2DArray";	numCoords = 3;	numNormalizedCoords = 2;	layered = true;	break;
		case VK_IMAGE_VIEW_TYPE_3D:			imageDim = "3D";		numCoords = 3;	numNormalizedCoords = 3;	break;
		case VK_IMAGE_VIEW_TYPE_CUBE:		imageDim = "Cube";		numCoords = 3;	numNormalizedCoords = 3;	break;
		case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:	imageDim = "CubeArray";	numCoords = 4;	numNormalizedCoords = 3;	layered = true;	break;
	}
	if (m_data.samples > VK_SAMPLE_COUNT_1_BIT)
	{
		switch (m_data.viewType)
		{
			default: DE_ASSERT(0); // Fallthrough
			case VK_IMAGE_VIEW_TYPE_2D:			imageDim = "2DMS";		break;
			case VK_IMAGE_VIEW_TYPE_2D_ARRAY:	imageDim = "2DMSArray";	break;
		}
		numCoords++;
	}
	bool dataDependsOnLayer = (m_data.viewType == VK_IMAGE_VIEW_TYPE_1D_ARRAY || m_data.viewType == VK_IMAGE_VIEW_TYPE_2D_ARRAY) && !m_data.nullDescriptor;

	// Special case imageLoad(imageCubeArray, ...) which uses ivec3
	if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE &&
		m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY)
	{
		numCoords = 3;
	}

	int numComponents = tcu::getPixelSize(mapVkFormat(format)) / tcu::getChannelSize(mapVkFormat(format).type);
	string bufType;
	if (numComponents == 1)
		bufType = string(formatIsFloat(format) ? "float" : formatIsSignedInt(format) ? "int" : "uint") + i64Type;
	else
		bufType = imgprefix + "vec" + std::to_string(numComponents);

	// For UBO's, which have a declared size in the shader, don't access outside that size.
	bool declaredSize = false;
	switch (m_data.descriptorType) {
	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
		declaredSize = true;
		break;
	default:
		break;
	}
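	// Robustness is still exercised for UBOs: the shader declares val[1024] below,
	// but the bound buffer range only covers refData, so indices past the buffer
	// size are out of bounds at the API level while staying inside the declared array.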

	checks << "  int inboundcoords, clampedLayer;\n";
	checks << "  " << vecType << " expectedIB2;\n";
	if (m_data.unroll)
	{
		if (declaredSize)
			checks << "  [[unroll]] for (int c = 0; c <= 10; ++c) {\n";
		else
			checks << "  [[unroll]] for (int c = -10; c <= 10; ++c) {\n";
	}
	else
	{
		if (declaredSize)
			checks << "  [[dont_unroll]] for (int c = 1023; c >= 0; --c) {\n";
		else
			checks << "  [[dont_unroll]] for (int c = 1050; c >= -1050; --c) {\n";
	}

	if (m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
		checks << "    int idx = smod(gl_VertexIndex * " << numComponents << ", " << refDataNumElements << ");\n";
	else
		checks << "    int idx = smod(c * " << numComponents << ", " << refDataNumElements << ");\n";

	decls << "layout(" << outputimgqualif << ", set = 0, binding = 0) uniform " << imgprefix << "image2D image0_0;\n";

	const char *vol = m_data.vol ? "volatile " : "";

	// Construct the declaration for the binding
	switch (m_data.descriptorType)
	{
	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
		decls << "layout(scalar, set = 0, binding = 1) uniform ubodef0_1 { " << bufType << " val[1024]; } ubo0_1;\n";
		break;
	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
		decls << "layout(scalar, set = 0, binding = 1) " << vol << "buffer sbodef0_1 { " << bufType << " val[]; } ssbo0_1;\n";
		decls << "layout(scalar, set = 0, binding = 1) " << vol << "buffer sbodef0_1_pad { vec4 pad; " << bufType << " val[]; } ssbo0_1_pad;\n";
		break;
	case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
		switch(format)
		{
		case VK_FORMAT_R64_SINT:
			decls << "layout(set = 0, binding = 1) uniform itextureBuffer texbo0_1;\n";
			break;
		case VK_FORMAT_R64_UINT:
			decls << "layout(set = 0, binding = 1) uniform utextureBuffer texbo0_1;\n";
			break;
		default:
			decls << "layout(set = 0, binding = 1) uniform " << imgprefix << "textureBuffer texbo0_1;\n";
		}
		break;
	case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
		decls << "layout(" << imgqualif << "set = 0, binding = 1) " << vol << "uniform " << imgprefix << "imageBuffer image0_1;\n";
		break;
	case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
		decls << "layout(" << imgqualif << "set = 0, binding = 1) " << vol << "uniform " << imgprefix << "image" << imageDim << " image0_1;\n";
		break;
	case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
		switch (format)
		{
		case VK_FORMAT_R64_SINT:
			decls << "layout(set = 0, binding = 1) uniform isampler" << imageDim << " texture0_1; \n";
			break;
		case VK_FORMAT_R64_UINT:
			decls << "layout(set = 0, binding = 1) uniform usampler" << imageDim << " texture0_1; \n";
			break;
		default:
			decls << "layout(set = 0, binding = 1) uniform " << imgprefix << "sampler" << imageDim << " texture0_1;\n";
			break;
		}
		break;
	case VERTEX_ATTRIBUTE_FETCH:
		if (formatIsR64(format))
		{
			decls << "layout(location = 0) in " << (formatIsSignedInt(format) ? ("int64_t") : ("uint64_t")) << " attr;\n";
		}
		else
		{
			decls << "layout(location = 0) in " << vecType << " attr;\n";
		}
		break;
	default: DE_ASSERT(0);
	}

	string expectedOOB;
	string defaultw;

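	// Expected out-of-bounds values: OOB buffer reads must return zero, while
	// image/texel-buffer reads go through format conversion, so formats with
	// fewer than four components can legally yield alpha = 1 (hence "zzzo"
	// below for one- and two-component formats).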
	switch (m_data.descriptorType)
	{
	default: DE_ASSERT(0); // Fallthrough
	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
		expectedOOB = "zzzz";
		defaultw = "0";
		break;
	case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
	case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
	case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
	case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
	case VERTEX_ATTRIBUTE_FETCH:
		if (numComponents == 1)
		{
			expectedOOB = "zzzo";
		}
		else if (numComponents == 2)
		{
			expectedOOB = "zzzo";
		}
		else
		{
			expectedOOB = "zzzz";
		}
		defaultw = "1";
		break;
	}

	string idx;
	switch (m_data.descriptorType)
	{
	default: DE_ASSERT(0); // Fallthrough
	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
	case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
	case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
	case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
	case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
	case VERTEX_ATTRIBUTE_FETCH:
		idx = "idx";
		break;
	case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
	case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
		idx = "0";
		break;
	}
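	// Buffer views vary their contents with the index, so the expected value is
	// refData[idx]; sampled/storage images are filled with a single clear color
	// (the bufferLen == 0 path in generateLayout()), so every in-bounds texel
	// compares against refData[0..3].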

	if (m_data.nullDescriptor)
	{
		checks << "    expectedIB = zzzz;\n";
		checks << "    inboundcoords = 0;\n";
		checks << "    int paddedinboundcoords = 0;\n";
		// Vertex attribute fetch still gets format conversion applied
		if (m_data.descriptorType != VERTEX_ATTRIBUTE_FETCH)
			expectedOOB = "zzzz";
	}
	else
	{
		checks << "    expectedIB.x = refData[" << idx << "];\n";
		if (numComponents > 1)
		{
			checks << "    expectedIB.y = refData[" << idx << "+1];\n";
		}
		else
		{
			checks << "    expectedIB.y = 0;\n";
		}
		if (numComponents > 2)
		{
			checks << "    expectedIB.z = refData[" << idx << "+2];\n";
			checks << "    expectedIB.w = refData[" << idx << "+3];\n";
		}
		else
		{
			checks << "    expectedIB.z = 0;\n";
			checks << "    expectedIB.w = " << defaultw << ";\n";
		}

		switch (m_data.descriptorType)
		{
		default: DE_ASSERT(0); // Fallthrough
		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
			// UBOs can either strictly bounds check against inboundcoords, or can
			// return the contents from memory for the range padded up to paddedinboundcoords.
			checks << "    int paddedinboundcoords = " << refDataNumElements / numComponents << ";\n";
			// fallthrough
		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
		case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
		case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
		case VERTEX_ATTRIBUTE_FETCH:
			checks << "    inboundcoords = " << layout.refData.size() / (formatIsR64(format) ? sizeof(deUint64) : sizeof(deUint32)) / numComponents << ";\n";
			break;
		case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
		case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
			// set per-component below
			break;
		}
	}

	if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE ||
		m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER ||
		m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
		m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
	{
		for (int i = 0; i < numCoords; ++i)
		{
			// Treat i==3 coord (cube array layer) like i == 2
			deUint32 coordDim = m_data.imageDim[i == 3 ? 2 : i];
			if (!m_data.nullDescriptor && m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE)
				checks << "    inboundcoords = " << coordDim << ";\n";

			string coord = genCoord("c", numCoords, m_data.samples, i);
			string inboundcoords =
				m_data.nullDescriptor ? "0" :
				(m_data.samples > VK_SAMPLE_COUNT_1_BIT && i == numCoords - 1) ? to_string(m_data.samples) : "inboundcoords";

			checks << "    if (c < 0 || c >= " << inboundcoords << ") " << genStore(m_data.descriptorType, vecType, bufType, coord) << ";\n";
			if (m_data.formatQualifier &&
				(format == VK_FORMAT_R32_SINT || format == VK_FORMAT_R32_UINT))
			{
				checks << "    if (c < 0 || c >= " << inboundcoords << ") " << genAtomic(m_data.descriptorType, bufType, coord) << ";\n";
			}
		}
	}

	for (int i = 0; i < numCoords; ++i)
	{
		// Treat i==3 coord (cube array layer) like i == 2
		deUint32 coordDim = m_data.imageDim[i == 3 ? 2 : i];
		if (!m_data.nullDescriptor)
		{
			switch (m_data.descriptorType)
			{
			default:
				break;
			case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
			case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
				checks << "    inboundcoords = " << coordDim << ";\n";
				break;
			}
		}

		string coord = genCoord("c", numCoords, m_data.samples, i);

		if (m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
		{
			if (formatIsR64(format))
			{
				checks << "    temp.x = attr;\n";
				checks << "    temp.y = 0l;\n";
				checks << "    temp.z = 0l;\n";
				checks << "    temp.w = 0l;\n";
				checks << "    if (gl_VertexIndex >= 0 && gl_VertexIndex < inboundcoords) temp.x -= expectedIB.x; else temp -= zzzz;\n";
			}
			else
			{
				checks << "    temp = " << genFetch(m_data, numComponents, vecType, coord, "0") << ";\n";
				checks << "    if (gl_VertexIndex >= 0 && gl_VertexIndex < inboundcoords) temp -= expectedIB; else temp -= " << expectedOOB << ";\n";
			}
			// Accumulate any incorrect values.
			checks << "    accum += abs(temp);\n";
		}
		// Skip texelFetch testing for cube(array) - texelFetch doesn't support it
		if (m_data.descriptorType != VERTEX_ATTRIBUTE_FETCH &&
			!(m_data.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER &&
			  (m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE || m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY)))
		{
			checks << "    temp = " << genFetch(m_data, numComponents, vecType, coord, "0") << ";\n";

			checks << "    expectedIB2 = expectedIB;\n";

			// Expected data is a function of layer, for array images. Subtract out the layer value for in-bounds coordinates.
			if (dataDependsOnLayer && i == numNormalizedCoords)
				checks << "    if (c >= 0 && c < inboundcoords) expectedIB2 += " << vecType << "(c, 0, 0, 0);\n";

			if (m_data.samples > VK_SAMPLE_COUNT_1_BIT && i == numCoords - 1)
			{
				if (m_data.nullDescriptor && m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE)
				{
					checks << "    if (temp == zzzz) temp = " << vecType << "(0);\n";
					if (m_data.formatQualifier && numComponents < 4)
						checks << "    else if (temp == zzzo) temp = " << vecType << "(0);\n";
					checks << "    else temp = " << vecType << "(1);\n";
				}
				else
					// multisample coord doesn't have defined behavior for OOB, so just set temp to 0.
					checks << "    if (c >= 0 && c < " << m_data.samples << ") temp -= expectedIB2; else temp = " << vecType << "(0);\n";
			}
			else
			{
				// Storage buffers may be split into per-component loads. Generate a second
				// expected out of bounds value where some subset of the components are
				// actually in-bounds. If both loads and stores are split into per-component
				// accesses, then the result value can be a mix of storeValue and zero.
				string expectedOOB2 = expectedOOB;
				string expectedOOB3 = expectedOOB;
				if ((m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
					 m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC) &&
					 !m_data.nullDescriptor)
				{
					int len = m_data.bufferLen & (formatIsR64(format) ? ~7 : ~3);
					int mod = (int)((len / (formatIsR64(format) ? sizeof(deUint64) : sizeof(deUint32))) % numComponents);
					string sstoreValue = de::toString(storeValue);
					switch (mod)
					{
					case 0:
						break;
					case 1:
						expectedOOB2 = vecType + "(expectedIB2.x, 0, 0, 0)";
						expectedOOB3 = vecType + "(" + sstoreValue + ", 0, 0, 0)";
						break;
					case 2:
						expectedOOB2 = vecType + "(expectedIB2.xy, 0, 0)";
						expectedOOB3 = vecType + "(" + sstoreValue + ", " + sstoreValue + ", 0, 0)";
						break;
					case 3:
						expectedOOB2 = vecType + "(expectedIB2.xyz, 0)";
						expectedOOB3 = vecType + "(" + sstoreValue + ", " + sstoreValue + ", " + sstoreValue + ", 0)";
						break;
					}
				}
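
				// e.g. a 20-byte SSBO of 32-bit vec4 data holds 5 scalars, so mod == 1:
				// the last vec4 straddles the end of the buffer and only its .x lane is
				// in-bounds, giving the expectedOOB2/expectedOOB3 mixes above.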

				// Entirely in-bounds.
				checks << "    if (c >= 0 && c < inboundcoords) {\n"
						  "       if (temp == expectedIB2) temp = " << vecType << "(0); else temp = " << vecType << "(1);\n"
						  "    }\n";

				// normal out-of-bounds value
				if (m_data.testRobustness2)
					checks << "    else if (temp == " << expectedOOB << ") temp = " << vecType << "(0);\n";
				else
					// image_robustness relaxes alpha which is allowed to be zero or one
					checks << "    else if (temp == zzzz || temp == zzzo) temp = " << vecType << "(0);\n";

				if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
					m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC)
				{
					checks << "    else if (c >= 0 && c < paddedinboundcoords && temp == expectedIB2) temp = " << vecType << "(0);\n";
				}

				// null descriptor loads with image format layout qualifier that doesn't include alpha may return alpha=1
				if (m_data.nullDescriptor && m_data.formatQualifier &&
					(m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE || m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER) &&
					numComponents < 4)
					checks << "    else if (temp == zzzo) temp = " << vecType << "(0);\n";

				// non-volatile value replaced with stored value
				if (supportsStores(m_data.descriptorType) && !m_data.vol)
					checks << "    else if (temp == " << getStoreValue(m_data.descriptorType, numComponents, vecType, bufType) << ") temp = " << vecType << "(0);\n";

				// value straddling the boundary, returning a partial vector
				if (expectedOOB2 != expectedOOB)
					checks << "    else if (c == inboundcoords && temp == " << expectedOOB2 << ") temp = " << vecType << "(0);\n";
				if (expectedOOB3 != expectedOOB)
					checks << "    else if (c == inboundcoords && temp == " << expectedOOB3 << ") temp = " << vecType << "(0);\n";

				// failure
				checks << "    else temp = " << vecType << "(1);\n";
			}
			// Accumulate any incorrect values.
			checks << "    accum += abs(temp);\n";

			// Only the full robustness2 extension provides guarantees about out-of-bounds mip levels.
			if (m_data.testRobustness2 && m_data.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER && m_data.samples == VK_SAMPLE_COUNT_1_BIT)
			{
				// Fetch from an out of bounds mip level. Expect this to always return the OOB value.
				string coord0 = genCoord("0", numCoords, m_data.samples, i);
				checks << "    if (c != 0) temp = " << genFetch(m_data, numComponents, vecType, coord0, "c") << "; else temp = " << vecType << "(0);\n";
				checks << "    if (c != 0) temp -= " << expectedOOB << ";\n";
				checks << "    accum += abs(temp);\n";
			}
		}
		if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER &&
			m_data.samples == VK_SAMPLE_COUNT_1_BIT)
		{
			string coordNorm = genCoordNorm(m_data, "(c+0.25)", numCoords, numNormalizedCoords, i);

			checks << "    expectedIB2 = expectedIB;\n";

			// Data is a function of layer, for array images. Subtract out the layer value for in-bounds coordinates.
			if (dataDependsOnLayer && i == numNormalizedCoords)
			{
				checks << "    clampedLayer = clamp(c, 0, " << coordDim-1 << ");\n";
				checks << "    expectedIB2 += " << vecType << "(clampedLayer, 0, 0, 0);\n";
			}

			stringstream normexpected;
			// Cubemap fetches are always in-bounds. Layer coordinate is clamped, so is always in-bounds.
			if (m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE ||
				m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY ||
				(layered && i == numCoords-1))
				normexpected << "    temp -= expectedIB2;\n";
			else
			{
				normexpected << "    if (c >= 0 && c < inboundcoords)\n";
				normexpected << "        temp -= expectedIB2;\n";
				normexpected << "    else\n";
				if (m_data.testRobustness2)
					normexpected << "        temp -= " << expectedOOB << ";\n";
				else
					// image_robustness relaxes alpha which is allowed to be zero or one
					normexpected << "        temp = " << vecType << "((temp == zzzz || temp == zzzo) ? 0 : 1);\n";
			}

			checks << "    temp = texture(texture0_1, " << coordNorm << ");\n";
			checks << normexpected.str();
			checks << "    accum += abs(temp);\n";
			checks << "    temp = textureLod(texture0_1, " << coordNorm << ", 0.0f);\n";
			checks << normexpected.str();
			checks << "    accum += abs(temp);\n";
			checks << "    temp = textureGrad(texture0_1, " << coordNorm << ", " << genCoord("1.0", numNormalizedCoords, m_data.samples, i) << ", " << genCoord("1.0", numNormalizedCoords, m_data.samples, i) << ");\n";
			checks << normexpected.str();
			checks << "    accum += abs(temp);\n";
		}
		if (m_data.nullDescriptor)
		{
			const char *sizeswiz;
			switch (m_data.viewType)
			{
				default: DE_ASSERT(0); // Fallthrough
				case VK_IMAGE_VIEW_TYPE_1D:			sizeswiz = ".xxxx";	break;
				case VK_IMAGE_VIEW_TYPE_1D_ARRAY:	sizeswiz = ".xyxx";	break;
				case VK_IMAGE_VIEW_TYPE_2D:			sizeswiz = ".xyxx";	break;
				case VK_IMAGE_VIEW_TYPE_2D_ARRAY:	sizeswiz = ".xyzx";	break;
				case VK_IMAGE_VIEW_TYPE_3D:			sizeswiz = ".xyzx";	break;
				case VK_IMAGE_VIEW_TYPE_CUBE:		sizeswiz = ".xyxx";	break;
				case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:	sizeswiz = ".xyzx";	break;
			}
			if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
			{
				if (m_data.samples == VK_SAMPLE_COUNT_1_BIT)
				{
					checks << "    temp = textureSize(texture0_1, 0)" << sizeswiz <<";\n";
					checks << "    accum += abs(temp);\n";

					// checking textureSize with clearly out of range LOD values
					checks << "    temp = textureSize(texture0_1, " << -i << ")" << sizeswiz <<";\n";
					checks << "    accum += abs(temp);\n";
					checks << "    temp = textureSize(texture0_1, " << (std::numeric_limits<deInt32>::max() - i) << ")" << sizeswiz <<";\n";
					checks << "    accum += abs(temp);\n";
				}
				else
				{
					checks << "    temp = textureSize(texture0_1)" << sizeswiz <<";\n";
					checks << "    accum += abs(temp);\n";
					checks << "    temp = textureSamples(texture0_1).xxxx;\n";
					checks << "    accum += abs(temp);\n";
				}
			}
			if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE)
			{
				if (m_data.samples == VK_SAMPLE_COUNT_1_BIT)
				{
					checks << "    temp = imageSize(image0_1)" << sizeswiz <<";\n";
					checks << "    accum += abs(temp);\n";
				}
				else
				{
					checks << "    temp = imageSize(image0_1)" << sizeswiz <<";\n";
					checks << "    accum += abs(temp);\n";
					checks << "    temp = imageSamples(image0_1).xxxx;\n";
					checks << "    accum += abs(temp);\n";
				}
			}
			if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
				m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
			{
				// expect zero for runtime-sized array .length()
				checks << "    temp = " << vecType << "(ssbo0_1.val.length());\n";
				checks << "    accum += abs(temp);\n";
				checks << "    temp = " << vecType << "(ssbo0_1_pad.val.length());\n";
				checks << "    accum += abs(temp);\n";
			}
		}
	}
	checks << "  }\n";

	// outside the coordinates loop because we only need to call it once
	if (m_data.nullDescriptor &&
		m_data.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER &&
		m_data.samples == VK_SAMPLE_COUNT_1_BIT)
	{
		checks << "  temp_ql = " << qLevelType << "(textureQueryLevels(texture0_1));\n";
		checks << "  temp = " << vecType << "(temp_ql);\n";
		checks << "  accum += abs(temp);\n";
	}

	const bool is64BitFormat = formatIsR64(m_data.format);
	std::string SupportR64 = (is64BitFormat ?
							std::string("#extension GL_EXT_shader_explicit_arithmetic_types_int64 : require\n"
							"#extension GL_EXT_shader_image_int64 : require\n") :
							std::string());

	switch (m_data.stage)
	{
	default: DE_ASSERT(0); // Fallthrough
	case STAGE_COMPUTE:
		{
			std::stringstream css;
			css <<
				"#version 450 core\n"
				"#extension GL_EXT_nonuniform_qualifier : enable\n"
				"#extension GL_EXT_scalar_block_layout : enable\n"
				"#extension GL_EXT_samplerless_texture_functions : enable\n"
				"#extension GL_EXT_control_flow_attributes : enable\n"
				"#extension GL_EXT_shader_image_load_formatted : enable\n"
				<< SupportR64
				<< decls.str() <<
				"layout(local_size_x = 1, local_size_y = 1) in;\n"
				"void main()\n"
				"{\n"
				"  " << vecType << " accum = " << vecType << "(0);\n"
				"  " << vecType << " temp;\n"
				"  " << qLevelType << " temp_ql;\n"
				<< checks.str() <<
				"  " << vecType << " color = (accum != " << vecType << "(0)) ? " << vecType << "(0,0,0,0) : " << vecType << "(1,0,0,1);\n"
				"  imageStore(image0_0, ivec2(gl_GlobalInvocationID.xy), color);\n"
				"}\n";

			programCollection.glslSources.add("test") << glu::ComputeSource(css.str())
				<< vk::ShaderBuildOptions(programCollection.usedVulkanVersion, is64BitFormat ? vk::SPIRV_VERSION_1_3 : vk::SPIRV_VERSION_1_0, vk::ShaderBuildOptions::FLAG_ALLOW_SCALAR_OFFSETS);
			break;
		}
	case STAGE_RAYGEN:
	{
		std::stringstream css;
		css <<
			"#version 460 core\n"
			"#extension GL_EXT_samplerless_texture_functions : enable\n"
			"#extension GL_EXT_scalar_block_layout : enable\n"
			"#extension GL_EXT_nonuniform_qualifier : enable\n"
			"#extension GL_EXT_control_flow_attributes : enable\n"
			"#extension GL_NV_ray_tracing : require\n"
			"#extension GL_EXT_shader_image_load_formatted : enable\n"
			<< SupportR64
			<< decls.str() <<
			"void main()\n"
			"{\n"
			"  " << vecType << " accum = " << vecType << "(0);\n"
			"  " << vecType << " temp;\n"
			"  " << qLevelType << " temp_ql;\n"
			<< checks.str() <<
			"  " << vecType << " color = (accum != " << vecType << "(0)) ? " << vecType << "(0,0,0,0) : " << vecType << "(1,0,0,1);\n"
			"  imageStore(image0_0, ivec2(gl_LaunchIDNV.xy), color);\n"
			"}\n";

		programCollection.glslSources.add("test") << glu::RaygenSource(css.str())
			<< vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_0, vk::ShaderBuildOptions::FLAG_ALLOW_SCALAR_OFFSETS);
		break;
	}
1443 	case STAGE_VERTEX:
1444 		{
1445 			std::stringstream vss;
1446 			vss <<
1447 				"#version 450 core\n"
1448 				"#extension GL_EXT_samplerless_texture_functions : enable\n"
1449 				"#extension GL_EXT_scalar_block_layout : enable\n"
1450 				"#extension GL_EXT_nonuniform_qualifier : enable\n"
1451 				"#extension GL_EXT_control_flow_attributes : enable\n"
1452 				"#extension GL_EXT_shader_image_load_formatted : enable\n"
1453 				<< SupportR64
1454 				<< decls.str() <<
1455 				"void main()\n"
1456 				"{\n"
1457 				"  " << vecType << " accum = " << vecType << "(0);\n"
1458 				"  " << vecType << " temp;\n"
1459 				"  " << qLevelType << " temp_ql;\n"
1460 				<< checks.str() <<
1461 				"  " << vecType << " color = (accum != " << vecType << "(0)) ? " << vecType << "(0,0,0,0) : " << vecType << "(1,0,0,1);\n"
1462 				"  imageStore(image0_0, ivec2(gl_VertexIndex % " << DIM << ", gl_VertexIndex / " << DIM << "), color);\n"
1463 				"  gl_PointSize = 1.0f;\n"
1464 				"  gl_Position = vec4(0.0f, 0.0f, 0.0f, 1.0f);\n"
1465 				"}\n";
1466 
1467 			programCollection.glslSources.add("test") << glu::VertexSource(vss.str())
1468 				<< vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_0, vk::ShaderBuildOptions::FLAG_ALLOW_SCALAR_OFFSETS);
1469 			break;
1470 		}
1471 	case STAGE_FRAGMENT:
1472 		{
1473 			if (m_data.nullDescriptor &&
1474 				m_data.descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER &&
1475 				m_data.samples == VK_SAMPLE_COUNT_1_BIT)
1476 			{
1477 				// Here we only want to check that textureQueryLod returns 0 when
1478 				// texture0_1 is null, so the actual texture coordinates are not needed
1479 				// (using them would require modifying the vertex shader below). Any
1480 				// coordinates work; gl_FragCoord was chosen arbitrarily rather than, say, 0.
1481 				std::string lod_str = (numNormalizedCoords == 1) ? ");" : (numNormalizedCoords == 2) ? "y);" : "yz);";
1482 				checks << "  vec2 lod = textureQueryLod(texture0_1, gl_FragCoord.x" << lod_str << "\n";
1483 				checks << "  temp_ql = " << qLevelType <<
1484 					"(ceil(abs(lod.x) + abs(lod.y)));\n";
1485 				checks << "  temp = " << vecType << "(temp_ql);\n";
1486 				checks << "  accum += abs(temp);\n";
1487 			}
1488 
1489 			std::stringstream vss;
1490 			vss <<
1491 				"#version 450 core\n"
1492 				"void main()\n"
1493 				"{\n"
1494 				// full-viewport quad
1495 				"  gl_Position = vec4( 2.0*float(gl_VertexIndex&2) - 1.0, 4.0*(gl_VertexIndex&1)-1.0, 1.0 - 2.0 * float(gl_VertexIndex&1), 1);\n"
1496 				"}\n";
1497 
1498 			programCollection.glslSources.add("vert") << glu::VertexSource(vss.str())
1499 				<< vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_0, vk::ShaderBuildOptions::FLAG_ALLOW_SCALAR_OFFSETS);
1500 
1501 			std::stringstream fss;
1502 			fss <<
1503 				"#version 450 core\n"
1504 				"#extension GL_EXT_samplerless_texture_functions : enable\n"
1505 				"#extension GL_EXT_scalar_block_layout : enable\n"
1506 				"#extension GL_EXT_nonuniform_qualifier : enable\n"
1507 				"#extension GL_EXT_control_flow_attributes : enable\n"
1508 				"#extension GL_EXT_shader_image_load_formatted : enable\n"
1509 				<< SupportR64
1510 				<< decls.str() <<
1511 				"void main()\n"
1512 				"{\n"
1513 				"  " << vecType << " accum = " << vecType << "(0);\n"
1514 				"  " << vecType << " temp;\n"
1515 				"  " << qLevelType << " temp_ql;\n"
1516 				<< checks.str() <<
1517 				"  " << vecType << " color = (accum != " << vecType << "(0)) ? " << vecType << "(0,0,0,0) : " << vecType << "(1,0,0,1);\n"
1518 				"  imageStore(image0_0, ivec2(gl_FragCoord.x, gl_FragCoord.y), color);\n"
1519 				"}\n";
1520 
1521 			programCollection.glslSources.add("test") << glu::FragmentSource(fss.str())
1522 				<< vk::ShaderBuildOptions(programCollection.usedVulkanVersion, vk::SPIRV_VERSION_1_0, vk::ShaderBuildOptions::FLAG_ALLOW_SCALAR_OFFSETS);
1523 			break;
1524 		}
1525 	}
1526 
1527 	// The is64BitFormat checks below are redundant with this condition. Could the fill shader below support non-64-bit formats too?
1528 	if ((m_data.samples > VK_SAMPLE_COUNT_1_BIT) && is64BitFormat)
1529 	{
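		// Multisampled images cannot be initialized with a buffer-to-image copy,
		// so this compute shader writes every sample of every texel from the
		// staging buffer instead (dispatched once per texel, looping over samples).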
1530 		const std::string	ivecCords = (m_data.viewType == VK_IMAGE_VIEW_TYPE_2D ? "ivec2(gx, gy)" : "ivec3(gx, gy, gz)");
1531 		std::stringstream	fillShader;
1532 
1533 		fillShader <<
1534 			"#version 450\n"
1535 			<< SupportR64
1536 			<< "\n"
1537 			"layout(local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n"
1538 			"layout (" + getShaderImageFormatQualifier(mapVkFormat(m_data.format)) + ", binding=0) volatile uniform "
1539 			<< string(formatIsSignedInt(m_data.format) ? "i" : "u") + string(is64BitFormat ? "64" : "") << "image" << imageDim << +" u_resultImage;\n"
1540 			"\n"
1541 			"layout(std430, binding = 1) buffer inputBuffer\n"
1542 			"{\n"
1543 			"  int" << (is64BitFormat ? "64_t" : "") << " data[];\n"
1544 			"} inBuffer;\n"
1545 			"\n"
1546 			"void main(void)\n"
1547 			"{\n"
1548 			"  int gx = int(gl_GlobalInvocationID.x);\n"
1549 			"  int gy = int(gl_GlobalInvocationID.y);\n"
1550 			"  int gz = int(gl_GlobalInvocationID.z);\n"
1551 			"  uint index = gx + (gy * gl_NumWorkGroups.x) + (gz * gl_NumWorkGroups.x * gl_NumWorkGroups.y);\n";
1552 
1553 		for (int ndx = 0; ndx < static_cast<int>(m_data.samples); ++ndx)
1554 		{
1555 			fillShader << "  imageStore(u_resultImage, " << ivecCords << ", " << ndx << ", i64vec4(inBuffer.data[index]));\n";
1556 		}
1557 
1558 		fillShader << "}\n";
1559 
1560 		programCollection.glslSources.add("fillShader") << glu::ComputeSource(fillShader.str())
1561 			<< vk::ShaderBuildOptions(programCollection.usedVulkanVersion, is64BitFormat ? vk::SPIRV_VERSION_1_3 : vk::SPIRV_VERSION_1_0, vk::ShaderBuildOptions::FLAG_ALLOW_SCALAR_OFFSETS);
1562 	}
1563 
1564 }
1565 
1566 VkImageType imageViewTypeToImageType (VkImageViewType type)
1567 {
1568 	switch (type)
1569 	{
1570 		case VK_IMAGE_VIEW_TYPE_1D:
1571 		case VK_IMAGE_VIEW_TYPE_1D_ARRAY:		return VK_IMAGE_TYPE_1D;
1572 		case VK_IMAGE_VIEW_TYPE_2D:
1573 		case VK_IMAGE_VIEW_TYPE_2D_ARRAY:
1574 		case VK_IMAGE_VIEW_TYPE_CUBE:
1575 		case VK_IMAGE_VIEW_TYPE_CUBE_ARRAY:		return VK_IMAGE_TYPE_2D;
1576 		case VK_IMAGE_VIEW_TYPE_3D:				return VK_IMAGE_TYPE_3D;
1577 		default:
1578 			DE_ASSERT(false);
1579 	}
1580 
1581 	return VK_IMAGE_TYPE_2D;
1582 }
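// For example, cube and cube-array views are created from layered 2D images,
// so they map to VK_IMAGE_TYPE_2D just like plain 2D and 2D-array views.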
1583 
1584 TestInstance* RobustnessExtsTestCase::createInstance (Context& context) const
1585 {
1586 	return new RobustnessExtsTestInstance(context, m_data);
1587 }
1588 
1589 tcu::TestStatus RobustnessExtsTestInstance::iterate (void)
1590 {
1591 	const InstanceInterface&	vki					= m_context.getInstanceInterface();
1592 	const VkDevice				device				= getLogicalDevice(m_context, m_data);
1593 	const DeviceDriver			vk					(m_context.getPlatformInterface(), m_context.getInstance(), device);
1594 	const VkPhysicalDevice		physicalDevice		= m_context.getPhysicalDevice();
1595 	SimpleAllocator				allocator			(vk, device, getPhysicalDeviceMemoryProperties(vki, physicalDevice));
1596 
1597 	Layout layout;
1598 	generateLayout(layout, m_data);
1599 
1600 	// Get needed properties.
1601 	VkPhysicalDeviceProperties2 properties;
1602 	deMemset(&properties, 0, sizeof(properties));
1603 	properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_PROPERTIES_2;
1604 	void** pNextTail = &properties.pNext;
1605 
1606 	VkPhysicalDeviceRayTracingPropertiesNV rayTracingProperties;
1607 	deMemset(&rayTracingProperties, 0, sizeof(rayTracingProperties));
1608 	rayTracingProperties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_RAY_TRACING_PROPERTIES_NV;
1609 
1610 	VkPhysicalDeviceRobustness2PropertiesEXT robustness2Properties;
1611 	deMemset(&robustness2Properties, 0, sizeof(robustness2Properties));
1612 	robustness2Properties.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_ROBUSTNESS_2_PROPERTIES_EXT;
1613 
1614 	if (m_context.isDeviceFunctionalitySupported("VK_NV_ray_tracing"))
1615 	{
1616 		*pNextTail = &rayTracingProperties;
1617 		pNextTail = &rayTracingProperties.pNext;
1618 	}
1619 
1620 	if (m_context.isDeviceFunctionalitySupported("VK_EXT_robustness2"))
1621 	{
1622 		*pNextTail = &robustness2Properties;
1623 		pNextTail = &robustness2Properties.pNext;
1624 	}
1625 
1626 	vki.getPhysicalDeviceProperties2(physicalDevice, &properties);
1627 
1628 	if (m_data.testRobustness2)
1629 	{
1630 		if (robustness2Properties.robustStorageBufferAccessSizeAlignment != 1 &&
1631 			robustness2Properties.robustStorageBufferAccessSizeAlignment != 4)
1632 			return tcu::TestStatus(QP_TEST_RESULT_FAIL, "robustStorageBufferAccessSizeAlignment must be 1 or 4");
1633 
1634 		if (robustness2Properties.robustUniformBufferAccessSizeAlignment < 1 ||
1635 			robustness2Properties.robustUniformBufferAccessSizeAlignment > 256 ||
1636 			!deIntIsPow2((int)robustness2Properties.robustUniformBufferAccessSizeAlignment))
1637 			return tcu::TestStatus(QP_TEST_RESULT_FAIL, "robustUniformBufferAccessSizeAlignment must be a power of two in [1,256]");
1638 	}
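	// These are the only values VK_EXT_robustness2 allows. The same alignments are
	// reused below when zeroing the test buffer, since an access that stays within
	// one alignment granule past the end of the range may still be treated as
	// in-bounds and return the underlying data rather than zero.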
1639 
1640 	VkPipelineBindPoint bindPoint;
1641 
1642 	switch (m_data.stage)
1643 	{
1644 	case STAGE_COMPUTE:
1645 		bindPoint = VK_PIPELINE_BIND_POINT_COMPUTE;
1646 		break;
1647 	case STAGE_RAYGEN:
1648 		bindPoint = VK_PIPELINE_BIND_POINT_RAY_TRACING_NV;
1649 		break;
1650 	default:
1651 		bindPoint = VK_PIPELINE_BIND_POINT_GRAPHICS;
1652 		break;
1653 	}
1654 
1655 	Move<vk::VkDescriptorSetLayout>	descriptorSetLayout;
1656 	Move<vk::VkDescriptorPool>		descriptorPool;
1657 	Move<vk::VkDescriptorSet>		descriptorSet;
1658 
1659 	int formatBytes = tcu::getPixelSize(mapVkFormat(m_data.format));
1660 	int numComponents = formatBytes / tcu::getChannelSize(mapVkFormat(m_data.format).type);
1661 
1662 	vector<VkDescriptorSetLayoutBinding> &bindings = layout.layoutBindings;
1663 
1664 	VkDescriptorPoolCreateFlags poolCreateFlags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
1665 	VkDescriptorSetLayoutCreateFlags layoutCreateFlags = m_data.pushDescriptor ? VK_DESCRIPTOR_SET_LAYOUT_CREATE_PUSH_DESCRIPTOR_BIT_KHR : 0;
1666 
1667 	// Create a layout and allocate a descriptor set for it.
1668 
1669 	const VkDescriptorSetLayoutCreateInfo setLayoutCreateInfo =
1670 	{
1671 		vk::VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
1672 		DE_NULL,
1673 
1674 		layoutCreateFlags,
1675 		(deUint32)bindings.size(),
1676 		bindings.empty() ? DE_NULL : bindings.data()
1677 	};
1678 
1679 	descriptorSetLayout = vk::createDescriptorSetLayout(vk, device, &setLayoutCreateInfo);
1680 
1681 	vk::DescriptorPoolBuilder poolBuilder;
1682 	poolBuilder.addType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1);
1683 	poolBuilder.addType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC, 1);
1684 	poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1);
1685 	poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC, 1);
1686 	poolBuilder.addType(VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER, 1);
1687 	poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, 1);
1688 	poolBuilder.addType(VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, 1);
1689 	poolBuilder.addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 2);
1690 
1691 	descriptorPool = poolBuilder.build(vk, device, poolCreateFlags, 1u, DE_NULL);
1692 
1693 	const void *pNext = DE_NULL;
1694 
1695 	if (!m_data.pushDescriptor)
1696 		descriptorSet = makeDescriptorSet(vk, device, *descriptorPool, *descriptorSetLayout, pNext);
1697 
1698 	de::MovePtr<BufferWithMemory> buffer;
1699 
1700 	deUint8 *bufferPtr = DE_NULL;
1701 	if (!m_data.nullDescriptor)
1702 	{
1703 		// Create a buffer to hold data for all descriptors.
1704 		VkDeviceSize	size = de::max(
1705 			(VkDeviceSize)(m_data.bufferLen ? m_data.bufferLen : 1),
1706 			(VkDeviceSize)256);
1707 
1708 		if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
1709 			m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC)
1710 		{
1711 			size = deIntRoundToPow2((int)size, (int)robustness2Properties.robustUniformBufferAccessSizeAlignment);
1712 		}
1713 		else if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
1714 				 m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
1715 		{
1716 			size = deIntRoundToPow2((int)size, (int)robustness2Properties.robustStorageBufferAccessSizeAlignment);
1717 		}
1718 		else if (m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
1719 		{
1720 			size = m_data.bufferLen;
1721 		}
1722 
1723 		buffer = de::MovePtr<BufferWithMemory>(new BufferWithMemory(
1724 			vk, device, allocator, makeBufferCreateInfo(size,
1725 														VK_BUFFER_USAGE_STORAGE_BUFFER_BIT |
1726 														VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT |
1727 														VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT |
1728 														VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT |
1729 														VK_BUFFER_USAGE_VERTEX_BUFFER_BIT),
1730 														MemoryRequirement::HostVisible));
1731 		bufferPtr = (deUint8 *)buffer->getAllocation().getHostPtr();
1732 
1733 		deMemset(bufferPtr, 0x3f, (size_t)size);
1734 
1735 		deMemset(bufferPtr, 0, m_data.bufferLen);
1736 		if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER ||
1737 			m_data.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC)
1738 		{
1739 			deMemset(bufferPtr, 0, deIntRoundToPow2(m_data.bufferLen, (int)robustness2Properties.robustUniformBufferAccessSizeAlignment));
1740 		}
1741 		else if (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER ||
1742 				 m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
1743 		{
1744 			deMemset(bufferPtr, 0, deIntRoundToPow2(m_data.bufferLen, (int)robustness2Properties.robustStorageBufferAccessSizeAlignment));
1745 		}
1746 	}
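	// The buffer now holds 0x3f filler everywhere except the first bufferLen bytes
	// (rounded up to the robust access alignment for uniform/storage buffers),
	// which are zeroed: a check that observes the filler pattern has read past the
	// bounds that robust buffer access should have enforced.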
1747 
1748 	const deUint32 queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();
1749 
1750 	Move<VkDescriptorSetLayout>		descriptorSetLayoutR64;
1751 	Move<VkDescriptorPool>			descriptorPoolR64;
1752 	Move<VkDescriptorSet>			descriptorSetFillImage;
1753 	Move<VkShaderModule>			shaderModuleFillImage;
1754 	Move<VkPipelineLayout>			pipelineLayoutFillImage;
1755 	Move<VkPipeline>				pipelineFillImage;
1756 
1757 	Move<VkCommandPool>				cmdPool		= createCommandPool(vk, device, 0, queueFamilyIndex);
1758 	Move<VkCommandBuffer>			cmdBuffer	= allocateCommandBuffer(vk, device, *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY);
1759 	VkQueue							queue;
1760 
1761 	vk.getDeviceQueue(device, queueFamilyIndex, 0, &queue);
1762 
1763 	const VkImageSubresourceRange	barrierRange				=
1764 	{
1765 		VK_IMAGE_ASPECT_COLOR_BIT,	// VkImageAspectFlags	aspectMask;
1766 		0u,							// deUint32				baseMipLevel;
1767 		VK_REMAINING_MIP_LEVELS,	// deUint32				levelCount;
1768 		0u,							// deUint32				baseArrayLayer;
1769 		VK_REMAINING_ARRAY_LAYERS	// deUint32				layerCount;
1770 	};
1771 
1772 	VkImageMemoryBarrier			preImageBarrier				=
1773 	{
1774 		VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,				// VkStructureType		sType
1775 		DE_NULL,											// const void*			pNext
1776 		0u,													// VkAccessFlags		srcAccessMask
1777 		VK_ACCESS_TRANSFER_WRITE_BIT,						// VkAccessFlags		dstAccessMask
1778 		VK_IMAGE_LAYOUT_UNDEFINED,							// VkImageLayout		oldLayout
1779 		VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,				// VkImageLayout		newLayout
1780 		VK_QUEUE_FAMILY_IGNORED,							// uint32_t				srcQueueFamilyIndex
1781 		VK_QUEUE_FAMILY_IGNORED,							// uint32_t				dstQueueFamilyIndex
1782 		DE_NULL,											// VkImage				image
1783 		barrierRange,										// VkImageSubresourceRange	subresourceRange;
1784 	};
1785 
1786 	VkImageMemoryBarrier			postImageBarrier			=
1787 	{
1788 		VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,		// VkStructureType			sType;
1789 		DE_NULL,									// const void*				pNext;
1790 		VK_ACCESS_TRANSFER_WRITE_BIT,				// VkAccessFlags			srcAccessMask;
1791 		VK_ACCESS_SHADER_READ_BIT,					// VkAccessFlags			dstAccessMask;
1792 		VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,		// VkImageLayout			oldLayout;
1793 		VK_IMAGE_LAYOUT_GENERAL,					// VkImageLayout			newLayout;
1794 		VK_QUEUE_FAMILY_IGNORED,					// deUint32					srcQueueFamilyIndex;
1795 		VK_QUEUE_FAMILY_IGNORED,					// deUint32					dstQueueFamilyIndex;
1796 		DE_NULL,									// VkImage					image;
1797 		barrierRange,								// VkImageSubresourceRange	subresourceRange;
1798 	};
1799 
1800 	vk::VkClearColorValue			clearValue;
1801 	clearValue.uint32[0] = 0u;
1802 	clearValue.uint32[1] = 0u;
1803 	clearValue.uint32[2] = 0u;
1804 	clearValue.uint32[3] = 0u;
1805 
1806 	beginCommandBuffer(vk, *cmdBuffer, 0u);
1807 
1808 	typedef vk::Unique<vk::VkBufferView>		BufferViewHandleUp;
1809 	typedef de::SharedPtr<BufferViewHandleUp>	BufferViewHandleSp;
1810 	typedef de::SharedPtr<ImageWithMemory>		ImageWithMemorySp;
1811 	typedef de::SharedPtr<Unique<VkImageView> >	VkImageViewSp;
1812 	typedef de::MovePtr<BufferWithMemory>		BufferWithMemoryMp;
1813 
1814 	vector<BufferViewHandleSp>					bufferViews(1);
1815 
1816 	VkImageCreateFlags imageCreateFlags = 0;
1817 	if (m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE || m_data.viewType == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY)
1818 		imageCreateFlags = VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT;
1819 
1820 	const bool featureSampledImage = ((getPhysicalDeviceFormatProperties(m_context.getInstanceInterface(),
1821 										m_context.getPhysicalDevice(),
1822 										m_data.format).optimalTilingFeatures &
1823 										VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT) == VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT);
1824 
1825 	const VkImageUsageFlags usageSampledImage = (featureSampledImage ? VK_IMAGE_USAGE_SAMPLED_BIT : (VkImageUsageFlagBits)0);
1826 
1827 	const VkImageCreateInfo			outputImageCreateInfo			=
1828 	{
1829 		VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,	// VkStructureType			sType;
1830 		DE_NULL,								// const void*				pNext;
1831 		(VkImageCreateFlags)0u,					// VkImageCreateFlags		flags;
1832 		VK_IMAGE_TYPE_2D,						// VkImageType				imageType;
1833 		m_data.format,							// VkFormat					format;
1834 		{
1835 			DIM,								// deUint32	width;
1836 			DIM,								// deUint32	height;
1837 			1u									// deUint32	depth;
1838 		},										// VkExtent3D				extent;
1839 		1u,										// deUint32					mipLevels;
1840 		1u,										// deUint32					arrayLayers;
1841 		VK_SAMPLE_COUNT_1_BIT,					// VkSampleCountFlagBits	samples;
1842 		VK_IMAGE_TILING_OPTIMAL,				// VkImageTiling			tiling;
1843 		VK_IMAGE_USAGE_STORAGE_BIT
1844 		| usageSampledImage
1845 		| VK_IMAGE_USAGE_TRANSFER_SRC_BIT
1846 		| VK_IMAGE_USAGE_TRANSFER_DST_BIT,		// VkImageUsageFlags		usage;
1847 		VK_SHARING_MODE_EXCLUSIVE,				// VkSharingMode			sharingMode;
1848 		0u,										// deUint32					queueFamilyIndexCount;
1849 		DE_NULL,								// const deUint32*			pQueueFamilyIndices;
1850 		VK_IMAGE_LAYOUT_UNDEFINED				// VkImageLayout			initialLayout;
1851 	};
1852 
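	// m_data.imageDim is interpreted per view type: [0] is always the width, [1] is
	// the height (or the layer count for 1D arrays), and [2] is the depth for 3D
	// views or the layer count for 2D-array, cube and cube-array views.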
1853 	deUint32 width = m_data.imageDim[0];
1854 	deUint32 height = m_data.viewType != VK_IMAGE_VIEW_TYPE_1D && m_data.viewType != VK_IMAGE_VIEW_TYPE_1D_ARRAY ? m_data.imageDim[1] : 1;
1855 	deUint32 depth = m_data.viewType == VK_IMAGE_VIEW_TYPE_3D ? m_data.imageDim[2] : 1;
1856 	deUint32 layers = m_data.viewType == VK_IMAGE_VIEW_TYPE_1D_ARRAY ? m_data.imageDim[1] :
1857 						m_data.viewType != VK_IMAGE_VIEW_TYPE_1D &&
1858 						m_data.viewType != VK_IMAGE_VIEW_TYPE_2D &&
1859 						m_data.viewType != VK_IMAGE_VIEW_TYPE_3D ? m_data.imageDim[2] : 1;
1860 
1861 	const VkImageUsageFlags usageImage = (m_data.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE ? VK_IMAGE_USAGE_STORAGE_BIT : (VkImageUsageFlagBits)0);
1862 
1863 	const VkImageCreateInfo			imageCreateInfo			=
1864 	{
1865 		VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,	// VkStructureType			sType;
1866 		DE_NULL,								// const void*				pNext;
1867 		imageCreateFlags,						// VkImageCreateFlags		flags;
1868 		imageViewTypeToImageType(m_data.viewType),	// VkImageType				imageType;
1869 		m_data.format,							// VkFormat					format;
1870 		{
1871 			width,								// deUint32	width;
1872 			height,								// deUint32	height;
1873 			depth								// deUint32	depth;
1874 		},										// VkExtent3D				extent;
1875 		1u,										// deUint32					mipLevels;
1876 		layers,									// deUint32					arrayLayers;
1877 		m_data.samples,							// VkSampleCountFlagBits	samples;
1878 		VK_IMAGE_TILING_OPTIMAL,				// VkImageTiling			tiling;
1879 		usageImage
1880 		| usageSampledImage
1881 		| VK_IMAGE_USAGE_TRANSFER_SRC_BIT
1882 		| VK_IMAGE_USAGE_TRANSFER_DST_BIT,		// VkImageUsageFlags		usage;
1883 		VK_SHARING_MODE_EXCLUSIVE,				// VkSharingMode			sharingMode;
1884 		0u,										// deUint32					queueFamilyIndexCount;
1885 		DE_NULL,								// const deUint32*			pQueueFamilyIndices;
1886 		VK_IMAGE_LAYOUT_UNDEFINED				// VkImageLayout			initialLayout;
1887 	};
1888 
1889 	VkImageViewCreateInfo		imageViewCreateInfo		=
1890 	{
1891 		VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,	// VkStructureType			sType;
1892 		DE_NULL,									// const void*				pNext;
1893 		(VkImageViewCreateFlags)0u,					// VkImageViewCreateFlags	flags;
1894 		DE_NULL,									// VkImage					image;
1895 		VK_IMAGE_VIEW_TYPE_2D,						// VkImageViewType			viewType;
1896 		m_data.format,								// VkFormat					format;
1897 		{
1898 			VK_COMPONENT_SWIZZLE_IDENTITY,
1899 			VK_COMPONENT_SWIZZLE_IDENTITY,
1900 			VK_COMPONENT_SWIZZLE_IDENTITY,
1901 			VK_COMPONENT_SWIZZLE_IDENTITY
1902 		},											// VkComponentMapping		 components;
1903 		{
1904 			VK_IMAGE_ASPECT_COLOR_BIT,				// VkImageAspectFlags	aspectMask;
1905 			0u,										// deUint32				baseMipLevel;
1906 			VK_REMAINING_MIP_LEVELS,				// deUint32				levelCount;
1907 			0u,										// deUint32				baseArrayLayer;
1908 			VK_REMAINING_ARRAY_LAYERS				// deUint32				layerCount;
1909 		}											// VkImageSubresourceRange	subresourceRange;
1910 	};
1911 
1912 	vector<ImageWithMemorySp> images(2);
1913 	vector<VkImageViewSp> imageViews(2);
1914 
1915 	if (m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
1916 	{
1917 		deUint32 *ptr = (deUint32 *)bufferPtr;
1918 		deMemcpy(ptr, layout.refData.data(), layout.refData.size());
1919 	}
1920 
1921 	BufferWithMemoryMp				bufferImageR64;
1922 	BufferWithMemoryMp				bufferOutputImageR64;
1923 	const VkDeviceSize				sizeOutputR64	= 8 * outputImageCreateInfo.extent.width * outputImageCreateInfo.extent.height * outputImageCreateInfo.extent.depth;
1924 	const VkDeviceSize				sizeOneLayers	= 8 * imageCreateInfo.extent.width * imageCreateInfo.extent.height * imageCreateInfo.extent.depth;
1925 	const VkDeviceSize				sizeImageR64	= sizeOneLayers * layers;
1926 
1927 	if (formatIsR64(m_data.format))
1928 	{
1929 		bufferOutputImageR64 = de::MovePtr<BufferWithMemory>(new BufferWithMemory(
1930 			vk, device, allocator,
1931 			makeBufferCreateInfo(sizeOutputR64, VK_BUFFER_USAGE_TRANSFER_SRC_BIT),
1932 			MemoryRequirement::HostVisible));
1933 
1934 		deUint64* bufferUint64Ptr = (deUint64 *)bufferOutputImageR64->getAllocation().getHostPtr();
1935 
1936 		for (int ndx = 0; ndx < static_cast<int>(sizeOutputR64 / 8); ++ndx)
1937 		{
1938 			bufferUint64Ptr[ndx] = 0;
1939 		}
1940 		flushAlloc(vk, device, bufferOutputImageR64->getAllocation());
1941 
1942 		bufferImageR64 = de::MovePtr<BufferWithMemory>(new BufferWithMemory(
1943 			vk, device, allocator,
1944 			makeBufferCreateInfo(sizeImageR64, VK_BUFFER_USAGE_TRANSFER_SRC_BIT | VK_BUFFER_USAGE_STORAGE_BUFFER_BIT),
1945 			MemoryRequirement::HostVisible));
1946 
1947 		for (deUint32 layerNdx = 0; layerNdx < layers; ++layerNdx)
1948 		{
1949 			bufferUint64Ptr = (deUint64 *)bufferImageR64->getAllocation().getHostPtr();
1950 			bufferUint64Ptr = bufferUint64Ptr + ((sizeOneLayers * layerNdx) / 8);
1951 
1952 			for (int ndx = 0; ndx < static_cast<int>(sizeOneLayers / 8); ++ndx)
1953 			{
1954 				bufferUint64Ptr[ndx] = 0x1234567887654321 + ((m_data.viewType != VK_IMAGE_VIEW_TYPE_CUBE && m_data.viewType != VK_IMAGE_VIEW_TYPE_CUBE_ARRAY) ? layerNdx : 0);
1955 			}
1956 		}
1957 		flushAlloc(vk, device, bufferImageR64->getAllocation());
1958 	}
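	// The R64 path avoids vkCmdClearColorImage (VkClearColorValue has no 64-bit
	// members), so texel data is staged on the host instead: a recognizable
	// 0x1234567887654321 pattern, biased per layer except for cube views, which is
	// transferred into the image further below.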
1959 
1960 	for (size_t b = 0; b < bindings.size(); ++b)
1961 	{
1962 		VkDescriptorSetLayoutBinding &binding = bindings[b];
1963 
1964 		if (binding.descriptorCount == 0)
1965 			continue;
1966 		if (b == 1 && m_data.nullDescriptor)
1967 			continue;
1968 
1969 		DE_ASSERT(binding.descriptorCount == 1);
1970 		switch (binding.descriptorType)
1971 		{
1972 		default: DE_ASSERT(0); // Fallthrough
1973 		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
1974 		case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
1975 		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
1976 		case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
1977 			{
1978 				deUint32 *ptr = (deUint32 *)bufferPtr;
1979 				deMemcpy(ptr, layout.refData.data(), layout.refData.size());
1980 			}
1981 			break;
1982 		case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
1983 		case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
1984 			{
1985 				deUint32 *ptr = (deUint32 *)bufferPtr;
1986 				deMemcpy(ptr, layout.refData.data(), layout.refData.size());
1987 
1988 				const vk::VkBufferViewCreateInfo viewCreateInfo =
1989 				{
1990 					vk::VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO,
1991 					DE_NULL,
1992 					(vk::VkBufferViewCreateFlags)0,
1993 					**buffer,								// buffer
1994 					m_data.format,							// format
1995 					(vk::VkDeviceSize)0,					// offset
1996 					(vk::VkDeviceSize)m_data.bufferLen		// range
1997 				};
1998 				vk::Move<vk::VkBufferView> bufferView = vk::createBufferView(vk, device, &viewCreateInfo);
1999 				bufferViews[0] = BufferViewHandleSp(new BufferViewHandleUp(bufferView));
2000 			}
2001 			break;
2002 		case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
2003 		case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
2004 			{
2005 				if (bindings.size() > 1 &&
2006 					bindings[1].descriptorType == VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
2007 				{
2008 					if (m_data.format == VK_FORMAT_R64_SINT)
2009 						imageViewCreateInfo.format = VK_FORMAT_R32G32_SINT;
2010 
2011 					if (m_data.format == VK_FORMAT_R64_UINT)
2012 						imageViewCreateInfo.format = VK_FORMAT_R32G32_UINT;
2013 				}
2014 
2015 				if (b == 0)
2016 				{
2017 					images[b] = ImageWithMemorySp(new ImageWithMemory(vk, device, allocator, outputImageCreateInfo, MemoryRequirement::Any));
2018 					imageViewCreateInfo.viewType = VK_IMAGE_VIEW_TYPE_2D;
2019 				}
2020 				else
2021 				{
2022 					images[b] = ImageWithMemorySp(new ImageWithMemory(vk, device, allocator, imageCreateInfo, MemoryRequirement::Any));
2023 					imageViewCreateInfo.viewType = m_data.viewType;
2024 				}
2025 				imageViewCreateInfo.image = **images[b];
2026 				imageViews[b] = VkImageViewSp(new Unique<VkImageView>(createImageView(vk, device, &imageViewCreateInfo, NULL)));
2027 
2028 				VkImage						img			= **images[b];
2029 				const VkBuffer&				bufferR64	= ((b == 0) ? *(*bufferOutputImageR64) : *(*bufferImageR64));
2030 				const VkImageCreateInfo&	imageInfo	= ((b == 0) ? outputImageCreateInfo : imageCreateInfo);
2031 				const deUint32				clearLayers	= b == 0 ? 1 : layers;
2032 
2033 				if (!formatIsR64(m_data.format))
2034 				{
2035 					preImageBarrier.image	= img;
2036 					if (b == 1)
2037 					{
2038 						if (formatIsFloat(m_data.format))
2039 						{
2040 							deMemcpy(&clearValue.float32[0], layout.refData.data(), layout.refData.size());
2041 						}
2042 						else if (formatIsSignedInt(m_data.format))
2043 						{
2044 							deMemcpy(&clearValue.int32[0], layout.refData.data(), layout.refData.size());
2045 						}
2046 						else
2047 						{
2048 							deMemcpy(&clearValue.uint32[0], layout.refData.data(), layout.refData.size());
2049 						}
2050 					}
2051 					postImageBarrier.image	= img;
2052 
2053 					vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &preImageBarrier);
2054 
2055 					for (unsigned int i = 0; i < clearLayers; ++i)
2056 					{
2057 						const VkImageSubresourceRange	clearRange				=
2058 						{
2059 							VK_IMAGE_ASPECT_COLOR_BIT,	// VkImageAspectFlags	aspectMask;
2060 							0u,							// deUint32				baseMipLevel;
2061 							VK_REMAINING_MIP_LEVELS,	// deUint32				levelCount;
2062 							i,							// deUint32				baseArrayLayer;
2063 							1							// deUint32				layerCount;
2064 						};
2065 
2066 						vk.cmdClearColorImage(*cmdBuffer, img, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, &clearValue, 1, &clearRange);
2067 
2068 						// Use same data for all faces for cube(array), otherwise make value a function of the layer
2069 						if (m_data.viewType != VK_IMAGE_VIEW_TYPE_CUBE && m_data.viewType != VK_IMAGE_VIEW_TYPE_CUBE_ARRAY)
2070 						{
2071 							if (formatIsFloat(m_data.format))
2072 								clearValue.float32[0] += 1;
2073 							else if (formatIsSignedInt(m_data.format))
2074 								clearValue.int32[0] += 1;
2075 							else
2076 								clearValue.uint32[0] += 1;
2077 						}
2078 					}
2079 					vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &postImageBarrier);
2080 				}
2081 				else
2082 				{
2083 					if ((m_data.samples > VK_SAMPLE_COUNT_1_BIT) && (b == 1))
2084 					{
2085 						const VkImageSubresourceRange	subresourceRange	= makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, clearLayers);
2086 						const VkImageMemoryBarrier		imageBarrierPre		= makeImageMemoryBarrier(0,
2087 																				VK_ACCESS_SHADER_WRITE_BIT,
2088 																				VK_IMAGE_LAYOUT_UNDEFINED,
2089 																				VK_IMAGE_LAYOUT_GENERAL,
2090 																				img,
2091 																				subresourceRange);
2092 						const VkImageMemoryBarrier		imageBarrierPost	= makeImageMemoryBarrier(VK_ACCESS_SHADER_WRITE_BIT,
2093 																				VK_ACCESS_SHADER_READ_BIT,
2094 																				VK_IMAGE_LAYOUT_GENERAL,
2095 																				VK_IMAGE_LAYOUT_GENERAL,
2096 																				img,
2097 																				subresourceRange);
2098 
2099 						descriptorSetLayoutR64 =
2100 							DescriptorSetLayoutBuilder()
2101 							.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
2102 							.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
2103 							.build(vk, device);
2104 
2105 						descriptorPoolR64 =
2106 							DescriptorPoolBuilder()
2107 							.addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, 1)
2108 							.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1)
2109 							.build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 2u);
2110 
2111 						descriptorSetFillImage = makeDescriptorSet(vk,
2112 							device,
2113 							*descriptorPoolR64,
2114 							*descriptorSetLayoutR64);
2115 
2116 						shaderModuleFillImage	= createShaderModule(vk, device, m_context.getBinaryCollection().get("fillShader"), 0);
2117 						pipelineLayoutFillImage	= makePipelineLayout(vk, device, *descriptorSetLayoutR64);
2118 						pipelineFillImage		= makeComputePipeline(vk, device, *pipelineLayoutFillImage, *shaderModuleFillImage);
2119 
2120 						const VkDescriptorImageInfo		descResultImageInfo		= makeDescriptorImageInfo(DE_NULL, **imageViews[b], VK_IMAGE_LAYOUT_GENERAL);
2121 						const VkDescriptorBufferInfo	descResultBufferInfo	= makeDescriptorBufferInfo(bufferR64, 0, sizeImageR64);
2122 
2123 						DescriptorSetUpdateBuilder()
2124 							.writeSingle(*descriptorSetFillImage, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descResultImageInfo)
2125 							.writeSingle(*descriptorSetFillImage, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &descResultBufferInfo)
2126 							.update(vk, device);
2127 
2128 						vk.cmdPipelineBarrier(*cmdBuffer,
2129 							VK_PIPELINE_STAGE_HOST_BIT,
2130 							VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
2131 							(VkDependencyFlags)0,
2132 							0, (const VkMemoryBarrier*)DE_NULL,
2133 							0, (const VkBufferMemoryBarrier*)DE_NULL,
2134 							1, &imageBarrierPre);
2135 
2136 						vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineFillImage);
2137 						vk.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayoutFillImage, 0u, 1u, &(*descriptorSetFillImage), 0u, DE_NULL);
2138 
2139 						vk.cmdDispatch(*cmdBuffer, imageInfo.extent.width, imageInfo.extent.height, clearLayers);
2140 
2141 						vk.cmdPipelineBarrier(*cmdBuffer,
2142 									VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT,
2143 									VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT,
2144 									(VkDependencyFlags)0,
2145 									0, (const VkMemoryBarrier*)DE_NULL,
2146 									0, (const VkBufferMemoryBarrier*)DE_NULL,
2147 									1, &imageBarrierPost);
2148 					}
2149 					else
2150 					{
2151 						VkDeviceSize					size			= ((b == 0) ? sizeOutputR64 : sizeImageR64);
2152 						const vector<VkBufferImageCopy>	bufferImageCopy	(1, makeBufferImageCopy(imageInfo.extent, makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, clearLayers)));
2153 
2154 						copyBufferToImage(vk,
2155 							*cmdBuffer,
2156 							bufferR64,
2157 							size,
2158 							bufferImageCopy,
2159 							VK_IMAGE_ASPECT_COLOR_BIT,
2160 							1,
2161 							clearLayers, img, VK_IMAGE_LAYOUT_GENERAL, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT);
2162 					}
2163 				}
2164 			}
2165 			break;
2166 		}
2167 	}
2168 
2169 	const VkSamplerCreateInfo	samplerParams	=
2170 	{
2171 		VK_STRUCTURE_TYPE_SAMPLER_CREATE_INFO,		// VkStructureType			sType;
2172 		DE_NULL,									// const void*				pNext;
2173 		0,											// VkSamplerCreateFlags		flags;
2174 		VK_FILTER_NEAREST,							// VkFilter					magFilter;
2175 		VK_FILTER_NEAREST,							// VkFilter					minFilter;
2176 		VK_SAMPLER_MIPMAP_MODE_NEAREST,				// VkSamplerMipmapMode		mipmapMode;
2177 		VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER,	// VkSamplerAddressMode		addressModeU;
2178 		VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER,	// VkSamplerAddressMode		addressModeV;
2179 		VK_SAMPLER_ADDRESS_MODE_CLAMP_TO_BORDER,	// VkSamplerAddressMode		addressModeW;
2180 		0.0f,										// float					mipLodBias;
2181 		VK_FALSE,									// VkBool32					anisotropyEnable;
2182 		1.0f,										// float					maxAnisotropy;
2183 		VK_FALSE,									// VkBool32					compareEnable;
2184 		VK_COMPARE_OP_ALWAYS,						// VkCompareOp				compareOp;
2185 		0.0f,										// float					minLod;
2186 		0.0f,										// float					maxLod;
2187 		formatIsFloat(m_data.format) ?
2188 			VK_BORDER_COLOR_FLOAT_TRANSPARENT_BLACK :
2189 			VK_BORDER_COLOR_INT_TRANSPARENT_BLACK,	// VkBorderColor			borderColor;
2190 		VK_FALSE									// VkBool32					unnormalizedCoordinates;
2191 	};
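	// Nearest filtering plus a transparent-black border keeps out-of-range sample
	// coordinates deterministic: border taps and null-descriptor samples are both
	// expected to come back as zeros, matching the shader checks above.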
2192 
2193 	Move<VkSampler>				sampler			(createSampler(vk, device, &samplerParams));
2194 
2195 	// Flush modified memory.
2196 	if (!m_data.nullDescriptor)
2197 		flushAlloc(vk, device, buffer->getAllocation());
2198 
2199 	const VkPipelineLayoutCreateInfo pipelineLayoutCreateInfo =
2200 	{
2201 		VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,				// sType
2202 		DE_NULL,													// pNext
2203 		(VkPipelineLayoutCreateFlags)0,
2204 		1u,															// setLayoutCount
2205 		&descriptorSetLayout.get(),									// pSetLayouts
2206 		0u,															// pushConstantRangeCount
2207 		DE_NULL,													// pPushConstantRanges
2208 	};
2209 
2210 	Move<VkPipelineLayout> pipelineLayout = createPipelineLayout(vk, device, &pipelineLayoutCreateInfo, NULL);
2211 
2212 	de::MovePtr<BufferWithMemory> copyBuffer;
2213 	copyBuffer = de::MovePtr<BufferWithMemory>(new BufferWithMemory(
2214 		vk, device, allocator, makeBufferCreateInfo(DIM*DIM*16, VK_BUFFER_USAGE_TRANSFER_DST_BIT), MemoryRequirement::HostVisible));
2215 
2216 	{
2217 		vector<VkDescriptorBufferInfo> bufferInfoVec(2);
2218 		vector<VkDescriptorImageInfo> imageInfoVec(2);
2219 		vector<VkBufferView> bufferViewVec(2);
2220 		vector<VkWriteDescriptorSet> writesBeforeBindVec(0);
2221 		int vecIndex = 0;
2222 		int numDynamic = 0;
2223 
2224 		vector<VkDescriptorUpdateTemplateEntry> imgTemplateEntriesBefore,
2225 												bufTemplateEntriesBefore,
2226 												texelBufTemplateEntriesBefore;
2227 
2228 		for (size_t b = 0; b < bindings.size(); ++b)
2229 		{
2230 			VkDescriptorSetLayoutBinding &binding = bindings[b];
2231 			// Fill in the descriptor info and update-template entries for this binding.
2232 			if (binding.descriptorCount > 0)
2233 			{
2234 				// Binding 0 is the output image; binding 1 holds the descriptor under test.
2235 				switch (binding.descriptorType)
2236 				{
2237 				case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
2238 				case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
2239 					// Use a null image view when testing null descriptors at binding 1.
2240 					if (b == 1 && m_data.nullDescriptor)
2241 						imageInfoVec[vecIndex] = makeDescriptorImageInfo(*sampler, DE_NULL, VK_IMAGE_LAYOUT_GENERAL);
2242 					else
2243 						imageInfoVec[vecIndex] = makeDescriptorImageInfo(*sampler, **imageViews[b], VK_IMAGE_LAYOUT_GENERAL);
2244 					break;
2245 				case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
2246 				case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
2247 					if (b == 1 && m_data.nullDescriptor)
2248 						bufferViewVec[vecIndex] = DE_NULL;
2249 					else
2250 						bufferViewVec[vecIndex] = **bufferViews[0];
2251 					break;
2252 				default:
2253 					// Other descriptor types.
2254 					if (b == 1 && m_data.nullDescriptor)
2255 						bufferInfoVec[vecIndex] = makeDescriptorBufferInfo(DE_NULL, 0, VK_WHOLE_SIZE);
2256 					else
2257 						bufferInfoVec[vecIndex] = makeDescriptorBufferInfo(**buffer, 0, layout.refData.size());
2258 					break;
2259 				}
2260 
2261 				VkWriteDescriptorSet w =
2262 				{
2263 					VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,				// sType
2264 					DE_NULL,											// pNext
2265 					m_data.pushDescriptor ? DE_NULL : *descriptorSet,	// dstSet
2266 					(deUint32)b,										// binding
2267 					0,													// dstArrayElement
2268 					1u,													// descriptorCount
2269 					binding.descriptorType,								// descriptorType
2270 					&imageInfoVec[vecIndex],							// pImageInfo
2271 					&bufferInfoVec[vecIndex],							// pBufferInfo
2272 					&bufferViewVec[vecIndex],							// pTexelBufferView
2273 				};
2274 
2275 				VkDescriptorUpdateTemplateEntry templateEntry =
2276 				{
2277 					(deUint32)b,				// uint32_t				dstBinding;
2278 					0,							// uint32_t				dstArrayElement;
2279 					1u,							// uint32_t				descriptorCount;
2280 					binding.descriptorType,		// VkDescriptorType		descriptorType;
2281 					0,							// size_t				offset;
2282 					0,							// size_t				stride;
2283 				};
2284 
2285 				switch (binding.descriptorType)
2286 				{
2287 				default: DE_ASSERT(0); // Fallthrough
2288 				case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
2289 				case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE:
2290 					templateEntry.offset = vecIndex * sizeof(VkDescriptorImageInfo);
2291 					imgTemplateEntriesBefore.push_back(templateEntry);
2292 					break;
2293 				case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
2294 				case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
2295 					templateEntry.offset = vecIndex * sizeof(VkBufferView);
2296 					texelBufTemplateEntriesBefore.push_back(templateEntry);
2297 					break;
2298 				case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
2299 				case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
2300 				case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
2301 				case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC:
2302 					templateEntry.offset = vecIndex * sizeof(VkDescriptorBufferInfo);
2303 					bufTemplateEntriesBefore.push_back(templateEntry);
2304 					break;
2305 				}
2306 
2307 				vecIndex++;
2308 
2309 				writesBeforeBindVec.push_back(w);
2310 
2311 				// Count the number of dynamic descriptors in this set.
2312 				if (binding.descriptorType == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ||
2313 					binding.descriptorType == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC)
2314 				{
2315 					numDynamic++;
2316 				}
2317 			}
2318 		}
2319 
2320 		// Make zeros have at least one element so &zeros[0] works
2321 		vector<deUint32> zeros(de::max(1,numDynamic));
2322 		deMemset(&zeros[0], 0, numDynamic * sizeof(deUint32));
2323 
2324 		// Select between vkUpdateDescriptorSets and vkUpdateDescriptorSetWithTemplate based on the test parameters.
2325 		if (m_data.useTemplate)
2326 		{
2327 			VkDescriptorUpdateTemplateCreateInfo templateCreateInfo =
2328 			{
2329 				VK_STRUCTURE_TYPE_DESCRIPTOR_UPDATE_TEMPLATE_CREATE_INFO,	// VkStructureType							sType;
2330 				NULL,														// const void*								pNext;
2331 				0,															// VkDescriptorUpdateTemplateCreateFlags	flags;
2332 				0,															// uint32_t									descriptorUpdateEntryCount;
2333 				DE_NULL,													// const VkDescriptorUpdateTemplateEntry*	pDescriptorUpdateEntries;
2334 				m_data.pushDescriptor ?
2335 					VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_PUSH_DESCRIPTORS_KHR :
2336 					VK_DESCRIPTOR_UPDATE_TEMPLATE_TYPE_DESCRIPTOR_SET,		// VkDescriptorUpdateTemplateType			templateType;
2337 				descriptorSetLayout.get(),									// VkDescriptorSetLayout					descriptorSetLayout;
2338 				bindPoint,													// VkPipelineBindPoint						pipelineBindPoint;
2339 				*pipelineLayout,											// VkPipelineLayout							pipelineLayout;
2340 				0,															// uint32_t									set;
2341 			};
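			// Each VkDescriptorUpdateTemplateEntry recorded earlier stores its offset as
			// vecIndex * sizeof(the descriptor-info type), indexing into one of the flat
			// arrays below. Since a template update takes a single base pointer (pData),
			// a separate template is created per descriptor-info array.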
2342 
2343 			void *templateVectorData[] =
2344 			{
2345 				imageInfoVec.data(),
2346 				bufferInfoVec.data(),
2347 				bufferViewVec.data(),
2348 			};
2349 
2350 			vector<VkDescriptorUpdateTemplateEntry> *templateVectorsBefore[] =
2351 			{
2352 				&imgTemplateEntriesBefore,
2353 				&bufTemplateEntriesBefore,
2354 				&texelBufTemplateEntriesBefore,
2355 			};
2356 
2357 			if (m_data.pushDescriptor)
2358 			{
2359 				for (size_t i = 0; i < DE_LENGTH_OF_ARRAY(templateVectorsBefore); ++i)
2360 				{
2361 					if (templateVectorsBefore[i]->size())
2362 					{
2363 						templateCreateInfo.descriptorUpdateEntryCount = (deUint32)templateVectorsBefore[i]->size();
2364 						templateCreateInfo.pDescriptorUpdateEntries = templateVectorsBefore[i]->data();
2365 						Move<VkDescriptorUpdateTemplate> descriptorUpdateTemplate = createDescriptorUpdateTemplate(vk, device, &templateCreateInfo, NULL);
2366 						vk.cmdPushDescriptorSetWithTemplateKHR(*cmdBuffer, *descriptorUpdateTemplate, *pipelineLayout, 0, templateVectorData[i]);
2367 					}
2368 				}
2369 			}
2370 			else
2371 			{
2372 				for (size_t i = 0; i < DE_LENGTH_OF_ARRAY(templateVectorsBefore); ++i)
2373 				{
2374 					if (templateVectorsBefore[i]->size())
2375 					{
2376 						templateCreateInfo.descriptorUpdateEntryCount = (deUint32)templateVectorsBefore[i]->size();
2377 						templateCreateInfo.pDescriptorUpdateEntries = templateVectorsBefore[i]->data();
2378 						Move<VkDescriptorUpdateTemplate> descriptorUpdateTemplate = createDescriptorUpdateTemplate(vk, device, &templateCreateInfo, NULL);
2379 						vk.updateDescriptorSetWithTemplate(device, descriptorSet.get(), *descriptorUpdateTemplate, templateVectorData[i]);
2380 					}
2381 				}
2382 
2383 				vk.cmdBindDescriptorSets(*cmdBuffer, bindPoint, *pipelineLayout, 0, 1, &descriptorSet.get(), numDynamic, &zeros[0]);
2384 			}
2385 		}
2386 		else
2387 		{
2388 			if (m_data.pushDescriptor)
2389 			{
2390 				if (writesBeforeBindVec.size())
2391 				{
2392 					vk.cmdPushDescriptorSetKHR(*cmdBuffer, bindPoint, *pipelineLayout, 0, (deUint32)writesBeforeBindVec.size(), &writesBeforeBindVec[0]);
2393 				}
2394 			}
2395 			else
2396 			{
2397 				if (writesBeforeBindVec.size())
2398 				{
2399 					vk.updateDescriptorSets(device, (deUint32)writesBeforeBindVec.size(), &writesBeforeBindVec[0], 0, NULL);
2400 				}
2401 
2402 				vk.cmdBindDescriptorSets(*cmdBuffer, bindPoint, *pipelineLayout, 0, 1, &descriptorSet.get(), numDynamic, &zeros[0]);
2403 			}
2404 		}
2405 	}
2406 
2407 	Move<VkPipeline> pipeline;
2408 	Move<VkRenderPass> renderPass;
2409 	Move<VkFramebuffer> framebuffer;
2410 
2411 	de::MovePtr<BufferWithMemory> sbtBuffer;
2412 
2413 	if (m_data.stage == STAGE_COMPUTE)
2414 	{
2415 		const Unique<VkShaderModule>	shader(createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0));
2416 
2417 		pipeline = makeComputePipeline(vk, device, *pipelineLayout, *shader);
2418 
2419 	}
2420 	else if (m_data.stage == STAGE_RAYGEN)
2421 	{
2422 		const Unique<VkShaderModule>	shader(createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0));
2423 
2424 		const VkPipelineShaderStageCreateInfo	shaderCreateInfo =
2425 		{
2426 			VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
2427 			DE_NULL,
2428 			(VkPipelineShaderStageCreateFlags)0,
2429 			VK_SHADER_STAGE_RAYGEN_BIT_NV,								// stage
2430 			*shader,													// shader
2431 			"main",
2432 			DE_NULL,													// pSpecializationInfo
2433 		};
2434 
2435 		VkRayTracingShaderGroupCreateInfoNV group =
2436 		{
2437 			VK_STRUCTURE_TYPE_RAY_TRACING_SHADER_GROUP_CREATE_INFO_NV,
2438 			DE_NULL,
2439 			VK_RAY_TRACING_SHADER_GROUP_TYPE_GENERAL_NV,			// type
2440 			0,														// generalShader
2441 			VK_SHADER_UNUSED_NV,									// closestHitShader
2442 			VK_SHADER_UNUSED_NV,									// anyHitShader
2443 			VK_SHADER_UNUSED_NV,									// intersectionShader
2444 		};
2445 
2446 		VkRayTracingPipelineCreateInfoNV pipelineCreateInfo = {
2447 			VK_STRUCTURE_TYPE_RAY_TRACING_PIPELINE_CREATE_INFO_NV,	// sType
2448 			DE_NULL,												// pNext
2449 			0,														// flags
2450 			1,														// stageCount
2451 			&shaderCreateInfo,										// pStages
2452 			1,														// groupCount
2453 			&group,													// pGroups
2454 			0,														// maxRecursionDepth
2455 			*pipelineLayout,										// layout
2456 			(vk::VkPipeline)0,										// basePipelineHandle
2457 			0u,														// basePipelineIndex
2458 		};
2459 
2460 		pipeline = createRayTracingPipelineNV(vk, device, DE_NULL, &pipelineCreateInfo, NULL);
2461 
2462 		sbtBuffer = de::MovePtr<BufferWithMemory>(new BufferWithMemory(
2463 			vk, device, allocator, makeBufferCreateInfo(rayTracingProperties.shaderGroupHandleSize, VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_RAY_TRACING_BIT_NV), MemoryRequirement::HostVisible));
2464 
2465 		deUint32 *ptr = (deUint32 *)sbtBuffer->getAllocation().getHostPtr();
2466 		invalidateAlloc(vk, device, sbtBuffer->getAllocation());
2467 
2468 		vk.getRayTracingShaderGroupHandlesNV(device, *pipeline, 0, 1, rayTracingProperties.shaderGroupHandleSize, ptr);
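		// getRayTracingShaderGroupHandlesNV copies the raygen group handle straight
		// into the mapped SBT memory; this presumably relies on the allocation being
		// host-coherent, since no flushAlloc() follows the write.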
2469 	}
2470 	else
2471 	{
2472 		const VkSubpassDescription		subpassDesc				=
2473 		{
2474 			(VkSubpassDescriptionFlags)0,											// VkSubpassDescriptionFlags	flags
2475 			VK_PIPELINE_BIND_POINT_GRAPHICS,										// VkPipelineBindPoint			pipelineBindPoint
2476 			0u,																		// deUint32						inputAttachmentCount
2477 			DE_NULL,																// const VkAttachmentReference*	pInputAttachments
2478 			0u,																		// deUint32						colorAttachmentCount
2479 			DE_NULL,																// const VkAttachmentReference*	pColorAttachments
2480 			DE_NULL,																// const VkAttachmentReference*	pResolveAttachments
2481 			DE_NULL,																// const VkAttachmentReference*	pDepthStencilAttachment
2482 			0u,																		// deUint32						preserveAttachmentCount
2483 			DE_NULL																	// const deUint32*				pPreserveAttachments
2484 		};
2485 
2486 		const VkSubpassDependency		subpassDependency		=
2487 		{
2488 			VK_SUBPASS_EXTERNAL,							// deUint32				srcSubpass
2489 			0,												// deUint32				dstSubpass
2490 			VK_PIPELINE_STAGE_TRANSFER_BIT,					// VkPipelineStageFlags	srcStageMask
2491 			VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT,			// VkPipelineStageFlags	dstStageMask
2492 			VK_ACCESS_TRANSFER_WRITE_BIT,					// VkAccessFlags		srcAccessMask
			VK_ACCESS_INPUT_ATTACHMENT_READ_BIT | VK_ACCESS_SHADER_READ_BIT,	// VkAccessFlags		dstAccessMask
2494 			VK_DEPENDENCY_BY_REGION_BIT						// VkDependencyFlags	dependencyFlags
2495 		};
2496 
2497 		const VkRenderPassCreateInfo	renderPassParams		=
2498 		{
2499 			VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,				// VkStructureType					sType
2500 			DE_NULL,												// const void*						pNext
2501 			(VkRenderPassCreateFlags)0,								// VkRenderPassCreateFlags			flags
2502 			0u,														// deUint32							attachmentCount
2503 			DE_NULL,												// const VkAttachmentDescription*	pAttachments
2504 			1u,														// deUint32							subpassCount
2505 			&subpassDesc,											// const VkSubpassDescription*		pSubpasses
2506 			1u,														// deUint32							dependencyCount
2507 			&subpassDependency										// const VkSubpassDependency*		pDependencies
2508 		};
2509 
2510 		renderPass = createRenderPass(vk, device, &renderPassParams);
2511 
2512 		const vk::VkFramebufferCreateInfo	framebufferParams	=
2513 		{
2514 			vk::VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,			// sType
2515 			DE_NULL,												// pNext
2516 			(vk::VkFramebufferCreateFlags)0,
2517 			*renderPass,											// renderPass
2518 			0u,														// attachmentCount
2519 			DE_NULL,												// pAttachments
2520 			DIM,													// width
2521 			DIM,													// height
2522 			1u,														// layers
2523 		};
2524 
2525 		framebuffer = createFramebuffer(vk, device, &framebufferParams);
2526 
2527 		const VkVertexInputBindingDescription			vertexInputBindingDescription		=
2528 		{
2529 			0u,								// deUint32			 binding
2530 			(deUint32)formatBytes,			// deUint32			 stride
2531 			VK_VERTEX_INPUT_RATE_VERTEX,	// VkVertexInputRate	inputRate
2532 		};
2533 
2534 		const VkVertexInputAttributeDescription			vertexInputAttributeDescription		=
2535 		{
2536 			0u,								// deUint32	location
2537 			0u,								// deUint32	binding
2538 			m_data.format,					// VkFormat	format
2539 			0u								// deUint32	offset
2540 		};
2541 
2542 		deUint32 numAttribs = m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH ? 1u : 0u;
2543 
2544 		const VkPipelineVertexInputStateCreateInfo		vertexInputStateCreateInfo		=
2545 		{
2546 			VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,	// VkStructureType							sType;
2547 			DE_NULL,													// const void*								pNext;
2548 			(VkPipelineVertexInputStateCreateFlags)0,					// VkPipelineVertexInputStateCreateFlags	flags;
2549 			numAttribs,													// deUint32									vertexBindingDescriptionCount;
2550 			&vertexInputBindingDescription,								// const VkVertexInputBindingDescription*	pVertexBindingDescriptions;
2551 			numAttribs,													// deUint32									vertexAttributeDescriptionCount;
2552 			&vertexInputAttributeDescription							// const VkVertexInputAttributeDescription*	pVertexAttributeDescriptions;
2553 		};
2554 
2555 		const VkPipelineInputAssemblyStateCreateInfo	inputAssemblyStateCreateInfo	=
2556 		{
2557 			VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,	// VkStructureType							sType;
2558 			DE_NULL,														// const void*								pNext;
2559 			(VkPipelineInputAssemblyStateCreateFlags)0,						// VkPipelineInputAssemblyStateCreateFlags	flags;
2560 			(m_data.stage == STAGE_VERTEX) ? VK_PRIMITIVE_TOPOLOGY_POINT_LIST : VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP, // VkPrimitiveTopology						topology;
2561 			VK_FALSE														// VkBool32									primitiveRestartEnable;
2562 		};
2563 
2564 		const VkPipelineRasterizationStateCreateInfo	rasterizationStateCreateInfo	=
2565 		{
2566 			VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,		// VkStructureType							sType;
2567 			DE_NULL,														// const void*								pNext;
2568 			(VkPipelineRasterizationStateCreateFlags)0,						// VkPipelineRasterizationStateCreateFlags	flags;
2569 			VK_FALSE,														// VkBool32									depthClampEnable;
2570 			(m_data.stage == STAGE_VERTEX) ? VK_TRUE : VK_FALSE,			// VkBool32									rasterizerDiscardEnable;
2571 			VK_POLYGON_MODE_FILL,											// VkPolygonMode							polygonMode;
2572 			VK_CULL_MODE_NONE,												// VkCullModeFlags							cullMode;
2573 			VK_FRONT_FACE_CLOCKWISE,										// VkFrontFace								frontFace;
2574 			VK_FALSE,														// VkBool32									depthBiasEnable;
2575 			0.0f,															// float									depthBiasConstantFactor;
2576 			0.0f,															// float									depthBiasClamp;
2577 			0.0f,															// float									depthBiasSlopeFactor;
2578 			1.0f															// float									lineWidth;
2579 		};
2580 
2581 		const VkPipelineMultisampleStateCreateInfo		multisampleStateCreateInfo =
2582 		{
2583 			VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,	// VkStructureType							sType
2584 			DE_NULL,													// const void*								pNext
2585 			0u,															// VkPipelineMultisampleStateCreateFlags	flags
2586 			VK_SAMPLE_COUNT_1_BIT,										// VkSampleCountFlagBits					rasterizationSamples
2587 			VK_FALSE,													// VkBool32									sampleShadingEnable
2588 			1.0f,														// float									minSampleShading
2589 			DE_NULL,													// const VkSampleMask*						pSampleMask
2590 			VK_FALSE,													// VkBool32									alphaToCoverageEnable
2591 			VK_FALSE													// VkBool32									alphaToOneEnable
2592 		};
2593 
2594 		VkViewport viewport = makeViewport(DIM, DIM);
2595 		VkRect2D scissor = makeRect2D(DIM, DIM);
2596 
2597 		const VkPipelineViewportStateCreateInfo			viewportStateCreateInfo				=
2598 		{
2599 			VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,	// VkStructureType							sType
2600 			DE_NULL,												// const void*								pNext
2601 			(VkPipelineViewportStateCreateFlags)0,					// VkPipelineViewportStateCreateFlags		flags
2602 			1u,														// deUint32									viewportCount
2603 			&viewport,												// const VkViewport*						pViewports
2604 			1u,														// deUint32									scissorCount
2605 			&scissor												// const VkRect2D*							pScissors
2606 		};
2607 
2608 		Move<VkShaderModule> fs;
2609 		Move<VkShaderModule> vs;
2610 
2611 		deUint32 numStages;
2612 		if (m_data.stage == STAGE_VERTEX)
2613 		{
2614 			vs = createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0);
2615 			fs = createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0); // unused; rasterizer discard is enabled for STAGE_VERTEX
2616 			numStages = 1u;
2617 		}
2618 		else
2619 		{
2620 			vs = createShaderModule(vk, device, m_context.getBinaryCollection().get("vert"), 0);
2621 			fs = createShaderModule(vk, device, m_context.getBinaryCollection().get("test"), 0);
2622 			numStages = 2u;
2623 		}
2624 
		const VkPipelineShaderStageCreateInfo	shaderCreateInfo[2] =
		{
			{
				VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
				DE_NULL,
				(VkPipelineShaderStageCreateFlags)0,
				VK_SHADER_STAGE_VERTEX_BIT,									// stage
				*vs,														// shader
				"main",
				DE_NULL,													// pSpecializationInfo
			},
			{
				VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
				DE_NULL,
				(VkPipelineShaderStageCreateFlags)0,
				VK_SHADER_STAGE_FRAGMENT_BIT,								// stage
				*fs,														// shader
				"main",
				DE_NULL,													// pSpecializationInfo
			}
		};

		const VkGraphicsPipelineCreateInfo				graphicsPipelineCreateInfo		=
		{
			VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,	// VkStructureType									sType;
			DE_NULL,											// const void*										pNext;
			(VkPipelineCreateFlags)0,							// VkPipelineCreateFlags							flags;
			numStages,											// deUint32											stageCount;
			&shaderCreateInfo[0],								// const VkPipelineShaderStageCreateInfo*			pStages;
			&vertexInputStateCreateInfo,						// const VkPipelineVertexInputStateCreateInfo*		pVertexInputState;
			&inputAssemblyStateCreateInfo,						// const VkPipelineInputAssemblyStateCreateInfo*	pInputAssemblyState;
			DE_NULL,											// const VkPipelineTessellationStateCreateInfo*		pTessellationState;
			&viewportStateCreateInfo,							// const VkPipelineViewportStateCreateInfo*			pViewportState;
			&rasterizationStateCreateInfo,						// const VkPipelineRasterizationStateCreateInfo*	pRasterizationState;
			&multisampleStateCreateInfo,						// const VkPipelineMultisampleStateCreateInfo*		pMultisampleState;
			DE_NULL,											// const VkPipelineDepthStencilStateCreateInfo*		pDepthStencilState;
			DE_NULL,											// const VkPipelineColorBlendStateCreateInfo*		pColorBlendState;
			DE_NULL,											// const VkPipelineDynamicStateCreateInfo*			pDynamicState;
			pipelineLayout.get(),								// VkPipelineLayout									layout;
			renderPass.get(),									// VkRenderPass										renderPass;
			0u,													// deUint32											subpass;
			DE_NULL,											// VkPipeline										basePipelineHandle;
			0													// int												basePipelineIndex;
		};

		pipeline = createGraphicsPipeline(vk, device, DE_NULL, &graphicsPipelineCreateInfo);
	}

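	// Transition the output image UNDEFINED -> GENERAL so the transfer below can initialize it.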
	const VkImageMemoryBarrier imageBarrier =
	{
		VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,				// VkStructureType		sType
		DE_NULL,											// const void*			pNext
		0u,													// VkAccessFlags		srcAccessMask
		VK_ACCESS_TRANSFER_WRITE_BIT,						// VkAccessFlags		dstAccessMask
		VK_IMAGE_LAYOUT_UNDEFINED,							// VkImageLayout		oldLayout
		VK_IMAGE_LAYOUT_GENERAL,							// VkImageLayout		newLayout
		VK_QUEUE_FAMILY_IGNORED,							// uint32_t				srcQueueFamilyIndex
		VK_QUEUE_FAMILY_IGNORED,							// uint32_t				dstQueueFamilyIndex
		**images[0],										// VkImage				image
		{
			VK_IMAGE_ASPECT_COLOR_BIT,				// VkImageAspectFlags	aspectMask
			0u,										// uint32_t				baseMipLevel
			1u,										// uint32_t				levelCount
			0u,										// uint32_t				baseArrayLayer
			1u,										// uint32_t				layerCount
		}
	};

	vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT,
							(VkDependencyFlags)0,
							0, (const VkMemoryBarrier*)DE_NULL,
							0, (const VkBufferMemoryBarrier*)DE_NULL,
							1, &imageBarrier);

	vk.cmdBindPipeline(*cmdBuffer, bindPoint, *pipeline);

	if (!formatIsR64(m_data.format))
	{
		VkImageSubresourceRange range = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u);
		VkClearValue clearColor = makeClearValueColorU32(0,0,0,0);

		vk.cmdClearColorImage(*cmdBuffer, **images[0], VK_IMAGE_LAYOUT_GENERAL, &clearColor.color, 1, &range);
	}
	else
	{
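		// VkClearColorValue has no 64-bit channels, so vkCmdClearColorImage cannot initialize R64
		// formats; fill the image by copying from a pre-initialized staging buffer instead.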
		const vector<VkBufferImageCopy>	bufferImageCopy(1, makeBufferImageCopy(outputImageCreateInfo.extent, makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0, 0, 1)));
		copyBufferToImage(vk,
			*cmdBuffer,
			*(*bufferOutputImageR64),
			sizeOutputR64,
			bufferImageCopy,
			VK_IMAGE_ASPECT_COLOR_BIT,
			1,
			1, **images[0], VK_IMAGE_LAYOUT_GENERAL, VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT);
	}

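	// Make the transfer writes visible to shader reads and writes in every stage the test uses.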
	VkMemoryBarrier					memBarrier =
	{
		VK_STRUCTURE_TYPE_MEMORY_BARRIER,	// sType
		DE_NULL,							// pNext
		0u,									// srcAccessMask
		0u,									// dstAccessMask
	};

	memBarrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
	memBarrier.dstAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
	vk.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, m_data.allPipelineStages,
		0, 1, &memBarrier, 0, DE_NULL, 0, DE_NULL);

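	// Cover the DIM x DIM output image: a DIM x DIM compute dispatch, a DIM x DIM ray trace, or a
	// draw into the DIM x DIM framebuffer.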
	if (m_data.stage == STAGE_COMPUTE)
	{
		vk.cmdDispatch(*cmdBuffer, DIM, DIM, 1);
	}
	else if (m_data.stage == STAGE_RAYGEN)
	{
		vk.cmdTraceRaysNV(*cmdBuffer,
			**sbtBuffer, 0,
			DE_NULL, 0, 0,
			DE_NULL, 0, 0,
			DE_NULL, 0, 0,
			DIM, DIM, 1);
	}
	else
	{
		beginRenderPass(vk, *cmdBuffer, *renderPass, *framebuffer,
						makeRect2D(DIM, DIM),
						0, DE_NULL, VK_SUBPASS_CONTENTS_INLINE);
		// Draw a point cloud for vertex shader testing, and a single quad for fragment shader testing
		if (m_data.descriptorType == VERTEX_ATTRIBUTE_FETCH)
		{
			VkDeviceSize zeroOffset = 0;
			VkBuffer b = m_data.nullDescriptor ? DE_NULL : **buffer;
			vk.cmdBindVertexBuffers(*cmdBuffer, 0u, 1u, &b, &zeroOffset);
			vk.cmdDraw(*cmdBuffer, 1000u, 1u, 0u, 0u);
		}
		if (m_data.stage == STAGE_VERTEX)
		{
			vk.cmdDraw(*cmdBuffer, DIM*DIM, 1u, 0u, 0u);
		}
		else
		{
			vk.cmdDraw(*cmdBuffer, 4u, 1u, 0u, 0u);
		}
		endRenderPass(vk, *cmdBuffer);
	}

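	// Shader writes must complete before the transfer stage copies the result image back to the
	// host-visible buffer.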
	memBarrier.srcAccessMask = VK_ACCESS_SHADER_READ_BIT | VK_ACCESS_SHADER_WRITE_BIT;
	memBarrier.dstAccessMask = VK_ACCESS_TRANSFER_READ_BIT | VK_ACCESS_TRANSFER_WRITE_BIT;
	vk.cmdPipelineBarrier(*cmdBuffer, m_data.allPipelineStages, VK_PIPELINE_STAGE_TRANSFER_BIT,
		0, 1, &memBarrier, 0, DE_NULL, 0, DE_NULL);

	const VkBufferImageCopy copyRegion = makeBufferImageCopy(makeExtent3D(DIM, DIM, 1u),
															 makeImageSubresourceLayers(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 0u, 1u));
	vk.cmdCopyImageToBuffer(*cmdBuffer, **images[0], VK_IMAGE_LAYOUT_GENERAL, **copyBuffer, 1u, &copyRegion);

	endCommandBuffer(vk, *cmdBuffer);

	submitCommandsAndWait(vk, device, queue, cmdBuffer.get());

	void *ptr = copyBuffer->getAllocation().getHostPtr();

	invalidateAlloc(vk, device, copyBuffer->getAllocation());

	qpTestResult res = QP_TEST_RESULT_PASS;

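	// The test shaders write 1 to the first component of their texel when they observe the expected
	// robust behavior, so it suffices to check component 0 of every texel.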
	for (deUint32 i = 0; i < DIM*DIM; ++i)
	{
		if (formatIsFloat(m_data.format))
		{
			if (((float *)ptr)[i * numComponents] != 1.0f)
			{
				res = QP_TEST_RESULT_FAIL;
			}
		}
		else if (formatIsR64(m_data.format))
		{
			if (((deUint64 *)ptr)[i * numComponents] != 1)
			{
				res = QP_TEST_RESULT_FAIL;
			}
		}
		else
		{
			if (((deUint32 *)ptr)[i * numComponents] != 1)
			{
				res = QP_TEST_RESULT_FAIL;
			}
		}
	}

	return tcu::TestStatus(res, qpGetTestResultName(res));
}

}	// anonymous

static void createTests (tcu::TestCaseGroup* group, bool robustness2)
{
	tcu::TestContext& testCtx = group->getTestContext();

	typedef struct
	{
		deUint32				count;
		const char*				name;
		const char*				description;
	} TestGroupCase;

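	// Each axis of the test matrix is a table of cases; "count" carries the Vulkan enum value,
	// buffer length, or boolean flag that the case exercises.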
	TestGroupCase fmtCases[] =
	{
		{ VK_FORMAT_R32_SINT,				"r32i",		""		},
		{ VK_FORMAT_R32_UINT,				"r32ui",	""		},
		{ VK_FORMAT_R32_SFLOAT,				"r32f",		""		},
		{ VK_FORMAT_R32G32_SINT,			"rg32i",	""		},
		{ VK_FORMAT_R32G32_UINT,			"rg32ui",	""		},
		{ VK_FORMAT_R32G32_SFLOAT,			"rg32f",	""		},
		{ VK_FORMAT_R32G32B32A32_SINT,		"rgba32i",	""		},
		{ VK_FORMAT_R32G32B32A32_UINT,		"rgba32ui",	""		},
		{ VK_FORMAT_R32G32B32A32_SFLOAT,	"rgba32f",	""		},
		{ VK_FORMAT_R64_SINT,				"r64i",		""		},
		{ VK_FORMAT_R64_UINT,				"r64ui",	""		},
	};

	TestGroupCase fullDescCases[] =
	{
		{ VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,				"uniform_buffer",			""		},
		{ VK_DESCRIPTOR_TYPE_STORAGE_BUFFER,				"storage_buffer",			""		},
		{ VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC,		"uniform_buffer_dynamic",	""		},
		{ VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC,		"storage_buffer_dynamic",	""		},
		{ VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER,			"uniform_texel_buffer",		""		},
		{ VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER,			"storage_texel_buffer",		""		},
		{ VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,					"storage_image",			""		},
		{ VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,		"sampled_image",			""		},
		{ VERTEX_ATTRIBUTE_FETCH,							"vertex_attribute_fetch",	""		},
	};

	TestGroupCase imgDescCases[] =
	{
		{ VK_DESCRIPTOR_TYPE_STORAGE_IMAGE,					"storage_image",			""		},
		{ VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER,		"sampled_image",			""		},
	};

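	// Buffer lengths straddle interesting robustness boundaries (e.g. around 32 and 256 bytes for
	// 32-bit formats, doubled for 64-bit formats). ~0U selects the null-descriptor cases and 0 is
	// used for the image-descriptor cases.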
	TestGroupCase fullLenCases32Bit[] =
	{
		{ ~0U,			"null_descriptor",	""		},
		{ 0,			"img",				""		},
		{ 4,			"len_4",			""		},
		{ 8,			"len_8",			""		},
		{ 12,			"len_12",			""		},
		{ 16,			"len_16",			""		},
		{ 20,			"len_20",			""		},
		{ 31,			"len_31",			""		},
		{ 32,			"len_32",			""		},
		{ 33,			"len_33",			""		},
		{ 35,			"len_35",			""		},
		{ 36,			"len_36",			""		},
		{ 39,			"len_39",			""		},
		{ 40,			"len_40",			""		},
		{ 252,			"len_252",			""		},
		{ 256,			"len_256",			""		},
		{ 260,			"len_260",			""		},
	};

	TestGroupCase fullLenCases64Bit[] =
	{
		{ ~0U,			"null_descriptor",	""		},
		{ 0,			"img",				""		},
		{ 8,			"len_8",			""		},
		{ 16,			"len_16",			""		},
		{ 24,			"len_24",			""		},
		{ 32,			"len_32",			""		},
		{ 40,			"len_40",			""		},
		{ 62,			"len_62",			""		},
		{ 64,			"len_64",			""		},
		{ 66,			"len_66",			""		},
		{ 70,			"len_70",			""		},
		{ 72,			"len_72",			""		},
		{ 78,			"len_78",			""		},
		{ 80,			"len_80",			""		},
		{ 504,			"len_504",			""		},
		{ 512,			"len_512",			""		},
		{ 520,			"len_520",			""		},
	};

	TestGroupCase imgLenCases[] =
	{
		{ 0,	"img",	""		},
	};

	TestGroupCase viewCases[] =
	{
		{ VK_IMAGE_VIEW_TYPE_1D,			"1d",			""		},
		{ VK_IMAGE_VIEW_TYPE_2D,			"2d",			""		},
		{ VK_IMAGE_VIEW_TYPE_3D,			"3d",			""		},
		{ VK_IMAGE_VIEW_TYPE_CUBE,			"cube",			""		},
		{ VK_IMAGE_VIEW_TYPE_1D_ARRAY,		"1d_array",		""		},
		{ VK_IMAGE_VIEW_TYPE_2D_ARRAY,		"2d_array",		""		},
		{ VK_IMAGE_VIEW_TYPE_CUBE_ARRAY,	"cube_array",	""		},
	};

	TestGroupCase sampCases[] =
	{
		{ VK_SAMPLE_COUNT_1_BIT,			"samples_1",	""		},
		{ VK_SAMPLE_COUNT_4_BIT,			"samples_4",	""		},
	};

	TestGroupCase stageCases[] =
	{
		{ STAGE_COMPUTE,	"comp",		"compute"	},
		{ STAGE_FRAGMENT,	"frag",		"fragment"	},
		{ STAGE_VERTEX,		"vert",		"vertex"	},
		{ STAGE_RAYGEN,		"rgen",		"raygen"	},
	};

	TestGroupCase volCases[] =
	{
		{ 0,			"nonvolatile",	""		},
		{ 1,			"volatile",		""		},
	};

	TestGroupCase unrollCases[] =
	{
		{ 0,			"dontunroll",	""		},
		{ 1,			"unroll",		""		},
	};

	TestGroupCase tempCases[] =
	{
		{ 0,			"notemplate",	""		},
		{ 1,			"template",		""		},
	};

	TestGroupCase pushCases[] =
	{
		{ 0,			"bind",			""		},
		{ 1,			"push",			""		},
	};

	TestGroupCase fmtQualCases[] =
	{
		{ 0,			"no_fmt_qual",	""		},
		{ 1,			"fmt_qual",		""		},
	};

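	// Iterate the full cross product of the axes above, pruning invalid or redundant combinations
	// as we go. Group nesting, outermost to innermost, is push/template/format/unroll/volatile/
	// descriptor/format-qualifier/length/samples/view, with one test per stage at the leaves.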
	for (int pushNdx = 0; pushNdx < DE_LENGTH_OF_ARRAY(pushCases); pushNdx++)
	{
		de::MovePtr<tcu::TestCaseGroup> pushGroup(new tcu::TestCaseGroup(testCtx, pushCases[pushNdx].name, pushCases[pushNdx].name));
		for (int tempNdx = 0; tempNdx < DE_LENGTH_OF_ARRAY(tempCases); tempNdx++)
		{
			de::MovePtr<tcu::TestCaseGroup> tempGroup(new tcu::TestCaseGroup(testCtx, tempCases[tempNdx].name, tempCases[tempNdx].name));
			for (int fmtNdx = 0; fmtNdx < DE_LENGTH_OF_ARRAY(fmtCases); fmtNdx++)
			{
				de::MovePtr<tcu::TestCaseGroup> fmtGroup(new tcu::TestCaseGroup(testCtx, fmtCases[fmtNdx].name, fmtCases[fmtNdx].name));

				int fmtSize = tcu::getPixelSize(mapVkFormat((VkFormat)fmtCases[fmtNdx].count));

				for (int unrollNdx = 0; unrollNdx < DE_LENGTH_OF_ARRAY(unrollCases); unrollNdx++)
				{
					de::MovePtr<tcu::TestCaseGroup> unrollGroup(new tcu::TestCaseGroup(testCtx, unrollCases[unrollNdx].name, unrollCases[unrollNdx].name));
					for (int volNdx = 0; volNdx < DE_LENGTH_OF_ARRAY(volCases); volNdx++)
					{
						de::MovePtr<tcu::TestCaseGroup> volGroup(new tcu::TestCaseGroup(testCtx, volCases[volNdx].name, volCases[volNdx].name));

						int numDescCases = robustness2 ? DE_LENGTH_OF_ARRAY(fullDescCases) : DE_LENGTH_OF_ARRAY(imgDescCases);
						TestGroupCase *descCases = robustness2 ? fullDescCases : imgDescCases;

						for (int descNdx = 0; descNdx < numDescCases; descNdx++)
						{
							de::MovePtr<tcu::TestCaseGroup> descGroup(new tcu::TestCaseGroup(testCtx, descCases[descNdx].name, descCases[descNdx].name));
							for (int fmtQualNdx = 0; fmtQualNdx < DE_LENGTH_OF_ARRAY(fmtQualCases); fmtQualNdx++)
							{
								de::MovePtr<tcu::TestCaseGroup> fmtQualGroup(new tcu::TestCaseGroup(testCtx, fmtQualCases[fmtQualNdx].name, fmtQualCases[fmtQualNdx].name));

								// The format qualifier only applies to storage images and storage texel buffers
								if (fmtQualCases[fmtQualNdx].count &&
									!(descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER || descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_IMAGE))
									continue;

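								// Dynamic buffer descriptors are not allowed in push descriptor sets,
								// and vertex attribute fetch does not go through a descriptor at all.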
								if (pushCases[pushNdx].count &&
									(descCases[descNdx].count == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC || descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC || descCases[descNdx].count == VERTEX_ATTRIBUTE_FETCH))
									continue;

								const bool isR64 = formatIsR64((VkFormat)fmtCases[fmtNdx].count);
								int numLenCases = robustness2 ? DE_LENGTH_OF_ARRAY((isR64 ? fullLenCases64Bit : fullLenCases32Bit)) : DE_LENGTH_OF_ARRAY(imgLenCases);
								TestGroupCase *lenCases = robustness2 ? (isR64 ? fullLenCases64Bit : fullLenCases32Bit) : imgLenCases;

								for (int lenNdx = 0; lenNdx < numLenCases; lenNdx++)
								{
									if (lenCases[lenNdx].count != ~0U)
									{
										bool bufferLen = lenCases[lenNdx].count != 0;
										bool bufferDesc = descCases[descNdx].count != VK_DESCRIPTOR_TYPE_STORAGE_IMAGE && descCases[descNdx].count != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER;
										if (bufferLen != bufferDesc)
											continue;

										// Add template test cases only for the null_descriptor cases
										if (tempCases[tempNdx].count)
											continue;
									}

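									// Texel buffer views require a range that is a whole multiple of
									// the texel size; skip lengths that are not (null descriptors excepted).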
									if ((descCases[descNdx].count == VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER || descCases[descNdx].count == VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER) &&
										((lenCases[lenNdx].count % fmtSize) != 0) &&
										lenCases[lenNdx].count != ~0U)
									{
										continue;
									}

									// "volatile" only applies to storage images/buffers
									if (volCases[volNdx].count && !supportsStores(descCases[descNdx].count))
										continue;

									de::MovePtr<tcu::TestCaseGroup> lenGroup(new tcu::TestCaseGroup(testCtx, lenCases[lenNdx].name, lenCases[lenNdx].name));
									for (int sampNdx = 0; sampNdx < DE_LENGTH_OF_ARRAY(sampCases); sampNdx++)
									{
										de::MovePtr<tcu::TestCaseGroup> sampGroup(new tcu::TestCaseGroup(testCtx, sampCases[sampNdx].name, sampCases[sampNdx].name));
										for (int viewNdx = 0; viewNdx < DE_LENGTH_OF_ARRAY(viewCases); viewNdx++)
										{
											if (viewCases[viewNdx].count != VK_IMAGE_VIEW_TYPE_1D &&
												descCases[descNdx].count != VK_DESCRIPTOR_TYPE_STORAGE_IMAGE &&
												descCases[descNdx].count != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
											{
												// Buffer descriptors don't have different dimensionalities; only test "1d"
												continue;
											}

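											// Multisampling is only exercised with 2D and 2D array views.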
											if (viewCases[viewNdx].count != VK_IMAGE_VIEW_TYPE_2D && viewCases[viewNdx].count != VK_IMAGE_VIEW_TYPE_2D_ARRAY &&
												sampCases[sampNdx].count != VK_SAMPLE_COUNT_1_BIT)
											{
												continue;
											}

											de::MovePtr<tcu::TestCaseGroup> viewGroup(new tcu::TestCaseGroup(testCtx, viewCases[viewNdx].name, viewCases[viewNdx].name));
											for (int stageNdx = 0; stageNdx < DE_LENGTH_OF_ARRAY(stageCases); stageNdx++)
											{
												Stage currentStage = static_cast<Stage>(stageCases[stageNdx].count);
												VkFlags allShaderStages = VK_SHADER_STAGE_COMPUTE_BIT | VK_SHADER_STAGE_VERTEX_BIT | VK_SHADER_STAGE_FRAGMENT_BIT;
												VkFlags allPipelineStages = VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT;
												if (currentStage == STAGE_RAYGEN)
												{
													allShaderStages |= VK_SHADER_STAGE_RAYGEN_BIT_NV;
													allPipelineStages |= VK_PIPELINE_STAGE_RAY_TRACING_SHADER_BIT_NV;
												}

												if (descCases[descNdx].count == VERTEX_ATTRIBUTE_FETCH &&
													currentStage != STAGE_VERTEX)
													continue;

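												// Use distinct sizes per axis; cube and cube array views
												// require square faces, so height is forced equal to width.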
												deUint32 imageDim[3] = {5, 11, 6};
												if (viewCases[viewNdx].count == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY ||
													viewCases[viewNdx].count == VK_IMAGE_VIEW_TYPE_CUBE)
													imageDim[1] = imageDim[0];

												CaseDef c =
												{
													(VkFormat)fmtCases[fmtNdx].count,								// VkFormat format;
													currentStage,													// Stage stage;
													allShaderStages,												// VkFlags allShaderStages;
													allPipelineStages,												// VkFlags allPipelineStages;
													(int)descCases[descNdx].count,									// VkDescriptorType descriptorType;
													(VkImageViewType)viewCases[viewNdx].count,						// VkImageViewType viewType;
													(VkSampleCountFlagBits)sampCases[sampNdx].count,				// VkSampleCountFlagBits samples;
													(int)lenCases[lenNdx].count,									// int bufferLen;
													(bool)unrollCases[unrollNdx].count,								// bool unroll;
													(bool)volCases[volNdx].count,									// bool vol;
													(bool)(lenCases[lenNdx].count == ~0U),							// bool nullDescriptor
													(bool)tempCases[tempNdx].count,									// bool useTemplate
													(bool)fmtQualCases[fmtQualNdx].count,							// bool formatQualifier
													(bool)pushCases[pushNdx].count,									// bool pushDescriptor;
													(bool)robustness2,												// bool testRobustness2;
													{ imageDim[0], imageDim[1], imageDim[2] },						// deUint32 imageDim[3];
												};

												viewGroup->addChild(new RobustnessExtsTestCase(testCtx, stageCases[stageNdx].name, stageCases[stageNdx].name, c));
											}
											sampGroup->addChild(viewGroup.release());
										}
										lenGroup->addChild(sampGroup.release());
									}
									fmtQualGroup->addChild(lenGroup.release());
								}
								descGroup->addChild(fmtQualGroup.release());
							}
							volGroup->addChild(descGroup.release());
						}
						unrollGroup->addChild(volGroup.release());
					}
					fmtGroup->addChild(unrollGroup.release());
				}
				tempGroup->addChild(fmtGroup.release());
			}
			pushGroup->addChild(tempGroup.release());
		}
		group->addChild(pushGroup.release());
	}
}

static void createRobustness2Tests (tcu::TestCaseGroup* group)
{
	createTests(group, /*robustness2=*/true);
}

static void createImageRobustnessTests (tcu::TestCaseGroup* group)
{
	createTests(group, /*robustness2=*/false);
}

static void cleanupGroup (tcu::TestCaseGroup* group)
{
	DE_UNREF(group);
	// Destroy singleton objects.
	Robustness2Int64AtomicsSingleton::destroy();
	ImageRobustnessInt64AtomicsSingleton::destroy();
	ImageRobustnessSingleton::destroy();
	Robustness2Singleton::destroy();
}

tcu::TestCaseGroup* createRobustness2Tests (tcu::TestContext& testCtx)
{
	return createTestGroup(testCtx, "robustness2", "VK_EXT_robustness2 tests",
							createRobustness2Tests, cleanupGroup);
}

tcu::TestCaseGroup* createImageRobustnessTests (tcu::TestContext& testCtx)
{
	return createTestGroup(testCtx, "image_robustness", "VK_EXT_image_robustness tests",
							createImageRobustnessTests, cleanupGroup);
}

}	// robustness
}	// vkt