/*------------------------------------------------------------------------
 * Vulkan Conformance Tests
 * ------------------------
 *
 * Copyright (c) 2015 The Khronos Group Inc.
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Copyright (c) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*!
 * \file
 * \brief Uniform block case.
 *//*--------------------------------------------------------------------*/

#include "vktUniformBlockCase.hpp"

#include "vkPrograms.hpp"

#include "gluVarType.hpp"
#include "tcuTestLog.hpp"
#include "tcuSurface.hpp"
#include "deRandom.hpp"
#include "deStringUtil.hpp"

#include "tcuTextureUtil.hpp"
#include "deSharedPtr.hpp"

#include "vkMemUtil.hpp"
#include "vkQueryUtil.hpp"
#include "vkTypeUtil.hpp"
#include "vkRef.hpp"
#include "vkRefUtil.hpp"
#include "vkBuilderUtil.hpp"

#include <map>
#include <set>

namespace vkt
{
namespace ubo
{

using namespace vk;

// VarType implementation.

VarType::VarType (void)
	: m_type	(TYPE_LAST)
	, m_flags	(0)
{
}

VarType::VarType (const VarType& other)
	: m_type	(TYPE_LAST)
	, m_flags	(0)
{
	*this = other;
}

VarType::VarType (glu::DataType basicType, deUint32 flags)
	: m_type	(TYPE_BASIC)
	, m_flags	(flags)
{
	m_data.basicType = basicType;
}

VarType::VarType (const VarType& elementType, int arraySize)
	: m_type	(TYPE_ARRAY)
	, m_flags	(0)
{
	m_data.array.size			= arraySize;
	m_data.array.elementType	= new VarType(elementType);
}

VarType::VarType (const StructType* structPtr, deUint32 flags)
	: m_type	(TYPE_STRUCT)
	, m_flags	(flags)
{
	m_data.structPtr = structPtr;
}

VarType::~VarType (void)
{
	if (m_type == TYPE_ARRAY)
		delete m_data.array.elementType;
}

VarType& VarType::operator= (const VarType& other)
{
	if (this == &other)
		return *this; // Self-assignment.

	if (m_type == TYPE_ARRAY)
		delete m_data.array.elementType;

	m_type	= other.m_type;
	m_flags	= other.m_flags;
	m_data	= Data();

	if (m_type == TYPE_ARRAY)
	{
		m_data.array.elementType	= new VarType(*other.m_data.array.elementType);
		m_data.array.size			= other.m_data.array.size;
	}
	else
		m_data = other.m_data;

	return *this;
}

// StructType implementation.

void StructType::addMember (const std::string& name, const VarType& type, deUint32 flags)
{
	m_members.push_back(StructMember(name, type, flags));
}

// Uniform implementation.

Uniform::Uniform (const std::string& name, const VarType& type, deUint32 flags)
	: m_name	(name)
	, m_type	(type)
	, m_flags	(flags)
{
}

// UniformBlock implementation.

UniformBlock::UniformBlock (const std::string& blockName)
	: m_blockName	(blockName)
	, m_arraySize	(0)
	, m_flags		(0)
{
}

std::ostream& operator<< (std::ostream& stream, const BlockLayoutEntry& entry)
{
	stream << entry.name << " { name = " << entry.name
		   << ", size = " << entry.size
		   << ", activeUniformIndices = [";

	for (std::vector<int>::const_iterator i = entry.activeUniformIndices.begin(); i != entry.activeUniformIndices.end(); i++)
	{
		if (i != entry.activeUniformIndices.begin())
			stream << ", ";
		stream << *i;
	}

	stream << "] }";
	return stream;
}

std::ostream& operator<< (std::ostream& stream, const UniformLayoutEntry& entry)
{
	stream << entry.name << " { type = " << glu::getDataTypeName(entry.type)
		   << ", size = " << entry.size
		   << ", blockNdx = " << entry.blockLayoutNdx
		   << ", offset = " << entry.offset
		   << ", arrayStride = " << entry.arrayStride
		   << ", matrixStride = " << entry.matrixStride
		   << ", isRowMajor = " << (entry.isRowMajor ? "true" : "false")
		   << " }";
	return stream;
}

int UniformLayout::getUniformLayoutIndex (int blockNdx, const std::string& name) const
{
	for (int ndx = 0; ndx < (int)uniforms.size(); ndx++)
	{
		if (blocks[uniforms[ndx].blockLayoutNdx].blockDeclarationNdx == blockNdx &&
			uniforms[ndx].name == name)
			return ndx;
	}

	return -1;
}

int UniformLayout::getBlockLayoutIndex (int blockNdx, int instanceNdx) const
{
	for (int ndx = 0; ndx < (int)blocks.size(); ndx++)
	{
		if (blocks[ndx].blockDeclarationNdx == blockNdx &&
			blocks[ndx].instanceNdx == instanceNdx)
			return ndx;
	}

	return -1;
}

// ShaderInterface implementation.

ShaderInterface::ShaderInterface (void)
{
}

ShaderInterface::~ShaderInterface (void)
{
}

StructType& ShaderInterface::allocStruct (const std::string& name)
{
	m_structs.push_back(StructTypeSP(new StructType(name)));
	return *m_structs.back();
}

struct StructNameEquals
{
	std::string name;

	StructNameEquals (const std::string& name_) : name(name_) {}

	bool operator() (const StructTypeSP type) const
	{
		return type->hasTypeName() && name == type->getTypeName();
	}
};

void ShaderInterface::getNamedStructs (std::vector<const StructType*>& structs) const
{
	for (std::vector<StructTypeSP>::const_iterator i = m_structs.begin(); i != m_structs.end(); i++)
	{
		if ((*i)->hasTypeName())
			structs.push_back((*i).get());
	}
}

UniformBlock& ShaderInterface::allocBlock (const std::string& name)
{
	m_uniformBlocks.push_back(UniformBlockSP(new UniformBlock(name)));
	return *m_uniformBlocks.back();
}

namespace // Utilities
{

struct PrecisionFlagsFmt
{
	deUint32 flags;
	PrecisionFlagsFmt (deUint32 flags_) : flags(flags_) {}
};

std::ostream& operator<< (std::ostream& str, const PrecisionFlagsFmt& fmt)
{
	// Precision.
	DE_ASSERT(dePop32(fmt.flags & (PRECISION_LOW|PRECISION_MEDIUM|PRECISION_HIGH)) <= 1);
	str << (fmt.flags & PRECISION_LOW		? "lowp"	:
			fmt.flags & PRECISION_MEDIUM	? "mediump"	:
			fmt.flags & PRECISION_HIGH		? "highp"	: "");
	return str;
}

struct LayoutFlagsFmt
{
	deUint32 flags;
	deUint32 offset;
	LayoutFlagsFmt (deUint32 flags_, deUint32 offset_ = 0u) : flags(flags_), offset(offset_) {}
};

std::ostream& operator<< (std::ostream& str, const LayoutFlagsFmt& fmt)
{
	static const struct
	{
		deUint32	bit;
		const char*	token;
	} bitDesc[] =
	{
		{ LAYOUT_STD140,		"std140"		},
		{ LAYOUT_ROW_MAJOR,		"row_major"		},
		{ LAYOUT_COLUMN_MAJOR,	"column_major"	},
		{ LAYOUT_OFFSET,		"offset"		},
	};

	deUint32 remBits = fmt.flags;
	for (int descNdx = 0; descNdx < DE_LENGTH_OF_ARRAY(bitDesc); descNdx++)
	{
		if (remBits & bitDesc[descNdx].bit)
		{
			if (remBits != fmt.flags)
				str << ", ";
			str << bitDesc[descNdx].token;
			if (bitDesc[descNdx].bit == LAYOUT_OFFSET)
				str << " = " << fmt.offset;
			remBits &= ~bitDesc[descNdx].bit;
		}
	}
	DE_ASSERT(remBits == 0);
	return str;
}

// Layout computation.

int getDataTypeByteSize (glu::DataType type)
{
	return glu::getDataTypeScalarSize(type)*(int)sizeof(deUint32);
}

int getDataTypeByteAlignment (glu::DataType type)
{
	switch (type)
	{
		case glu::TYPE_FLOAT:
		case glu::TYPE_INT:
		case glu::TYPE_UINT:
		case glu::TYPE_BOOL:		return 1*(int)sizeof(deUint32);

		case glu::TYPE_FLOAT_VEC2:
		case glu::TYPE_INT_VEC2:
		case glu::TYPE_UINT_VEC2:
		case glu::TYPE_BOOL_VEC2:	return 2*(int)sizeof(deUint32);

		case glu::TYPE_FLOAT_VEC3:
		case glu::TYPE_INT_VEC3:
		case glu::TYPE_UINT_VEC3:
		case glu::TYPE_BOOL_VEC3:	// Fall-through to vec4

		case glu::TYPE_FLOAT_VEC4:
		case glu::TYPE_INT_VEC4:
		case glu::TYPE_UINT_VEC4:
		case glu::TYPE_BOOL_VEC4:	return 4*(int)sizeof(deUint32);

		default:
			DE_ASSERT(false);
			return 0;
	}
}

deInt32 getminUniformBufferOffsetAlignment (Context &ctx)
{
	VkPhysicalDeviceProperties properties;
	ctx.getInstanceInterface().getPhysicalDeviceProperties(ctx.getPhysicalDevice(), &properties);
	VkDeviceSize align = properties.limits.minUniformBufferOffsetAlignment;
	DE_ASSERT(align == (VkDeviceSize)(deInt32)align);
	return (deInt32)align;
}

int getDataTypeArrayStride (glu::DataType type)
{
	DE_ASSERT(!glu::isDataTypeMatrix(type));

	const int baseStride	= getDataTypeByteSize(type);
	const int vec4Alignment	= (int)sizeof(deUint32)*4;

	DE_ASSERT(baseStride <= vec4Alignment);
	return de::max(baseStride, vec4Alignment); // Really? See rule 4.
}

static inline int deRoundUp32 (int a, int b)
{
	int d = a/b;
	return d*b == a ? a : (d+1)*b;
}

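// Computes the std140 base alignment of a type (in bytes): scalars and vectors use
// getDataTypeByteAlignment(), matrices align like an array of their column (or row,
// with row_major) vectors, and arrays and structs are rounded up to vec4 alignment.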
int computeStd140BaseAlignment (const VarType& type)
{
	const int vec4Alignment = (int)sizeof(deUint32)*4;

	if (type.isBasicType())
	{
		glu::DataType basicType = type.getBasicType();

		if (glu::isDataTypeMatrix(basicType))
		{
			bool	isRowMajor	= !!(type.getFlags() & LAYOUT_ROW_MAJOR);
			int		vecSize		= isRowMajor ? glu::getDataTypeMatrixNumColumns(basicType)
											 : glu::getDataTypeMatrixNumRows(basicType);

			return getDataTypeArrayStride(glu::getDataTypeFloatVec(vecSize));
		}
		else
			return getDataTypeByteAlignment(basicType);
	}
	else if (type.isArrayType())
	{
		int elemAlignment = computeStd140BaseAlignment(type.getElementType());

		// Round up to alignment of vec4
		return deRoundUp32(elemAlignment, vec4Alignment);
	}
	else
	{
		DE_ASSERT(type.isStructType());

		int maxBaseAlignment = 0;

		for (StructType::ConstIterator memberIter = type.getStruct().begin(); memberIter != type.getStruct().end(); memberIter++)
			maxBaseAlignment = de::max(maxBaseAlignment, computeStd140BaseAlignment(memberIter->getType()));

		return deRoundUp32(maxBaseAlignment, vec4Alignment);
	}
}

inline deUint32 mergeLayoutFlags (deUint32 prevFlags, deUint32 newFlags)
{
	const deUint32	packingMask		= LAYOUT_STD140;
	const deUint32	matrixMask		= LAYOUT_ROW_MAJOR|LAYOUT_COLUMN_MAJOR;

	deUint32 mergedFlags = 0;

	mergedFlags |= ((newFlags & packingMask)	? newFlags : prevFlags) & packingMask;
	mergedFlags |= ((newFlags & matrixMask)		? newFlags : prevFlags) & matrixMask;

	return mergedFlags;
}

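// Recursively lays out 'type' starting at 'curOffset' according to std140 rules,
// appending one UniformLayoutEntry per basic-typed uniform (arrays of scalars,
// vectors and matrices become a single entry) and advancing 'curOffset' past the
// consumed storage.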
void computeStd140Layout (UniformLayout& layout, int& curOffset, int curBlockNdx, const std::string& curPrefix, const VarType& type, deUint32 layoutFlags)
{
	int baseAlignment = computeStd140BaseAlignment(type);

	curOffset = deAlign32(curOffset, baseAlignment);

	if (type.isBasicType())
	{
		glu::DataType		basicType	= type.getBasicType();
		UniformLayoutEntry	entry;

		entry.name			= curPrefix;
		entry.type			= basicType;
		entry.size			= 1;
		entry.arrayStride	= 0;
		entry.matrixStride	= 0;
		entry.blockLayoutNdx= curBlockNdx;

		if (glu::isDataTypeMatrix(basicType))
		{
			// Array of vectors as specified in rules 5 & 7.
			bool	isRowMajor	= !!(layoutFlags & LAYOUT_ROW_MAJOR);
			int		vecSize		= isRowMajor ? glu::getDataTypeMatrixNumColumns(basicType)
											 : glu::getDataTypeMatrixNumRows(basicType);
			int		numVecs		= isRowMajor ? glu::getDataTypeMatrixNumRows(basicType)
											 : glu::getDataTypeMatrixNumColumns(basicType);
			int		stride		= getDataTypeArrayStride(glu::getDataTypeFloatVec(vecSize));

			entry.offset		= curOffset;
			entry.matrixStride	= stride;
			entry.isRowMajor	= isRowMajor;

			curOffset += numVecs*stride;
		}
		else
		{
			// Scalar or vector.
			entry.offset = curOffset;

			curOffset += getDataTypeByteSize(basicType);
		}

		layout.uniforms.push_back(entry);
	}
	else if (type.isArrayType())
	{
		const VarType&	elemType	= type.getElementType();

		if (elemType.isBasicType() && !glu::isDataTypeMatrix(elemType.getBasicType()))
		{
			// Array of scalars or vectors.
			glu::DataType		elemBasicType	= elemType.getBasicType();
			UniformLayoutEntry	entry;
			int					stride			= getDataTypeArrayStride(elemBasicType);

			entry.name			= curPrefix + "[0]"; // Array uniforms are always postfixed with [0]
			entry.type			= elemBasicType;
			entry.blockLayoutNdx= curBlockNdx;
			entry.offset		= curOffset;
			entry.size			= type.getArraySize();
			entry.arrayStride	= stride;
			entry.matrixStride	= 0;

			curOffset += stride*type.getArraySize();

			layout.uniforms.push_back(entry);
		}
		else if (elemType.isBasicType() && glu::isDataTypeMatrix(elemType.getBasicType()))
		{
			// Array of matrices.
			glu::DataType		elemBasicType	= elemType.getBasicType();
			bool				isRowMajor		= !!(layoutFlags & LAYOUT_ROW_MAJOR);
			int					vecSize			= isRowMajor ? glu::getDataTypeMatrixNumColumns(elemBasicType)
															 : glu::getDataTypeMatrixNumRows(elemBasicType);
			int					numVecs			= isRowMajor ? glu::getDataTypeMatrixNumRows(elemBasicType)
															 : glu::getDataTypeMatrixNumColumns(elemBasicType);
			int					stride			= getDataTypeArrayStride(glu::getDataTypeFloatVec(vecSize));
			UniformLayoutEntry	entry;

			entry.name			= curPrefix + "[0]"; // Array uniforms are always postfixed with [0]
			entry.type			= elemBasicType;
			entry.blockLayoutNdx= curBlockNdx;
			entry.offset		= curOffset;
			entry.size			= type.getArraySize();
			entry.arrayStride	= stride*numVecs;
			entry.matrixStride	= stride;
			entry.isRowMajor	= isRowMajor;

			curOffset += numVecs*type.getArraySize()*stride;

			layout.uniforms.push_back(entry);
		}
		else
		{
			DE_ASSERT(elemType.isStructType() || elemType.isArrayType());

			for (int elemNdx = 0; elemNdx < type.getArraySize(); elemNdx++)
				computeStd140Layout(layout, curOffset, curBlockNdx, curPrefix + "[" + de::toString(elemNdx) + "]", type.getElementType(), layoutFlags);
		}
	}
	else
	{
		DE_ASSERT(type.isStructType());

		for (StructType::ConstIterator memberIter = type.getStruct().begin(); memberIter != type.getStruct().end(); memberIter++)
			computeStd140Layout(layout, curOffset, curBlockNdx, curPrefix + "." + memberIter->getName(), memberIter->getType(), layoutFlags);

		curOffset = deAlign32(curOffset, baseAlignment);
	}
}

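// Computes the std140 layout for every block in the interface. Each block instance
// (array blocks expand to one entry per element) gets a BlockLayoutEntry that
// references the uniform entries produced above.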
void computeStd140Layout (UniformLayout& layout, const ShaderInterface& interface)
{
	int numUniformBlocks = interface.getNumUniformBlocks();

	for (int blockNdx = 0; blockNdx < numUniformBlocks; blockNdx++)
	{
		const UniformBlock&	block			= interface.getUniformBlock(blockNdx);
		bool				hasInstanceName	= block.hasInstanceName();
		std::string			blockPrefix		= hasInstanceName ? (block.getBlockName() + ".") : "";
		int					curOffset		= 0;
		int					activeBlockNdx	= (int)layout.blocks.size();
		int					firstUniformNdx	= (int)layout.uniforms.size();

		for (UniformBlock::ConstIterator uniformIter = block.begin(); uniformIter != block.end(); uniformIter++)
		{
			const Uniform& uniform = *uniformIter;
			computeStd140Layout(layout, curOffset, activeBlockNdx, blockPrefix + uniform.getName(), uniform.getType(), mergeLayoutFlags(block.getFlags(), uniform.getFlags()));
		}

		int	uniformIndicesEnd	= (int)layout.uniforms.size();
		int	blockSize			= curOffset;
		int	numInstances		= block.isArray() ? block.getArraySize() : 1;

		// Create block layout entries for each instance.
		for (int instanceNdx = 0; instanceNdx < numInstances; instanceNdx++)
		{
			// Allocate entry for instance.
			layout.blocks.push_back(BlockLayoutEntry());
			BlockLayoutEntry& blockEntry = layout.blocks.back();

			blockEntry.name = block.getBlockName();
			blockEntry.size = blockSize;
			blockEntry.bindingNdx = blockNdx;
			blockEntry.blockDeclarationNdx = blockNdx;
			blockEntry.instanceNdx = instanceNdx;

			// Compute active uniform set for block.
			for (int uniformNdx = firstUniformNdx; uniformNdx < uniformIndicesEnd; uniformNdx++)
				blockEntry.activeUniformIndices.push_back(uniformNdx);

			if (block.isArray())
				blockEntry.name += "[" + de::toString(instanceNdx) + "]";
		}
	}
}

// Value generator.

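// Fills the storage for a single uniform entry with pseudo-random values, writing
// through the entry's offset, arrayStride and matrixStride so the written bytes
// match the computed layout.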
void generateValue (const UniformLayoutEntry& entry, void* basePtr, de::Random& rnd)
{
	glu::DataType	scalarType		= glu::getDataTypeScalarType(entry.type);
	int				scalarSize		= glu::getDataTypeScalarSize(entry.type);
	bool			isMatrix		= glu::isDataTypeMatrix(entry.type);
	int				numVecs			= isMatrix ? (entry.isRowMajor ? glu::getDataTypeMatrixNumRows(entry.type) : glu::getDataTypeMatrixNumColumns(entry.type)) : 1;
	int				vecSize			= scalarSize / numVecs;
	bool			isArray			= entry.size > 1;
	const int		compSize		= sizeof(deUint32);

	DE_ASSERT(scalarSize%numVecs == 0);

	for (int elemNdx = 0; elemNdx < entry.size; elemNdx++)
	{
		deUint8* elemPtr = (deUint8*)basePtr + entry.offset + (isArray ? elemNdx*entry.arrayStride : 0);

		for (int vecNdx = 0; vecNdx < numVecs; vecNdx++)
		{
			deUint8* vecPtr = elemPtr + (isMatrix ? vecNdx*entry.matrixStride : 0);

			for (int compNdx = 0; compNdx < vecSize; compNdx++)
			{
				deUint8* compPtr = vecPtr + compSize*compNdx;

				switch (scalarType)
				{
					case glu::TYPE_FLOAT:	*((float*)compPtr)		= (float)rnd.getInt(-9, 9);						break;
					case glu::TYPE_INT:		*((int*)compPtr)		= rnd.getInt(-9, 9);							break;
					case glu::TYPE_UINT:	*((deUint32*)compPtr)	= (deUint32)rnd.getInt(0, 9);					break;
					// \note Random bit pattern is used for true values. Spec states that all non-zero values are
					//       interpreted as true but some implementations fail this.
					case glu::TYPE_BOOL:	*((deUint32*)compPtr)	= rnd.getBool() ? rnd.getUint32()|1u : 0u;		break;
					default:
						DE_ASSERT(false);
				}
			}
		}
	}
}

void generateValues (const UniformLayout& layout, const std::map<int, void*>& blockPointers, deUint32 seed)
{
	de::Random	rnd			(seed);
	int			numBlocks	= (int)layout.blocks.size();

	for (int blockNdx = 0; blockNdx < numBlocks; blockNdx++)
	{
		void*	basePtr		= blockPointers.find(blockNdx)->second;
		int		numEntries	= (int)layout.blocks[blockNdx].activeUniformIndices.size();

		for (int entryNdx = 0; entryNdx < numEntries; entryNdx++)
		{
			const UniformLayoutEntry& entry = layout.uniforms[layout.blocks[blockNdx].activeUniformIndices[entryNdx]];
			generateValue(entry, basePtr, rnd);
		}
	}
}

// Shader generator.

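// Returns GLSL source for the comparison helper of the given type. Floats are
// compared with a 0.05 tolerance; integer, unsigned and boolean types use exact
// equality.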
const char* getCompareFuncForType (glu::DataType type)
{
	switch (type)
	{
		case glu::TYPE_FLOAT:			return "mediump float compare_float    (highp float a, highp float b)  { return abs(a - b) < 0.05 ? 1.0 : 0.0; }\n";
		case glu::TYPE_FLOAT_VEC2:		return "mediump float compare_vec2     (highp vec2 a, highp vec2 b)    { return compare_float(a.x, b.x)*compare_float(a.y, b.y); }\n";
		case glu::TYPE_FLOAT_VEC3:		return "mediump float compare_vec3     (highp vec3 a, highp vec3 b)    { return compare_float(a.x, b.x)*compare_float(a.y, b.y)*compare_float(a.z, b.z); }\n";
		case glu::TYPE_FLOAT_VEC4:		return "mediump float compare_vec4     (highp vec4 a, highp vec4 b)    { return compare_float(a.x, b.x)*compare_float(a.y, b.y)*compare_float(a.z, b.z)*compare_float(a.w, b.w); }\n";
		case glu::TYPE_FLOAT_MAT2:		return "mediump float compare_mat2     (highp mat2 a, highp mat2 b)    { return compare_vec2(a[0], b[0])*compare_vec2(a[1], b[1]); }\n";
		case glu::TYPE_FLOAT_MAT2X3:	return "mediump float compare_mat2x3   (highp mat2x3 a, highp mat2x3 b){ return compare_vec3(a[0], b[0])*compare_vec3(a[1], b[1]); }\n";
		case glu::TYPE_FLOAT_MAT2X4:	return "mediump float compare_mat2x4   (highp mat2x4 a, highp mat2x4 b){ return compare_vec4(a[0], b[0])*compare_vec4(a[1], b[1]); }\n";
		case glu::TYPE_FLOAT_MAT3X2:	return "mediump float compare_mat3x2   (highp mat3x2 a, highp mat3x2 b){ return compare_vec2(a[0], b[0])*compare_vec2(a[1], b[1])*compare_vec2(a[2], b[2]); }\n";
		case glu::TYPE_FLOAT_MAT3:		return "mediump float compare_mat3     (highp mat3 a, highp mat3 b)    { return compare_vec3(a[0], b[0])*compare_vec3(a[1], b[1])*compare_vec3(a[2], b[2]); }\n";
		case glu::TYPE_FLOAT_MAT3X4:	return "mediump float compare_mat3x4   (highp mat3x4 a, highp mat3x4 b){ return compare_vec4(a[0], b[0])*compare_vec4(a[1], b[1])*compare_vec4(a[2], b[2]); }\n";
		case glu::TYPE_FLOAT_MAT4X2:	return "mediump float compare_mat4x2   (highp mat4x2 a, highp mat4x2 b){ return compare_vec2(a[0], b[0])*compare_vec2(a[1], b[1])*compare_vec2(a[2], b[2])*compare_vec2(a[3], b[3]); }\n";
		case glu::TYPE_FLOAT_MAT4X3:	return "mediump float compare_mat4x3   (highp mat4x3 a, highp mat4x3 b){ return compare_vec3(a[0], b[0])*compare_vec3(a[1], b[1])*compare_vec3(a[2], b[2])*compare_vec3(a[3], b[3]); }\n";
		case glu::TYPE_FLOAT_MAT4:		return "mediump float compare_mat4     (highp mat4 a, highp mat4 b)    { return compare_vec4(a[0], b[0])*compare_vec4(a[1], b[1])*compare_vec4(a[2], b[2])*compare_vec4(a[3], b[3]); }\n";
		case glu::TYPE_INT:				return "mediump float compare_int      (highp int a, highp int b)      { return a == b ? 1.0 : 0.0; }\n";
		case glu::TYPE_INT_VEC2:		return "mediump float compare_ivec2    (highp ivec2 a, highp ivec2 b)  { return a == b ? 1.0 : 0.0; }\n";
		case glu::TYPE_INT_VEC3:		return "mediump float compare_ivec3    (highp ivec3 a, highp ivec3 b)  { return a == b ? 1.0 : 0.0; }\n";
		case glu::TYPE_INT_VEC4:		return "mediump float compare_ivec4    (highp ivec4 a, highp ivec4 b)  { return a == b ? 1.0 : 0.0; }\n";
		case glu::TYPE_UINT:			return "mediump float compare_uint     (highp uint a, highp uint b)    { return a == b ? 1.0 : 0.0; }\n";
		case glu::TYPE_UINT_VEC2:		return "mediump float compare_uvec2    (highp uvec2 a, highp uvec2 b)  { return a == b ? 1.0 : 0.0; }\n";
		case glu::TYPE_UINT_VEC3:		return "mediump float compare_uvec3    (highp uvec3 a, highp uvec3 b)  { return a == b ? 1.0 : 0.0; }\n";
		case glu::TYPE_UINT_VEC4:		return "mediump float compare_uvec4    (highp uvec4 a, highp uvec4 b)  { return a == b ? 1.0 : 0.0; }\n";
		case glu::TYPE_BOOL:			return "mediump float compare_bool     (bool a, bool b)                { return a == b ? 1.0 : 0.0; }\n";
		case glu::TYPE_BOOL_VEC2:		return "mediump float compare_bvec2    (bvec2 a, bvec2 b)              { return a == b ? 1.0 : 0.0; }\n";
		case glu::TYPE_BOOL_VEC3:		return "mediump float compare_bvec3    (bvec3 a, bvec3 b)              { return a == b ? 1.0 : 0.0; }\n";
		case glu::TYPE_BOOL_VEC4:		return "mediump float compare_bvec4    (bvec4 a, bvec4 b)              { return a == b ? 1.0 : 0.0; }\n";
		default:
			DE_ASSERT(false);
			return DE_NULL;
	}
}

void getCompareDependencies (std::set<glu::DataType>& compareFuncs, glu::DataType basicType)
{
	switch (basicType)
	{
		case glu::TYPE_FLOAT_VEC2:
		case glu::TYPE_FLOAT_VEC3:
		case glu::TYPE_FLOAT_VEC4:
			compareFuncs.insert(glu::TYPE_FLOAT);
			compareFuncs.insert(basicType);
			break;

		case glu::TYPE_FLOAT_MAT2:
		case glu::TYPE_FLOAT_MAT2X3:
		case glu::TYPE_FLOAT_MAT2X4:
		case glu::TYPE_FLOAT_MAT3X2:
		case glu::TYPE_FLOAT_MAT3:
		case glu::TYPE_FLOAT_MAT3X4:
		case glu::TYPE_FLOAT_MAT4X2:
		case glu::TYPE_FLOAT_MAT4X3:
		case glu::TYPE_FLOAT_MAT4:
			compareFuncs.insert(glu::TYPE_FLOAT);
			compareFuncs.insert(glu::getDataTypeFloatVec(glu::getDataTypeMatrixNumRows(basicType)));
			compareFuncs.insert(basicType);
			break;

		default:
			compareFuncs.insert(basicType);
			break;
	}
}

void collectUniqueBasicTypes (std::set<glu::DataType>& basicTypes, const VarType& type)
{
	if (type.isStructType())
	{
		for (StructType::ConstIterator iter = type.getStruct().begin(); iter != type.getStruct().end(); ++iter)
			collectUniqueBasicTypes(basicTypes, iter->getType());
	}
	else if (type.isArrayType())
		collectUniqueBasicTypes(basicTypes, type.getElementType());
	else
	{
		DE_ASSERT(type.isBasicType());
		basicTypes.insert(type.getBasicType());
	}
}

void collectUniqueBasicTypes (std::set<glu::DataType>& basicTypes, const UniformBlock& uniformBlock)
{
	for (UniformBlock::ConstIterator iter = uniformBlock.begin(); iter != uniformBlock.end(); ++iter)
		collectUniqueBasicTypes(basicTypes, iter->getType());
}

void collectUniqueBasicTypes (std::set<glu::DataType>& basicTypes, const ShaderInterface& interface)
{
	for (int ndx = 0; ndx < interface.getNumUniformBlocks(); ++ndx)
		collectUniqueBasicTypes(basicTypes, interface.getUniformBlock(ndx));
}

void generateCompareFuncs (std::ostream& str, const ShaderInterface& interface)
{
	std::set<glu::DataType> types;
	std::set<glu::DataType> compareFuncs;

	// Collect unique basic types
	collectUniqueBasicTypes(types, interface);

	// Set of compare functions required
	for (std::set<glu::DataType>::const_iterator iter = types.begin(); iter != types.end(); ++iter)
	{
		getCompareDependencies(compareFuncs, *iter);
	}

	for (int type = 0; type < glu::TYPE_LAST; ++type)
	{
		if (compareFuncs.find(glu::DataType(type)) != compareFuncs.end())
			str << getCompareFuncForType(glu::DataType(type));
	}
}

struct Indent
{
	int level;
	Indent (int level_) : level(level_) {}
};

std::ostream& operator<< (std::ostream& str, const Indent& indent)
{
	for (int i = 0; i < indent.level; i++)
		str << "\t";
	return str;
}

void		generateDeclaration			(std::ostringstream& src, const VarType& type, const std::string& name, int indentLevel, deUint32 unusedHints, deUint32 flagsMask, deUint32 offset);
void		generateDeclaration			(std::ostringstream& src, const Uniform& uniform, int indentLevel, deUint32 offset);
void		generateDeclaration			(std::ostringstream& src, const StructType& structType, int indentLevel);

void		generateLocalDeclaration	(std::ostringstream& src, const StructType& structType, int indentLevel);
void		generateFullDeclaration		(std::ostringstream& src, const StructType& structType, int indentLevel);

void generateDeclaration (std::ostringstream& src, const StructType& structType, int indentLevel)
{
	DE_ASSERT(structType.hasTypeName());
	generateFullDeclaration(src, structType, indentLevel);
	src << ";\n";
}

void generateFullDeclaration (std::ostringstream& src, const StructType& structType, int indentLevel)
{
	src << "struct";
	if (structType.hasTypeName())
		src << " " << structType.getTypeName();
	src << "\n" << Indent(indentLevel) << "{\n";

	for (StructType::ConstIterator memberIter = structType.begin(); memberIter != structType.end(); memberIter++)
	{
		src << Indent(indentLevel + 1);
		generateDeclaration(src, memberIter->getType(), memberIter->getName(), indentLevel + 1, memberIter->getFlags() & UNUSED_BOTH, ~LAYOUT_OFFSET, 0u);
	}

	src << Indent(indentLevel) << "}";
}

void generateLocalDeclaration (std::ostringstream& src, const StructType& structType, int /* indentLevel */)
{
	src << structType.getTypeName();
}

void generateLayoutAndPrecisionDeclaration (std::ostringstream& src, deUint32 flags, deUint32 offset)
{
	if ((flags & LAYOUT_MASK) != 0)
		src << "layout(" << LayoutFlagsFmt(flags & LAYOUT_MASK, offset) << ") ";

	if ((flags & PRECISION_MASK) != 0)
		src << PrecisionFlagsFmt(flags & PRECISION_MASK) << " ";
}

void generateDeclaration (std::ostringstream& src, const VarType& type, const std::string& name, int indentLevel, deUint32 unusedHints, deUint32 flagsMask, deUint32 offset)
{
	generateLayoutAndPrecisionDeclaration(src, type.getFlags() & flagsMask, offset);

	if (type.isBasicType())
		src << glu::getDataTypeName(type.getBasicType()) << " " << name;
	else if (type.isArrayType())
	{
		std::vector<int>	arraySizes;
		const VarType*		curType		= &type;
		while (curType->isArrayType())
		{
			arraySizes.push_back(curType->getArraySize());
			curType = &curType->getElementType();
		}

		generateLayoutAndPrecisionDeclaration(src, curType->getFlags() & flagsMask, offset);

		if (curType->isBasicType())
			src << glu::getDataTypeName(curType->getBasicType());
		else
		{
			DE_ASSERT(curType->isStructType());
			generateLocalDeclaration(src, curType->getStruct(), indentLevel+1);
		}

		src << " " << name;

		for (std::vector<int>::const_iterator sizeIter = arraySizes.begin(); sizeIter != arraySizes.end(); sizeIter++)
			src << "[" << *sizeIter << "]";
	}
	else
	{
		generateLocalDeclaration(src, type.getStruct(), indentLevel+1);
		src << " " << name;
	}

	src << ";";

	// Print out unused hints.
	if (unusedHints != 0)
		src << " // unused in " << (unusedHints == UNUSED_BOTH		? "both shaders"	:
									unusedHints == UNUSED_VERTEX	? "vertex shader"	:
									unusedHints == UNUSED_FRAGMENT	? "fragment shader" : "???");

	src << "\n";
}

void generateDeclaration (std::ostringstream& src, const Uniform& uniform, int indentLevel, deUint32 offset)
{
	if ((uniform.getFlags() & LAYOUT_MASK) != 0)
		src << "layout(" << LayoutFlagsFmt(uniform.getFlags() & LAYOUT_MASK) << ") ";

	generateDeclaration(src, uniform.getType(), uniform.getName(), indentLevel, uniform.getFlags() & UNUSED_BOTH, ~0u, offset);
}

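// Resolves the layout offset of a uniform by descending to its first basic element
// (appending "[0]" for arrays and the first member name for structs) and looking
// the resulting API name up in the computed layout.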
deUint32 getBlockMemberOffset (int blockNdx, const UniformBlock& block, const Uniform& uniform, const UniformLayout& layout)
{
	std::ostringstream	name;
	const VarType*		curType = &uniform.getType();

	if (block.getInstanceName().length() != 0)
		name << block.getBlockName() << ".";	// \note UniformLayoutEntry uses block name rather than instance name

	name << uniform.getName();

	while (!curType->isBasicType())
	{
		if (curType->isArrayType())
		{
			name << "[0]";
			curType = &curType->getElementType();
		}

		if (curType->isStructType())
		{
			const StructType::ConstIterator firstMember = curType->getStruct().begin();
			name << "." << firstMember->getName();
			curType = &firstMember->getType();
		}
	}

	const int uniformNdx = layout.getUniformLayoutIndex(blockNdx, name.str());
	DE_ASSERT(uniformNdx >= 0);

	return layout.uniforms[uniformNdx].offset;
}

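// Reorders a vector as first, last, second, second-to-last, ... so that shuffled
// member declarations still cover every element exactly once.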
template<typename T>
void semiShuffle (std::vector<T>& v)
{
	const std::vector<T>	src	= v;
	int						i	= -1;
	int						n	= static_cast<int>(src.size());

	v.clear();

	while (n)
	{
		i += n;
		v.push_back(src[i]);
		n = (n > 0 ? 1 - n : -1 - n);
	}
}

template<typename T>
//! \note Stores pointers to original elements
class Traverser
{
public:
	template<typename Iter>
	Traverser (const Iter beg, const Iter end, const bool shuffled)
	{
		for (Iter it = beg; it != end; ++it)
			m_elements.push_back(&(*it));

		if (shuffled)
			semiShuffle(m_elements);

		m_next = m_elements.begin();
	}

	T* next (void)
	{
		if (m_next != m_elements.end())
			return *m_next++;
		else
			return DE_NULL;
	}

private:
	typename std::vector<T*>					m_elements;
	typename std::vector<T*>::const_iterator	m_next;
};

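// Emits the GLSL declaration of a uniform block: set/binding and layout qualifiers,
// each member (optionally in semi-shuffled order) with its explicit offset taken
// from the computed layout, and the optional instance name / array size.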
void generateDeclaration (std::ostringstream& src, int blockNdx, const UniformBlock& block, const UniformLayout& layout, bool shuffleUniformMembers)
{
	src << "layout(set = 0, binding = " << blockNdx;
	if ((block.getFlags() & LAYOUT_MASK) != 0)
		src << ", " << LayoutFlagsFmt(block.getFlags() & LAYOUT_MASK);
	src << ") ";

	src << "uniform " << block.getBlockName();
	src << "\n{\n";

	Traverser<const Uniform> uniforms(block.begin(), block.end(), shuffleUniformMembers);

	while (const Uniform* pUniform = uniforms.next())
	{
		src << Indent(1);
		generateDeclaration(src, *pUniform, 1 /* indent level */, getBlockMemberOffset(blockNdx, block, *pUniform, layout));
	}

	src << "}";

	if (block.hasInstanceName())
	{
		src << " " << block.getInstanceName();
		if (block.isArray())
			src << "[" << block.getArraySize() << "]";
	}
	else
		DE_ASSERT(!block.isArray());

	src << ";\n";
}

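// Emits a GLSL constructor expression that reproduces the value stored for 'entry'
// at array element 'elementNdx', reading the components back from the block memory
// at basePtr.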
void generateValueSrc (std::ostringstream& src, const UniformLayoutEntry& entry, const void* basePtr, int elementNdx)
{
	glu::DataType	scalarType		= glu::getDataTypeScalarType(entry.type);
	int				scalarSize		= glu::getDataTypeScalarSize(entry.type);
	bool			isArray			= entry.size > 1;
	const deUint8*	elemPtr			= (const deUint8*)basePtr + entry.offset + (isArray ? elementNdx * entry.arrayStride : 0);
	const int		compSize		= sizeof(deUint32);

	if (scalarSize > 1)
		src << glu::getDataTypeName(entry.type) << "(";

	if (glu::isDataTypeMatrix(entry.type))
	{
		int	numRows	= glu::getDataTypeMatrixNumRows(entry.type);
		int	numCols	= glu::getDataTypeMatrixNumColumns(entry.type);

		DE_ASSERT(scalarType == glu::TYPE_FLOAT);

		// Constructed in column-wise order.
		for (int colNdx = 0; colNdx < numCols; colNdx++)
		{
			for (int rowNdx = 0; rowNdx < numRows; rowNdx++)
			{
				const deUint8*	compPtr	= elemPtr + (entry.isRowMajor ? (rowNdx * entry.matrixStride + colNdx * compSize)
																	  : (colNdx * entry.matrixStride + rowNdx * compSize));

				if (colNdx > 0 || rowNdx > 0)
					src << ", ";

				src << de::floatToString(*((const float*)compPtr), 1);
			}
		}
	}
	else
	{
		for (int scalarNdx = 0; scalarNdx < scalarSize; scalarNdx++)
		{
			const deUint8* compPtr = elemPtr + scalarNdx * compSize;

			if (scalarNdx > 0)
				src << ", ";

			switch (scalarType)
			{
				case glu::TYPE_FLOAT:	src << de::floatToString(*((const float*)compPtr), 1);			break;
				case glu::TYPE_INT:		src << *((const int*)compPtr);									break;
				case glu::TYPE_UINT:	src << *((const deUint32*)compPtr) << "u";						break;
				case glu::TYPE_BOOL:	src << (*((const deUint32*)compPtr) != 0u ? "true" : "false");	break;
				default:
					DE_ASSERT(false);
			}
		}
	}

	if (scalarSize > 1)
		src << ")";
}

bool isMatrix (glu::DataType elementType)
{
	return (elementType >= glu::TYPE_FLOAT_MAT2) && (elementType <= glu::TYPE_FLOAT_MAT4);
}

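// Emits matrix comparisons either one column vector at a time (vector == true, used
// by generateTestSrcMatrixPerVec) or one scalar element at a time (vector == false,
// used by generateTestSrcMatrixPerElement).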
void writeMatrixTypeSrc (int						columnCount,
						 int						rowCount,
						 std::string				compare,
						 std::string				compareType,
						 std::ostringstream&		src,
						 const std::string&			srcName,
						 const void*				basePtr,
						 const UniformLayoutEntry&	entry,
						 bool						vector)
{
	if (vector)	// generateTestSrcMatrixPerVec
	{
		for (int colNdex = 0; colNdex < columnCount; colNdex++)
		{
			src << "\tresult *= " << compare + compareType << "(" << srcName << "[" << colNdex << "], ";

			if (glu::isDataTypeMatrix(entry.type))
			{
				int	scalarSize = glu::getDataTypeScalarSize(entry.type);
				const deUint8*	elemPtr			= (const deUint8*)basePtr + entry.offset;
				const int		compSize		= sizeof(deUint32);

				if (scalarSize > 1)
					src << compareType << "(";
				for (int rowNdex = 0; rowNdex < rowCount; rowNdex++)
				{
					const deUint8*	compPtr	= elemPtr + (entry.isRowMajor ? (rowNdex * entry.matrixStride + colNdex * compSize)
																		  : (colNdex * entry.matrixStride + rowNdex * compSize));
					src << de::floatToString(*((const float*)compPtr), 1);

					if (rowNdex < rowCount-1)
						src << ", ";
				}
				src << "));\n";
			}
			else
			{
				generateValueSrc(src, entry, basePtr, 0);
				src << "[" << colNdex << "]);\n";
			}
		}
	}
	else		// generateTestSrcMatrixPerElement
	{
		for (int colNdex = 0; colNdex < columnCount; colNdex++)
		{
			for (int rowNdex = 0; rowNdex < rowCount; rowNdex++)
			{
				src << "\tresult *= " << compare + compareType << "(" << srcName << "[" << colNdex << "][" << rowNdex << "], ";
				if (glu::isDataTypeMatrix(entry.type))
				{
					const deUint8*	elemPtr			= (const deUint8*)basePtr + entry.offset;
					const int		compSize		= sizeof(deUint32);
					const deUint8*	compPtr	= elemPtr + (entry.isRowMajor ? (rowNdex * entry.matrixStride + colNdex * compSize)
																		  : (colNdex * entry.matrixStride + rowNdex * compSize));

					src << de::floatToString(*((const float*)compPtr), 1) << ");\n";
				}
				else
				{
					generateValueSrc(src, entry, basePtr, 0);
					src << "[" << colNdex << "][" << rowNdex << "]);\n";
				}
			}
		}
	}
}

void generateTestSrcMatrixPerVec (glu::DataType				elementType,
								  std::ostringstream&		src,
								  const std::string&		srcName,
								  const void*				basePtr,
								  const UniformLayoutEntry&	entry,
								  bool						vector)
{
	std::string compare = "compare_";
	switch (elementType)
	{
		case glu::TYPE_FLOAT_MAT2:
			writeMatrixTypeSrc(2, 2, compare, "vec2", src, srcName, basePtr, entry, vector);
			break;

		case glu::TYPE_FLOAT_MAT2X3:
			writeMatrixTypeSrc(2, 3, compare, "vec3", src, srcName, basePtr, entry, vector);
			break;

		case glu::TYPE_FLOAT_MAT2X4:
			writeMatrixTypeSrc(2, 4, compare, "vec4", src, srcName, basePtr, entry, vector);
			break;

		case glu::TYPE_FLOAT_MAT3X4:
			writeMatrixTypeSrc(3, 4, compare, "vec4", src, srcName, basePtr, entry, vector);
			break;

		case glu::TYPE_FLOAT_MAT4:
			writeMatrixTypeSrc(4, 4, compare, "vec4", src, srcName, basePtr, entry, vector);
			break;

		case glu::TYPE_FLOAT_MAT4X2:
			writeMatrixTypeSrc(4, 2, compare, "vec2", src, srcName, basePtr, entry, vector);
			break;

		case glu::TYPE_FLOAT_MAT4X3:
			writeMatrixTypeSrc(4, 3, compare, "vec3", src, srcName, basePtr, entry, vector);
			break;

		default:
			break;
	}
}

void generateTestSrcMatrixPerElement (glu::DataType				elementType,
									  std::ostringstream&		src,
									  const std::string&		srcName,
									  const void*				basePtr,
									  const UniformLayoutEntry&	entry,
									  bool						vector)
{
	std::string compare = "compare_";
	std::string compareType = "float";
	switch (elementType)
	{
		case glu::TYPE_FLOAT_MAT2:
			writeMatrixTypeSrc(2, 2, compare, compareType, src, srcName, basePtr, entry, vector);
			break;

		case glu::TYPE_FLOAT_MAT2X3:
			writeMatrixTypeSrc(2, 3, compare, compareType, src, srcName, basePtr, entry, vector);
			break;

		case glu::TYPE_FLOAT_MAT2X4:
			writeMatrixTypeSrc(2, 4, compare, compareType, src, srcName, basePtr, entry, vector);
			break;

		case glu::TYPE_FLOAT_MAT3X4:
			writeMatrixTypeSrc(3, 4, compare, compareType, src, srcName, basePtr, entry, vector);
			break;

		case glu::TYPE_FLOAT_MAT4:
			writeMatrixTypeSrc(4, 4, compare, compareType, src, srcName, basePtr, entry, vector);
			break;

		case glu::TYPE_FLOAT_MAT4X2:
			writeMatrixTypeSrc(4, 2, compare, compareType, src, srcName, basePtr, entry, vector);
			break;

		case glu::TYPE_FLOAT_MAT4X3:
			writeMatrixTypeSrc(4, 3, compare, compareType, src, srcName, basePtr, entry, vector);
			break;

		default:
			break;
	}
}

void generateSingleCompare (std::ostringstream&			src,
							glu::DataType				elementType,
							const std::string&			srcName,
							const void*					basePtr,
							const UniformLayoutEntry&	entry,
							MatrixLoadFlags				matrixLoadFlag)
{
	if (matrixLoadFlag == LOAD_FULL_MATRIX)
	{
		const char* typeName = glu::getDataTypeName(elementType);

		src << "\tresult *= compare_" << typeName << "(" << srcName << ", ";
		generateValueSrc(src, entry, basePtr, 0);
		src << ");\n";
	}
	else
	{
		if (isMatrix(elementType))
		{
			generateTestSrcMatrixPerVec		(elementType, src, srcName, basePtr, entry, true);
			generateTestSrcMatrixPerElement	(elementType, src, srcName, basePtr, entry, false);
		}
	}
}

void generateCompareSrc (std::ostringstream&	src,
						 const char*			resultVar,
						 const VarType&			type,
						 const std::string&		srcName,
						 const std::string&		apiName,
						 const UniformLayout&	layout,
						 int					blockNdx,
						 const void*			basePtr,
						 deUint32				unusedMask,
						 MatrixLoadFlags		matrixLoadFlag)
{
	if (type.isBasicType() || (type.isArrayType() && type.getElementType().isBasicType()))
	{
		// Basic type or array of basic types.
		bool						isArray			= type.isArrayType();
		glu::DataType				elementType		= isArray ? type.getElementType().getBasicType() : type.getBasicType();
		const char*					typeName		= glu::getDataTypeName(elementType);
		std::string					fullApiName		= std::string(apiName) + (isArray ? "[0]" : ""); // Arrays are always postfixed with [0]
		int							uniformNdx		= layout.getUniformLayoutIndex(blockNdx, fullApiName);
		const UniformLayoutEntry&	entry			= layout.uniforms[uniformNdx];

		if (isArray)
		{
			for (int elemNdx = 0; elemNdx < type.getArraySize(); elemNdx++)
			{
				src << "\tresult *= compare_" << typeName << "(" << srcName << "[" << elemNdx << "], ";
				generateValueSrc(src, entry, basePtr, elemNdx);
				src << ");\n";
			}
		}
		else
		{
			generateSingleCompare(src, elementType, srcName, basePtr, entry, matrixLoadFlag);
		}
	}
	else if (type.isArrayType())
	{
		const VarType& elementType = type.getElementType();

		for (int elementNdx = 0; elementNdx < type.getArraySize(); elementNdx++)
		{
			std::string op = std::string("[") + de::toString(elementNdx) + "]";
			std::string elementSrcName = std::string(srcName) + op;
			std::string elementApiName = std::string(apiName) + op;
			generateCompareSrc(src, resultVar, elementType, elementSrcName, elementApiName, layout, blockNdx, basePtr, unusedMask, LOAD_FULL_MATRIX);
		}
	}
	else
	{
		DE_ASSERT(type.isStructType());

		for (StructType::ConstIterator memberIter = type.getStruct().begin(); memberIter != type.getStruct().end(); memberIter++)
		{
			if (memberIter->getFlags() & unusedMask)
				continue; // Skip member.

			std::string op = std::string(".") + memberIter->getName();
			std::string memberSrcName = std::string(srcName) + op;
			std::string memberApiName = std::string(apiName) + op;
			generateCompareSrc(src, resultVar, memberIter->getType(), memberSrcName, memberApiName, layout, blockNdx, basePtr, unusedMask, LOAD_FULL_MATRIX);
		}
	}
}

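// Walks every block declared for the current shader stage and emits comparisons
// for all uniforms that are not flagged unused in that stage.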
void generateCompareSrc (std::ostringstream& src,
						 const char* resultVar,
						 const ShaderInterface& interface,
						 const UniformLayout& layout,
						 const std::map<int, void*>& blockPointers,
						 bool isVertex,
						 MatrixLoadFlags matrixLoadFlag)
{
	deUint32 unusedMask = isVertex ? UNUSED_VERTEX : UNUSED_FRAGMENT;

	for (int blockNdx = 0; blockNdx < interface.getNumUniformBlocks(); blockNdx++)
	{
		const UniformBlock& block = interface.getUniformBlock(blockNdx);

		if ((block.getFlags() & (isVertex ? DECLARE_VERTEX : DECLARE_FRAGMENT)) == 0)
			continue; // Skip.

		bool			hasInstanceName	= block.hasInstanceName();
		bool			isArray			= block.isArray();
		int				numInstances	= isArray ? block.getArraySize() : 1;
		std::string		apiPrefix		= hasInstanceName ? block.getBlockName() + "." : std::string("");

		DE_ASSERT(!isArray || hasInstanceName);

		for (int instanceNdx = 0; instanceNdx < numInstances; instanceNdx++)
		{
			std::string		instancePostfix		= isArray ? std::string("[") + de::toString(instanceNdx) + "]" : std::string("");
			std::string		blockInstanceName	= block.getBlockName() + instancePostfix;
			std::string		srcPrefix			= hasInstanceName ? block.getInstanceName() + instancePostfix + "." : std::string("");
			int				blockLayoutNdx		= layout.getBlockLayoutIndex(blockNdx, instanceNdx);
			void*			basePtr				= blockPointers.find(blockLayoutNdx)->second;

			for (UniformBlock::ConstIterator uniformIter = block.begin(); uniformIter != block.end(); uniformIter++)
			{
				const Uniform& uniform = *uniformIter;

				if (uniform.getFlags() & unusedMask)
					continue; // Don't read from that uniform.

				std::string srcName = srcPrefix + uniform.getName();
				std::string apiName = apiPrefix + uniform.getName();
				generateCompareSrc(src, resultVar, uniform.getType(), srcName, apiName, layout, blockNdx, basePtr, unusedMask, matrixLoadFlag);
			}
		}
	}
}

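// Builds the GLSL 450 vertex shader: declares the blocks marked DECLARE_VERTEX,
// compares every used uniform against its generated value and writes the combined
// result to v_vtxResult.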
std::string generateVertexShader (const ShaderInterface& interface, const UniformLayout& layout, const std::map<int, void*>& blockPointers, MatrixLoadFlags matrixLoadFlag, bool shuffleUniformMembers)
{
	std::ostringstream src;
	src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450) << "\n";

	src << "layout(location = 0) in highp vec4 a_position;\n";
	src << "layout(location = 0) out mediump float v_vtxResult;\n";
	src << "\n";

	std::vector<const StructType*> namedStructs;
	interface.getNamedStructs(namedStructs);
	for (std::vector<const StructType*>::const_iterator structIter = namedStructs.begin(); structIter != namedStructs.end(); structIter++)
		generateDeclaration(src, **structIter, 0);

	for (int blockNdx = 0; blockNdx < interface.getNumUniformBlocks(); blockNdx++)
	{
		const UniformBlock& block = interface.getUniformBlock(blockNdx);
		if (block.getFlags() & DECLARE_VERTEX)
			generateDeclaration(src, blockNdx, block, layout, shuffleUniformMembers);
	}

	// Comparison utilities.
	src << "\n";
	generateCompareFuncs(src, interface);

	src << "\n"
		   "void main (void)\n"
		   "{\n"
		   "	gl_Position = a_position;\n"
		   "	mediump float result = 1.0;\n";

	// Value compare.
	generateCompareSrc(src, "result", interface, layout, blockPointers, true, matrixLoadFlag);

	src << "	v_vtxResult = result;\n"
		   "}\n";

	return src.str();
}

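// Builds the matching fragment shader: declares the blocks marked DECLARE_FRAGMENT
// and combines its own comparison result with the vertex-stage result in the
// output color.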
std::string generateFragmentShader (const ShaderInterface& interface, const UniformLayout& layout, const std::map<int, void*>& blockPointers, MatrixLoadFlags matrixLoadFlag, bool shuffleUniformMembers)
{
	std::ostringstream src;
	src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_450) << "\n";

	src << "layout(location = 0) in mediump float v_vtxResult;\n";
	src << "layout(location = 0) out mediump vec4 dEQP_FragColor;\n";
	src << "\n";

	std::vector<const StructType*> namedStructs;
	interface.getNamedStructs(namedStructs);
	for (std::vector<const StructType*>::const_iterator structIter = namedStructs.begin(); structIter != namedStructs.end(); structIter++)
		generateDeclaration(src, **structIter, 0);

	for (int blockNdx = 0; blockNdx < interface.getNumUniformBlocks(); blockNdx++)
	{
		const UniformBlock& block = interface.getUniformBlock(blockNdx);
		if (block.getFlags() & DECLARE_FRAGMENT)
			generateDeclaration(src, blockNdx, block, layout, shuffleUniformMembers);
	}

	// Comparison utilities.
	src << "\n";
	generateCompareFuncs(src, interface);

	src << "\n"
		   "void main (void)\n"
		   "{\n"
		   "	mediump float result = 1.0;\n";

	// Value compare.
	generateCompareSrc(src, "result", interface, layout, blockPointers, false, matrixLoadFlag);

	src << "	dEQP_FragColor = vec4(1.0, v_vtxResult, result, 1.0);\n"
		   "}\n";

	return src.str();
}

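// Vulkan object helpers used by the test instance below.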
createBuffer(Context & context,VkDeviceSize bufferSize,vk::VkBufferUsageFlags usageFlags)1408 Move<VkBuffer> createBuffer (Context& context, VkDeviceSize bufferSize, vk::VkBufferUsageFlags usageFlags)
1409 {
1410 	const VkDevice				vkDevice			= context.getDevice();
1411 	const DeviceInterface&		vk					= context.getDeviceInterface();
1412 	const deUint32				queueFamilyIndex	= context.getUniversalQueueFamilyIndex();
1413 
1414 	const VkBufferCreateInfo	bufferInfo			=
1415 	{
1416 		VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,	// VkStructureType		sType;
1417 		DE_NULL,								// const void*			pNext;
1418 		0u,										// VkBufferCreateFlags	flags;
1419 		bufferSize,								// VkDeviceSize			size;
1420 		usageFlags,								// VkBufferUsageFlags	usage;
1421 		VK_SHARING_MODE_EXCLUSIVE,				// VkSharingMode		sharingMode;
1422 		1u,										// deUint32				queueFamilyIndexCount;
1423 		&queueFamilyIndex						// const deUint32*		pQueueFamilyIndices;
1424 	};
1425 
1426 	return vk::createBuffer(vk, vkDevice, &bufferInfo);
1427 }
1428 
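// Creates a single-sample 2D image with one mip level and one array layer, in UNDEFINED initial layout.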
1429 Move<vk::VkImage> createImage2D (Context& context, deUint32 width, deUint32 height, vk::VkFormat format, vk::VkImageTiling tiling, vk::VkImageUsageFlags usageFlags)
1430 {
1431 	const deUint32				queueFamilyIndex	= context.getUniversalQueueFamilyIndex();
1432 	const vk::VkImageCreateInfo	params				=
1433 	{
1434 		vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,	// VkStructureType			sType
1435 		DE_NULL,									// const void*				pNext
1436 		0u,											// VkImageCreateFlags		flags
1437 		vk::VK_IMAGE_TYPE_2D,						// VkImageType				imageType
1438 		format,										// VkFormat					format
1439 		{ width, height, 1u },						// VkExtent3D				extent
1440 		1u,											// deUint32					mipLevels
1441 		1u,											// deUint32					arrayLayers
1442 		VK_SAMPLE_COUNT_1_BIT,						// VkSampleCountFlagBits	samples
1443 		tiling,										// VkImageTiling			tiling
1444 		usageFlags,									// VkImageUsageFlags		usage
1445 		vk::VK_SHARING_MODE_EXCLUSIVE,				// VkSharingMode			sharingMode
1446 		1u,											// deUint32					queueFamilyIndexCount
1447 		&queueFamilyIndex,							// const deUint32*			pQueueFamilyIndices
1448 		vk::VK_IMAGE_LAYOUT_UNDEFINED,				// VkImageLayout			initialLayout
1449 	};
1450 
1451 	return vk::createImage(context.getDeviceInterface(), context.getDevice(), &params);
1452 }
1453 
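// Allocates memory for the buffer from the context's default allocator and binds it.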
1454 de::MovePtr<vk::Allocation> allocateAndBindMemory (Context& context, vk::VkBuffer buffer, vk::MemoryRequirement memReqs)
1455 {
1456 	const vk::DeviceInterface&		vkd		= context.getDeviceInterface();
1457 	const vk::VkMemoryRequirements	bufReqs	= vk::getBufferMemoryRequirements(vkd, context.getDevice(), buffer);
1458 	de::MovePtr<vk::Allocation>		memory	= context.getDefaultAllocator().allocate(bufReqs, memReqs);
1459 
1460 	vkd.bindBufferMemory(context.getDevice(), buffer, memory->getMemory(), memory->getOffset());
1461 
1462 	return memory;
1463 }
1464 
1465 de::MovePtr<vk::Allocation> allocateAndBindMemory (Context& context, vk::VkImage image, vk::MemoryRequirement memReqs)
1466 {
1467 	const vk::DeviceInterface&	  vkd	 = context.getDeviceInterface();
1468 	const vk::VkMemoryRequirements  imgReqs = vk::getImageMemoryRequirements(vkd, context.getDevice(), image);
1469 	de::MovePtr<vk::Allocation>		 memory  = context.getDefaultAllocator().allocate(imgReqs, memReqs);
1470 
1471 	vkd.bindImageMemory(context.getDevice(), image, memory->getMemory(), memory->getOffset());
1472 
1473 	return memory;
1474 }
1475 
1476 Move<vk::VkImageView> createAttachmentView (Context& context, vk::VkImage image, vk::VkFormat format)
1477 {
1478 	const vk::VkImageViewCreateInfo params =
1479 	{
1480 		vk::VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,		// sType
1481 		DE_NULL,											// pNext
1482 		0u,													// flags
1483 		image,												// image
1484 		vk::VK_IMAGE_VIEW_TYPE_2D,							// viewType
1485 		format,												// format
1486 		vk::makeComponentMappingRGBA(),						// components
1487 		{ vk::VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, 1u },	// subresourceRange
1488 	};
1489 
1490 	return vk::createImageView(context.getDeviceInterface(), context.getDevice(), &params);
1491 }
1492 
1493 Move<vk::VkPipelineLayout> createPipelineLayout (Context& context, vk::VkDescriptorSetLayout descriptorSetLayout)
1494 {
1495 	const vk::VkPipelineLayoutCreateInfo params =
1496 	{
1497 		vk::VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,	// sType
1498 		DE_NULL,											// pNext
1499 		0u,													// flags
1500 		1u,													// setLayoutCount
1501 		&descriptorSetLayout,								// pSetLayouts
1502 		0u,													// pushConstantRangeCount
1503 		DE_NULL,											// pPushConstantRanges
1504 	};
1505 
1506 	return vk::createPipelineLayout(context.getDeviceInterface(), context.getDevice(), &params);
1507 }
1508 
1509 Move<vk::VkCommandPool> createCmdPool (Context& context)
1510 {
1511 	const deUint32					queueFamilyIndex	= context.getUniversalQueueFamilyIndex();
1512 	const vk::VkCommandPoolCreateInfo	params				=
1513 	{
1514 		vk::VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,			// sType
1515 		DE_NULL,												// pNext
1516 		vk::VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT,	// flags
1517 		queueFamilyIndex,										// queueFamilyIndex
1518 	};
1519 
1520 	return vk::createCommandPool(context.getDeviceInterface(), context.getDevice(), &params);
1521 }
1522 
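// Allocates a single primary command buffer from the given pool.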
1523 Move<vk::VkCommandBuffer> createCmdBuffer (Context& context, vk::VkCommandPool cmdPool)
1524 {
1525 	const vk::VkCommandBufferAllocateInfo params =
1526 	{
1527 		vk::VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,	// sType
1528 		DE_NULL,											// pNext
1529 		cmdPool,											// commandPool
1530 		vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY,				// level
1531 		1u,													// commandBufferCount;
1532 	};
1533 
1534 	return vk::allocateCommandBuffer(context.getDeviceInterface(), context.getDevice(), &params);
1535 }
1536 
1537 
1538 // UniformBlockCaseInstance
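//
// Renders a full-screen quad with the generated shaders; both stages compare the uniform block
// contents against the reference data and the test passes only if the whole rendered image is white.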
1539 
1540 class UniformBlockCaseInstance : public vkt::TestInstance
1541 {
1542 public:
1543 									UniformBlockCaseInstance	(Context&						context,
1544 																 UniformBlockCase::BufferMode	bufferMode,
1545 																 const UniformLayout&			layout,
1546 																 const std::map<int, void*>&	blockPointers);
1547 	virtual							~UniformBlockCaseInstance	(void);
1548 	virtual tcu::TestStatus			iterate						(void);
1549 
1550 private:
1551 	enum
1552 	{
1553 		RENDER_WIDTH = 100,
1554 		RENDER_HEIGHT = 100,
1555 	};
1556 
1557 	vk::Move<VkRenderPass>			createRenderPass			(vk::VkFormat format) const;
1558 	vk::Move<VkFramebuffer>			createFramebuffer			(vk::VkRenderPass renderPass, vk::VkImageView colorImageView) const;
1559 	vk::Move<VkDescriptorSetLayout>	createDescriptorSetLayout	(void) const;
1560 	vk::Move<VkDescriptorPool>		createDescriptorPool		(void) const;
1561 	vk::Move<VkPipeline>			createPipeline				(vk::VkShaderModule vtxShaderModule, vk::VkShaderModule fragShaderModule, vk::VkPipelineLayout pipelineLayout, vk::VkRenderPass renderPass) const;
1562 
1563 	vk::VkDescriptorBufferInfo		addUniformData				(deUint32 size, const void* dataPtr);
1564 
1565 	UniformBlockCase::BufferMode	m_bufferMode;
1566 	const UniformLayout&			m_layout;
1567 	const std::map<int, void*>&		m_blockPointers;
1568 
1569 	typedef de::SharedPtr<vk::Unique<vk::VkBuffer> >	VkBufferSp;
1570 	typedef de::SharedPtr<vk::Allocation>				AllocationSp;
1571 
1572 	std::vector<VkBufferSp>			m_uniformBuffers;
1573 	std::vector<AllocationSp>		m_uniformAllocs;
1574 };
1575 
1576 UniformBlockCaseInstance::UniformBlockCaseInstance (Context&						ctx,
1577 													UniformBlockCase::BufferMode	bufferMode,
1578 													const UniformLayout&			layout,
1579 													const std::map<int, void*>&		blockPointers)
1580 	: vkt::TestInstance (ctx)
1581 	, m_bufferMode		(bufferMode)
1582 	, m_layout			(layout)
1583 	, m_blockPointers	(blockPointers)
1584 {
1585 }
1586 
1587 UniformBlockCaseInstance::~UniformBlockCaseInstance (void)
1588 {
1589 }
1590 
1591 tcu::TestStatus UniformBlockCaseInstance::iterate (void)
1592 {
1593 	const vk::DeviceInterface&		vk					= m_context.getDeviceInterface();
1594 	const vk::VkDevice				device				= m_context.getDevice();
1595 	const vk::VkQueue				queue				= m_context.getUniversalQueue();
1596 	const deUint32					queueFamilyIndex	= m_context.getUniversalQueueFamilyIndex();
1597 
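	// Full-screen quad (two triangles) covering the whole render target.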
1598 	const float positions[] =
1599 	{
1600 		-1.0f, -1.0f, 0.0f, 1.0f,
1601 		-1.0f, +1.0f, 0.0f, 1.0f,
1602 		+1.0f, -1.0f, 0.0f, 1.0f,
1603 		+1.0f, +1.0f, 0.0f, 1.0f
1604 	};
1605 
1606 	const deUint32 indices[] = { 0, 1, 2, 2, 1, 3 };
1607 
1608 	vk::Unique<VkBuffer>				positionsBuffer		(createBuffer(m_context, sizeof(positions), vk::VK_BUFFER_USAGE_VERTEX_BUFFER_BIT));
1609 	de::UniquePtr<Allocation>			positionsAlloc		(allocateAndBindMemory(m_context, *positionsBuffer, MemoryRequirement::HostVisible));
1610 	vk::Unique<VkBuffer>				indicesBuffer		(createBuffer(m_context, sizeof(indices), vk::VK_BUFFER_USAGE_INDEX_BUFFER_BIT|vk::VK_BUFFER_USAGE_VERTEX_BUFFER_BIT));
1611 	de::UniquePtr<Allocation>			indicesAlloc		(allocateAndBindMemory(m_context, *indicesBuffer, MemoryRequirement::HostVisible));
1612 
1613 	int minUniformBufferOffsetAlignment = getminUniformBufferOffsetAlignment(m_context);
1614 
1615 	// Upload vertex attribute data
1616 	{
1617 		deMemcpy(positionsAlloc->getHostPtr(), positions, sizeof(positions));
1618 		flushMappedMemoryRange(vk, device, positionsAlloc->getMemory(), positionsAlloc->getOffset(), sizeof(positions));
1619 
1620 		deMemcpy(indicesAlloc->getHostPtr(), indices, sizeof(indices));
1621 		flushMappedMemoryRange(vk, device, indicesAlloc->getMemory(), indicesAlloc->getOffset(), sizeof(indices));
1622 	}
1623 
1624 	vk::Unique<VkImage>					colorImage			(createImage2D(m_context,
1625 																			RENDER_WIDTH,
1626 																			RENDER_HEIGHT,
1627 																			vk::VK_FORMAT_R8G8B8A8_UNORM,
1628 																			vk::VK_IMAGE_TILING_OPTIMAL,
1629 																			vk::VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT));
1630 	de::UniquePtr<Allocation>			colorImageAlloc		(allocateAndBindMemory(m_context, *colorImage, MemoryRequirement::Any));
1631 	vk::Unique<VkImageView>				colorImageView		(createAttachmentView(m_context, *colorImage, vk::VK_FORMAT_R8G8B8A8_UNORM));
1632 
1633 	vk::Unique<VkDescriptorSetLayout>	descriptorSetLayout	(createDescriptorSetLayout());
1634 	vk::Unique<VkDescriptorPool>		descriptorPool		(createDescriptorPool());
1635 
1636 	const VkDescriptorSetAllocateInfo	descriptorSetAllocateInfo =
1637 	{
1638 		VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,		// VkStructureType				sType;
1639 		DE_NULL,											// const void*					pNext;
1640 		*descriptorPool,									// VkDescriptorPool				descriptorPool;
1641 		1u,													// deUint32						setLayoutCount;
1642 		&descriptorSetLayout.get()							// const VkDescriptorSetLayout*	pSetLayouts;
1643 	};
1644 
1645 	vk::Unique<VkDescriptorSet>			descriptorSet(vk::allocateDescriptorSet(vk, device, &descriptorSetAllocateInfo));
1646 	int									numBlocks = (int)m_layout.blocks.size();
1647 	std::vector<vk::VkDescriptorBufferInfo>	descriptors(numBlocks);
1648 
1649 	// Upload uniform data
1650 	{
1651 		vk::DescriptorSetUpdateBuilder	descriptorSetUpdateBuilder;
1652 
1653 		if (m_bufferMode == UniformBlockCase::BUFFERMODE_PER_BLOCK)
1654 		{
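			// One uniform buffer per block: upload each block's reference data separately and write its
			// descriptor at the block's binding/array element.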
1655 			for (int blockNdx = 0; blockNdx < numBlocks; blockNdx++)
1656 			{
1657 				const BlockLayoutEntry& block = m_layout.blocks[blockNdx];
1658 				const void*	srcPtr = m_blockPointers.find(blockNdx)->second;
1659 
1660 				descriptors[blockNdx] = addUniformData(block.size, srcPtr);
1661 				descriptorSetUpdateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::bindingArrayElement(block.bindingNdx, block.instanceNdx),
1662 														VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, &descriptors[blockNdx]);
1663 			}
1664 		}
1665 		else
1666 		{
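			// Single-buffer mode: pack all blocks into one buffer, aligning each block's offset to the
			// device's minUniformBufferOffsetAlignment.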
1667 			int currentOffset = 0;
1668 			std::map<int, int> offsets;
1669 			for (int blockNdx = 0; blockNdx < numBlocks; blockNdx++)
1670 			{
1671 				if (minUniformBufferOffsetAlignment > 0)
1672 					currentOffset = deAlign32(currentOffset, minUniformBufferOffsetAlignment);
1673 				offsets[blockNdx] = currentOffset;
1674 				currentOffset += m_layout.blocks[blockNdx].size;
1675 			}
1676 
1677 			deUint32 totalSize = currentOffset;
1678 
1679 			// Make a copy of the data that satisfies the device's min uniform buffer alignment
1680 			std::vector<deUint8> data;
1681 			data.resize(totalSize);
1682 			for (int blockNdx = 0; blockNdx < numBlocks; blockNdx++)
1683 			{
1684 				deMemcpy(&data[offsets[blockNdx]], m_blockPointers.find(blockNdx)->second, m_layout.blocks[blockNdx].size);
1685 			}
1686 
1687 			vk::VkBuffer buffer = addUniformData(totalSize, &data[0]).buffer;
1688 
1689 			for (int blockNdx = 0; blockNdx < numBlocks; blockNdx++)
1690 			{
1691 				const BlockLayoutEntry& block = m_layout.blocks[blockNdx];
1692 				deUint32 size = block.size;
1693 
1694 				const VkDescriptorBufferInfo	descriptor =
1695 				{
1696 					buffer,							// VkBuffer		buffer;
1697 					(deUint32)offsets[blockNdx],	// VkDeviceSize	offset;
1698 					size,							// VkDeviceSize	range;
1699 				};
1700 
1701 				descriptors[blockNdx] = descriptor;
1702 				descriptorSetUpdateBuilder.writeSingle(*descriptorSet,
1703 														vk::DescriptorSetUpdateBuilder::Location::bindingArrayElement(block.bindingNdx, block.instanceNdx),
1704 														VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
1705 														&descriptors[blockNdx]);
1706 			}
1707 		}
1708 
1709 		descriptorSetUpdateBuilder.update(vk, device);
1710 	}
1711 
1712 	vk::Unique<VkRenderPass>			renderPass			(createRenderPass(vk::VK_FORMAT_R8G8B8A8_UNORM));
1713 	vk::Unique<VkFramebuffer>			framebuffer			(createFramebuffer(*renderPass, *colorImageView));
1714 	vk::Unique<VkPipelineLayout>		pipelineLayout		(createPipelineLayout(m_context, *descriptorSetLayout));
1715 
1716 	vk::Unique<VkShaderModule>			vtxShaderModule		(vk::createShaderModule(vk, device, m_context.getBinaryCollection().get("vert"), 0));
1717 	vk::Unique<VkShaderModule>			fragShaderModule	(vk::createShaderModule(vk, device, m_context.getBinaryCollection().get("frag"), 0));
1718 	vk::Unique<VkPipeline>				pipeline			(createPipeline(*vtxShaderModule, *fragShaderModule, *pipelineLayout, *renderPass));
1719 	vk::Unique<VkCommandPool>			cmdPool				(createCmdPool(m_context));
1720 	vk::Unique<VkCommandBuffer>			cmdBuffer			(createCmdBuffer(m_context, *cmdPool));
1721 	vk::Unique<VkBuffer>				readImageBuffer		(createBuffer(m_context, (vk::VkDeviceSize)(RENDER_WIDTH * RENDER_HEIGHT * 4), vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT));
1722 	de::UniquePtr<Allocation>			readImageAlloc		(allocateAndBindMemory(m_context, *readImageBuffer, vk::MemoryRequirement::HostVisible));
1723 
1724 	// Record command buffer
1725 	const vk::VkCommandBufferBeginInfo beginInfo	=
1726 	{
1727 		vk::VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,	// VkStructureType					sType;
1728 		DE_NULL,											// const void*						pNext;
1729 		0u,													// VkCommandBufferUsageFlags		flags;
1730 		(const vk::VkCommandBufferInheritanceInfo*)DE_NULL,	// const VkCommandBufferInheritanceInfo*	pInheritanceInfo;
1731 	};
1732 	VK_CHECK(vk.beginCommandBuffer(*cmdBuffer, &beginInfo));
1733 
1734 	const vk::VkClearValue clearValue = vk::makeClearValueColorF32(0.125f, 0.25f, 0.75f, 1.0f);
1735 	const vk::VkRenderPassBeginInfo passBeginInfo	=
1736 	{
1737 		vk::VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,	// VkStructureType		sType;
1738 		DE_NULL,										// const void*			pNext;
1739 		*renderPass,									// VkRenderPass			renderPass;
1740 		*framebuffer,									// VkFramebuffer		framebuffer;
1741 		{ { 0, 0 }, { RENDER_WIDTH, RENDER_HEIGHT } },	// VkRect2D				renderArea;
1742 		1u,												// deUint32				clearValueCount;
1743 		&clearValue,									// const VkClearValue*	pClearValues;
1744 	};
1745 
1746 	// Barrier to transition the color image from UNDEFINED to COLOR_ATTACHMENT_OPTIMAL before rendering
1747 	{
1748 		const vk::VkImageMemoryBarrier  initializeBarrier =
1749 		{
1750 			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,		// VkStructureType			sType;
1751 			DE_NULL,										// const void*				pNext
1752 			0,												// VkAccessFlags			srcAccessMask;
1753 			vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,		// VkAccessFlags			dstAccessMask;
1754 			vk::VK_IMAGE_LAYOUT_UNDEFINED,					// VkImageLayout			oldLayout;
1755 			vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,	// VkImageLayout			newLayout;
1756 			queueFamilyIndex,								// deUint32					srcQueueFamilyIndex;
1757 			queueFamilyIndex,								// deUint32					dstQueueFamilyIndex;
1758 			*colorImage,									// VkImage					image;
1759 			{
1760 				vk::VK_IMAGE_ASPECT_COLOR_BIT,			// VkImageAspectFlags	aspectMask;
1761 				0u,										// deUint32				baseMipLevel;
1762 				1u,										// deUint32				levelCount;
1763 				0u,										// deUint32				baseArrayLayer;
1764 				1u,										// deUint32				layerCount;
1765 			}												// VkImageSubresourceRange	subresourceRange
1766 		};
1767 
1768 		vk.cmdPipelineBarrier(*cmdBuffer, vk::VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, vk::VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, (vk::VkDependencyFlags)0,
1769 			0, (const vk::VkMemoryBarrier*)DE_NULL,
1770 			0, (const vk::VkBufferMemoryBarrier*)DE_NULL,
1771 			1, &initializeBarrier);
1772 	}
1773 
1774 	vk.cmdBeginRenderPass(*cmdBuffer, &passBeginInfo, vk::VK_SUBPASS_CONTENTS_INLINE);
1775 
1776 	vk.cmdBindPipeline(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *pipeline);
1777 	vk.cmdBindDescriptorSets(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *pipelineLayout, 0u, 1u, &*descriptorSet, 0u, DE_NULL);
1778 
1779 	const vk::VkDeviceSize offsets[] = { 0u };
1780 	vk.cmdBindVertexBuffers(*cmdBuffer, 0u, 1u, &*positionsBuffer, offsets);
1781 	vk.cmdBindIndexBuffer(*cmdBuffer, *indicesBuffer, (vk::VkDeviceSize)0, vk::VK_INDEX_TYPE_UINT32);
1782 
1783 	vk.cmdDrawIndexed(*cmdBuffer, DE_LENGTH_OF_ARRAY(indices), 1u, 0u, 0u, 0u);
1784 	vk.cmdEndRenderPass(*cmdBuffer);
1785 
1786 	// Barrier to make color writes visible and transition the image to TRANSFER_SRC_OPTIMAL for the readback copy
1787 	{
1788 		const vk::VkImageMemoryBarrier  renderFinishBarrier =
1789 		{
1790 			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,		// VkStructureType			sType;
1791 			DE_NULL,										// const void*				pNext
1792 			vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,		// VkAccessFlags			srcAccessMask;
1793 			vk::VK_ACCESS_TRANSFER_READ_BIT,				// VkAccessFlags			dstAccessMask;
1794 			vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,	// VkImageLayout			oldLayout;
1795 			vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,		// VkImageLayout			newLayout;
1796 			queueFamilyIndex,								// deUint32					srcQueueFamilyIndex;
1797 			queueFamilyIndex,								// deUint32					dstQueueFamilyIndex;
1798 			*colorImage,									// VkImage					image;
1799 			{
1800 				vk::VK_IMAGE_ASPECT_COLOR_BIT,			// VkImageAspectFlags	aspectMask;
1801 				0u,										// deUint32				baseMipLevel;
1802 				1u,										// deUint32				levelCount;
1803 				0u,										// deUint32				baseArrayLayer;
1804 				1u,										// deUint32				layerCount;
1805 			}												// VkImageSubresourceRange	subresourceRange
1806 		};
1807 
1808 		vk.cmdPipelineBarrier(*cmdBuffer, vk::VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0,
1809 							  0, (const vk::VkMemoryBarrier*)DE_NULL,
1810 							  0, (const vk::VkBufferMemoryBarrier*)DE_NULL,
1811 							  1, &renderFinishBarrier);
1812 	}
1813 
1814 	// Add Image->Buffer copy command
1815 	{
1816 		const vk::VkBufferImageCopy copyParams =
1817 		{
1818 			(vk::VkDeviceSize)0u,					// VkDeviceSize				bufferOffset;
1819 			(deUint32)RENDER_WIDTH,					// deUint32					bufferRowLength;
1820 			(deUint32)RENDER_HEIGHT,				// deUint32					bufferImageHeight;
1821 			{
1822 				vk::VK_IMAGE_ASPECT_COLOR_BIT,	// VkImageAspectFlags	aspectMask;
1823 				0u,								// deUint32			mipLevel;
1824 				0u,								// deUint32			baseArrayLayer;
1825 				1u,								// deUint32			layerCount;
1826 			},										// VkImageSubresourceLayers	imageSubresource;
1827 			{ 0u, 0u, 0u },							// VkOffset3D				imageOffset;
1828 			{ RENDER_WIDTH, RENDER_HEIGHT, 1u }		// VkExtent3D				imageExtent;
1829 		};
1830 
1831 		vk.cmdCopyImageToBuffer(*cmdBuffer, *colorImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *readImageBuffer, 1u, &copyParams);
1832 	}
1833 
1834 	// Barrier to make the transfer write visible to host reads of the readback buffer
1835 	{
1836 		const vk::VkBufferMemoryBarrier copyFinishBarrier	=
1837 		{
1838 			vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,		// VkStructureType		sType;
1839 			DE_NULL,											// const void*			pNext;
1840 			VK_ACCESS_TRANSFER_WRITE_BIT,						// VkAccessFlags		srcAccessMask;
1841 			VK_ACCESS_HOST_READ_BIT,							// VkAccessFlags		dstAccessMask;
1842 			queueFamilyIndex,									// deUint32				srcQueueFamilyIndex;
1843 			queueFamilyIndex,									// deUint32				dstQueueFamilyIndex;
1844 			*readImageBuffer,									// VkBuffer				buffer;
1845 			0u,													// VkDeviceSize			offset;
1846 			(vk::VkDeviceSize)(RENDER_WIDTH * RENDER_HEIGHT * 4)// VkDeviceSize			size;
1847 		};
1848 
1849 		vk.cmdPipelineBarrier(*cmdBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0,
1850 							  0, (const vk::VkMemoryBarrier*)DE_NULL,
1851 							  1, &copyFinishBarrier,
1852 							  0, (const vk::VkImageMemoryBarrier*)DE_NULL);
1853 	}
1854 
1855 	VK_CHECK(vk.endCommandBuffer(*cmdBuffer));
1856 
1857 	// Submit the command buffer
1858 	{
1859 		const vk::VkFenceCreateInfo fenceParams =
1860 		{
1861 			vk::VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,	// VkStructureType		sType;
1862 			DE_NULL,									// const void*			pNext;
1863 			0u,											// VkFenceCreateFlags	flags;
1864 		};
1865 		const Unique<vk::VkFence> fence(vk::createFence(vk, device, &fenceParams));
1866 
1867 		const VkSubmitInfo			submitInfo	=
1868 		{
1869 			VK_STRUCTURE_TYPE_SUBMIT_INFO,	// VkStructureType			sType;
1870 			DE_NULL,						// const void*				pNext;
1871 			0u,								// deUint32					waitSemaphoreCount;
1872 			DE_NULL,						// const VkSemaphore*		pWaitSemaphores;
1873 			(const VkPipelineStageFlags*)DE_NULL,	// const VkPipelineStageFlags*	pWaitDstStageMask;
1874 			1u,								// deUint32					commandBufferCount;
1875 			&cmdBuffer.get(),				// const VkCommandBuffer*	pCommandBuffers;
1876 			0u,								// deUint32					signalSemaphoreCount;
1877 			DE_NULL							// const VkSemaphore*		pSignalSemaphores;
1878 		};
1879 
1880 		VK_CHECK(vk.queueSubmit(queue, 1u, &submitInfo, *fence));
1881 		VK_CHECK(vk.waitForFences(device, 1u, &fence.get(), DE_TRUE, ~0ull));
1882 	}
1883 
1884 	// Read back the results
1885 	tcu::Surface surface(RENDER_WIDTH, RENDER_HEIGHT);
1886 	{
1887 		const tcu::TextureFormat textureFormat(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNORM_INT8);
1888 		const tcu::ConstPixelBufferAccess imgAccess(textureFormat, RENDER_WIDTH, RENDER_HEIGHT, 1, readImageAlloc->getHostPtr());
1889 		const vk::VkDeviceSize bufferSize = RENDER_WIDTH * RENDER_HEIGHT * 4;
1890 		invalidateMappedMemoryRange(vk, device, readImageAlloc->getMemory(), readImageAlloc->getOffset(), bufferSize);
1891 
1892 		tcu::copy(surface.getAccess(), imgAccess);
1893 	}
1894 
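	// The generated shaders set their result to 1.0 only when every compared uniform matches its
	// reference value, so any mismatch shows up as non-white pixels.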
1895 	// Check if the result image is all white
1896 	tcu::RGBA white(tcu::RGBA::white());
1897 	int numFailedPixels = 0;
1898 
1899 	for (int y = 0; y < surface.getHeight(); y++)
1900 	{
1901 		for (int x = 0; x < surface.getWidth(); x++)
1902 		{
1903 			if (surface.getPixel(x, y) != white)
1904 				numFailedPixels += 1;
1905 		}
1906 	}
1907 
1908 	if (numFailedPixels > 0)
1909 	{
1910 		tcu::TestLog& log = m_context.getTestContext().getLog();
1911 		log << tcu::TestLog::Image("Image", "Rendered image", surface);
1912 		log << tcu::TestLog::Message << "Image comparison failed, got " << numFailedPixels << " non-white pixels" << tcu::TestLog::EndMessage;
1913 
1914 		for (size_t blockNdx = 0; blockNdx < m_layout.blocks.size(); blockNdx++)
1915 		{
1916 			const BlockLayoutEntry& block = m_layout.blocks[blockNdx];
1917 			log << tcu::TestLog::Message << "Block index: " << blockNdx << " infos: " << block << tcu::TestLog::EndMessage;
1918 		}
1919 
1920 		for (size_t uniformNdx = 0; uniformNdx < m_layout.uniforms.size(); uniformNdx++)
1921 		{
1922 			log << tcu::TestLog::Message << "Uniform index: " << uniformNdx << " infos: " << m_layout.uniforms[uniformNdx] << tcu::TestLog::EndMessage;
1923 		}
1924 
1925 		return tcu::TestStatus::fail("Detected non-white pixels");
1926 	}
1927 	else
1928 		return tcu::TestStatus::pass("Full white image ok");
1929 }
1930 
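// Creates a host-visible uniform buffer, copies the given reference data into it and keeps both the
// buffer and its allocation alive for the lifetime of the instance.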
1931 vk::VkDescriptorBufferInfo UniformBlockCaseInstance::addUniformData (deUint32 size, const void* dataPtr)
1932 {
1933 	const VkDevice					vkDevice			= m_context.getDevice();
1934 	const DeviceInterface&			vk					= m_context.getDeviceInterface();
1935 
1936 	Move<VkBuffer>					buffer	= createBuffer(m_context, size, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT);
1937 	de::MovePtr<Allocation>			alloc	= allocateAndBindMemory(m_context, *buffer, vk::MemoryRequirement::HostVisible);
1938 
1939 	deMemcpy(alloc->getHostPtr(), dataPtr, size);
1940 	flushMappedMemoryRange(vk, vkDevice, alloc->getMemory(), alloc->getOffset(), size);
1941 
1942 	const VkDescriptorBufferInfo			descriptor			=
1943 	{
1944 		*buffer,				// VkBuffer		buffer;
1945 		0u,						// VkDeviceSize	offset;
1946 		size,					// VkDeviceSize	range;
1947 
1948 	};
1949 
1950 	m_uniformBuffers.push_back(VkBufferSp(new vk::Unique<vk::VkBuffer>(buffer)));
1951 	m_uniformAllocs.push_back(AllocationSp(alloc.release()));
1952 
1953 	return descriptor;
1954 }
1955 
1956 vk::Move<VkRenderPass> UniformBlockCaseInstance::createRenderPass (vk::VkFormat format) const
1957 {
1958 	const VkDevice					vkDevice				= m_context.getDevice();
1959 	const DeviceInterface&			vk						= m_context.getDeviceInterface();
1960 
1961 	const VkAttachmentDescription	attachmentDescription	=
1962 	{
1963 		0u,												// VkAttachmentDescriptionFlags	flags;
1964 		format,											// VkFormat						format;
1965 		VK_SAMPLE_COUNT_1_BIT,							// VkSampleCountFlagBits		samples;
1966 		VK_ATTACHMENT_LOAD_OP_CLEAR,					// VkAttachmentLoadOp			loadOp;
1967 		VK_ATTACHMENT_STORE_OP_STORE,					// VkAttachmentStoreOp			storeOp;
1968 		VK_ATTACHMENT_LOAD_OP_DONT_CARE,				// VkAttachmentLoadOp			stencilLoadOp;
1969 		VK_ATTACHMENT_STORE_OP_DONT_CARE,				// VkAttachmentStoreOp			stencilStoreOp;
1970 		VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,		// VkImageLayout				initialLayout;
1971 		VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,		// VkImageLayout				finalLayout;
1972 	};
1973 
1974 	const VkAttachmentReference		attachmentReference		=
1975 	{
1976 		0u,											// deUint32			attachment;
1977 		VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL	// VkImageLayout	layout;
1978 	};
1979 
1980 
1981 	const VkSubpassDescription		subpassDescription		=
1982 	{
1983 		0u,												// VkSubpassDescriptionFlags	flags;
1984 		VK_PIPELINE_BIND_POINT_GRAPHICS,				// VkPipelineBindPoint			pipelineBindPoint;
1985 		0u,												// deUint32						inputAttachmentCount;
1986 		DE_NULL,										// const VkAttachmentReference*	pInputAttachments;
1987 		1u,												// deUint32						colorAttachmentCount;
1988 		&attachmentReference,							// const VkAttachmentReference*	pColorAttachments;
1989 		DE_NULL,										// const VkAttachmentReference*	pResolveAttachments;
1990 		DE_NULL,										// const VkAttachmentReference*	pDepthStencilAttachment;
1991 		0u,												// deUint32						preserveAttachmentCount;
1992 		DE_NULL											// const VkAttachmentReference*	pPreserveAttachments;
1993 	};
1994 
1995 	const VkRenderPassCreateInfo	renderPassParams		=
1996 	{
1997 		VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,		// VkStructureType					sType;
1998 		DE_NULL,										// const void*						pNext;
1999 		0u,												// VkRenderPassCreateFlags			flags;
2000 		1u,												// deUint32							attachmentCount;
2001 		&attachmentDescription,							// const VkAttachmentDescription*	pAttachments;
2002 		1u,												// deUint32							subpassCount;
2003 		&subpassDescription,							// const VkSubpassDescription*		pSubpasses;
2004 		0u,												// deUint32							dependencyCount;
2005 		DE_NULL											// const VkSubpassDependency*		pDependencies;
2006 	};
2007 
2008 	return vk::createRenderPass(vk, vkDevice, &renderPassParams);
2009 }
2010 
2011 vk::Move<VkFramebuffer> UniformBlockCaseInstance::createFramebuffer (vk::VkRenderPass renderPass, vk::VkImageView colorImageView) const
2012 {
2013 	const VkDevice					vkDevice			= m_context.getDevice();
2014 	const DeviceInterface&			vk					= m_context.getDeviceInterface();
2015 
2016 	const VkFramebufferCreateInfo	framebufferParams	=
2017 	{
2018 		VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,		// VkStructureType			sType;
2019 		DE_NULL,										// const void*				pNext;
2020 		0u,												// VkFramebufferCreateFlags	flags;
2021 		renderPass,										// VkRenderPass				renderPass;
2022 		1u,												// deUint32					attachmentCount;
2023 		&colorImageView,								// const VkImageView*		pAttachments;
2024 		RENDER_WIDTH,									// deUint32					width;
2025 		RENDER_HEIGHT,									// deUint32					height;
2026 		1u												// deUint32					layers;
2027 	};
2028 
2029 	return vk::createFramebuffer(vk, vkDevice, &framebufferParams);
2030 }
2031 
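// Builds one UNIFORM_BUFFER binding per distinct bindingNdx; blocks sharing a binding become
// consecutive array elements of that binding.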
2032 vk::Move<VkDescriptorSetLayout> UniformBlockCaseInstance::createDescriptorSetLayout (void) const
2033 {
2034 	int numBlocks = (int)m_layout.blocks.size();
2035 	int lastBindingNdx = -1;
2036 	std::vector<int> lengths;
2037 
2038 	for (int blockNdx = 0; blockNdx < numBlocks; blockNdx++)
2039 	{
2040 		const BlockLayoutEntry& block = m_layout.blocks[blockNdx];
2041 
2042 		if (block.bindingNdx == lastBindingNdx)
2043 		{
2044 			lengths.back()++;
2045 		}
2046 		else
2047 		{
2048 			lengths.push_back(1);
2049 			lastBindingNdx = block.bindingNdx;
2050 		}
2051 	}
2052 
2053 	vk::DescriptorSetLayoutBuilder layoutBuilder;
2054 	for (size_t i = 0; i < lengths.size(); i++)
2055 	{
2056 		if (lengths[i] > 0)
2057 		{
2058 			layoutBuilder.addArrayBinding(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, lengths[i], vk::VK_SHADER_STAGE_ALL);
2059 		}
2060 		else
2061 		{
2062 			layoutBuilder.addSingleBinding(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, vk::VK_SHADER_STAGE_ALL);
2063 		}
2064 	}
2065 
2066 	return layoutBuilder.build(m_context.getDeviceInterface(), m_context.getDevice());
2067 }
2068 
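// Descriptor pool with one UNIFORM_BUFFER descriptor per block, sized for a single descriptor set.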
2069 vk::Move<VkDescriptorPool> UniformBlockCaseInstance::createDescriptorPool (void) const
2070 {
2071 	vk::DescriptorPoolBuilder poolBuilder;
2072 
2073 	return poolBuilder
2074 		.addType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, (int)m_layout.blocks.size())
2075 		.build(m_context.getDeviceInterface(), m_context.getDevice(), VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
2076 }
2077 
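// Graphics pipeline for drawing the quad: a single vec4 position attribute, triangle-list topology,
// full-framebuffer viewport and scissor, no blending and no depth/stencil state.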
2078 vk::Move<VkPipeline> UniformBlockCaseInstance::createPipeline (vk::VkShaderModule vtxShaderModule, vk::VkShaderModule fragShaderModule, vk::VkPipelineLayout pipelineLayout, vk::VkRenderPass renderPass) const
2079 {
2080 	const VkDevice									vkDevice				= m_context.getDevice();
2081 	const DeviceInterface&							vk						= m_context.getDeviceInterface();
2082 
2083 	const VkVertexInputBindingDescription			vertexBinding			=
2084 	{
2085 		0,									// deUint32					binding;
2086 		(deUint32)sizeof(float) * 4,		// deUint32					stride;
2087 		VK_VERTEX_INPUT_RATE_VERTEX			// VkVertexInputRate		inputRate;
2088 	};
2089 
2090 	const VkVertexInputAttributeDescription			vertexAttribute			=
2091 	{
2092 		0,									// deUint32		location;
2093 		0,									// deUint32		binding;
2094 		VK_FORMAT_R32G32B32A32_SFLOAT,		// VkFormat		format;
2095 		0u									// deUint32		offset;
2096 	};
2097 
2098 	const VkPipelineShaderStageCreateInfo			shaderStages[2]	=
2099 	{
2100 		{
2101 			VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,	// VkStructureType					sType;
2102 			DE_NULL,												// const void*						pNext;
2103 			0u,														// VkPipelineShaderStageCreateFlags	flags;
2104 			VK_SHADER_STAGE_VERTEX_BIT,								// VkShaderStageFlagBits			stage;
2105 			vtxShaderModule,										// VkShaderModule					module;
2106 			"main",													// const char*						pName;
2107 			DE_NULL													// const VkSpecializationInfo*		pSpecializationInfo;
2108 		},
2109 		{
2110 			VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,	// VkStructureType					sType;
2111 			DE_NULL,												// const void*						pNext;
2112 			0u,														// VkPipelineShaderStageCreateFlags flags;
2113 			VK_SHADER_STAGE_FRAGMENT_BIT,							// VkShaderStageFlagBits			stage;
2114 			fragShaderModule,										// VkShaderModule					module;
2115 			"main",													// const char*						pName;
2116 			DE_NULL													// const VkSpecializationInfo*		pSpecializationInfo;
2117 		}
2118 	};
2119 
2120 	const VkPipelineVertexInputStateCreateInfo		vertexInputStateParams		=
2121 	{
2122 		VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,	// VkStructureType							sType;
2123 		DE_NULL,													// const void*								pNext;
2124 		0u,															// VkPipelineVertexInputStateCreateFlags	flags;
2125 		1u,															// deUint32									vertexBindingDescriptionCount;
2126 		&vertexBinding,												// const VkVertexInputBindingDescription*	pVertexBindingDescriptions;
2127 		1u,															// deUint32									vertexAttributeDescriptionCount;
2128 		&vertexAttribute,											// const VkVertexInputAttributeDescription*	pVertexAttributeDescriptions;
2129 	};
2130 
2131 	const VkPipelineInputAssemblyStateCreateInfo	inputAssemblyStateParams	=
2132 	{
2133 		VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,// VkStructureType							sType;
2134 		DE_NULL,													// const void*								pNext;
2135 		0u,															// VkPipelineInputAssemblyStateCreateFlags	flags;
2136 		VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST,						// VkPrimitiveTopology						topology;
2137 		false														// VkBool32									primitiveRestartEnable;
2138 	};
2139 
2140 	const VkViewport								viewport					=
2141 	{
2142 		0.0f,					// float	x;
2143 		0.0f,					// float	y;
2144 		(float)RENDER_WIDTH,	// float	width;
2145 		(float)RENDER_HEIGHT,	// float	height;
2146 		0.0f,					// float	minDepth;
2147 		1.0f					// float	maxDepth;
2148 	};
2149 
2150 
2151 	const VkRect2D									scissor						=
2152 	{
2153 		{
2154 			0u,				// deUint32	x;
2155 			0u,				// deUint32	y;
2156 		},						// VkOffset2D	offset;
2157 		{
2158 			RENDER_WIDTH,	// deUint32	width;
2159 			RENDER_HEIGHT,	// deUint32	height;
2160 		},						// VkExtent2D	extent;
2161 	};
2162 
2163 	const VkPipelineViewportStateCreateInfo			viewportStateParams			=
2164 	{
2165 		VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,		// VkStructureType						sType;
2166 		DE_NULL,													// const void*							pNext;
2167 		0u,															// VkPipelineViewportStateCreateFlags	flags;
2168 		1u,															// deUint32								viewportCount;
2169 		&viewport,													// const VkViewport*					pViewports;
2170 		1u,															// deUint32								scissorCount;
2171 		&scissor,													// const VkRect2D*						pScissors;
2172 	};
2173 
2174 	const VkPipelineRasterizationStateCreateInfo	rasterStateParams			=
2175 	{
2176 		VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO, // VkStructureType							sType;
2177 		DE_NULL,													// const void*								pNext;
2178 		0u,															// VkPipelineRasterizationStateCreateFlags	flags;
2179 		false,														// VkBool32									depthClampEnable;
2180 		false,														// VkBool32									rasterizerDiscardEnable;
2181 		VK_POLYGON_MODE_FILL,										// VkPolygonMode							polygonMode;
2182 		VK_CULL_MODE_NONE,											// VkCullModeFlags							cullMode;
2183 		VK_FRONT_FACE_COUNTER_CLOCKWISE,							// VkFrontFace								frontFace;
2184 		false,														// VkBool32									depthBiasEnable;
2185 		0.0f,														// float									depthBiasConstantFactor;
2186 		0.0f,														// float									depthBiasClamp;
2187 		0.0f,														// float									depthBiasSlopeFactor;
2188 		1.0f,														// float									lineWidth;
2189 	};
2190 
2191 	const VkPipelineMultisampleStateCreateInfo		multisampleStateParams =
2192 	{
2193 		VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,	// VkStructureType							sType;
2194 		DE_NULL,													// const void*								pNext;
2195 		0u,															// VkPipelineMultisampleStateCreateFlags	flags;
2196 		VK_SAMPLE_COUNT_1_BIT,										// VkSampleCountFlagBits					rasterizationSamples;
2197 		VK_FALSE,													// VkBool32									sampleShadingEnable;
2198 		0.0f,														// float									minSampleShading;
2199 		DE_NULL,													// const VkSampleMask*						pSampleMask;
2200 		VK_FALSE,													// VkBool32									alphaToCoverageEnable;
2201 		VK_FALSE													// VkBool32									alphaToOneEnable;
2202 	 };
2203 
2204 	const VkPipelineColorBlendAttachmentState		colorBlendAttachmentState	=
2205 	{
2206 		false,																		// VkBool32			blendEnable;
2207 		VK_BLEND_FACTOR_ONE,														// VkBlendFactor	srcColorBlendFactor;
2208 		VK_BLEND_FACTOR_ZERO,														// VkBlendFactor	dstColorBlendFactor;
2209 		VK_BLEND_OP_ADD,															// VkBlendOp		colorBlendOp;
2210 		VK_BLEND_FACTOR_ONE,														// VkBlendFactor	srcAlphaBlendFactor;
2211 		VK_BLEND_FACTOR_ZERO,														// VkBlendFactor	dstAlphaBlendFactor;
2212 		VK_BLEND_OP_ADD,															// VkBlendOp		alphaBlendOp;
2213 		VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT |						// VkColorComponentFlags	colorWriteMask;
2214 		VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT
2215 	};
2216 
2217 	const VkPipelineColorBlendStateCreateInfo		colorBlendStateParams		=
2218 	{
2219 		VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,	// VkStructureType								sType;
2220 		DE_NULL,													// const void*									pNext;
2221 		0u,															// VkPipelineColorBlendStateCreateFlags			flags;
2222 		false,														// VkBool32										logicOpEnable;
2223 		VK_LOGIC_OP_COPY,											// VkLogicOp									logicOp;
2224 		1u,															// deUint32										attachmentCount;
2225 		&colorBlendAttachmentState,									// const VkPipelineColorBlendAttachmentState*	pAttachments;
2226 		{ 0.0f, 0.0f, 0.0f, 0.0f },									// float										blendConstants[4];
2227 	};
2228 
2229 	const VkGraphicsPipelineCreateInfo				graphicsPipelineParams		=
2230 	{
2231 		VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,	// VkStructureType									sType;
2232 		DE_NULL,											// const void*										pNext;
2233 		0u,													// VkPipelineCreateFlags							flags;
2234 		2u,													// deUint32											stageCount;
2235 		shaderStages,										// const VkPipelineShaderStageCreateInfo*			pStages;
2236 		&vertexInputStateParams,							// const VkPipelineVertexInputStateCreateInfo*		pVertexInputState;
2237 		&inputAssemblyStateParams,							// const VkPipelineInputAssemblyStateCreateInfo*	pInputAssemblyState;
2238 		DE_NULL,											// const VkPipelineTessellationStateCreateInfo*		pTessellationState;
2239 		&viewportStateParams,								// const VkPipelineViewportStateCreateInfo*			pViewportState;
2240 		&rasterStateParams,									// const VkPipelineRasterizationStateCreateInfo*	pRasterizationState;
2241 		&multisampleStateParams,							// const VkPipelineMultisampleStateCreateInfo*		pMultisampleState;
2242 		DE_NULL,											// const VkPipelineDepthStencilStateCreateInfo*		pDepthStencilState;
2243 		&colorBlendStateParams,								// const VkPipelineColorBlendStateCreateInfo*		pColorBlendState;
2244 		(const VkPipelineDynamicStateCreateInfo*)DE_NULL,	// const VkPipelineDynamicStateCreateInfo*			pDynamicState;
2245 		pipelineLayout,										// VkPipelineLayout									layout;
2246 		renderPass,											// VkRenderPass										renderPass;
2247 		0u,													// deUint32											subpass;
2248 		0u,													// VkPipeline										basePipelineHandle;
2249 		0u													// deInt32											basePipelineIndex;
2250 	};
2251 
2252 	return vk::createGraphicsPipeline(vk, vkDevice, DE_NULL, &graphicsPipelineParams);
2253 }
2254 
2255 } // anonymous (utilities)
2256 
2257 // UniformBlockCase.
2258 
2259 UniformBlockCase::UniformBlockCase (tcu::TestContext& testCtx, const std::string& name, const std::string& description, BufferMode bufferMode, MatrixLoadFlags matrixLoadFlag, bool shuffleUniformMembers)
2260 	: TestCase					(testCtx, name, description)
2261 	, m_bufferMode				(bufferMode)
2262 	, m_matrixLoadFlag			(matrixLoadFlag)
2263 	, m_shuffleUniformMembers	(shuffleUniformMembers)
2264 {
2265 }
2266 
2267 UniformBlockCase::~UniformBlockCase (void)
2268 {
2269 }
2270 
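// Registers the GLSL sources generated in init(); the framework builds them into the "vert" and "frag"
// binaries retrieved from getBinaryCollection() in UniformBlockCaseInstance::iterate().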
2271 void UniformBlockCase::initPrograms (vk::SourceCollections& programCollection) const
2272 {
2273 	DE_ASSERT(!m_vertShaderSource.empty());
2274 	DE_ASSERT(!m_fragShaderSource.empty());
2275 
2276 	programCollection.glslSources.add("vert") << glu::VertexSource(m_vertShaderSource);
2277 	programCollection.glslSources.add("frag") << glu::FragmentSource(m_fragShaderSource);
2278 }
2279 
2280 TestInstance* UniformBlockCase::createInstance (Context& context) const
2281 {
2282 	return new UniformBlockCaseInstance(context, m_bufferMode, m_uniformLayout, m_blockPointers);
2283 }
2284 
2285 void UniformBlockCase::init (void)
2286 {
2287 	// Compute reference layout.
2288 	computeStd140Layout(m_uniformLayout, m_interface);
2289 
2290 	// Assign storage for reference values.
2291 	{
2292 		int totalSize = 0;
2293 		for (std::vector<BlockLayoutEntry>::const_iterator blockIter = m_uniformLayout.blocks.begin(); blockIter != m_uniformLayout.blocks.end(); blockIter++)
2294 			totalSize += blockIter->size;
2295 		m_data.resize(totalSize);
2296 
2297 		// Pointers for each block.
2298 		int curOffset = 0;
2299 		for (int blockNdx = 0; blockNdx < (int)m_uniformLayout.blocks.size(); blockNdx++)
2300 		{
2301 			m_blockPointers[blockNdx] = &m_data[0] + curOffset;
2302 			curOffset += m_uniformLayout.blocks[blockNdx].size;
2303 		}
2304 	}
2305 
2306 	// Generate values.
2307 	generateValues(m_uniformLayout, m_blockPointers, 1 /* seed */);
2308 
2309 	// Generate shaders.
2310 	m_vertShaderSource = generateVertexShader(m_interface, m_uniformLayout, m_blockPointers, m_matrixLoadFlag, m_shuffleUniformMembers);
2311 	m_fragShaderSource = generateFragmentShader(m_interface, m_uniformLayout, m_blockPointers, m_matrixLoadFlag, m_shuffleUniformMembers);
2312 }
2313 
2314 } // ubo
2315 } // vkt
2316