1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
4 *
5 * Copyright (c) 2015 The Khronos Group Inc.
6 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
7 * Copyright (c) 2016 The Android Open Source Project
8 *
9 * Licensed under the Apache License, Version 2.0 (the "License");
10 * you may not use this file except in compliance with the License.
11 * You may obtain a copy of the License at
12 *
13 * http://www.apache.org/licenses/LICENSE-2.0
14 *
15 * Unless required by applicable law or agreed to in writing, software
16 * distributed under the License is distributed on an "AS IS" BASIS,
17 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
18 * See the License for the specific language governing permissions and
19 * limitations under the License.
20 *
21 *//*!
22 * \file
23 * \brief Uniform block case.
24 *//*--------------------------------------------------------------------*/
25
26 #include "vktUniformBlockCase.hpp"
27
28 #include "vkPrograms.hpp"
29
30 #include "gluVarType.hpp"
31 #include "tcuTestLog.hpp"
32 #include "tcuSurface.hpp"
33 #include "deRandom.hpp"
34 #include "deStringUtil.hpp"
35
36 #include "tcuTextureUtil.hpp"
37 #include "deSharedPtr.hpp"
38
39 #include "vkMemUtil.hpp"
40 #include "vkQueryUtil.hpp"
41 #include "vkTypeUtil.hpp"
42 #include "vkRef.hpp"
43 #include "vkRefUtil.hpp"
44 #include "vkBuilderUtil.hpp"
45
46 #include <map>
47 #include <set>
48
49 namespace vkt
50 {
51 namespace ubo
52 {
53
54 using namespace vk;
55
56 // VarType implementation.
57
// Constructs an empty VarType; the type stays TYPE_LAST until a real value is assigned.
VarType::VarType (void)
	: m_type	(TYPE_LAST)
	, m_flags	(0)
{
}
63
// Copy constructor; initializes to an empty state and delegates the deep copy to operator=.
VarType::VarType (const VarType& other)
	: m_type	(TYPE_LAST)
	, m_flags	(0)
{
	*this = other;
}
70
// Constructs a basic (scalar/vector/matrix) type with the given precision/layout flags.
VarType::VarType (glu::DataType basicType, deUint32 flags)
	: m_type	(TYPE_BASIC)
	, m_flags	(flags)
{
	m_data.basicType = basicType;
}
77
// Constructs an array type; takes ownership of a heap-allocated deep copy of the element type.
VarType::VarType (const VarType& elementType, int arraySize)
	: m_type	(TYPE_ARRAY)
	, m_flags	(0)
{
	m_data.array.size = arraySize;
	m_data.array.elementType = new VarType(elementType);
}
85
// Constructs a struct type; the StructType is referenced, not owned
// (it is owned by the ShaderInterface that allocated it).
VarType::VarType (const StructType* structPtr)
	: m_type	(TYPE_STRUCT)
	, m_flags	(0)
{
	m_data.structPtr = structPtr;
}
92
// Destructor; only array types own heap storage (their element type).
VarType::~VarType (void)
{
	if (m_type == TYPE_ARRAY)
		delete m_data.array.elementType;
}
98
// Deep-copy assignment; array element types are cloned so both objects own
// independent storage.
VarType& VarType::operator= (const VarType& other)
{
	// Self-assignment would otherwise delete the storage we are about to copy.
	if (this != &other)
	{
		// Release the owned element type before overwriting.
		if (m_type == TYPE_ARRAY)
			delete m_data.array.elementType;

		m_type	= other.m_type;
		m_flags	= other.m_flags;
		m_data	= Data();

		if (m_type != TYPE_ARRAY)
			m_data = other.m_data;
		else
		{
			// Clone the element type so the array contents are independently owned.
			m_data.array.size			= other.m_data.array.size;
			m_data.array.elementType	= new VarType(*other.m_data.array.elementType);
		}
	}

	return *this;
}
121
122 // StructType implementation.
123
// Appends a member to the struct; declaration order defines the layout order.
void StructType::addMember (const std::string& name, const VarType& type, deUint32 flags)
{
	m_members.push_back(StructMember(name, type, flags));
}
128
129 // Uniform implementation.
130
// Constructs a uniform declaration with its name, type and layout/unused-hint flags.
Uniform::Uniform (const std::string& name, const VarType& type, deUint32 flags)
	: m_name	(name)
	, m_type	(type)
	, m_flags	(flags)
{
}
137
138 // UniformBlock implementation.
139
// Constructs an empty uniform block; arraySize 0 means the block is not arrayed.
UniformBlock::UniformBlock (const std::string& blockName)
	: m_blockName	(blockName)
	, m_arraySize	(0)
	, m_flags		(0)
{
}
146
operator <<(std::ostream & stream,const BlockLayoutEntry & entry)147 std::ostream& operator<< (std::ostream& stream, const BlockLayoutEntry& entry)
148 {
149 stream << entry.name << " { name = " << entry.name
150 << ", size = " << entry.size
151 << ", activeUniformIndices = [";
152
153 for (std::vector<int>::const_iterator i = entry.activeUniformIndices.begin(); i != entry.activeUniformIndices.end(); i++)
154 {
155 if (i != entry.activeUniformIndices.begin())
156 stream << ", ";
157 stream << *i;
158 }
159
160 stream << "] }";
161 return stream;
162 }
163
operator <<(std::ostream & stream,const UniformLayoutEntry & entry)164 std::ostream& operator<< (std::ostream& stream, const UniformLayoutEntry& entry)
165 {
166 stream << entry.name << " { type = " << glu::getDataTypeName(entry.type)
167 << ", size = " << entry.size
168 << ", blockNdx = " << entry.blockLayoutNdx
169 << ", offset = " << entry.offset
170 << ", arrayStride = " << entry.arrayStride
171 << ", matrixStride = " << entry.matrixStride
172 << ", isRowMajor = " << (entry.isRowMajor ? "true" : "false")
173 << " }";
174 return stream;
175 }
176
getUniformLayoutIndex(int blockNdx,const std::string & name) const177 int UniformLayout::getUniformLayoutIndex (int blockNdx, const std::string& name) const
178 {
179 for (int ndx = 0; ndx < (int)uniforms.size(); ndx++)
180 {
181 if (blocks[uniforms[ndx].blockLayoutNdx].blockDeclarationNdx == blockNdx &&
182 uniforms[ndx].name == name)
183 return ndx;
184 }
185
186 return -1;
187 }
188
getBlockLayoutIndex(int blockNdx,int instanceNdx) const189 int UniformLayout::getBlockLayoutIndex (int blockNdx, int instanceNdx) const
190 {
191 for (int ndx = 0; ndx < (int)blocks.size(); ndx++)
192 {
193 if (blocks[ndx].blockDeclarationNdx == blockNdx &&
194 blocks[ndx].instanceNdx == instanceNdx)
195 return ndx;
196 }
197
198 return -1;
199 }
200
201 // ShaderInterface implementation.
202
// Constructs an empty shader interface; structs and blocks are added via allocStruct/allocBlock.
ShaderInterface::ShaderInterface (void)
{
}
206
// Destructor; owned structs and blocks are released by their shared pointers.
ShaderInterface::~ShaderInterface (void)
{
}
210
// Allocates a new struct type owned by this interface and returns a reference to it.
// The reference stays valid for the lifetime of the interface.
StructType& ShaderInterface::allocStruct (const std::string& name)
{
	m_structs.push_back(StructTypeSP(new StructType(name)));
	return *m_structs.back();
}
216
217 struct StructNameEquals
218 {
219 std::string name;
220
StructNameEqualsvkt::ubo::StructNameEquals221 StructNameEquals (const std::string& name_) : name(name_) {}
222
operator ()vkt::ubo::StructNameEquals223 bool operator() (const StructTypeSP type) const
224 {
225 return type->hasTypeName() && name == type->getTypeName();
226 }
227 };
228
getNamedStructs(std::vector<const StructType * > & structs) const229 void ShaderInterface::getNamedStructs (std::vector<const StructType*>& structs) const
230 {
231 for (std::vector<StructTypeSP>::const_iterator i = m_structs.begin(); i != m_structs.end(); i++)
232 {
233 if ((*i)->hasTypeName())
234 structs.push_back((*i).get());
235 }
236 }
237
// Allocates a new uniform block owned by this interface and returns a reference to it.
// The reference stays valid for the lifetime of the interface.
UniformBlock& ShaderInterface::allocBlock (const std::string& name)
{
	m_uniformBlocks.push_back(UniformBlockSP(new UniformBlock(name)));
	return *m_uniformBlocks.back();
}
243
244 namespace // Utilities
245 {
246
// Stream-formatting helper that prints precision qualifier flags as GLSL keywords.
struct PrecisionFlagsFmt
{
	deUint32 flags;
	PrecisionFlagsFmt (deUint32 flags_) : flags(flags_) {}
};
252
operator <<(std::ostream & str,const PrecisionFlagsFmt & fmt)253 std::ostream& operator<< (std::ostream& str, const PrecisionFlagsFmt& fmt)
254 {
255 // Precision.
256 DE_ASSERT(dePop32(fmt.flags & (PRECISION_LOW|PRECISION_MEDIUM|PRECISION_HIGH)) <= 1);
257 str << (fmt.flags & PRECISION_LOW ? "lowp" :
258 fmt.flags & PRECISION_MEDIUM ? "mediump" :
259 fmt.flags & PRECISION_HIGH ? "highp" : "");
260 return str;
261 }
262
// Stream-formatting helper that prints layout qualifier flags as a comma-separated token list.
struct LayoutFlagsFmt
{
	deUint32 flags;
	LayoutFlagsFmt (deUint32 flags_) : flags(flags_) {}
};
268
operator <<(std::ostream & str,const LayoutFlagsFmt & fmt)269 std::ostream& operator<< (std::ostream& str, const LayoutFlagsFmt& fmt)
270 {
271 static const struct
272 {
273 deUint32 bit;
274 const char* token;
275 } bitDesc[] =
276 {
277 { LAYOUT_STD140, "std140" },
278 { LAYOUT_ROW_MAJOR, "row_major" },
279 { LAYOUT_COLUMN_MAJOR, "column_major" }
280 };
281
282 deUint32 remBits = fmt.flags;
283 for (int descNdx = 0; descNdx < DE_LENGTH_OF_ARRAY(bitDesc); descNdx++)
284 {
285 if (remBits & bitDesc[descNdx].bit)
286 {
287 if (remBits != fmt.flags)
288 str << ", ";
289 str << bitDesc[descNdx].token;
290 remBits &= ~bitDesc[descNdx].bit;
291 }
292 }
293 DE_ASSERT(remBits == 0);
294 return str;
295 }
296
297 // Layout computation.
298
// Size in bytes of a basic type: scalar count times 4 (all std140 scalars are 32-bit).
int getDataTypeByteSize (glu::DataType type)
{
	return glu::getDataTypeScalarSize(type)*(int)sizeof(deUint32);
}
303
// Returns the std140 base alignment in bytes of a scalar or vector type
// (rules 1-3): scalars align to 4, vec2 to 8, vec3 and vec4 to 16.
// Matrices are handled by the caller; any other type asserts.
int getDataTypeByteAlignment (glu::DataType type)
{
	switch (type)
	{
		case glu::TYPE_FLOAT:
		case glu::TYPE_INT:
		case glu::TYPE_UINT:
		case glu::TYPE_BOOL:		return 1*(int)sizeof(deUint32);

		case glu::TYPE_FLOAT_VEC2:
		case glu::TYPE_INT_VEC2:
		case glu::TYPE_UINT_VEC2:
		case glu::TYPE_BOOL_VEC2:	return 2*(int)sizeof(deUint32);

		case glu::TYPE_FLOAT_VEC3:
		case glu::TYPE_INT_VEC3:
		case glu::TYPE_UINT_VEC3:
		case glu::TYPE_BOOL_VEC3:	// Fall-through to vec4

		case glu::TYPE_FLOAT_VEC4:
		case glu::TYPE_INT_VEC4:
		case glu::TYPE_UINT_VEC4:
		case glu::TYPE_BOOL_VEC4:	return 4*(int)sizeof(deUint32);

		default:
			DE_ASSERT(false);
			return 0;
	}
}
333
// Queries the physical device's minUniformBufferOffsetAlignment limit,
// narrowed to deInt32 (the assert guards the narrowing cast).
deInt32 getminUniformBufferOffsetAlignment (Context &ctx)
{
	VkPhysicalDeviceProperties properties;
	ctx.getInstanceInterface().getPhysicalDeviceProperties(ctx.getPhysicalDevice(), &properties);
	VkDeviceSize align = properties.limits.minUniformBufferOffsetAlignment;
	DE_ASSERT(align == (VkDeviceSize)(deInt32)align);
	return (deInt32)align;
}
342
getDataTypeArrayStride(glu::DataType type)343 int getDataTypeArrayStride (glu::DataType type)
344 {
345 DE_ASSERT(!glu::isDataTypeMatrix(type));
346
347 const int baseStride = getDataTypeByteSize(type);
348 const int vec4Alignment = (int)sizeof(deUint32)*4;
349
350 DE_ASSERT(baseStride <= vec4Alignment);
351 return de::max(baseStride, vec4Alignment); // Really? See rule 4.
352 }
353
// Rounds 'a' up to the next multiple of 'b' (identity when already aligned).
static inline int deRoundUp32 (int a, int b)
{
	const int rem = a % b;
	return rem == 0 ? a : a + b - rem;
}
359
// Computes the std140 base alignment of a type per the layout rules:
// scalars/vectors per rules 1-3, matrices as arrays of their minor-order
// vectors (rules 5 & 7), arrays rounded up to vec4 alignment (rule 4),
// and structs aligned to the largest member alignment rounded up to vec4 (rule 9).
int computeStd140BaseAlignment (const VarType& type)
{
	const int vec4Alignment = (int)sizeof(deUint32)*4;

	if (type.isBasicType())
	{
		glu::DataType basicType = type.getBasicType();

		if (glu::isDataTypeMatrix(basicType))
		{
			// A matrix aligns like an array of its column (or row, if row_major) vectors.
			bool isRowMajor = !!(type.getFlags() & LAYOUT_ROW_MAJOR);
			int vecSize = isRowMajor ? glu::getDataTypeMatrixNumColumns(basicType)
									 : glu::getDataTypeMatrixNumRows(basicType);

			return getDataTypeArrayStride(glu::getDataTypeFloatVec(vecSize));
		}
		else
			return getDataTypeByteAlignment(basicType);
	}
	else if (type.isArrayType())
	{
		int elemAlignment = computeStd140BaseAlignment(type.getElementType());

		// Round up to alignment of vec4
		return deRoundUp32(elemAlignment, vec4Alignment);
	}
	else
	{
		DE_ASSERT(type.isStructType());

		int maxBaseAlignment = 0;

		for (StructType::ConstIterator memberIter = type.getStruct().begin(); memberIter != type.getStruct().end(); memberIter++)
			maxBaseAlignment = de::max(maxBaseAlignment, computeStd140BaseAlignment(memberIter->getType()));

		return deRoundUp32(maxBaseAlignment, vec4Alignment);
	}
}
398
mergeLayoutFlags(deUint32 prevFlags,deUint32 newFlags)399 inline deUint32 mergeLayoutFlags (deUint32 prevFlags, deUint32 newFlags)
400 {
401 const deUint32 packingMask = LAYOUT_STD140;
402 const deUint32 matrixMask = LAYOUT_ROW_MAJOR|LAYOUT_COLUMN_MAJOR;
403
404 deUint32 mergedFlags = 0;
405
406 mergedFlags |= ((newFlags & packingMask) ? newFlags : prevFlags) & packingMask;
407 mergedFlags |= ((newFlags & matrixMask) ? newFlags : prevFlags) & matrixMask;
408
409 return mergedFlags;
410 }
411
// Computes the std140 layout of a single value, appending UniformLayoutEntry
// records to 'layout' and advancing 'curOffset' past the value. 'curPrefix' is
// the full dotted/indexed name of the value and 'curBlockNdx' the index of the
// owning block layout entry. Structs and arrays of aggregates recurse.
void computeStd140Layout (UniformLayout& layout, int& curOffset, int curBlockNdx, const std::string& curPrefix, const VarType& type, deUint32 layoutFlags)
{
	int baseAlignment = computeStd140BaseAlignment(type);

	// Every value starts on its base alignment.
	curOffset = deAlign32(curOffset, baseAlignment);

	if (type.isBasicType())
	{
		glu::DataType basicType = type.getBasicType();
		UniformLayoutEntry entry;

		entry.name = curPrefix;
		entry.type = basicType;
		entry.size = 1;
		entry.arrayStride = 0;
		entry.matrixStride = 0;
		entry.blockLayoutNdx= curBlockNdx;

		if (glu::isDataTypeMatrix(basicType))
		{
			// Array of vectors as specified in rules 5 & 7.
			bool isRowMajor = !!(layoutFlags & LAYOUT_ROW_MAJOR);
			int vecSize = isRowMajor ? glu::getDataTypeMatrixNumColumns(basicType)
									 : glu::getDataTypeMatrixNumRows(basicType);
			int numVecs = isRowMajor ? glu::getDataTypeMatrixNumRows(basicType)
									 : glu::getDataTypeMatrixNumColumns(basicType);
			int stride = getDataTypeArrayStride(glu::getDataTypeFloatVec(vecSize));

			entry.offset = curOffset;
			entry.matrixStride = stride;
			entry.isRowMajor = isRowMajor;

			curOffset += numVecs*stride;
		}
		else
		{
			// Scalar or vector.
			entry.offset = curOffset;

			curOffset += getDataTypeByteSize(basicType);
		}

		layout.uniforms.push_back(entry);
	}
	else if (type.isArrayType())
	{
		const VarType& elemType = type.getElementType();

		if (elemType.isBasicType() && !glu::isDataTypeMatrix(elemType.getBasicType()))
		{
			// Array of scalars or vectors: one entry covers all elements (rule 4 stride).
			glu::DataType elemBasicType = elemType.getBasicType();
			UniformLayoutEntry entry;
			int stride = getDataTypeArrayStride(elemBasicType);

			entry.name = curPrefix + "[0]"; // Array uniforms are always postfixed with [0]
			entry.type = elemBasicType;
			entry.blockLayoutNdx= curBlockNdx;
			entry.offset = curOffset;
			entry.size = type.getArraySize();
			entry.arrayStride = stride;
			entry.matrixStride = 0;

			curOffset += stride*type.getArraySize();

			layout.uniforms.push_back(entry);
		}
		else if (elemType.isBasicType() && glu::isDataTypeMatrix(elemType.getBasicType()))
		{
			// Array of matrices: each element spans numVecs vectors of 'stride' bytes.
			glu::DataType elemBasicType = elemType.getBasicType();
			bool isRowMajor = !!(layoutFlags & LAYOUT_ROW_MAJOR);
			int vecSize = isRowMajor ? glu::getDataTypeMatrixNumColumns(elemBasicType)
									 : glu::getDataTypeMatrixNumRows(elemBasicType);
			int numVecs = isRowMajor ? glu::getDataTypeMatrixNumRows(elemBasicType)
									 : glu::getDataTypeMatrixNumColumns(elemBasicType);
			int stride = getDataTypeArrayStride(glu::getDataTypeFloatVec(vecSize));
			UniformLayoutEntry entry;

			entry.name = curPrefix + "[0]"; // Array uniforms are always postfixed with [0]
			entry.type = elemBasicType;
			entry.blockLayoutNdx= curBlockNdx;
			entry.offset = curOffset;
			entry.size = type.getArraySize();
			entry.arrayStride = stride*numVecs;
			entry.matrixStride = stride;
			entry.isRowMajor = isRowMajor;

			curOffset += numVecs*type.getArraySize()*stride;

			layout.uniforms.push_back(entry);
		}
		else
		{
			// Array of structs or nested arrays: recurse per element with an indexed name.
			DE_ASSERT(elemType.isStructType() || elemType.isArrayType());

			for (int elemNdx = 0; elemNdx < type.getArraySize(); elemNdx++)
				computeStd140Layout(layout, curOffset, curBlockNdx, curPrefix + "[" + de::toString(elemNdx) + "]", type.getElementType(), layoutFlags);
		}
	}
	else
	{
		DE_ASSERT(type.isStructType());

		for (StructType::ConstIterator memberIter = type.getStruct().begin(); memberIter != type.getStruct().end(); memberIter++)
			computeStd140Layout(layout, curOffset, curBlockNdx, curPrefix + "." + memberIter->getName(), memberIter->getType(), layoutFlags);

		// Struct size is padded to its base alignment (rule 9).
		curOffset = deAlign32(curOffset, baseAlignment);
	}
}
522
// Computes the std140 layout of every block in the interface: members of each
// declared block are laid out once to obtain offsets and the total block size,
// then one BlockLayoutEntry is created per instance (arrayed block instances
// share the same member layout and uniform entries).
void computeStd140Layout (UniformLayout& layout, const ShaderInterface& interface)
{
	int numUniformBlocks = interface.getNumUniformBlocks();

	for (int blockNdx = 0; blockNdx < numUniformBlocks; blockNdx++)
	{
		const UniformBlock& block = interface.getUniformBlock(blockNdx);
		bool hasInstanceName = block.hasInstanceName();
		// Named instances prefix member names with the block name (GLSL API naming).
		std::string blockPrefix = hasInstanceName ? (block.getBlockName() + ".") : "";
		int curOffset = 0;
		int activeBlockNdx = (int)layout.blocks.size();
		int firstUniformNdx = (int)layout.uniforms.size();

		for (UniformBlock::ConstIterator uniformIter = block.begin(); uniformIter != block.end(); uniformIter++)
		{
			const Uniform& uniform = *uniformIter;
			computeStd140Layout(layout, curOffset, activeBlockNdx, blockPrefix + uniform.getName(), uniform.getType(), mergeLayoutFlags(block.getFlags(), uniform.getFlags()));
		}

		int uniformIndicesEnd = (int)layout.uniforms.size();
		int blockSize = curOffset;
		int numInstances = block.isArray() ? block.getArraySize() : 1;

		// Create block layout entries for each instance.
		for (int instanceNdx = 0; instanceNdx < numInstances; instanceNdx++)
		{
			// Allocate entry for instance.
			layout.blocks.push_back(BlockLayoutEntry());
			BlockLayoutEntry& blockEntry = layout.blocks.back();

			blockEntry.name = block.getBlockName();
			blockEntry.size = blockSize;
			blockEntry.bindingNdx = blockNdx;
			blockEntry.blockDeclarationNdx = blockNdx;
			blockEntry.instanceNdx = instanceNdx;

			// Compute active uniform set for block.
			for (int uniformNdx = firstUniformNdx; uniformNdx < uniformIndicesEnd; uniformNdx++)
				blockEntry.activeUniformIndices.push_back(uniformNdx);

			if (block.isArray())
				blockEntry.name += "[" + de::toString(instanceNdx) + "]";
		}
	}
}
568
569 // Value generator.
570
// Fills the memory for one uniform (all array elements, matrix vectors and
// components) starting at basePtr+entry.offset with random values. Small
// integer ranges keep float comparisons in the shader exact.
void generateValue (const UniformLayoutEntry& entry, void* basePtr, de::Random& rnd)
{
	glu::DataType scalarType = glu::getDataTypeScalarType(entry.type);
	int scalarSize = glu::getDataTypeScalarSize(entry.type);
	bool isMatrix = glu::isDataTypeMatrix(entry.type);
	// For matrices, values are written one minor-order vector at a time.
	int numVecs = isMatrix ? (entry.isRowMajor ? glu::getDataTypeMatrixNumRows(entry.type) : glu::getDataTypeMatrixNumColumns(entry.type)) : 1;
	int vecSize = scalarSize / numVecs;
	bool isArray = entry.size > 1;
	const int compSize = sizeof(deUint32);

	DE_ASSERT(scalarSize%numVecs == 0);

	for (int elemNdx = 0; elemNdx < entry.size; elemNdx++)
	{
		// Array elements are separated by the array stride.
		deUint8* elemPtr = (deUint8*)basePtr + entry.offset + (isArray ? elemNdx*entry.arrayStride : 0);

		for (int vecNdx = 0; vecNdx < numVecs; vecNdx++)
		{
			// Matrix vectors are separated by the matrix stride.
			deUint8* vecPtr = elemPtr + (isMatrix ? vecNdx*entry.matrixStride : 0);

			for (int compNdx = 0; compNdx < vecSize; compNdx++)
			{
				deUint8* compPtr = vecPtr + compSize*compNdx;

				switch (scalarType)
				{
					case glu::TYPE_FLOAT: *((float*)compPtr) = (float)rnd.getInt(-9, 9); break;
					case glu::TYPE_INT: *((int*)compPtr) = rnd.getInt(-9, 9); break;
					case glu::TYPE_UINT: *((deUint32*)compPtr) = (deUint32)rnd.getInt(0, 9); break;
					// \note Random bit pattern is used for true values. Spec states that all non-zero values are
					// interpreted as true but some implementations fail this.
					case glu::TYPE_BOOL: *((deUint32*)compPtr) = rnd.getBool() ? rnd.getUint32()|1u : 0u; break;
					default:
						DE_ASSERT(false);
				}
			}
		}
	}
}
610
generateValues(const UniformLayout & layout,const std::map<int,void * > & blockPointers,deUint32 seed)611 void generateValues (const UniformLayout& layout, const std::map<int, void*>& blockPointers, deUint32 seed)
612 {
613 de::Random rnd (seed);
614 int numBlocks = (int)layout.blocks.size();
615
616 for (int blockNdx = 0; blockNdx < numBlocks; blockNdx++)
617 {
618 void* basePtr = blockPointers.find(blockNdx)->second;
619 int numEntries = (int)layout.blocks[blockNdx].activeUniformIndices.size();
620
621 for (int entryNdx = 0; entryNdx < numEntries; entryNdx++)
622 {
623 const UniformLayoutEntry& entry = layout.uniforms[layout.blocks[blockNdx].activeUniformIndices[entryNdx]];
624 generateValue(entry, basePtr, rnd);
625 }
626 }
627 }
628
629 // Shader generator.
630
// Returns GLSL source for the comparison helper of 'type' (returns 1.0 on match,
// 0.0 otherwise). Float comparison uses a 0.05 tolerance; vector/matrix helpers
// call the scalar/vector helpers, which must be emitted first (see generateCompareFuncs).
const char* getCompareFuncForType (glu::DataType type)
{
	switch (type)
	{
		case glu::TYPE_FLOAT: return "mediump float compare_float (highp float a, highp float b) { return abs(a - b) < 0.05 ? 1.0 : 0.0; }\n";
		case glu::TYPE_FLOAT_VEC2: return "mediump float compare_vec2 (highp vec2 a, highp vec2 b) { return compare_float(a.x, b.x)*compare_float(a.y, b.y); }\n";
		case glu::TYPE_FLOAT_VEC3: return "mediump float compare_vec3 (highp vec3 a, highp vec3 b) { return compare_float(a.x, b.x)*compare_float(a.y, b.y)*compare_float(a.z, b.z); }\n";
		case glu::TYPE_FLOAT_VEC4: return "mediump float compare_vec4 (highp vec4 a, highp vec4 b) { return compare_float(a.x, b.x)*compare_float(a.y, b.y)*compare_float(a.z, b.z)*compare_float(a.w, b.w); }\n";
		case glu::TYPE_FLOAT_MAT2: return "mediump float compare_mat2 (highp mat2 a, highp mat2 b) { return compare_vec2(a[0], b[0])*compare_vec2(a[1], b[1]); }\n";
		case glu::TYPE_FLOAT_MAT2X3: return "mediump float compare_mat2x3 (highp mat2x3 a, highp mat2x3 b){ return compare_vec3(a[0], b[0])*compare_vec3(a[1], b[1]); }\n";
		case glu::TYPE_FLOAT_MAT2X4: return "mediump float compare_mat2x4 (highp mat2x4 a, highp mat2x4 b){ return compare_vec4(a[0], b[0])*compare_vec4(a[1], b[1]); }\n";
		case glu::TYPE_FLOAT_MAT3X2: return "mediump float compare_mat3x2 (highp mat3x2 a, highp mat3x2 b){ return compare_vec2(a[0], b[0])*compare_vec2(a[1], b[1])*compare_vec2(a[2], b[2]); }\n";
		case glu::TYPE_FLOAT_MAT3: return "mediump float compare_mat3 (highp mat3 a, highp mat3 b) { return compare_vec3(a[0], b[0])*compare_vec3(a[1], b[1])*compare_vec3(a[2], b[2]); }\n";
		case glu::TYPE_FLOAT_MAT3X4: return "mediump float compare_mat3x4 (highp mat3x4 a, highp mat3x4 b){ return compare_vec4(a[0], b[0])*compare_vec4(a[1], b[1])*compare_vec4(a[2], b[2]); }\n";
		case glu::TYPE_FLOAT_MAT4X2: return "mediump float compare_mat4x2 (highp mat4x2 a, highp mat4x2 b){ return compare_vec2(a[0], b[0])*compare_vec2(a[1], b[1])*compare_vec2(a[2], b[2])*compare_vec2(a[3], b[3]); }\n";
		case glu::TYPE_FLOAT_MAT4X3: return "mediump float compare_mat4x3 (highp mat4x3 a, highp mat4x3 b){ return compare_vec3(a[0], b[0])*compare_vec3(a[1], b[1])*compare_vec3(a[2], b[2])*compare_vec3(a[3], b[3]); }\n";
		case glu::TYPE_FLOAT_MAT4: return "mediump float compare_mat4 (highp mat4 a, highp mat4 b) { return compare_vec4(a[0], b[0])*compare_vec4(a[1], b[1])*compare_vec4(a[2], b[2])*compare_vec4(a[3], b[3]); }\n";
		case glu::TYPE_INT: return "mediump float compare_int (highp int a, highp int b) { return a == b ? 1.0 : 0.0; }\n";
		case glu::TYPE_INT_VEC2: return "mediump float compare_ivec2 (highp ivec2 a, highp ivec2 b) { return a == b ? 1.0 : 0.0; }\n";
		case glu::TYPE_INT_VEC3: return "mediump float compare_ivec3 (highp ivec3 a, highp ivec3 b) { return a == b ? 1.0 : 0.0; }\n";
		case glu::TYPE_INT_VEC4: return "mediump float compare_ivec4 (highp ivec4 a, highp ivec4 b) { return a == b ? 1.0 : 0.0; }\n";
		case glu::TYPE_UINT: return "mediump float compare_uint (highp uint a, highp uint b) { return a == b ? 1.0 : 0.0; }\n";
		case glu::TYPE_UINT_VEC2: return "mediump float compare_uvec2 (highp uvec2 a, highp uvec2 b) { return a == b ? 1.0 : 0.0; }\n";
		case glu::TYPE_UINT_VEC3: return "mediump float compare_uvec3 (highp uvec3 a, highp uvec3 b) { return a == b ? 1.0 : 0.0; }\n";
		case glu::TYPE_UINT_VEC4: return "mediump float compare_uvec4 (highp uvec4 a, highp uvec4 b) { return a == b ? 1.0 : 0.0; }\n";
		case glu::TYPE_BOOL: return "mediump float compare_bool (bool a, bool b) { return a == b ? 1.0 : 0.0; }\n";
		case glu::TYPE_BOOL_VEC2: return "mediump float compare_bvec2 (bvec2 a, bvec2 b) { return a == b ? 1.0 : 0.0; }\n";
		case glu::TYPE_BOOL_VEC3: return "mediump float compare_bvec3 (bvec3 a, bvec3 b) { return a == b ? 1.0 : 0.0; }\n";
		case glu::TYPE_BOOL_VEC4: return "mediump float compare_bvec4 (bvec4 a, bvec4 b) { return a == b ? 1.0 : 0.0; }\n";
		default:
			DE_ASSERT(false);
			return DE_NULL;
	}
}
665
getCompareDependencies(std::set<glu::DataType> & compareFuncs,glu::DataType basicType)666 void getCompareDependencies (std::set<glu::DataType>& compareFuncs, glu::DataType basicType)
667 {
668 switch (basicType)
669 {
670 case glu::TYPE_FLOAT_VEC2:
671 case glu::TYPE_FLOAT_VEC3:
672 case glu::TYPE_FLOAT_VEC4:
673 compareFuncs.insert(glu::TYPE_FLOAT);
674 compareFuncs.insert(basicType);
675 break;
676
677 case glu::TYPE_FLOAT_MAT2:
678 case glu::TYPE_FLOAT_MAT2X3:
679 case glu::TYPE_FLOAT_MAT2X4:
680 case glu::TYPE_FLOAT_MAT3X2:
681 case glu::TYPE_FLOAT_MAT3:
682 case glu::TYPE_FLOAT_MAT3X4:
683 case glu::TYPE_FLOAT_MAT4X2:
684 case glu::TYPE_FLOAT_MAT4X3:
685 case glu::TYPE_FLOAT_MAT4:
686 compareFuncs.insert(glu::TYPE_FLOAT);
687 compareFuncs.insert(glu::getDataTypeFloatVec(glu::getDataTypeMatrixNumRows(basicType)));
688 compareFuncs.insert(basicType);
689 break;
690
691 default:
692 compareFuncs.insert(basicType);
693 break;
694 }
695 }
696
collectUniqueBasicTypes(std::set<glu::DataType> & basicTypes,const VarType & type)697 void collectUniqueBasicTypes (std::set<glu::DataType>& basicTypes, const VarType& type)
698 {
699 if (type.isStructType())
700 {
701 for (StructType::ConstIterator iter = type.getStruct().begin(); iter != type.getStruct().end(); ++iter)
702 collectUniqueBasicTypes(basicTypes, iter->getType());
703 }
704 else if (type.isArrayType())
705 collectUniqueBasicTypes(basicTypes, type.getElementType());
706 else
707 {
708 DE_ASSERT(type.isBasicType());
709 basicTypes.insert(type.getBasicType());
710 }
711 }
712
// Collects the basic types used by every uniform in a single block.
void collectUniqueBasicTypes (std::set<glu::DataType>& basicTypes, const UniformBlock& uniformBlock)
{
	for (UniformBlock::ConstIterator iter = uniformBlock.begin(); iter != uniformBlock.end(); ++iter)
		collectUniqueBasicTypes(basicTypes, iter->getType());
}
718
// Collects the basic types used by every block of the whole shader interface.
void collectUniqueBasicTypes (std::set<glu::DataType>& basicTypes, const ShaderInterface& interface)
{
	for (int ndx = 0; ndx < interface.getNumUniformBlocks(); ++ndx)
		collectUniqueBasicTypes(basicTypes, interface.getUniformBlock(ndx));
}
724
generateCompareFuncs(std::ostream & str,const ShaderInterface & interface)725 void generateCompareFuncs (std::ostream& str, const ShaderInterface& interface)
726 {
727 std::set<glu::DataType> types;
728 std::set<glu::DataType> compareFuncs;
729
730 // Collect unique basic types
731 collectUniqueBasicTypes(types, interface);
732
733 // Set of compare functions required
734 for (std::set<glu::DataType>::const_iterator iter = types.begin(); iter != types.end(); ++iter)
735 {
736 getCompareDependencies(compareFuncs, *iter);
737 }
738
739 for (int type = 0; type < glu::TYPE_LAST; ++type)
740 {
741 if (compareFuncs.find(glu::DataType(type)) != compareFuncs.end())
742 str << getCompareFuncForType(glu::DataType(type));
743 }
744 }
745
746 struct Indent
747 {
748 int level;
Indentvkt::ubo::__anon0393f7100111::Indent749 Indent (int level_) : level(level_) {}
750 };
751
operator <<(std::ostream & str,const Indent & indent)752 std::ostream& operator<< (std::ostream& str, const Indent& indent)
753 {
754 for (int i = 0; i < indent.level; i++)
755 str << "\t";
756 return str;
757 }
758
759 void generateDeclaration (std::ostringstream& src, const VarType& type, const std::string& name, int indentLevel, deUint32 unusedHints);
760 void generateDeclaration (std::ostringstream& src, const Uniform& uniform, int indentLevel);
761 void generateDeclaration (std::ostringstream& src, const StructType& structType, int indentLevel);
762
763 void generateLocalDeclaration (std::ostringstream& src, const StructType& structType, int indentLevel);
764 void generateFullDeclaration (std::ostringstream& src, const StructType& structType, int indentLevel);
765
// Emits a free-standing named struct declaration terminated with ";\n".
// Only named structs may be declared at file scope.
void generateDeclaration (std::ostringstream& src, const StructType& structType, int indentLevel)
{
	DE_ASSERT(structType.hasTypeName());
	generateFullDeclaration(src, structType, indentLevel);
	src << ";\n";
}
772
generateFullDeclaration(std::ostringstream & src,const StructType & structType,int indentLevel)773 void generateFullDeclaration (std::ostringstream& src, const StructType& structType, int indentLevel)
774 {
775 src << "struct";
776 if (structType.hasTypeName())
777 src << " " << structType.getTypeName();
778 src << "\n" << Indent(indentLevel) << "{\n";
779
780 for (StructType::ConstIterator memberIter = structType.begin(); memberIter != structType.end(); memberIter++)
781 {
782 src << Indent(indentLevel + 1);
783 generateDeclaration(src, memberIter->getType(), memberIter->getName(), indentLevel + 1, memberIter->getFlags() & UNUSED_BOTH);
784 }
785
786 src << Indent(indentLevel) << "}";
787 }
788
// Emits just the struct's type name (for use inside a member/variable declaration).
void generateLocalDeclaration (std::ostringstream& src, const StructType& structType, int /* indentLevel */)
{
	src << structType.getTypeName();
}
793
// Emits a GLSL declaration "qualifiers type name[sizes];" for 'type', followed
// by an optional "// unused in ..." hint comment and a newline. Nested arrays
// are flattened into a multi-dimensional declarator.
void generateDeclaration (std::ostringstream& src, const VarType& type, const std::string& name, int indentLevel, deUint32 unusedHints)
{
	deUint32 flags = type.getFlags();

	if ((flags & LAYOUT_MASK) != 0)
		src << "layout(" << LayoutFlagsFmt(flags & LAYOUT_MASK) << ") ";

	if ((flags & PRECISION_MASK) != 0)
		src << PrecisionFlagsFmt(flags & PRECISION_MASK) << " ";

	if (type.isBasicType())
		src << glu::getDataTypeName(type.getBasicType()) << " " << name;
	else if (type.isArrayType())
	{
		// Peel off every array dimension to reach the element type.
		std::vector<int> arraySizes;
		const VarType* curType = &type;
		while (curType->isArrayType())
		{
			arraySizes.push_back(curType->getArraySize());
			curType = &curType->getElementType();
		}

		if (curType->isBasicType())
		{
			// Element-level precision qualifier goes before the element type name.
			if ((curType->getFlags() & PRECISION_MASK) != 0)
				src << PrecisionFlagsFmt(curType->getFlags() & PRECISION_MASK) << " ";
			src << glu::getDataTypeName(curType->getBasicType());
		}
		else
		{
			DE_ASSERT(curType->isStructType());
			generateLocalDeclaration(src, curType->getStruct(), indentLevel+1);
		}

		src << " " << name;

		// Dimensions print outermost first.
		for (std::vector<int>::const_iterator sizeIter = arraySizes.begin(); sizeIter != arraySizes.end(); sizeIter++)
			src << "[" << *sizeIter << "]";
	}
	else
	{
		generateLocalDeclaration(src, type.getStruct(), indentLevel+1);
		src << " " << name;
	}

	src << ";";

	// Print out unused hints.
	if (unusedHints != 0)
		src << " // unused in " << (unusedHints == UNUSED_BOTH ? "both shaders" :
					unusedHints == UNUSED_VERTEX ? "vertex shader" :
					unusedHints == UNUSED_FRAGMENT ? "fragment shader" : "???");

	src << "\n";
}
849
// Emits the declaration of a single uniform inside a block, with any
// uniform-level layout qualifier printed first.
void generateDeclaration (std::ostringstream& src, const Uniform& uniform, int indentLevel)
{
	if ((uniform.getFlags() & LAYOUT_MASK) != 0)
		src << "layout(" << LayoutFlagsFmt(uniform.getFlags() & LAYOUT_MASK) << ") ";

	generateDeclaration(src, uniform.getType(), uniform.getName(), indentLevel, uniform.getFlags() & UNUSED_BOTH);
}
857
generateDeclaration(std::ostringstream & src,int blockNdx,const UniformBlock & block)858 void generateDeclaration (std::ostringstream& src, int blockNdx, const UniformBlock& block)
859 {
860 src << "layout(set = 0, binding = " << blockNdx;
861 if ((block.getFlags() & LAYOUT_MASK) != 0)
862 src << ", " << LayoutFlagsFmt(block.getFlags() & LAYOUT_MASK);
863 src << ") ";
864
865 src << "uniform " << block.getBlockName();
866 src << "\n{\n";
867
868 for (UniformBlock::ConstIterator uniformIter = block.begin(); uniformIter != block.end(); uniformIter++)
869 {
870 src << Indent(1);
871 generateDeclaration(src, *uniformIter, 1 /* indent level */);
872 }
873
874 src << "}";
875
876 if (block.hasInstanceName())
877 {
878 src << " " << block.getInstanceName();
879 if (block.isArray())
880 src << "[" << block.getArraySize() << "]";
881 }
882 else
883 DE_ASSERT(!block.isArray());
884
885 src << ";\n";
886 }
887
// Appends a GLSL constant expression (e.g. "vec4(1.0, 2.0, ...)") that
// reconstructs the value stored for this uniform in the backing buffer data.
// basePtr points at the start of the block's data; for arrayed uniforms
// elementNdx selects which element is read (via arrayStride).
void generateValueSrc (std::ostringstream& src, const UniformLayoutEntry& entry, const void* basePtr, int elementNdx)
{
	glu::DataType	scalarType	= glu::getDataTypeScalarType(entry.type);
	int				scalarSize	= glu::getDataTypeScalarSize(entry.type);
	bool			isArray		= entry.size > 1;
	const deUint8*	elemPtr		= (const deUint8*)basePtr + entry.offset + (isArray ? elementNdx * entry.arrayStride : 0);
	const int		compSize	= sizeof(deUint32);	// Each basic scalar component occupies 4 bytes.

	// Vectors/matrices need an explicit constructor call around the components.
	if (scalarSize > 1)
		src << glu::getDataTypeName(entry.type) << "(";

	if (glu::isDataTypeMatrix(entry.type))
	{
		int	numRows	= glu::getDataTypeMatrixNumRows(entry.type);
		int	numCols	= glu::getDataTypeMatrixNumColumns(entry.type);

		DE_ASSERT(scalarType == glu::TYPE_FLOAT);

		// Constructed in column-wise order.
		for (int colNdx = 0; colNdx < numCols; colNdx++)
		{
			for (int rowNdx = 0; rowNdx < numRows; rowNdx++)
			{
				// matrixStride separates rows when row-major, columns when column-major.
				const deUint8* compPtr = elemPtr + (entry.isRowMajor ? (rowNdx * entry.matrixStride + colNdx * compSize)
																	 : (colNdx * entry.matrixStride + rowNdx * compSize));

				if (colNdx > 0 || rowNdx > 0)
					src << ", ";

				src << de::floatToString(*((const float*)compPtr), 1);
			}
		}
	}
	else
	{
		// Scalar or vector: components are tightly packed.
		for (int scalarNdx = 0; scalarNdx < scalarSize; scalarNdx++)
		{
			const deUint8* compPtr = elemPtr + scalarNdx * compSize;

			if (scalarNdx > 0)
				src << ", ";

			switch (scalarType)
			{
				case glu::TYPE_FLOAT:	src << de::floatToString(*((const float*)compPtr), 1);			break;
				case glu::TYPE_INT:		src << *((const int*)compPtr);									break;
				case glu::TYPE_UINT:	src << *((const deUint32*)compPtr) << "u";						break;
				// Bools are stored as 32-bit integers in the buffer; any non-zero value is true.
				case glu::TYPE_BOOL:	src << (*((const deUint32*)compPtr) != 0u ? "true" : "false");	break;
				default:
					DE_ASSERT(false);
			}
		}
	}

	if (scalarSize > 1)
		src << ")";
}
945
// Recursively emits GLSL statements that compare a uniform (basic value,
// array, or struct) against its reference data and multiply the comparison
// outcome into resultVar. srcName is the GLSL access expression; apiName is
// the corresponding name used to look the uniform up in the layout. Members
// whose flags match unusedMask are skipped.
void generateCompareSrc (std::ostringstream& src,
						 const char* resultVar,
						 const VarType& type,
						 const std::string& srcName,
						 const std::string& apiName,
						 const UniformLayout& layout,
						 int blockNdx,
						 const void* basePtr,
						 deUint32 unusedMask)
{
	if (type.isBasicType() || (type.isArrayType() && type.getElementType().isBasicType()))
	{
		// Basic type or array of basic types.
		bool						isArray			= type.isArrayType();
		glu::DataType				elementType		= isArray ? type.getElementType().getBasicType() : type.getBasicType();
		const char*					typeName		= glu::getDataTypeName(elementType);
		std::string					fullApiName		= std::string(apiName) + (isArray ? "[0]" : ""); // Arrays are always postfixed with [0]
		int							uniformNdx		= layout.getUniformLayoutIndex(blockNdx, fullApiName);
		const UniformLayoutEntry&	entry			= layout.uniforms[uniformNdx];

		if (isArray)
		{
			// One compare statement per element; the expected value is taken
			// from the element's slot in the reference data.
			for (int elemNdx = 0; elemNdx < type.getArraySize(); elemNdx++)
			{
				src << "\tresult *= compare_" << typeName << "(" << srcName << "[" << elemNdx << "], ";
				generateValueSrc(src, entry, basePtr, elemNdx);
				src << ");\n";
			}
		}
		else
		{
			src << "\tresult *= compare_" << typeName << "(" << srcName << ", ";
			generateValueSrc(src, entry, basePtr, 0);
			src << ");\n";
		}
	}
	else if (type.isArrayType())
	{
		// Array of structs (or of arrays): recurse per element with an indexed name.
		const VarType& elementType = type.getElementType();

		for (int elementNdx = 0; elementNdx < type.getArraySize(); elementNdx++)
		{
			std::string op				= std::string("[") + de::toString(elementNdx) + "]";
			std::string elementSrcName	= std::string(srcName) + op;
			std::string elementApiName	= std::string(apiName) + op;
			generateCompareSrc(src, resultVar, elementType, elementSrcName, elementApiName, layout, blockNdx, basePtr, unusedMask);
		}
	}
	else
	{
		DE_ASSERT(type.isStructType());

		// Struct: recurse per member with a dotted name.
		for (StructType::ConstIterator memberIter = type.getStruct().begin(); memberIter != type.getStruct().end(); memberIter++)
		{
			if (memberIter->getFlags() & unusedMask)
				continue; // Skip member.

			std::string op				= std::string(".") + memberIter->getName();
			std::string memberSrcName	= std::string(srcName) + op;
			std::string memberApiName	= std::string(apiName) + op;
			generateCompareSrc(src, resultVar, memberIter->getType(), memberSrcName, memberApiName, layout, blockNdx, basePtr, unusedMask);
		}
	}
}
1010
// Emits compare statements for every uniform of every block declared in the
// given shader stage (isVertex selects vertex vs fragment). Block instances
// that are arrayed are expanded per instance, each with its own backing data
// looked up from blockPointers via the block layout index.
void generateCompareSrc (std::ostringstream& src, const char* resultVar, const ShaderInterface& interface, const UniformLayout& layout, const std::map<int, void*>& blockPointers, bool isVertex)
{
	deUint32 unusedMask = isVertex ? UNUSED_VERTEX : UNUSED_FRAGMENT;

	for (int blockNdx = 0; blockNdx < interface.getNumUniformBlocks(); blockNdx++)
	{
		const UniformBlock& block = interface.getUniformBlock(blockNdx);

		if ((block.getFlags() & (isVertex ? DECLARE_VERTEX : DECLARE_FRAGMENT)) == 0)
			continue; // Skip.

		bool			hasInstanceName	= block.hasInstanceName();
		bool			isArray			= block.isArray();
		int				numInstances	= isArray ? block.getArraySize() : 1;
		std::string		apiPrefix		= hasInstanceName ? block.getBlockName() + "." : std::string("");

		DE_ASSERT(!isArray || hasInstanceName);

		for (int instanceNdx = 0; instanceNdx < numInstances; instanceNdx++)
		{
			// GLSL access goes through the instance name; the API/layout name
			// uses the block name.
			std::string		instancePostfix		= isArray ? std::string("[") + de::toString(instanceNdx) + "]" : std::string("");
			std::string		blockInstanceName	= block.getBlockName() + instancePostfix;
			std::string		srcPrefix			= hasInstanceName ? block.getInstanceName() + instancePostfix + "." : std::string("");
			int				blockLayoutNdx		= layout.getBlockLayoutIndex(blockNdx, instanceNdx);
			void*			basePtr				= blockPointers.find(blockLayoutNdx)->second;

			for (UniformBlock::ConstIterator uniformIter = block.begin(); uniformIter != block.end(); uniformIter++)
			{
				const Uniform& uniform = *uniformIter;

				if (uniform.getFlags() & unusedMask)
					continue; // Don't read from that uniform.

				std::string srcName = srcPrefix + uniform.getName();
				std::string apiName = apiPrefix + uniform.getName();
				generateCompareSrc(src, resultVar, uniform.getType(), srcName, apiName, layout, blockNdx, basePtr, unusedMask);
			}
		}
	}
}
1051
generateVertexShader(const ShaderInterface & interface,const UniformLayout & layout,const std::map<int,void * > & blockPointers)1052 std::string generateVertexShader (const ShaderInterface& interface, const UniformLayout& layout, const std::map<int, void*>& blockPointers)
1053 {
1054 std::ostringstream src;
1055 src << "#version 450\n";
1056
1057 src << "layout(location = 0) in highp vec4 a_position;\n";
1058 src << "layout(location = 0) out mediump float v_vtxResult;\n";
1059 src << "\n";
1060
1061 std::vector<const StructType*> namedStructs;
1062 interface.getNamedStructs(namedStructs);
1063 for (std::vector<const StructType*>::const_iterator structIter = namedStructs.begin(); structIter != namedStructs.end(); structIter++)
1064 generateDeclaration(src, **structIter, 0);
1065
1066 for (int blockNdx = 0; blockNdx < interface.getNumUniformBlocks(); blockNdx++)
1067 {
1068 const UniformBlock& block = interface.getUniformBlock(blockNdx);
1069 if (block.getFlags() & DECLARE_VERTEX)
1070 generateDeclaration(src, blockNdx, block);
1071 }
1072
1073 // Comparison utilities.
1074 src << "\n";
1075 generateCompareFuncs(src, interface);
1076
1077 src << "\n"
1078 "void main (void)\n"
1079 "{\n"
1080 " gl_Position = a_position;\n"
1081 " mediump float result = 1.0;\n";
1082
1083 // Value compare.
1084 generateCompareSrc(src, "result", interface, layout, blockPointers, true);
1085
1086 src << " v_vtxResult = result;\n"
1087 "}\n";
1088
1089 return src.str();
1090 }
1091
generateFragmentShader(const ShaderInterface & interface,const UniformLayout & layout,const std::map<int,void * > & blockPointers)1092 std::string generateFragmentShader (const ShaderInterface& interface, const UniformLayout& layout, const std::map<int, void*>& blockPointers)
1093 {
1094 std::ostringstream src;
1095 src << "#version 450\n";
1096
1097 src << "layout(location = 0) in mediump float v_vtxResult;\n";
1098 src << "layout(location = 0) out mediump vec4 dEQP_FragColor;\n";
1099 src << "\n";
1100
1101 std::vector<const StructType*> namedStructs;
1102 interface.getNamedStructs(namedStructs);
1103 for (std::vector<const StructType*>::const_iterator structIter = namedStructs.begin(); structIter != namedStructs.end(); structIter++)
1104 generateDeclaration(src, **structIter, 0);
1105
1106 for (int blockNdx = 0; blockNdx < interface.getNumUniformBlocks(); blockNdx++)
1107 {
1108 const UniformBlock& block = interface.getUniformBlock(blockNdx);
1109 if (block.getFlags() & DECLARE_FRAGMENT)
1110 generateDeclaration(src, blockNdx, block);
1111 }
1112
1113 // Comparison utilities.
1114 src << "\n";
1115 generateCompareFuncs(src, interface);
1116
1117 src << "\n"
1118 "void main (void)\n"
1119 "{\n"
1120 " mediump float result = 1.0;\n";
1121
1122 // Value compare.
1123 generateCompareSrc(src, "result", interface, layout, blockPointers, false);
1124
1125 src << " dEQP_FragColor = vec4(1.0, v_vtxResult, result, 1.0);\n"
1126 "}\n";
1127
1128 return src.str();
1129 }
1130
createBuffer(Context & context,VkDeviceSize bufferSize,vk::VkBufferUsageFlags usageFlags)1131 Move<VkBuffer> createBuffer (Context& context, VkDeviceSize bufferSize, vk::VkBufferUsageFlags usageFlags)
1132 {
1133 const VkDevice vkDevice = context.getDevice();
1134 const DeviceInterface& vk = context.getDeviceInterface();
1135 const deUint32 queueFamilyIndex = context.getUniversalQueueFamilyIndex();
1136
1137 const VkBufferCreateInfo bufferInfo =
1138 {
1139 VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO, // VkStructureType sType;
1140 DE_NULL, // const void* pNext;
1141 0u, // VkBufferCreateFlags flags;
1142 bufferSize, // VkDeviceSize size;
1143 usageFlags, // VkBufferUsageFlags usage;
1144 VK_SHARING_MODE_EXCLUSIVE, // VkSharingMode sharingMode;
1145 1u, // deUint32 queueFamilyIndexCount;
1146 &queueFamilyIndex // const deUint32* pQueueFamilyIndices;
1147 };
1148
1149 return vk::createBuffer(vk, vkDevice, &bufferInfo);
1150 }
1151
// Creates a single-mip, single-layer, non-multisampled 2D image with the given
// format, tiling and usage, exclusive to the universal queue family.
Move<vk::VkImage> createImage2D (Context& context, deUint32 width, deUint32 height, vk::VkFormat format, vk::VkImageTiling tiling, vk::VkImageUsageFlags usageFlags)
{
	const deUint32 queueFamilyIndex = context.getUniversalQueueFamilyIndex();
	const vk::VkImageCreateInfo params =
	{
		vk::VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO,	// VkStructureType			sType
		DE_NULL,									// const void*				pNext
		0u,											// VkImageCreateFlags		flags
		vk::VK_IMAGE_TYPE_2D,						// VkImageType				imageType
		format,										// VkFormat					format
		{ width, height, 1u },						// VkExtent3D				extent
		1u,											// deUint32					mipLevels
		1u,											// deUint32					arrayLayers
		VK_SAMPLE_COUNT_1_BIT,						// VkSampleCountFlagBits	samples
		tiling,										// VkImageTiling			tiling
		usageFlags,									// VkImageUsageFlags		usage
		vk::VK_SHARING_MODE_EXCLUSIVE,				// VkSharingMode			sharingMode
		1u,											// deUint32					queueFamilyIndexCount
		&queueFamilyIndex,							// const deUint32*			pQueueFamilyIndices
		vk::VK_IMAGE_LAYOUT_UNDEFINED,				// VkImageLayout			initialLayout
	};

	return vk::createImage(context.getDeviceInterface(), context.getDevice(), &params);
}
1176
allocateAndBindMemory(Context & context,vk::VkBuffer buffer,vk::MemoryRequirement memReqs)1177 de::MovePtr<vk::Allocation> allocateAndBindMemory (Context& context, vk::VkBuffer buffer, vk::MemoryRequirement memReqs)
1178 {
1179 const vk::DeviceInterface& vkd = context.getDeviceInterface();
1180 const vk::VkMemoryRequirements bufReqs = vk::getBufferMemoryRequirements(vkd, context.getDevice(), buffer);
1181 de::MovePtr<vk::Allocation> memory = context.getDefaultAllocator().allocate(bufReqs, memReqs);
1182
1183 vkd.bindBufferMemory(context.getDevice(), buffer, memory->getMemory(), memory->getOffset());
1184
1185 return memory;
1186 }
1187
allocateAndBindMemory(Context & context,vk::VkImage image,vk::MemoryRequirement memReqs)1188 de::MovePtr<vk::Allocation> allocateAndBindMemory (Context& context, vk::VkImage image, vk::MemoryRequirement memReqs)
1189 {
1190 const vk::DeviceInterface& vkd = context.getDeviceInterface();
1191 const vk::VkMemoryRequirements imgReqs = vk::getImageMemoryRequirements(vkd, context.getDevice(), image);
1192 de::MovePtr<vk::Allocation> memory = context.getDefaultAllocator().allocate(imgReqs, memReqs);
1193
1194 vkd.bindImageMemory(context.getDevice(), image, memory->getMemory(), memory->getOffset());
1195
1196 return memory;
1197 }
1198
// Creates a 2D color image view over the image's single mip level / layer,
// with identity component mapping, for use as a framebuffer attachment.
Move<vk::VkImageView> createAttachmentView (Context& context, vk::VkImage image, vk::VkFormat format)
{
	const vk::VkImageViewCreateInfo params =
	{
		vk::VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,		// sType
		DE_NULL,											// pNext
		0u,													// flags
		image,												// image
		vk::VK_IMAGE_VIEW_TYPE_2D,							// viewType
		format,												// format
		vk::makeComponentMappingRGBA(),						// components
		{ vk::VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u,1u },	// subresourceRange
	};

	return vk::createImageView(context.getDeviceInterface(), context.getDevice(), &params);
}
1215
// Creates a pipeline layout with the single given descriptor set layout and no
// push constant ranges.
Move<vk::VkPipelineLayout> createPipelineLayout (Context& context, vk::VkDescriptorSetLayout descriptorSetLayout)
{
	const vk::VkPipelineLayoutCreateInfo params =
	{
		vk::VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,	// sType
		DE_NULL,											// pNext
		0u,													// flags
		1u,													// setLayoutCount
		&descriptorSetLayout,								// pSetLayouts
		0u,													// pushConstantRangeCount
		DE_NULL,											// pPushConstantRanges
	};

	return vk::createPipelineLayout(context.getDeviceInterface(), context.getDevice(), &params);
}
1231
// Creates a command pool on the universal queue family, allowing individual
// command buffer reset.
Move<vk::VkCommandPool> createCmdPool (Context& context)
{
	const deUint32 queueFamilyIndex = context.getUniversalQueueFamilyIndex();
	const vk::VkCommandPoolCreateInfo params =
	{
		vk::VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,			// sType
		DE_NULL,												// pNext
		vk::VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT,	// flags
		queueFamilyIndex,										// queueFamilyIndex
	};

	return vk::createCommandPool(context.getDeviceInterface(), context.getDevice(), &params);
}
1245
// Allocates a single primary command buffer from the given pool.
Move<vk::VkCommandBuffer> createCmdBuffer (Context& context, vk::VkCommandPool cmdPool)
{
	const vk::VkCommandBufferAllocateInfo params =
	{
		vk::VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,	// sType
		DE_NULL,											// pNext
		cmdPool,											// commandPool
		vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY,				// level
		1u,													// bufferCount
	};

	return vk::allocateCommandBuffer(context.getDeviceInterface(), context.getDevice(), &params);
}
1259
1260
1261 // UniformBlockCaseInstance
1262
// Test instance: uploads the reference uniform data, renders a full-screen
// quad whose shaders compare the uniform contents against the reference, and
// verifies the rendered image is all white (all comparisons passed).
class UniformBlockCaseInstance : public vkt::TestInstance
{
public:
									UniformBlockCaseInstance	(Context&						context,
																 UniformBlockCase::BufferMode	bufferMode,
																 const UniformLayout&			layout,
																 const std::map<int, void*>&	blockPointers);
	virtual							~UniformBlockCaseInstance	(void);
	virtual tcu::TestStatus			iterate						(void);

private:
	// Render target dimensions in pixels.
	enum
	{
		RENDER_WIDTH	= 100,
		RENDER_HEIGHT	= 100,
	};

	vk::Move<VkRenderPass>			createRenderPass			(vk::VkFormat format) const;
	vk::Move<VkFramebuffer>			createFramebuffer			(vk::VkRenderPass renderPass, vk::VkImageView colorImageView) const;
	vk::Move<VkDescriptorSetLayout>	createDescriptorSetLayout	(void) const;
	vk::Move<VkDescriptorPool>		createDescriptorPool		(void) const;
	vk::Move<VkPipeline>			createPipeline				(vk::VkShaderModule vtxShaderModule, vk::VkShaderModule fragShaderModule, vk::VkPipelineLayout pipelineLayout, vk::VkRenderPass renderPass) const;

	// Creates a uniform buffer with the given contents, records it in
	// m_uniformBuffers/m_uniformAllocs and returns a descriptor covering it.
	vk::VkDescriptorBufferInfo		addUniformData				(deUint32 size, const void* dataPtr);

	UniformBlockCase::BufferMode	m_bufferMode;		// One buffer per block vs. a single shared buffer.
	const UniformLayout&			m_layout;			// Computed block/uniform layout (not owned).
	const std::map<int, void*>&		m_blockPointers;	// Reference data per block layout index (not owned).

	typedef de::SharedPtr<vk::Unique<vk::VkBuffer> >	VkBufferSp;
	typedef de::SharedPtr<vk::Allocation>				AllocationSp;

	// Uniform buffers and their backing memory, kept alive for the draw.
	std::vector<VkBufferSp>			m_uniformBuffers;
	std::vector<AllocationSp>		m_uniformAllocs;
};
1298
// Constructor. layout and blockPointers are stored by reference and must
// outlive this instance (they are owned by the test case).
UniformBlockCaseInstance::UniformBlockCaseInstance (Context&						ctx,
													UniformBlockCase::BufferMode	bufferMode,
													const UniformLayout&			layout,
													const std::map<int, void*>&		blockPointers)
	: vkt::TestInstance (ctx)
	, m_bufferMode		(bufferMode)
	, m_layout			(layout)
	, m_blockPointers	(blockPointers)
{
}
1309
UniformBlockCaseInstance::~UniformBlockCaseInstance (void)
{
	// Vulkan objects are released by the smart-pointer members.
}
1313
// Renders a full-screen quad whose shaders compare every uniform value read
// in each stage against the reference data in m_blockPointers; any mismatch
// produces a non-white pixel. Returns pass iff the result image is all white.
tcu::TestStatus UniformBlockCaseInstance::iterate (void)
{
	const vk::DeviceInterface&	vk					= m_context.getDeviceInterface();
	const vk::VkDevice			device				= m_context.getDevice();
	const vk::VkQueue			queue				= m_context.getUniversalQueue();
	const deUint32				queueFamilyIndex	= m_context.getUniversalQueueFamilyIndex();

	// Full-viewport quad (two triangles).
	const float positions[] =
	{
		-1.0f, -1.0f, 0.0f, 1.0f,
		-1.0f, +1.0f, 0.0f, 1.0f,
		+1.0f, -1.0f, 0.0f, 1.0f,
		+1.0f, +1.0f, 0.0f, 1.0f
	};

	const deUint32 indices[] = { 0, 1, 2, 2, 1, 3 };

	vk::Unique<VkBuffer>		positionsBuffer		(createBuffer(m_context, sizeof(positions), vk::VK_BUFFER_USAGE_VERTEX_BUFFER_BIT));
	de::UniquePtr<Allocation>	positionsAlloc		(allocateAndBindMemory(m_context, *positionsBuffer, MemoryRequirement::HostVisible));
	vk::Unique<VkBuffer>		indicesBuffer		(createBuffer(m_context, sizeof(indices), vk::VK_BUFFER_USAGE_INDEX_BUFFER_BIT|vk::VK_BUFFER_USAGE_VERTEX_BUFFER_BIT));
	de::UniquePtr<Allocation>	indicesAlloc		(allocateAndBindMemory(m_context, *indicesBuffer, MemoryRequirement::HostVisible));

	// Device limit used when packing multiple blocks into a single buffer.
	int minUniformBufferOffsetAlignment = getminUniformBufferOffsetAlignment(m_context);

	// Upload attributes data
	{
		deMemcpy(positionsAlloc->getHostPtr(), positions, sizeof(positions));
		flushMappedMemoryRange(vk, device, positionsAlloc->getMemory(), positionsAlloc->getOffset(), sizeof(positions));

		deMemcpy(indicesAlloc->getHostPtr(), indices, sizeof(indices));
		flushMappedMemoryRange(vk, device, indicesAlloc->getMemory(), indicesAlloc->getOffset(), sizeof(indices));
	}

	// Color render target; TRANSFER_SRC so it can be read back to a buffer.
	vk::Unique<VkImage>			colorImage			(createImage2D(m_context,
															 		RENDER_WIDTH,
															 		RENDER_HEIGHT,
															 		vk::VK_FORMAT_R8G8B8A8_UNORM,
															 		vk::VK_IMAGE_TILING_OPTIMAL,
															 		vk::VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT|vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT));
	de::UniquePtr<Allocation>	colorImageAlloc		(allocateAndBindMemory(m_context, *colorImage, MemoryRequirement::Any));
	vk::Unique<VkImageView>		colorImageView		(createAttachmentView(m_context, *colorImage, vk::VK_FORMAT_R8G8B8A8_UNORM));

	vk::Unique<VkDescriptorSetLayout>	descriptorSetLayout	(createDescriptorSetLayout());
	vk::Unique<VkDescriptorPool>		descriptorPool		(createDescriptorPool());

	const VkDescriptorSetAllocateInfo descriptorSetAllocateInfo =
	{
		VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,	// VkStructureType				sType;
		DE_NULL,										// const void*					pNext;
		*descriptorPool,								// VkDescriptorPool				descriptorPool;
		1u,												// deUint32						setLayoutCount;
		&descriptorSetLayout.get()						// const VkDescriptorSetLayout*	pSetLayouts;
	};

	vk::Unique<VkDescriptorSet>				descriptorSet(vk::allocateDescriptorSet(vk, device, &descriptorSetAllocateInfo));
	int										numBlocks = (int)m_layout.blocks.size();
	std::vector<vk::VkDescriptorBufferInfo>	descriptors(numBlocks);

	// Upload uniform data
	{
		vk::DescriptorSetUpdateBuilder descriptorSetUpdateBuilder;

		if (m_bufferMode == UniformBlockCase::BUFFERMODE_PER_BLOCK)
		{
			// One uniform buffer per block instance.
			for (int blockNdx = 0; blockNdx < numBlocks; blockNdx++)
			{
				const BlockLayoutEntry& block = m_layout.blocks[blockNdx];
				const void*	srcPtr = m_blockPointers.find(blockNdx)->second;

				descriptors[blockNdx] = addUniformData(block.size, srcPtr);
				descriptorSetUpdateBuilder.writeSingle(*descriptorSet, vk::DescriptorSetUpdateBuilder::Location::bindingArrayElement(block.bindingNdx, block.instanceNdx),
														VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, &descriptors[blockNdx]);
			}
		}
		else
		{
			// Single shared buffer: compute an aligned offset for each block first.
			int currentOffset = 0;
			std::map<int, int> offsets;
			for (int blockNdx = 0; blockNdx < numBlocks; blockNdx++)
			{
				if (minUniformBufferOffsetAlignment > 0)
					currentOffset = deAlign32(currentOffset, minUniformBufferOffsetAlignment);
				offsets[blockNdx] = currentOffset;
				currentOffset += m_layout.blocks[blockNdx].size;
			}

			deUint32 totalSize = currentOffset;

			// Make a copy of the data that satisfies the device's min uniform buffer alignment
			std::vector<deUint8> data;
			data.resize(totalSize);
			for (int blockNdx = 0; blockNdx < numBlocks; blockNdx++)
			{
				deMemcpy(&data[offsets[blockNdx]], m_blockPointers.find(blockNdx)->second, m_layout.blocks[blockNdx].size);
			}

			vk::VkBuffer buffer = addUniformData(totalSize, &data[0]).buffer;

			// Each block's descriptor points into the shared buffer at its offset.
			for (int blockNdx = 0; blockNdx < numBlocks; blockNdx++)
			{
				const BlockLayoutEntry& block = m_layout.blocks[blockNdx];
				deUint32 size = block.size;

				const VkDescriptorBufferInfo descriptor =
				{
					buffer,							// VkBuffer		buffer;
					(deUint32)offsets[blockNdx],	// VkDeviceSize	offset;
					size,							// VkDeviceSize	range;
				};

				descriptors[blockNdx] = descriptor;
				descriptorSetUpdateBuilder.writeSingle(*descriptorSet,
														vk::DescriptorSetUpdateBuilder::Location::bindingArrayElement(block.bindingNdx, block.instanceNdx),
														VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
														&descriptors[blockNdx]);
			}
		}

		descriptorSetUpdateBuilder.update(vk, device);
	}

	vk::Unique<VkRenderPass>		renderPass		(createRenderPass(vk::VK_FORMAT_R8G8B8A8_UNORM));
	vk::Unique<VkFramebuffer>		framebuffer		(createFramebuffer(*renderPass, *colorImageView));
	vk::Unique<VkPipelineLayout>	pipelineLayout	(createPipelineLayout(m_context, *descriptorSetLayout));

	vk::Unique<VkShaderModule>		vtxShaderModule	(vk::createShaderModule(vk, device, m_context.getBinaryCollection().get("vert"), 0));
	vk::Unique<VkShaderModule>		fragShaderModule(vk::createShaderModule(vk, device, m_context.getBinaryCollection().get("frag"), 0));
	vk::Unique<VkPipeline>			pipeline		(createPipeline(*vtxShaderModule, *fragShaderModule, *pipelineLayout, *renderPass));
	vk::Unique<VkCommandPool>		cmdPool			(createCmdPool(m_context));
	vk::Unique<VkCommandBuffer>		cmdBuffer		(createCmdBuffer(m_context, *cmdPool));
	vk::Unique<VkBuffer>			readImageBuffer	(createBuffer(m_context, (vk::VkDeviceSize)(RENDER_WIDTH * RENDER_HEIGHT * 4), vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT));
	de::UniquePtr<Allocation>		readImageAlloc	(allocateAndBindMemory(m_context, *readImageBuffer, vk::MemoryRequirement::HostVisible));

	// Record command buffer
	const vk::VkCommandBufferBeginInfo beginInfo	=
	{
		vk::VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,	// VkStructureType					sType;
		DE_NULL,											// const void*						pNext;
		0u,													// VkCommandBufferUsageFlags		flags;
		(const vk::VkCommandBufferInheritanceInfo*)DE_NULL,	// pInheritanceInfo (primary buffer, none needed)
	};
	VK_CHECK(vk.beginCommandBuffer(*cmdBuffer, &beginInfo));

	// Non-white clear color, so an untouched framebuffer cannot pass.
	const vk::VkClearValue clearValue = vk::makeClearValueColorF32(0.125f, 0.25f, 0.75f, 1.0f);
	const vk::VkRenderPassBeginInfo passBeginInfo	=
	{
		vk::VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,	// VkStructureType		sType;
		DE_NULL,										// const void*			pNext;
		*renderPass,									// VkRenderPass			renderPass;
		*framebuffer,									// VkFramebuffer		framebuffer;
		{ { 0, 0 }, { RENDER_WIDTH, RENDER_HEIGHT } },	// VkRect2D				renderArea;
		1u,												// deUint32				clearValueCount;
		&clearValue,									// const VkClearValue*	pClearValues;
	};

	// Add barrier for initializing image state
	{
		const vk::VkImageMemoryBarrier initializeBarrier =
		{
			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,		// VkStructureType			sType;
			DE_NULL,										// const void*				pNext
			0,												// VkAccessFlags			srcAccessMask;
			vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,		// VkAccessFlags			dstAccessMask;
			vk::VK_IMAGE_LAYOUT_UNDEFINED,					// VkImageLayout			oldLayout;
			vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,	// VkImageLayout			newLayout;
			queueFamilyIndex,								// deUint32					srcQueueFamilyIndex;
			queueFamilyIndex,								// deUint32					dstQueueFamilyIndex;
			*colorImage,									// VkImage					image;
			{
				vk::VK_IMAGE_ASPECT_COLOR_BIT,			// VkImageAspectFlags	aspectMask;
				0u,										// deUint32				baseMipLevel;
				1u,										// deUint32				mipLevels;
				0u,										// deUint32				baseArraySlice;
				1u,										// deUint32				arraySize;
			}											// VkImageSubresourceRange	subresourceRange
		};

		vk.cmdPipelineBarrier(*cmdBuffer, vk::VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT, vk::VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, (vk::VkDependencyFlags)0,
							  0, (const vk::VkMemoryBarrier*)DE_NULL,
							  0, (const vk::VkBufferMemoryBarrier*)DE_NULL,
							  1, &initializeBarrier);
	}

	vk.cmdBeginRenderPass(*cmdBuffer, &passBeginInfo, vk::VK_SUBPASS_CONTENTS_INLINE);

	vk.cmdBindPipeline(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *pipeline);
	vk.cmdBindDescriptorSets(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *pipelineLayout, 0u, 1u, &*descriptorSet, 0u, DE_NULL);

	const vk::VkDeviceSize offsets[] = { 0u };
	vk.cmdBindVertexBuffers(*cmdBuffer, 0u, 1u, &*positionsBuffer, offsets);
	vk.cmdBindIndexBuffer(*cmdBuffer, *indicesBuffer, (vk::VkDeviceSize)0, vk::VK_INDEX_TYPE_UINT32);

	vk.cmdDrawIndexed(*cmdBuffer, DE_LENGTH_OF_ARRAY(indices), 1u, 0u, 0u, 0u);
	vk.cmdEndRenderPass(*cmdBuffer);

	// Add render finish barrier
	{
		const vk::VkImageMemoryBarrier renderFinishBarrier =
		{
			vk::VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,		// VkStructureType			sType;
			DE_NULL,										// const void*				pNext
			vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,		// VkAccessFlags			srcAccessMask;
			vk::VK_ACCESS_TRANSFER_READ_BIT,				// VkAccessFlags			dstAccessMask;
			vk::VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL,	// VkImageLayout			oldLayout;
			vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,		// VkImageLayout			newLayout;
			queueFamilyIndex,								// deUint32					srcQueueFamilyIndex;
			queueFamilyIndex,								// deUint32					dstQueueFamilyIndex;
			*colorImage,									// VkImage					image;
			{
				vk::VK_IMAGE_ASPECT_COLOR_BIT,			// VkImageAspectFlags	aspectMask;
				0u,										// deUint32				baseMipLevel;
				1u,										// deUint32				mipLevels;
				0u,										// deUint32				baseArraySlice;
				1u,										// deUint32				arraySize;
			}											// VkImageSubresourceRange	subresourceRange
		};

		vk.cmdPipelineBarrier(*cmdBuffer, vk::VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, (vk::VkDependencyFlags)0,
							  0, (const vk::VkMemoryBarrier*)DE_NULL,
							  0, (const vk::VkBufferMemoryBarrier*)DE_NULL,
							  1, &renderFinishBarrier);
	}

	// Add Image->Buffer copy command
	{
		const vk::VkBufferImageCopy copyParams =
		{
			(vk::VkDeviceSize)0u,					// VkDeviceSize			bufferOffset;
			(deUint32)RENDER_WIDTH,					// deUint32				bufferRowLength;
			(deUint32)RENDER_HEIGHT,				// deUint32				bufferImageHeight;
			{
				vk::VK_IMAGE_ASPECT_COLOR_BIT,	// VkImageAspect	aspect;
				0u,								// deUint32			mipLevel;
				0u,								// deUint32			arrayLayer;
				1u,								// deUint32			arraySize;
			},										// VkImageSubresourceCopy	imageSubresource
			{ 0u, 0u, 0u },							// VkOffset3D			imageOffset;
			{ RENDER_WIDTH, RENDER_HEIGHT, 1u }		// VkExtent3D			imageExtent;
		};

		vk.cmdCopyImageToBuffer(*cmdBuffer, *colorImage, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, *readImageBuffer, 1u, &copyParams);
	}

	// Add copy finish barrier
	{
		const vk::VkBufferMemoryBarrier copyFinishBarrier =
		{
			vk::VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER,		// VkStructureType	sType;
			DE_NULL,											// const void*		pNext;
			VK_ACCESS_TRANSFER_WRITE_BIT,						// VkAccessFlags	srcAccessMask;
			VK_ACCESS_HOST_READ_BIT,							// VkAccessFlags	dstAccessMask;
			queueFamilyIndex,									// deUint32			srcQueueFamilyIndex;
			queueFamilyIndex,									// deUint32			destQueueFamilyIndex;
			*readImageBuffer,									// VkBuffer			buffer;
			0u,													// VkDeviceSize		offset;
			(vk::VkDeviceSize)(RENDER_WIDTH * RENDER_HEIGHT * 4)// VkDeviceSize		size;
		};

		vk.cmdPipelineBarrier(*cmdBuffer, vk::VK_PIPELINE_STAGE_TRANSFER_BIT, vk::VK_PIPELINE_STAGE_HOST_BIT, (vk::VkDependencyFlags)0,
							  0, (const vk::VkMemoryBarrier*)DE_NULL,
							  1, &copyFinishBarrier,
							  0, (const vk::VkImageMemoryBarrier*)DE_NULL);
	}

	VK_CHECK(vk.endCommandBuffer(*cmdBuffer));

	// Submit the command buffer
	{
		const vk::VkFenceCreateInfo fenceParams =
		{
			vk::VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,	// VkStructureType		sType;
			DE_NULL,									// const void*			pNext;
			0u,											// VkFenceCreateFlags	flags;
		};
		const Unique<vk::VkFence> fence(vk::createFence(vk, device, &fenceParams));

		const VkSubmitInfo submitInfo =
		{
			VK_STRUCTURE_TYPE_SUBMIT_INFO,			// VkStructureType				sType;
			DE_NULL,								// const void*					pNext;
			0u,										// deUint32						waitSemaphoreCount;
			DE_NULL,								// const VkSemaphore*			pWaitSemaphores;
			(const VkPipelineStageFlags*)DE_NULL,	// pWaitDstStageMask (unused, no wait semaphores)
			1u,										// deUint32						commandBufferCount;
			&cmdBuffer.get(),						// const VkCommandBuffer*		pCommandBuffers;
			0u,										// deUint32						signalSemaphoreCount;
			DE_NULL									// const VkSemaphore*			pSignalSemaphores;
		};

		VK_CHECK(vk.queueSubmit(queue, 1u, &submitInfo, *fence));
		// Block until rendering and the readback copy have completed.
		VK_CHECK(vk.waitForFences(device, 1u, &fence.get(), DE_TRUE, ~0ull));
	}

	// Read back the results
	tcu::Surface surface(RENDER_WIDTH, RENDER_HEIGHT);
	{
		const tcu::TextureFormat			textureFormat	(tcu::TextureFormat::RGBA, tcu::TextureFormat::UNORM_INT8);
		const tcu::ConstPixelBufferAccess	imgAccess		(textureFormat, RENDER_WIDTH, RENDER_HEIGHT, 1, readImageAlloc->getHostPtr());
		const vk::VkDeviceSize				bufferSize		= RENDER_WIDTH * RENDER_HEIGHT * 4;
		// Make device writes visible to the host before reading.
		invalidateMappedMemoryRange(vk, device, readImageAlloc->getMemory(), readImageAlloc->getOffset(), bufferSize);

		tcu::copy(surface.getAccess(), imgAccess);
	}

	// Check if the result image is all white
	tcu::RGBA	white			(tcu::RGBA::white());
	int			numFailedPixels	= 0;

	for (int y = 0; y < surface.getHeight(); y++)
	{
		for (int x = 0; x < surface.getWidth(); x++)
		{
			if (surface.getPixel(x, y) != white)
				numFailedPixels += 1;
		}
	}

	if (numFailedPixels > 0)
	{
		// Log the image plus the computed layout to aid debugging the mismatch.
		tcu::TestLog& log = m_context.getTestContext().getLog();
		log << tcu::TestLog::Image("Image", "Rendered image", surface);
		log << tcu::TestLog::Message << "Image comparison failed, got " << numFailedPixels << " non-white pixels" << tcu::TestLog::EndMessage;

		for (size_t blockNdx = 0; blockNdx < m_layout.blocks.size(); blockNdx++)
		{
			const BlockLayoutEntry& block = m_layout.blocks[blockNdx];
			log << tcu::TestLog::Message << "Block index: " << blockNdx << " infos: " << block << tcu::TestLog::EndMessage;
		}

		for (size_t uniformNdx = 0; uniformNdx < m_layout.uniforms.size(); uniformNdx++)
		{
			log << tcu::TestLog::Message << "Uniform index: " << uniformNdx << " infos: " << m_layout.uniforms[uniformNdx] << tcu::TestLog::EndMessage;
		}

		return tcu::TestStatus::fail("Detected non-white pixels");
	}
	else
		return tcu::TestStatus::pass("Full white image ok");
}
1653
addUniformData(deUint32 size,const void * dataPtr)1654 vk::VkDescriptorBufferInfo UniformBlockCaseInstance::addUniformData (deUint32 size, const void* dataPtr)
1655 {
1656 const VkDevice vkDevice = m_context.getDevice();
1657 const DeviceInterface& vk = m_context.getDeviceInterface();
1658
1659 Move<VkBuffer> buffer = createBuffer(m_context, size, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT);
1660 de::MovePtr<Allocation> alloc = allocateAndBindMemory(m_context, *buffer, vk::MemoryRequirement::HostVisible);
1661
1662 deMemcpy(alloc->getHostPtr(), dataPtr, size);
1663 flushMappedMemoryRange(vk, vkDevice, alloc->getMemory(), alloc->getOffset(), size);
1664
1665 const VkDescriptorBufferInfo descriptor =
1666 {
1667 *buffer, // VkBuffer buffer;
1668 0u, // VkDeviceSize offset;
1669 size, // VkDeviceSize range;
1670
1671 };
1672
1673 m_uniformBuffers.push_back(VkBufferSp(new vk::Unique<vk::VkBuffer>(buffer)));
1674 m_uniformAllocs.push_back(AllocationSp(alloc.release()));
1675
1676 return descriptor;
1677 }
1678
createRenderPass(vk::VkFormat format) const1679 vk::Move<VkRenderPass> UniformBlockCaseInstance::createRenderPass (vk::VkFormat format) const
1680 {
1681 const VkDevice vkDevice = m_context.getDevice();
1682 const DeviceInterface& vk = m_context.getDeviceInterface();
1683
1684 const VkAttachmentDescription attachmentDescription =
1685 {
1686 0u, // VkAttachmentDescriptorFlags flags;
1687 format, // VkFormat format;
1688 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits samples;
1689 VK_ATTACHMENT_LOAD_OP_CLEAR, // VkAttachmentLoadOp loadOp;
1690 VK_ATTACHMENT_STORE_OP_STORE, // VkAttachmentStoreOp storeOp;
1691 VK_ATTACHMENT_LOAD_OP_DONT_CARE, // VkAttachmentLoadOp stencilLoadOp;
1692 VK_ATTACHMENT_STORE_OP_DONT_CARE, // VkAttachmentStoreOp stencilStoreOp;
1693 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout initialLayout;
1694 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, // VkImageLayout finalLayout;
1695 };
1696
1697 const VkAttachmentReference attachmentReference =
1698 {
1699 0u, // deUint32 attachment;
1700 VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL // VkImageLayout layout;
1701 };
1702
1703
1704 const VkSubpassDescription subpassDescription =
1705 {
1706 0u, // VkSubpassDescriptionFlags flags;
1707 VK_PIPELINE_BIND_POINT_GRAPHICS, // VkPipelineBindPoint pipelineBindPoint;
1708 0u, // deUint32 inputAttachmentCount;
1709 DE_NULL, // const VkAttachmentReference* pInputAttachments;
1710 1u, // deUint32 colorAttachmentCount;
1711 &attachmentReference, // const VkAttachmentReference* pColorAttachments;
1712 DE_NULL, // const VkAttachmentReference* pResolveAttachments;
1713 DE_NULL, // const VkAttachmentReference* pDepthStencilAttachment;
1714 0u, // deUint32 preserveAttachmentCount;
1715 DE_NULL // const VkAttachmentReference* pPreserveAttachments;
1716 };
1717
1718 const VkRenderPassCreateInfo renderPassParams =
1719 {
1720 VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO, // VkStructureType sType;
1721 DE_NULL, // const void* pNext;
1722 0u, // VkRenderPassCreateFlags flags;
1723 1u, // deUint32 attachmentCount;
1724 &attachmentDescription, // const VkAttachmentDescription* pAttachments;
1725 1u, // deUint32 subpassCount;
1726 &subpassDescription, // const VkSubpassDescription* pSubpasses;
1727 0u, // deUint32 dependencyCount;
1728 DE_NULL // const VkSubpassDependency* pDependencies;
1729 };
1730
1731 return vk::createRenderPass(vk, vkDevice, &renderPassParams);
1732 }
1733
createFramebuffer(vk::VkRenderPass renderPass,vk::VkImageView colorImageView) const1734 vk::Move<VkFramebuffer> UniformBlockCaseInstance::createFramebuffer (vk::VkRenderPass renderPass, vk::VkImageView colorImageView) const
1735 {
1736 const VkDevice vkDevice = m_context.getDevice();
1737 const DeviceInterface& vk = m_context.getDeviceInterface();
1738
1739 const VkFramebufferCreateInfo framebufferParams =
1740 {
1741 VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO, // VkStructureType sType;
1742 DE_NULL, // const void* pNext;
1743 0u, // VkFramebufferCreateFlags flags;
1744 renderPass, // VkRenderPass renderPass;
1745 1u, // deUint32 attachmentCount;
1746 &colorImageView, // const VkImageView* pAttachments;
1747 RENDER_WIDTH, // deUint32 width;
1748 RENDER_HEIGHT, // deUint32 height;
1749 1u // deUint32 layers;
1750 };
1751
1752 return vk::createFramebuffer(vk, vkDevice, &framebufferParams);
1753 }
1754
createDescriptorSetLayout(void) const1755 vk::Move<VkDescriptorSetLayout> UniformBlockCaseInstance::createDescriptorSetLayout (void) const
1756 {
1757 int numBlocks = (int)m_layout.blocks.size();
1758 int lastBindingNdx = -1;
1759 std::vector<int> lengths;
1760
1761 for (int blockNdx = 0; blockNdx < numBlocks; blockNdx++)
1762 {
1763 const BlockLayoutEntry& block = m_layout.blocks[blockNdx];
1764
1765 if (block.bindingNdx == lastBindingNdx)
1766 {
1767 lengths.back()++;
1768 }
1769 else
1770 {
1771 lengths.push_back(1);
1772 lastBindingNdx = block.bindingNdx;
1773 }
1774 }
1775
1776 vk::DescriptorSetLayoutBuilder layoutBuilder;
1777 for (size_t i = 0; i < lengths.size(); i++)
1778 {
1779 if (lengths[i] > 0)
1780 {
1781 layoutBuilder.addArrayBinding(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, lengths[i], vk::VK_SHADER_STAGE_ALL);
1782 }
1783 else
1784 {
1785 layoutBuilder.addSingleBinding(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, vk::VK_SHADER_STAGE_ALL);
1786 }
1787 }
1788
1789 return layoutBuilder.build(m_context.getDeviceInterface(), m_context.getDevice());
1790 }
1791
createDescriptorPool(void) const1792 vk::Move<VkDescriptorPool> UniformBlockCaseInstance::createDescriptorPool (void) const
1793 {
1794 vk::DescriptorPoolBuilder poolBuilder;
1795
1796 return poolBuilder
1797 .addType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, (int)m_layout.blocks.size())
1798 .build(m_context.getDeviceInterface(), m_context.getDevice(), VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
1799 }
1800
createPipeline(vk::VkShaderModule vtxShaderModule,vk::VkShaderModule fragShaderModule,vk::VkPipelineLayout pipelineLayout,vk::VkRenderPass renderPass) const1801 vk::Move<VkPipeline> UniformBlockCaseInstance::createPipeline (vk::VkShaderModule vtxShaderModule, vk::VkShaderModule fragShaderModule, vk::VkPipelineLayout pipelineLayout, vk::VkRenderPass renderPass) const
1802 {
1803 const VkDevice vkDevice = m_context.getDevice();
1804 const DeviceInterface& vk = m_context.getDeviceInterface();
1805
1806 const VkVertexInputBindingDescription vertexBinding =
1807 {
1808 0, // deUint32 binding;
1809 (deUint32)sizeof(float) * 4, // deUint32 strideInBytes;
1810 VK_VERTEX_INPUT_RATE_VERTEX // VkVertexInputStepRate inputRate;
1811 };
1812
1813 const VkVertexInputAttributeDescription vertexAttribute =
1814 {
1815 0, // deUint32 location;
1816 0, // deUint32 binding;
1817 VK_FORMAT_R32G32B32A32_SFLOAT, // VkFormat format;
1818 0u // deUint32 offset;
1819 };
1820
1821 const VkPipelineShaderStageCreateInfo shaderStages[2] =
1822 {
1823 {
1824 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // VkStructureType sType;
1825 DE_NULL, // const void* pNext;
1826 0u, // VkPipelineShaderStageCreateFlags flags;
1827 VK_SHADER_STAGE_VERTEX_BIT, // VkShaderStageFlagBits stage;
1828 vtxShaderModule, // VkShaderModule module;
1829 "main", // const char* pName;
1830 DE_NULL // const VkSpecializationInfo* pSpecializationInfo;
1831 },
1832 {
1833 VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO, // VkStructureType sType;
1834 DE_NULL, // const void* pNext;
1835 0u, // VkPipelineShaderStageCreateFlags flags;
1836 VK_SHADER_STAGE_FRAGMENT_BIT, // VkShaderStageFlagBits stage;
1837 fragShaderModule, // VkShaderModule module;
1838 "main", // const char* pName;
1839 DE_NULL // const VkSpecializationInfo* pSpecializationInfo;
1840 }
1841 };
1842
1843 const VkPipelineVertexInputStateCreateInfo vertexInputStateParams =
1844 {
1845 VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO, // VkStructureType sType;
1846 DE_NULL, // const void* pNext;
1847 0u, // VkPipelineVertexInputStateCreateFlags flags;
1848 1u, // deUint32 vertexBindingDescriptionCount;
1849 &vertexBinding, // const VkVertexInputBindingDescription* pVertexBindingDescriptions;
1850 1u, // deUint32 vertexAttributeDescriptionCount;
1851 &vertexAttribute, // const VkVertexInputAttributeDescription* pVertexAttributeDescriptions;
1852 };
1853
1854 const VkPipelineInputAssemblyStateCreateInfo inputAssemblyStateParams =
1855 {
1856 VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,// VkStructureType sType;
1857 DE_NULL, // const void* pNext;
1858 0u, // VkPipelineInputAssemblyStateCreateFlags flags;
1859 VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, // VkPrimitiveTopology topology;
1860 false // VkBool32 primitiveRestartEnable;
1861 };
1862
1863 const VkViewport viewport =
1864 {
1865 0.0f, // float originX;
1866 0.0f, // float originY;
1867 (float)RENDER_WIDTH, // float width;
1868 (float)RENDER_HEIGHT, // float height;
1869 0.0f, // float minDepth;
1870 1.0f // float maxDepth;
1871 };
1872
1873
1874 const VkRect2D scissor =
1875 {
1876 {
1877 0u, // deUint32 x;
1878 0u, // deUint32 y;
1879 }, // VkOffset2D offset;
1880 {
1881 RENDER_WIDTH, // deUint32 width;
1882 RENDER_HEIGHT, // deUint32 height;
1883 }, // VkExtent2D extent;
1884 };
1885
1886 const VkPipelineViewportStateCreateInfo viewportStateParams =
1887 {
1888 VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO, // VkStructureType sType;
1889 DE_NULL, // const void* pNext;
1890 0u, // VkPipelineViewportStateCreateFlags flags;
1891 1u, // deUint32 viewportCount;
1892 &viewport, // const VkViewport* pViewports;
1893 1u, // deUint32 scissorsCount;
1894 &scissor, // const VkRect2D* pScissors;
1895 };
1896
1897 const VkPipelineRasterizationStateCreateInfo rasterStateParams =
1898 {
1899 VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO, // VkStructureType sType;
1900 DE_NULL, // const void* pNext;
1901 0u, // VkPipelineRasterizationStateCreateFlags flags;
1902 false, // VkBool32 depthClampEnable;
1903 false, // VkBool32 rasterizerDiscardEnable;
1904 VK_POLYGON_MODE_FILL, // VkPolygonMode polygonMode;
1905 VK_CULL_MODE_NONE, // VkCullModeFlags cullMode;
1906 VK_FRONT_FACE_COUNTER_CLOCKWISE, // VkFrontFace frontFace;
1907 false, // VkBool32 depthBiasEnable;
1908 0.0f, // float depthBiasConstantFactor;
1909 0.0f, // float depthBiasClamp;
1910 0.0f, // float depthBiasSlopeFactor;
1911 1.0f, // float lineWidth;
1912 };
1913
1914 const VkPipelineMultisampleStateCreateInfo multisampleStateParams =
1915 {
1916 VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO, // VkStructureType sType;
1917 DE_NULL, // const void* pNext;
1918 0u, // VkPipelineMultisampleStateCreateFlags flags;
1919 VK_SAMPLE_COUNT_1_BIT, // VkSampleCountFlagBits rasterizationSamples;
1920 VK_FALSE, // VkBool32 sampleShadingEnable;
1921 0.0f, // float minSampleShading;
1922 DE_NULL, // const VkSampleMask* pSampleMask;
1923 VK_FALSE, // VkBool32 alphaToCoverageEnable;
1924 VK_FALSE // VkBool32 alphaToOneEnable;
1925 };
1926
1927 const VkPipelineColorBlendAttachmentState colorBlendAttachmentState =
1928 {
1929 false, // VkBool32 blendEnable;
1930 VK_BLEND_FACTOR_ONE, // VkBlend srcBlendColor;
1931 VK_BLEND_FACTOR_ZERO, // VkBlend destBlendColor;
1932 VK_BLEND_OP_ADD, // VkBlendOp blendOpColor;
1933 VK_BLEND_FACTOR_ONE, // VkBlend srcBlendAlpha;
1934 VK_BLEND_FACTOR_ZERO, // VkBlend destBlendAlpha;
1935 VK_BLEND_OP_ADD, // VkBlendOp blendOpAlpha;
1936 VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT | // VkChannelFlags channelWriteMask;
1937 VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT
1938 };
1939
1940 const VkPipelineColorBlendStateCreateInfo colorBlendStateParams =
1941 {
1942 VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO, // VkStructureType sType;
1943 DE_NULL, // const void* pNext;
1944 0u, // VkPipelineColorBlendStateCreateFlags flags;
1945 false, // VkBool32 logicOpEnable;
1946 VK_LOGIC_OP_COPY, // VkLogicOp logicOp;
1947 1u, // deUint32 attachmentCount;
1948 &colorBlendAttachmentState, // const VkPipelineColorBlendAttachmentState* pAttachments;
1949 { 0.0f, 0.0f, 0.0f, 0.0f }, // float blendConstants[4];
1950 };
1951
1952 const VkGraphicsPipelineCreateInfo graphicsPipelineParams =
1953 {
1954 VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO, // VkStructureType sType;
1955 DE_NULL, // const void* pNext;
1956 0u, // VkPipelineCreateFlags flags;
1957 2u, // deUint32 stageCount;
1958 shaderStages, // const VkPipelineShaderStageCreateInfo* pStages;
1959 &vertexInputStateParams, // const VkPipelineVertexInputStateCreateInfo* pVertexInputState;
1960 &inputAssemblyStateParams, // const VkPipelineInputAssemblyStateCreateInfo* pInputAssemblyState;
1961 DE_NULL, // const VkPipelineTessellationStateCreateInfo* pTessellationState;
1962 &viewportStateParams, // const VkPipelineViewportStateCreateInfo* pViewportState;
1963 &rasterStateParams, // const VkPipelineRasterizationStateCreateInfo* pRasterizationState;
1964 &multisampleStateParams, // const VkPipelineMultisampleStateCreateInfo* pMultisampleState;
1965 DE_NULL, // const VkPipelineDepthStencilStateCreateInfo* pDepthStencilState;
1966 &colorBlendStateParams, // const VkPipelineColorBlendStateCreateInfo* pColorBlendState;
1967 (const VkPipelineDynamicStateCreateInfo*)DE_NULL, // const VkPipelineDynamicStateCreateInfo* pDynamicState;
1968 pipelineLayout, // VkPipelineLayout layout;
1969 renderPass, // VkRenderPass renderPass;
1970 0u, // deUint32 subpass;
1971 0u, // VkPipeline basePipelineHandle;
1972 0u // deInt32 basePipelineIndex;
1973 };
1974
1975 return vk::createGraphicsPipeline(vk, vkDevice, DE_NULL, &graphicsPipelineParams);
1976 }
1977
1978 } // anonymous (utilities)
1979
1980 // UniformBlockCase.
1981
UniformBlockCase(tcu::TestContext & testCtx,const std::string & name,const std::string & description,BufferMode bufferMode)1982 UniformBlockCase::UniformBlockCase (tcu::TestContext& testCtx, const std::string& name, const std::string& description, BufferMode bufferMode)
1983 : TestCase (testCtx, name, description)
1984 , m_bufferMode (bufferMode)
1985 {
1986 }
1987
~UniformBlockCase(void)1988 UniformBlockCase::~UniformBlockCase (void)
1989 {
1990 }
1991
initPrograms(vk::SourceCollections & programCollection) const1992 void UniformBlockCase::initPrograms (vk::SourceCollections& programCollection) const
1993 {
1994 DE_ASSERT(!m_vertShaderSource.empty());
1995 DE_ASSERT(!m_fragShaderSource.empty());
1996
1997 programCollection.glslSources.add("vert") << glu::VertexSource(m_vertShaderSource);
1998 programCollection.glslSources.add("frag") << glu::FragmentSource(m_fragShaderSource);
1999 }
2000
createInstance(Context & context) const2001 TestInstance* UniformBlockCase::createInstance (Context& context) const
2002 {
2003 return new UniformBlockCaseInstance(context, m_bufferMode, m_uniformLayout, m_blockPointers);
2004 }
2005
init(void)2006 void UniformBlockCase::init (void)
2007 {
2008 // Compute reference layout.
2009 computeStd140Layout(m_uniformLayout, m_interface);
2010
2011 // Assign storage for reference values.
2012 {
2013 int totalSize = 0;
2014 for (std::vector<BlockLayoutEntry>::const_iterator blockIter = m_uniformLayout.blocks.begin(); blockIter != m_uniformLayout.blocks.end(); blockIter++)
2015 totalSize += blockIter->size;
2016 m_data.resize(totalSize);
2017
2018 // Pointers for each block.
2019 int curOffset = 0;
2020 for (int blockNdx = 0; blockNdx < (int)m_uniformLayout.blocks.size(); blockNdx++)
2021 {
2022 m_blockPointers[blockNdx] = &m_data[0] + curOffset;
2023 curOffset += m_uniformLayout.blocks[blockNdx].size;
2024 }
2025 }
2026
2027 // Generate values.
2028 generateValues(m_uniformLayout, m_blockPointers, 1 /* seed */);
2029
2030 // Generate shaders.
2031 m_vertShaderSource = generateVertexShader(m_interface, m_uniformLayout, m_blockPointers);
2032 m_fragShaderSource = generateFragmentShader(m_interface, m_uniformLayout, m_blockPointers);
2033 }
2034
2035 } // ubo
2036 } // vkt
2037