/*------------------------------------------------------------------------
 * Vulkan Conformance Tests
 * ------------------------
 *
 * Copyright (c) 2016 The Khronos Group Inc.
 * Copyright (c) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*!
 * \file
 * \brief Compute Shader Built-in variable tests.
 *//*--------------------------------------------------------------------*/

#include "vktComputeShaderBuiltinVarTests.hpp"
#include "vktTestCaseUtil.hpp"
#include "vktComputeTestsUtil.hpp"

#include "vkDefs.hpp"
#include "vkPlatform.hpp"
#include "vkRef.hpp"
#include "vkPrograms.hpp"
#include "vkStrUtil.hpp"
#include "vkRefUtil.hpp"
#include "vkQueryUtil.hpp"
#include "vkBarrierUtil.hpp"
#include "vkMemUtil.hpp"
#include "vkDeviceUtil.hpp"
#include "vkTypeUtil.hpp"
#include "vkBuilderUtil.hpp"
#include "vkCmdUtil.hpp"

#include "tcuTestLog.hpp"
#include "tcuFormatUtil.hpp"
#include "tcuVectorUtil.hpp"

#include "gluShaderUtil.hpp"

#include "deUniquePtr.hpp"
#include "deSharedPtr.hpp"

#include <map>
#include <string>
#include <vector>

namespace vkt
{
namespace compute
{
namespace
{

using namespace vk;
using std::string;
using std::vector;
using std::map;
using tcu::TestLog;
using tcu::UVec3;
using tcu::IVec3;

class ComputeBuiltinVarInstance;
class ComputeBuiltinVarCase;

static const string s_prefixProgramName = "compute_";

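// Helpers for reading back and comparing results. A builtin value is either a single uint
// (numComps == 1) or a uvec3 (numComps == 3); only the used components are compared and logged.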
static inline bool compareNumComponents (const UVec3& a, const UVec3& b, const int numComps)
{
	DE_ASSERT(numComps == 1 || numComps == 3);
	return numComps == 3 ? tcu::allEqual(a, b) : a.x() == b.x();
}

static inline UVec3 readResultVec (const deUint32* ptr, const int numComps)
{
	UVec3 res;
	for (int ndx = 0; ndx < numComps; ndx++)
		res[ndx] = ptr[ndx];
	return res;
}

struct LogComps
{
	const UVec3&	v;
	int				numComps;

	LogComps (const UVec3& v_, int numComps_) : v(v_), numComps(numComps_) {}
};

static inline std::ostream& operator<< (std::ostream& str, const LogComps& c)
{
	DE_ASSERT(c.numComps == 1 || c.numComps == 3);
	return c.numComps == 3 ? str << c.v : str << c.v.x();
}

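// A single execution configuration: the shader's local (work group) size and the number
// of work groups to dispatch.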
class SubCase
{
public:
	// Use getters instead of public const members, because SubCase must be assignable
	// in order to be stored in a vector.

	const UVec3&	localSize		(void) const { return m_localSize; }
	const UVec3&	numWorkGroups	(void) const { return m_numWorkGroups; }

	SubCase (void) {}
	SubCase (const UVec3& localSize_, const UVec3& numWorkGroups_)
		: m_localSize		(localSize_)
		, m_numWorkGroups	(numWorkGroups_) {}

private:
	UVec3	m_localSize;
	UVec3	m_numWorkGroups;
};

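// Test instance: executes the sub-cases of a ComputeBuiltinVarCase one by one (see iterate()).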
class ComputeBuiltinVarInstance : public vkt::TestInstance
{
public:
									ComputeBuiltinVarInstance	(Context&						context,
																 const vector<SubCase>&			subCases,
																 const glu::DataType			varType,
																 const ComputeBuiltinVarCase*	builtinVarCase);

	virtual tcu::TestStatus			iterate						(void);

private:
	const VkDevice					m_device;
	const DeviceInterface&			m_vki;
	const VkQueue					m_queue;
	const deUint32					m_queueFamilyIndex;
	vector<SubCase>					m_subCases;
	const ComputeBuiltinVarCase*	m_builtin_var_case;
	int								m_subCaseNdx;
	const glu::DataType				m_varType;
};

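// Base class for the builtin-variable cases: generates one compute shader per sub-case and
// lets each subclass provide the expected value through computeReference().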
class ComputeBuiltinVarCase : public vkt::TestCase
{
public:
					ComputeBuiltinVarCase	(tcu::TestContext& context, const string& name, const char* varName, glu::DataType varType, bool readByComponent);
					~ComputeBuiltinVarCase	(void);

	TestInstance*	createInstance			(Context& context) const
	{
		return new ComputeBuiltinVarInstance(context, m_subCases, m_varType, this);
	}

	virtual void	initPrograms			(SourceCollections& programCollection) const;
	virtual UVec3	computeReference		(const UVec3& numWorkGroups, const UVec3& workGroupSize, const UVec3& workGroupID, const UVec3& localInvocationID) const = 0;

protected:
	string			genBuiltinVarSource		(const string& varName, glu::DataType varType, const UVec3& localSize, bool readByComponent) const;
	vector<SubCase>	m_subCases;

private:
	deUint32		getProgram				(const tcu::UVec3& localSize);

	const string			m_varName;
	const glu::DataType		m_varType;
	int						m_subCaseNdx;
	bool					m_readByComponent;

							ComputeBuiltinVarCase	(const ComputeBuiltinVarCase& other);
	ComputeBuiltinVarCase&	operator=				(const ComputeBuiltinVarCase& other);
};

ComputeBuiltinVarCase::ComputeBuiltinVarCase (tcu::TestContext& context, const string& name, const char* varName, glu::DataType varType, bool readByComponent)
	: TestCase			(context, name + (readByComponent ? "_component" : ""), varName)
	, m_varName			(varName)
	, m_varType			(varType)
	, m_subCaseNdx		(0)
	, m_readByComponent	(readByComponent)
{
}

ComputeBuiltinVarCase::~ComputeBuiltinVarCase (void)
{
	ComputeBuiltinVarCase::deinit();
}

void ComputeBuiltinVarCase::initPrograms (SourceCollections& programCollection) const
{
	for (std::size_t i = 0; i < m_subCases.size(); i++)
	{
		const SubCase&		subCase	= m_subCases[i];
		std::ostringstream	name;
		name << s_prefixProgramName << i;
		programCollection.glslSources.add(name.str()) << glu::ComputeSource(genBuiltinVarSource(m_varName, m_varType, subCase.localSize(), m_readByComponent).c_str());
	}
}

string ComputeBuiltinVarCase::genBuiltinVarSource (const string& varName, glu::DataType varType, const UVec3& localSize, bool readByComponent) const
{
	std::ostringstream src;

	src << "#version 310 es\n"
		<< "layout (local_size_x = " << localSize.x() << ", local_size_y = " << localSize.y() << ", local_size_z = " << localSize.z() << ") in;\n";

	// For the gl_WorkGroupSize case, force it to be specialized so that
	// Glslang can't just bypass the read of the builtin variable.
	// We will not override these spec constants.
	src << "layout (local_size_x_id = 0, local_size_y_id = 1, local_size_z_id = 2) in;\n";

	src << "layout(set = 0, binding = 0) uniform Stride\n"
		<< "{\n"
		<< " uvec2 u_stride;\n"
		<< "}stride;\n"
		<< "layout(set = 0, binding = 1, std430) buffer Output\n"
		<< "{\n"
		<< " " << glu::getDataTypeName(varType) << " result[];\n"
		<< "} sb_out;\n"
		<< "\n"
		<< "void main (void)\n"
		<< "{\n"
		<< " highp uint offset = stride.u_stride.x*gl_GlobalInvocationID.z + stride.u_stride.y*gl_GlobalInvocationID.y + gl_GlobalInvocationID.x;\n";

	if (readByComponent && varType != glu::TYPE_UINT)
	{
		switch (varType)
		{
			case glu::TYPE_UINT_VEC4:
				src << " sb_out.result[offset].w = " << varName << ".w;\n";
				// Fall through
			case glu::TYPE_UINT_VEC3:
				src << " sb_out.result[offset].z = " << varName << ".z;\n";
				// Fall through
			case glu::TYPE_UINT_VEC2:
				src << " sb_out.result[offset].y = " << varName << ".y;\n"
					<< " sb_out.result[offset].x = " << varName << ".x;\n";
				break;
			default:
				DE_FATAL("Illegal data type");
				break;
		}
	}
	else
	{
		src << " sb_out.result[offset] = " << varName << ";\n";
	}
	src << "}\n";

	return src.str();
}
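
// For illustration only (a sketch of what the generator above emits, not produced verbatim by
// the harness): a sub-case with local size (2, 3, 4) reading gl_NumWorkGroups as a whole vector
// yields GLSL roughly like:
//
//     #version 310 es
//     layout (local_size_x = 2, local_size_y = 3, local_size_z = 4) in;
//     layout (local_size_x_id = 0, local_size_y_id = 1, local_size_z_id = 2) in;
//     layout(set = 0, binding = 0) uniform Stride { uvec2 u_stride; } stride;
//     layout(set = 0, binding = 1, std430) buffer Output { uvec3 result[]; } sb_out;
//
//     void main (void)
//     {
//         highp uint offset = stride.u_stride.x*gl_GlobalInvocationID.z + stride.u_stride.y*gl_GlobalInvocationID.y + gl_GlobalInvocationID.x;
//         sb_out.result[offset] = gl_NumWorkGroups;
//     }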

class NumWorkGroupsCase : public ComputeBuiltinVarCase
{
public:
	NumWorkGroupsCase (tcu::TestContext& context, bool readByComponent)
		: ComputeBuiltinVarCase(context, "num_work_groups", "gl_NumWorkGroups", glu::TYPE_UINT_VEC3, readByComponent)
	{
		m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(1, 1, 1)));
		m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(52, 1, 1)));
		m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(1, 39, 1)));
		m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(1, 1, 78)));
		m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(4, 7, 11)));
		m_subCases.push_back(SubCase(UVec3(2, 3, 4), UVec3(4, 7, 11)));
	}

	UVec3 computeReference (const UVec3& numWorkGroups, const UVec3& workGroupSize, const UVec3& workGroupID, const UVec3& localInvocationID) const
	{
		DE_UNREF(workGroupSize);
		DE_UNREF(workGroupID);
		DE_UNREF(localInvocationID);
		return numWorkGroups;
	}
};

class WorkGroupSizeCase : public ComputeBuiltinVarCase
{
public:
	WorkGroupSizeCase (tcu::TestContext& context, bool readByComponent)
		: ComputeBuiltinVarCase(context, "work_group_size", "gl_WorkGroupSize", glu::TYPE_UINT_VEC3, readByComponent)
	{
		m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(1, 1, 1)));
		m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(2, 7, 3)));
		m_subCases.push_back(SubCase(UVec3(2, 1, 1), UVec3(1, 1, 1)));
		m_subCases.push_back(SubCase(UVec3(2, 1, 1), UVec3(1, 3, 5)));
		m_subCases.push_back(SubCase(UVec3(1, 3, 1), UVec3(1, 1, 1)));
		m_subCases.push_back(SubCase(UVec3(1, 1, 7), UVec3(1, 1, 1)));
		m_subCases.push_back(SubCase(UVec3(1, 1, 7), UVec3(3, 3, 1)));
		m_subCases.push_back(SubCase(UVec3(10, 3, 4), UVec3(1, 1, 1)));
		m_subCases.push_back(SubCase(UVec3(10, 3, 4), UVec3(3, 1, 2)));
	}

	UVec3 computeReference (const UVec3& numWorkGroups, const UVec3& workGroupSize, const UVec3& workGroupID, const UVec3& localInvocationID) const
	{
		DE_UNREF(numWorkGroups);
		DE_UNREF(workGroupID);
		DE_UNREF(localInvocationID);
		return workGroupSize;
	}
};

//-----------------------------------------------------------------------
class WorkGroupIDCase : public ComputeBuiltinVarCase
{
public:
	WorkGroupIDCase (tcu::TestContext& context, bool readByComponent)
		: ComputeBuiltinVarCase(context, "work_group_id", "gl_WorkGroupID", glu::TYPE_UINT_VEC3, readByComponent)
	{
		m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(1, 1, 1)));
		m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(52, 1, 1)));
		m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(1, 39, 1)));
		m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(1, 1, 78)));
		m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(4, 7, 11)));
		m_subCases.push_back(SubCase(UVec3(2, 3, 4), UVec3(4, 7, 11)));
	}

	UVec3 computeReference (const UVec3& numWorkGroups, const UVec3& workGroupSize, const UVec3& workGroupID, const UVec3& localInvocationID) const
	{
		DE_UNREF(numWorkGroups);
		DE_UNREF(workGroupSize);
		DE_UNREF(localInvocationID);
		return workGroupID;
	}
};

class LocalInvocationIDCase : public ComputeBuiltinVarCase
{
public:
	LocalInvocationIDCase (tcu::TestContext& context, bool readByComponent)
		: ComputeBuiltinVarCase(context, "local_invocation_id", "gl_LocalInvocationID", glu::TYPE_UINT_VEC3, readByComponent)
	{
		m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(1, 1, 1)));
		m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(2, 7, 3)));
		m_subCases.push_back(SubCase(UVec3(2, 1, 1), UVec3(1, 1, 1)));
		m_subCases.push_back(SubCase(UVec3(2, 1, 1), UVec3(1, 3, 5)));
		m_subCases.push_back(SubCase(UVec3(1, 3, 1), UVec3(1, 1, 1)));
		m_subCases.push_back(SubCase(UVec3(1, 1, 7), UVec3(1, 1, 1)));
		m_subCases.push_back(SubCase(UVec3(1, 1, 7), UVec3(3, 3, 1)));
		m_subCases.push_back(SubCase(UVec3(10, 3, 4), UVec3(1, 1, 1)));
		m_subCases.push_back(SubCase(UVec3(10, 3, 4), UVec3(3, 1, 2)));
	}

	UVec3 computeReference (const UVec3& numWorkGroups, const UVec3& workGroupSize, const UVec3& workGroupID, const UVec3& localInvocationID) const
	{
		DE_UNREF(numWorkGroups);
		DE_UNREF(workGroupSize);
		DE_UNREF(workGroupID);
		return localInvocationID;
	}
};

class GlobalInvocationIDCase : public ComputeBuiltinVarCase
{
public:
	GlobalInvocationIDCase (tcu::TestContext& context, bool readByComponent)
		: ComputeBuiltinVarCase(context, "global_invocation_id", "gl_GlobalInvocationID", glu::TYPE_UINT_VEC3, readByComponent)
	{
		m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(1, 1, 1)));
		m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(52, 1, 1)));
		m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(1, 39, 1)));
		m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(1, 1, 78)));
		m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(4, 7, 11)));
		m_subCases.push_back(SubCase(UVec3(2, 3, 4), UVec3(4, 7, 11)));
		m_subCases.push_back(SubCase(UVec3(10, 3, 4), UVec3(1, 1, 1)));
		m_subCases.push_back(SubCase(UVec3(10, 3, 4), UVec3(3, 1, 2)));
	}

	UVec3 computeReference (const UVec3& numWorkGroups, const UVec3& workGroupSize, const UVec3& workGroupID, const UVec3& localInvocationID) const
	{
		DE_UNREF(numWorkGroups);
		return workGroupID * workGroupSize + localInvocationID;
	}
};

class LocalInvocationIndexCase : public ComputeBuiltinVarCase
{
public:
	LocalInvocationIndexCase (tcu::TestContext& context, bool readByComponent)
		: ComputeBuiltinVarCase(context, "local_invocation_index", "gl_LocalInvocationIndex", glu::TYPE_UINT, readByComponent)
	{
		m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(1, 1, 1)));
		m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(1, 39, 1)));
		m_subCases.push_back(SubCase(UVec3(1, 1, 1), UVec3(4, 7, 11)));
		m_subCases.push_back(SubCase(UVec3(2, 3, 4), UVec3(4, 7, 11)));
		m_subCases.push_back(SubCase(UVec3(10, 3, 4), UVec3(1, 1, 1)));
		m_subCases.push_back(SubCase(UVec3(10, 3, 4), UVec3(3, 1, 2)));
	}

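	// gl_LocalInvocationIndex flattens gl_LocalInvocationID in x-major order:
	// index = z*sizeX*sizeY + y*sizeX + x. The scalar result is packed into the x component.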
	UVec3 computeReference (const UVec3& numWorkGroups, const UVec3& workGroupSize, const UVec3& workGroupID, const UVec3& localInvocationID) const
	{
		DE_UNREF(workGroupID);
		DE_UNREF(numWorkGroups);
		return UVec3(localInvocationID.z()*workGroupSize.x()*workGroupSize.y() + localInvocationID.y()*workGroupSize.x() + localInvocationID.x(), 0, 0);
	}
};

ComputeBuiltinVarInstance::ComputeBuiltinVarInstance (Context&						context,
													  const vector<SubCase>&		subCases,
													  const glu::DataType			varType,
													  const ComputeBuiltinVarCase*	builtinVarCase)
	: vkt::TestInstance		(context)
	, m_device				(m_context.getDevice())
	, m_vki					(m_context.getDeviceInterface())
	, m_queue				(context.getUniversalQueue())
	, m_queueFamilyIndex	(context.getUniversalQueueFamilyIndex())
	, m_subCases			(subCases)
	, m_builtin_var_case	(builtinVarCase)
	, m_subCaseNdx			(0)
	, m_varType				(varType)
{
}

tcu::TestStatus ComputeBuiltinVarInstance::iterate (void)
{
	std::ostringstream program_name;
	program_name << s_prefixProgramName << m_subCaseNdx;

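	// The shader linearizes gl_GlobalInvocationID into a flat output index using
	// stride = (sizeX*sizeY, sizeX); the same stride is used below when reading results back.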
	const SubCase&		subCase				= m_subCases[m_subCaseNdx];
	const tcu::UVec3	globalSize			= subCase.localSize()*subCase.numWorkGroups();
	const tcu::UVec2	stride				(globalSize[0] * globalSize[1], globalSize[0]);
	const deUint32		sizeOfUniformBuffer	= sizeof(stride);
	const int			numScalars			= glu::getDataTypeScalarSize(m_varType);
	const deUint32		numInvocations		= subCase.localSize()[0] * subCase.localSize()[1] * subCase.localSize()[2] * subCase.numWorkGroups()[0] * subCase.numWorkGroups()[1] * subCase.numWorkGroups()[2];

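	// Size in bytes of one result element in the SSBO. With std430, an array of uvec3 has a
	// 16-byte element stride (uvec3 aligns like uvec4), hence sizeof(tcu::UVec4) for both.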
	deUint32 resultBufferStride = 0;
	switch (m_varType)
	{
		case glu::TYPE_UINT:
			resultBufferStride = sizeof(deUint32);
			break;
		case glu::TYPE_UINT_VEC2:
			resultBufferStride = sizeof(tcu::UVec2);
			break;
		case glu::TYPE_UINT_VEC3:
		case glu::TYPE_UINT_VEC4:
			resultBufferStride = sizeof(tcu::UVec4);
			break;
		default:
			DE_FATAL("Illegal data type");
	}

	const deUint32 resultBufferSize = numInvocations * resultBufferStride;

	// Create uniform and result buffers
	Buffer uniformBuffer(m_vki, m_device, m_context.getDefaultAllocator(), makeBufferCreateInfo(sizeOfUniformBuffer, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT), MemoryRequirement::HostVisible);
	Buffer resultBuffer(m_vki, m_device, m_context.getDefaultAllocator(), makeBufferCreateInfo(resultBufferSize, VK_BUFFER_USAGE_STORAGE_BUFFER_BIT), MemoryRequirement::HostVisible);

	// Upload the stride values read by the shader
	{
		const Allocation& alloc = uniformBuffer.getAllocation();
		memcpy(alloc.getHostPtr(), &stride, sizeOfUniformBuffer);
		flushAlloc(m_vki, m_device, alloc);
	}

	// Create descriptor set layout
	const Unique<VkDescriptorSetLayout> descriptorSetLayout(
		DescriptorSetLayoutBuilder()
		.addSingleBinding(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
		.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
		.build(m_vki, m_device));

	const Unique<VkShaderModule>	shaderModule	(createShaderModule(m_vki, m_device, m_context.getBinaryCollection().get(program_name.str()), 0u));
	const Unique<VkPipelineLayout>	pipelineLayout	(makePipelineLayout(m_vki, m_device, *descriptorSetLayout));
	const Unique<VkPipeline>		pipeline		(makeComputePipeline(m_vki, m_device, *pipelineLayout, *shaderModule));

	const Unique<VkDescriptorPool> descriptorPool(
		DescriptorPoolBuilder()
		.addType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER)
		.addType(VK_DESCRIPTOR_TYPE_STORAGE_BUFFER)
		.build(m_vki, m_device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u));

	const VkBufferMemoryBarrier bufferBarrier = makeBufferMemoryBarrier(
		VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT, *resultBuffer, 0ull, resultBufferSize);

	const Unique<VkCommandPool>		cmdPool		(makeCommandPool(m_vki, m_device, m_queueFamilyIndex));
	const Unique<VkCommandBuffer>	cmdBuffer	(allocateCommandBuffer(m_vki, m_device, *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));

	// Start recording commands
	beginCommandBuffer(m_vki, *cmdBuffer);

	m_vki.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);

	// Create descriptor set
	const Unique<VkDescriptorSet> descriptorSet(makeDescriptorSet(m_vki, m_device, *descriptorPool, *descriptorSetLayout));

	const VkDescriptorBufferInfo resultDescriptorInfo	= makeDescriptorBufferInfo(*resultBuffer, 0ull, resultBufferSize);
	const VkDescriptorBufferInfo uniformDescriptorInfo	= makeDescriptorBufferInfo(*uniformBuffer, 0ull, sizeOfUniformBuffer);

	DescriptorSetUpdateBuilder()
		.writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, &uniformDescriptorInfo)
		.writeSingle(*descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, &resultDescriptorInfo)
		.update(m_vki, m_device);

	m_vki.cmdBindDescriptorSets(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipelineLayout, 0u, 1u, &descriptorSet.get(), 0u, DE_NULL);

	// Dispatch compute command
	m_vki.cmdDispatch(*cmdBuffer, subCase.numWorkGroups()[0], subCase.numWorkGroups()[1], subCase.numWorkGroups()[2]);

	// Make shader writes visible to host reads
	m_vki.cmdPipelineBarrier(*cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0,
							 0, (const VkMemoryBarrier*)DE_NULL,
							 1, &bufferBarrier,
							 0, (const VkImageMemoryBarrier*)DE_NULL);

	// End recording commands
	endCommandBuffer(m_vki, *cmdBuffer);

	// Submit the command buffer and wait for execution to finish
	submitCommandsAndWait(m_vki, m_device, m_queue, *cmdBuffer);

	const Allocation& resultAlloc = resultBuffer.getAllocation();
	invalidateAlloc(m_vki, m_device, resultAlloc);

	const deUint8* ptr = reinterpret_cast<deUint8*>(resultAlloc.getHostPtr());

	int			numFailed		= 0;
	const int	maxLogPrints	= 10;

	tcu::TestContext& testCtx = m_context.getTestContext();

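	// Walk over every invocation in the dispatch and compare the value the shader wrote at its
	// linearized offset against the reference computed on the host.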
	for (deUint32 groupZ = 0; groupZ < subCase.numWorkGroups().z(); groupZ++)
	for (deUint32 groupY = 0; groupY < subCase.numWorkGroups().y(); groupY++)
	for (deUint32 groupX = 0; groupX < subCase.numWorkGroups().x(); groupX++)
	for (deUint32 localZ = 0; localZ < subCase.localSize().z(); localZ++)
	for (deUint32 localY = 0; localY < subCase.localSize().y(); localY++)
	for (deUint32 localX = 0; localX < subCase.localSize().x(); localX++)
	{
		const UVec3		refGroupID	(groupX, groupY, groupZ);
		const UVec3		refLocalID	(localX, localY, localZ);
		const UVec3		refGlobalID	= refGroupID * subCase.localSize() + refLocalID;

		const deUint32	refOffset	= stride.x()*refGlobalID.z() + stride.y()*refGlobalID.y() + refGlobalID.x();

		const UVec3		refValue	= m_builtin_var_case->computeReference(subCase.numWorkGroups(), subCase.localSize(), refGroupID, refLocalID);

		const deUint32*	resPtr		= (const deUint32*)(ptr + refOffset * resultBufferStride);
		const UVec3		resValue	= readResultVec(resPtr, numScalars);

		if (!compareNumComponents(refValue, resValue, numScalars))
		{
			if (numFailed < maxLogPrints)
				testCtx.getLog()
					<< TestLog::Message
					<< "ERROR: comparison failed at offset " << refOffset
					<< ": expected " << LogComps(refValue, numScalars)
					<< ", got " << LogComps(resValue, numScalars)
					<< TestLog::EndMessage;
			else if (numFailed == maxLogPrints)
				testCtx.getLog() << TestLog::Message << "..." << TestLog::EndMessage;

			numFailed += 1;
		}
	}

	testCtx.getLog() << TestLog::Message << (numInvocations - numFailed) << " / " << numInvocations << " values passed" << TestLog::EndMessage;

	if (numFailed > 0)
		return tcu::TestStatus::fail("Comparison failed");

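	// Advance to the next sub-case; the test passes only once every sub-case has been verified.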
	m_subCaseNdx += 1;
	return (m_subCaseNdx < (int)m_subCases.size()) ? tcu::TestStatus::incomplete() : tcu::TestStatus::pass("Comparison succeeded");
}

class ComputeShaderBuiltinVarTests : public tcu::TestCaseGroup
{
public:
			ComputeShaderBuiltinVarTests	(tcu::TestContext& context);

	void	init							(void);

private:
	ComputeShaderBuiltinVarTests			(const ComputeShaderBuiltinVarTests& other);
	ComputeShaderBuiltinVarTests&			operator= (const ComputeShaderBuiltinVarTests& other);
};

ComputeShaderBuiltinVarTests::ComputeShaderBuiltinVarTests (tcu::TestContext& context)
	: TestCaseGroup(context, "builtin_var", "Shader builtin var tests")
{
}

void ComputeShaderBuiltinVarTests::init (void)
{
	// Builtin variables with vector values should be read both as a whole vector and by component.
	for (int i = 0; i < 2; i++)
	{
		const bool readByComponent = (i != 0);
		addChild(new NumWorkGroupsCase(this->getTestContext(), readByComponent));
		addChild(new WorkGroupSizeCase(this->getTestContext(), readByComponent));
		addChild(new WorkGroupIDCase(this->getTestContext(), readByComponent));
		addChild(new LocalInvocationIDCase(this->getTestContext(), readByComponent));
		addChild(new GlobalInvocationIDCase(this->getTestContext(), readByComponent));
	}
	// Local invocation index is already just a scalar.
	addChild(new LocalInvocationIndexCase(this->getTestContext(), false));
}

} // anonymous

tcu::TestCaseGroup* createComputeShaderBuiltinVarTests (tcu::TestContext& testCtx)
{
	return new ComputeShaderBuiltinVarTests(testCtx);
}

} // compute
} // vkt