1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
4 *
5 * Copyright (c) 2016 The Khronos Group Inc.
6 * Copyright (c) 2016 The Android Open Source Project
7 *
8 * Licensed under the Apache License, Version 2.0 (the "License");
9 * you may not use this file except in compliance with the License.
10 * You may obtain a copy of the License at
11 *
12 * http://www.apache.org/licenses/LICENSE-2.0
13 *
14 * Unless required by applicable law or agreed to in writing, software
15 * distributed under the License is distributed on an "AS IS" BASIS,
16 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 * See the License for the specific language governing permissions and
18 * limitations under the License.
19 *
20 *//*!
21 * \file
22 * \brief Image load/store Tests
23 *//*--------------------------------------------------------------------*/
24
25 #include "vktImageLoadStoreTests.hpp"
26 #include "vktTestCaseUtil.hpp"
27 #include "vktImageTestsUtil.hpp"
28 #include "vktImageLoadStoreUtil.hpp"
29 #include "vktImageTexture.hpp"
30
31 #include "vkDefs.hpp"
32 #include "vkRef.hpp"
33 #include "vkRefUtil.hpp"
34 #include "vkPlatform.hpp"
35 #include "vkPrograms.hpp"
36 #include "vkMemUtil.hpp"
37 #include "vkBarrierUtil.hpp"
38 #include "vkBuilderUtil.hpp"
39 #include "vkQueryUtil.hpp"
40 #include "vkImageUtil.hpp"
41 #include "vkCmdUtil.hpp"
42
43 #include "deUniquePtr.hpp"
44 #include "deSharedPtr.hpp"
45 #include "deStringUtil.hpp"
46
47 #include "tcuImageCompare.hpp"
48 #include "tcuTexture.hpp"
49 #include "tcuTextureUtil.hpp"
50 #include "tcuFloat.hpp"
51
52 #include <string>
53 #include <vector>
54
55 using namespace vk;
56
57 namespace vkt
58 {
59 namespace image
60 {
61 namespace
62 {
63
makeBufferImageCopy(const Texture & texture)64 inline VkBufferImageCopy makeBufferImageCopy (const Texture& texture)
65 {
66 return image::makeBufferImageCopy(makeExtent3D(texture.layerSize()), texture.numLayers());
67 }
68
//! Return a view of a single layer (or, for 3D images, a single depth slice) of the given access.
tcu::ConstPixelBufferAccess getLayerOrSlice (const Texture& texture, const tcu::ConstPixelBufferAccess access, const int layer)
{
	const ImageType imageType = texture.type();

	// Non-layered image types: the whole access is the single "layer".
	if (imageType == IMAGE_TYPE_1D || imageType == IMAGE_TYPE_2D || imageType == IMAGE_TYPE_BUFFER)
	{
		DE_ASSERT(layer == 0);
		return access;
	}

	// 1D arrays store layers along the Y axis of the access.
	if (imageType == IMAGE_TYPE_1D_ARRAY)
		return tcu::getSubregion(access, 0, layer, access.getWidth(), 1);

	// Layered 2D types and 3D images: layers/slices live along the Z axis.
	// (3d texture is treated as if depth was the layers.)
	if (imageType == IMAGE_TYPE_2D_ARRAY || imageType == IMAGE_TYPE_CUBE || imageType == IMAGE_TYPE_CUBE_ARRAY || imageType == IMAGE_TYPE_3D)
		return tcu::getSubregion(access, 0, 0, layer, access.getWidth(), access.getHeight(), 1);

	DE_FATAL("Internal test error");
	return tcu::ConstPixelBufferAccess();
}
94
95 //! \return true if all layers match in both pixel buffers
comparePixelBuffers(tcu::TestLog & log,const Texture & texture,const VkFormat format,const tcu::ConstPixelBufferAccess reference,const tcu::ConstPixelBufferAccess result)96 bool comparePixelBuffers (tcu::TestLog& log,
97 const Texture& texture,
98 const VkFormat format,
99 const tcu::ConstPixelBufferAccess reference,
100 const tcu::ConstPixelBufferAccess result)
101 {
102 DE_ASSERT(reference.getFormat() == result.getFormat());
103 DE_ASSERT(reference.getSize() == result.getSize());
104
105 const bool intFormat = isIntegerFormat(format);
106 const bool is3d = (texture.type() == IMAGE_TYPE_3D);
107 const int numLayersOrSlices = (is3d ? texture.size().z() : texture.numLayers());
108 const int numCubeFaces = 6;
109
110 int passedLayers = 0;
111 for (int layerNdx = 0; layerNdx < numLayersOrSlices; ++layerNdx)
112 {
113 const std::string comparisonName = "Comparison" + de::toString(layerNdx);
114 const std::string comparisonDesc = "Image Comparison, " +
115 (isCube(texture) ? "face " + de::toString(layerNdx % numCubeFaces) + ", cube " + de::toString(layerNdx / numCubeFaces) :
116 is3d ? "slice " + de::toString(layerNdx) : "layer " + de::toString(layerNdx));
117
118 const tcu::ConstPixelBufferAccess refLayer = getLayerOrSlice(texture, reference, layerNdx);
119 const tcu::ConstPixelBufferAccess resultLayer = getLayerOrSlice(texture, result, layerNdx);
120
121 bool ok = false;
122 if (intFormat)
123 ok = tcu::intThresholdCompare(log, comparisonName.c_str(), comparisonDesc.c_str(), refLayer, resultLayer, tcu::UVec4(0), tcu::COMPARE_LOG_RESULT);
124 else
125 ok = tcu::floatThresholdCompare(log, comparisonName.c_str(), comparisonDesc.c_str(), refLayer, resultLayer, tcu::Vec4(0.01f), tcu::COMPARE_LOG_RESULT);
126
127 if (ok)
128 ++passedLayers;
129 }
130 return passedLayers == numLayersOrSlices;
131 }
132
133 //!< Zero out invalid pixels in the image (denormalized, infinite, NaN values)
replaceBadFloatReinterpretValues(const tcu::PixelBufferAccess access)134 void replaceBadFloatReinterpretValues (const tcu::PixelBufferAccess access)
135 {
136 DE_ASSERT(tcu::getTextureChannelClass(access.getFormat().type) == tcu::TEXTURECHANNELCLASS_FLOATING_POINT);
137
138 for (int z = 0; z < access.getDepth(); ++z)
139 for (int y = 0; y < access.getHeight(); ++y)
140 for (int x = 0; x < access.getWidth(); ++x)
141 {
142 const tcu::Vec4 color(access.getPixel(x, y, z));
143 tcu::Vec4 newColor = color;
144
145 for (int i = 0; i < 4; ++i)
146 {
147 if (access.getFormat().type == tcu::TextureFormat::HALF_FLOAT)
148 {
149 const tcu::Float16 f(color[i]);
150 if (f.isDenorm() || f.isInf() || f.isNaN())
151 newColor[i] = 0.0f;
152 }
153 else
154 {
155 const tcu::Float32 f(color[i]);
156 if (f.isDenorm() || f.isInf() || f.isNaN())
157 newColor[i] = 0.0f;
158 }
159 }
160
161 if (newColor != color)
162 access.setPixel(newColor, x, y, z);
163 }
164 }
165
166 //!< replace invalid pixels in the image (-128)
replaceSnormReinterpretValues(const tcu::PixelBufferAccess access)167 void replaceSnormReinterpretValues (const tcu::PixelBufferAccess access)
168 {
169 DE_ASSERT(tcu::getTextureChannelClass(access.getFormat().type) == tcu::TEXTURECHANNELCLASS_SIGNED_FIXED_POINT);
170
171 for (int z = 0; z < access.getDepth(); ++z)
172 for (int y = 0; y < access.getHeight(); ++y)
173 for (int x = 0; x < access.getWidth(); ++x)
174 {
175 const tcu::IVec4 color(access.getPixelInt(x, y, z));
176 tcu::IVec4 newColor = color;
177
178 for (int i = 0; i < 4; ++i)
179 {
180 const deInt32 oldColor(color[i]);
181 if (oldColor == -128) newColor[i] = -127;
182 }
183
184 if (newColor != color)
185 access.setPixel(newColor, x, y, z);
186 }
187 }
188
//! Generate the reference image: an XOR pattern per texel (scaled/biased for non-integer
//! storage formats), then scrub values that would be invalid under the read format.
tcu::TextureLevel generateReferenceImage (const tcu::IVec3& imageSize, const VkFormat imageFormat, const VkFormat readFormat)
{
	// Generate a reference image data using the storage format

	tcu::TextureLevel				reference	(mapVkFormat(imageFormat), imageSize.x(), imageSize.y(), imageSize.z());
	const tcu::PixelBufferAccess	access		= reference.getAccess();

	const float	colorScale	= computeStoreColorScale(imageFormat, imageSize);
	const float	colorBias	= computeStoreColorBias(imageFormat);
	const bool	storeAsInt	= isIntegerFormat(imageFormat);
	const int	maxX		= imageSize.x() - 1;
	const int	maxY		= imageSize.y() - 1;

	for (int z = 0; z < imageSize.z(); ++z)
	for (int y = 0; y < imageSize.y(); ++y)
	for (int x = 0; x < imageSize.x(); ++x)
	{
		// Same pattern the store shader computes: XOR of (possibly mirrored) coordinates.
		const tcu::IVec4 texel (x^y^z, (maxX - x)^y^z, x^(maxY - y)^z, (maxX - x)^(maxY - y)^z);

		if (storeAsInt)
			access.setPixel(texel, x, y, z);
		else
			access.setPixel(texel.asFloat()*colorScale + colorBias, x, y, z);
	}

	// If the image is to be accessed as a float texture, get rid of invalid values
	const bool reinterpreted = (imageFormat != readFormat);

	if (reinterpreted && isFloatFormat(readFormat))
		replaceBadFloatReinterpretValues(tcu::PixelBufferAccess(mapVkFormat(readFormat), imageSize, access.getDataPtr()));
	if (reinterpreted && isSnormFormat(readFormat))
		replaceSnormReinterpretValues(tcu::PixelBufferAccess(mapVkFormat(readFormat), imageSize, access.getDataPtr()));

	return reference;
}
224
//! Convenience overload: the read format defaults to the storage format itself.
inline tcu::TextureLevel generateReferenceImage (const tcu::IVec3& imageSize, const VkFormat imageFormat)
{
	return generateReferenceImage(imageSize, imageFormat, imageFormat);
}
229
flipHorizontally(const tcu::PixelBufferAccess access)230 void flipHorizontally (const tcu::PixelBufferAccess access)
231 {
232 const int xMax = access.getWidth() - 1;
233 const int halfWidth = access.getWidth() / 2;
234
235 if (isIntegerFormat(mapTextureFormat(access.getFormat())))
236 for (int z = 0; z < access.getDepth(); z++)
237 for (int y = 0; y < access.getHeight(); y++)
238 for (int x = 0; x < halfWidth; x++)
239 {
240 const tcu::UVec4 temp = access.getPixelUint(xMax - x, y, z);
241 access.setPixel(access.getPixelUint(x, y, z), xMax - x, y, z);
242 access.setPixel(temp, x, y, z);
243 }
244 else
245 for (int z = 0; z < access.getDepth(); z++)
246 for (int y = 0; y < access.getHeight(); y++)
247 for (int x = 0; x < halfWidth; x++)
248 {
249 const tcu::Vec4 temp = access.getPixel(xMax - x, y, z);
250 access.setPixel(access.getPixel(x, y, z), xMax - x, y, z);
251 access.setPixel(temp, x, y, z);
252 }
253 }
254
formatsAreCompatible(const VkFormat format0,const VkFormat format1)255 inline bool formatsAreCompatible (const VkFormat format0, const VkFormat format1)
256 {
257 return format0 == format1 || mapVkFormat(format0).getPixelSize() == mapVkFormat(format1).getPixelSize();
258 }
259
commandImageWriteBarrierBetweenShaderInvocations(Context & context,const VkCommandBuffer cmdBuffer,const VkImage image,const Texture & texture)260 void commandImageWriteBarrierBetweenShaderInvocations (Context& context, const VkCommandBuffer cmdBuffer, const VkImage image, const Texture& texture)
261 {
262 const DeviceInterface& vk = context.getDeviceInterface();
263
264 const VkImageSubresourceRange fullImageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, texture.numLayers());
265 const VkImageMemoryBarrier shaderWriteBarrier = makeImageMemoryBarrier(
266 VK_ACCESS_SHADER_WRITE_BIT, 0u,
267 VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_GENERAL,
268 image, fullImageSubresourceRange);
269
270 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &shaderWriteBarrier);
271 }
272
commandBufferWriteBarrierBeforeHostRead(Context & context,const VkCommandBuffer cmdBuffer,const VkBuffer buffer,const VkDeviceSize bufferSizeBytes)273 void commandBufferWriteBarrierBeforeHostRead (Context& context, const VkCommandBuffer cmdBuffer, const VkBuffer buffer, const VkDeviceSize bufferSizeBytes)
274 {
275 const DeviceInterface& vk = context.getDeviceInterface();
276
277 const VkBufferMemoryBarrier shaderWriteBarrier = makeBufferMemoryBarrier(
278 VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT,
279 buffer, 0ull, bufferSizeBytes);
280
281 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &shaderWriteBarrier, 0, (const VkImageMemoryBarrier*)DE_NULL);
282 }
283
284 //! Copy all layers of an image to a buffer.
commandCopyImageToBuffer(Context & context,const VkCommandBuffer cmdBuffer,const VkImage image,const VkBuffer buffer,const VkDeviceSize bufferSizeBytes,const Texture & texture)285 void commandCopyImageToBuffer (Context& context,
286 const VkCommandBuffer cmdBuffer,
287 const VkImage image,
288 const VkBuffer buffer,
289 const VkDeviceSize bufferSizeBytes,
290 const Texture& texture)
291 {
292 const DeviceInterface& vk = context.getDeviceInterface();
293
294 const VkImageSubresourceRange fullImageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, texture.numLayers());
295 const VkImageMemoryBarrier prepareForTransferBarrier = makeImageMemoryBarrier(
296 VK_ACCESS_SHADER_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT,
297 VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
298 image, fullImageSubresourceRange);
299
300 const VkBufferImageCopy copyRegion = makeBufferImageCopy(texture);
301
302 const VkBufferMemoryBarrier copyBarrier = makeBufferMemoryBarrier(
303 VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT,
304 buffer, 0ull, bufferSizeBytes);
305
306 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &prepareForTransferBarrier);
307 vk.cmdCopyImageToBuffer(cmdBuffer, image, VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, buffer, 1u, ©Region);
308 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_HOST_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, ©Barrier, 0, (const VkImageMemoryBarrier*)DE_NULL);
309 }
310
//! Test case: a compute shader stores a computed pattern into a storage image
//! (or texel buffer), and the result is read back and compared to a reference.
class StoreTest : public TestCase
{
public:
	enum TestFlags
	{
		FLAG_SINGLE_LAYER_BIND = 0x1, //!< Run the shader multiple times, each time binding a different layer.
		FLAG_DECLARE_IMAGE_FORMAT_IN_SHADER = 0x2, //!< Declare the format of the images in the shader code
	};

	//! \param flags combination of TestFlags; by default the image format is declared in the shader.
	StoreTest (tcu::TestContext& testCtx,
			   const std::string& name,
			   const std::string& description,
			   const Texture& texture,
			   const VkFormat format,
			   const deUint32 flags = FLAG_DECLARE_IMAGE_FORMAT_IN_SHADER);

	//! Generates two shader variants: "comp" (format declared) and "comp_fmt_unknown".
	void initPrograms (SourceCollections& programCollection) const;

	TestInstance* createInstance (Context& context) const;

private:
	const Texture m_texture;
	const VkFormat m_format;
	const bool m_declareImageFormatInShader;	//!< Derived from FLAG_DECLARE_IMAGE_FORMAT_IN_SHADER.
	const bool m_singleLayerBind;				//!< Derived from FLAG_SINGLE_LAYER_BIND.
};
337
//! Unpacks the flags bitmask into the two boolean knobs used by the instances.
StoreTest::StoreTest (tcu::TestContext& testCtx,
					  const std::string& name,
					  const std::string& description,
					  const Texture& texture,
					  const VkFormat format,
					  const deUint32 flags)
	: TestCase (testCtx, name, description)
	, m_texture (texture)
	, m_format (format)
	, m_declareImageFormatInShader ((flags & FLAG_DECLARE_IMAGE_FORMAT_IN_SHADER) != 0)
	, m_singleLayerBind ((flags & FLAG_SINGLE_LAYER_BIND) != 0)
{
	// Binding one layer at a time only makes sense for layered textures.
	if (m_singleLayerBind)
		DE_ASSERT(m_texture.numLayers() > 1);
}
353
//! Build the compute shader sources. Two variants are emitted: "comp" declares the
//! image format in the layout qualifier; "comp_fmt_unknown" omits it (usable only when
//! the shaderStorageImageWriteWithoutFormat feature is available at run time).
void StoreTest::initPrograms (SourceCollections& programCollection) const
{
	// Scale/bias map the integer XOR pattern into the store format's representable range.
	const float storeColorScale = computeStoreColorScale(m_format, m_texture.size());
	const float storeColorBias = computeStoreColorBias(m_format);
	DE_ASSERT(colorScaleAndBiasAreValid(m_format, storeColorScale, storeColorBias));

	const std::string xMax = de::toString(m_texture.size().x() - 1);
	const std::string yMax = de::toString(m_texture.size().y() - 1);
	const std::string signednessPrefix = isUintFormat(m_format) ? "u" : isIntFormat(m_format) ? "i" : "";
	// Per-channel XOR of (possibly mirrored) invocation coordinates — must match
	// the pattern produced by generateReferenceImage().
	const std::string colorBaseExpr = signednessPrefix + "vec4("
		+ "gx^gy^gz, "
		+ "(" + xMax + "-gx)^gy^gz, "
		+ "gx^(" + yMax + "-gy)^gz, "
		+ "(" + xMax + "-gx)^(" + yMax + "-gy)^gz)";

	const std::string colorExpr = colorBaseExpr + (storeColorScale == 1.0f ? "" : "*" + de::toString(storeColorScale))
		+ (storeColorBias == 0.0f ? "" : " + float(" + de::toString(storeColorBias) + ")");

	// Single-layer binding uses a non-arrayed view, so the coordinate dimensionality drops.
	const int dimension = (m_singleLayerBind ? m_texture.layerDimension() : m_texture.dimension());
	const std::string texelCoordStr = (dimension == 1 ? "gx" : dimension == 2 ? "ivec2(gx, gy)" : dimension == 3 ? "ivec3(gx, gy, gz)" : "");

	const ImageType usedImageType = (m_singleLayerBind ? getImageTypeForSingleLayer(m_texture.type()) : m_texture.type());
	const std::string formatQualifierStr = getShaderImageFormatQualifier(mapVkFormat(m_format));
	const std::string imageTypeStr = getShaderImageType(mapVkFormat(m_format), usedImageType);

	// variant 0: format declared in the layout qualifier; variant 1: format unknown.
	for (deUint32 variant = 0; variant <= 1; variant++)
	{
		std::ostringstream src;
		src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
			<< "\n"
			<< "layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n";
		if (variant == 0)
			src << "layout (binding = 0, " << formatQualifierStr << ") writeonly uniform " << imageTypeStr << " u_image;\n";
		else
			src << "layout (binding = 0) writeonly uniform " << imageTypeStr << " u_image;\n";

		// In single-layer mode the layer index arrives via a UBO chunk bound at binding 1.
		if (m_singleLayerBind)
			src << "layout (binding = 1) readonly uniform Constants {\n"
				<< " int u_layerNdx;\n"
				<< "};\n";

		src << "\n"
			<< "void main (void)\n"
			<< "{\n"
			<< " int gx = int(gl_GlobalInvocationID.x);\n"
			<< " int gy = int(gl_GlobalInvocationID.y);\n"
			<< " int gz = " << (m_singleLayerBind ? "u_layerNdx" : "int(gl_GlobalInvocationID.z)") << ";\n"
			<< " imageStore(u_image, " << texelCoordStr << ", " << colorExpr << ");\n"
			<< "}\n";

		programCollection.glslSources.add(variant == 0 ? "comp" : "comp_fmt_unknown") << glu::ComputeSource(src.str());
	}
}
407
408 //! Generic test iteration algorithm for image tests
409 class BaseTestInstance : public TestInstance
410 {
411 public:
412 BaseTestInstance (Context& context,
413 const Texture& texture,
414 const VkFormat format,
415 const bool declareImageFormatInShader,
416 const bool singleLayerBind);
417
418 tcu::TestStatus iterate (void);
419
~BaseTestInstance(void)420 virtual ~BaseTestInstance (void) {}
421
422 protected:
423 virtual VkDescriptorSetLayout prepareDescriptors (void) = 0;
424 virtual tcu::TestStatus verifyResult (void) = 0;
425
426 virtual void commandBeforeCompute (const VkCommandBuffer cmdBuffer) = 0;
427 virtual void commandBetweenShaderInvocations (const VkCommandBuffer cmdBuffer) = 0;
428 virtual void commandAfterCompute (const VkCommandBuffer cmdBuffer) = 0;
429
430 virtual void commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer,
431 const VkPipelineLayout pipelineLayout,
432 const int layerNdx) = 0;
checkRequirements(void)433 virtual void checkRequirements (void) {};
434
435 const Texture m_texture;
436 const VkFormat m_format;
437 const bool m_declareImageFormatInShader;
438 const bool m_singleLayerBind;
439 };
440
//! See StoreTest::TestFlags for the meaning of the two boolean parameters.
BaseTestInstance::BaseTestInstance (Context& context, const Texture& texture, const VkFormat format, const bool declareImageFormatInShader, const bool singleLayerBind)
	: TestInstance (context)
	, m_texture (texture)
	, m_format (format)
	, m_declareImageFormatInShader (declareImageFormatInShader)
	, m_singleLayerBind (singleLayerBind)
{
}
449
iterate(void)450 tcu::TestStatus BaseTestInstance::iterate (void)
451 {
452 checkRequirements();
453
454 const DeviceInterface& vk = m_context.getDeviceInterface();
455 const VkDevice device = m_context.getDevice();
456 const VkQueue queue = m_context.getUniversalQueue();
457 const deUint32 queueFamilyIndex = m_context.getUniversalQueueFamilyIndex();
458
459 const Unique<VkShaderModule> shaderModule(createShaderModule(vk, device, m_context.getBinaryCollection().get(m_declareImageFormatInShader ? "comp" : "comp_fmt_unknown"), 0));
460
461 const VkDescriptorSetLayout descriptorSetLayout = prepareDescriptors();
462 const Unique<VkPipelineLayout> pipelineLayout(makePipelineLayout(vk, device, descriptorSetLayout));
463 const Unique<VkPipeline> pipeline(makeComputePipeline(vk, device, *pipelineLayout, *shaderModule));
464
465 const Unique<VkCommandPool> cmdPool(createCommandPool(vk, device, VK_COMMAND_POOL_CREATE_TRANSIENT_BIT, queueFamilyIndex));
466 const Unique<VkCommandBuffer> cmdBuffer(allocateCommandBuffer(vk, device, *cmdPool, VK_COMMAND_BUFFER_LEVEL_PRIMARY));
467
468 beginCommandBuffer(vk, *cmdBuffer);
469
470 vk.cmdBindPipeline(*cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, *pipeline);
471 commandBeforeCompute(*cmdBuffer);
472
473 const tcu::IVec3 workSize = (m_singleLayerBind ? m_texture.layerSize() : m_texture.size());
474 const int loopNumLayers = (m_singleLayerBind ? m_texture.numLayers() : 1);
475 for (int layerNdx = 0; layerNdx < loopNumLayers; ++layerNdx)
476 {
477 commandBindDescriptorsForLayer(*cmdBuffer, *pipelineLayout, layerNdx);
478
479 if (layerNdx > 0)
480 commandBetweenShaderInvocations(*cmdBuffer);
481
482 vk.cmdDispatch(*cmdBuffer, workSize.x(), workSize.y(), workSize.z());
483 }
484
485 commandAfterCompute(*cmdBuffer);
486
487 endCommandBuffer(vk, *cmdBuffer);
488
489 submitCommandsAndWait(vk, device, queue, *cmdBuffer);
490
491 return verifyResult();
492 }
493
//! Base store test implementation
class StoreTestInstance : public BaseTestInstance
{
public:
	StoreTestInstance (Context& context,
					   const Texture& texture,
					   const VkFormat format,
					   const bool declareImageFormatInShader,
					   const bool singleLayerBind);

protected:
	//! Compare the host-visible result buffer against a generated reference image.
	tcu::TestStatus verifyResult (void);

	// Add empty implementations for functions that might be not needed
	void commandBeforeCompute (const VkCommandBuffer) {}
	void commandBetweenShaderInvocations (const VkCommandBuffer) {}
	void commandAfterCompute (const VkCommandBuffer) {}
	//! Throws NotSupportedError when required device features are missing.
	void checkRequirements (void);

	de::MovePtr<Buffer> m_imageBuffer;		//!< Host-visible buffer large enough for the whole image.
	const VkDeviceSize m_imageSizeBytes;
};
516
//! Allocates the shared host-visible buffer that derived instances use either as the
//! storage texel buffer itself or as the copy-out destination for image results.
StoreTestInstance::StoreTestInstance (Context& context, const Texture& texture, const VkFormat format, const bool declareImageFormatInShader, const bool singleLayerBind)
	: BaseTestInstance (context, texture, format, declareImageFormatInShader, singleLayerBind)
	, m_imageSizeBytes (getImageSizeBytes(texture.size(), format))
{
	const DeviceInterface& vk = m_context.getDeviceInterface();
	const VkDevice device = m_context.getDevice();
	Allocator& allocator = m_context.getDefaultAllocator();

	// A helper buffer with enough space to hold the whole image. Usage flags accommodate all derived test instances.

	m_imageBuffer = de::MovePtr<Buffer>(new Buffer(
		vk, device, allocator,
		makeBufferCreateInfo(m_imageSizeBytes, VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT),
		MemoryRequirement::HostVisible));
}
532
verifyResult(void)533 tcu::TestStatus StoreTestInstance::verifyResult (void)
534 {
535 const DeviceInterface& vk = m_context.getDeviceInterface();
536 const VkDevice device = m_context.getDevice();
537
538 const tcu::IVec3 imageSize = m_texture.size();
539 const tcu::TextureLevel reference = generateReferenceImage(imageSize, m_format);
540
541 const Allocation& alloc = m_imageBuffer->getAllocation();
542 invalidateAlloc(vk, device, alloc);
543 const tcu::ConstPixelBufferAccess result(mapVkFormat(m_format), imageSize, alloc.getHostPtr());
544
545 if (comparePixelBuffers(m_context.getTestContext().getLog(), m_texture, m_format, reference.getAccess(), result))
546 return tcu::TestStatus::pass("Passed");
547 else
548 return tcu::TestStatus::fail("Image comparison failed");
549 }
550
checkRequirements(void)551 void StoreTestInstance::checkRequirements (void)
552 {
553 const VkPhysicalDeviceFeatures features = m_context.getDeviceFeatures();
554
555 if (!m_declareImageFormatInShader && !features.shaderStorageImageWriteWithoutFormat)
556 throw tcu::NotSupportedError("shaderStorageImageWriteWithoutFormat feature not supported");
557
558 if (m_texture.type() == IMAGE_TYPE_CUBE_ARRAY && !features.imageCubeArray)
559 TCU_THROW(NotSupportedError, "imageCubeArray feature not supported");
560 }
561
//! Store test for images
class ImageStoreTestInstance : public StoreTestInstance
{
public:
	ImageStoreTestInstance (Context& context,
							const Texture& texture,
							const VkFormat format,
							const bool declareImageFormatInShader,
							const bool singleLayerBind);

protected:
	VkDescriptorSetLayout prepareDescriptors (void);
	void commandBeforeCompute (const VkCommandBuffer cmdBuffer);
	void commandBetweenShaderInvocations (const VkCommandBuffer cmdBuffer);
	void commandAfterCompute (const VkCommandBuffer cmdBuffer);

	void commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer,
										 const VkPipelineLayout pipelineLayout,
										 const int layerNdx);

	de::MovePtr<Image> m_image;						//!< Storage image written by the compute shader.
	de::MovePtr<Buffer> m_constantsBuffer;			//!< UBO holding one layer index per chunk (single-layer mode).
	const VkDeviceSize m_constantsBufferChunkSizeBytes;
	Move<VkDescriptorSetLayout> m_descriptorSetLayout;
	Move<VkDescriptorPool> m_descriptorPool;
	// One entry per layer in single-layer-bind mode; otherwise only index 0 is populated.
	std::vector<SharedVkDescriptorSet> m_allDescriptorSets;
	std::vector<SharedVkImageView> m_allImageViews;
};
590
//! Creates the storage image and the per-layer constants buffer (layer indices).
ImageStoreTestInstance::ImageStoreTestInstance (Context& context,
												const Texture& texture,
												const VkFormat format,
												const bool declareImageFormatInShader,
												const bool singleLayerBind)
	: StoreTestInstance (context, texture, format, declareImageFormatInShader, singleLayerBind)
	// Chunk size presumably padded for UBO offset alignment — see getOptimalUniformBufferChunkSize.
	, m_constantsBufferChunkSizeBytes (getOptimalUniformBufferChunkSize(context.getInstanceInterface(), context.getPhysicalDevice(), sizeof(deUint32)))
	, m_allDescriptorSets (texture.numLayers())
	, m_allImageViews (texture.numLayers())
{
	const DeviceInterface& vk = m_context.getDeviceInterface();
	const VkDevice device = m_context.getDevice();
	Allocator& allocator = m_context.getDefaultAllocator();

	// STORAGE for shader writes; TRANSFER_SRC so results can be copied out for verification.
	m_image = de::MovePtr<Image>(new Image(
		vk, device, allocator,
		makeImageCreateInfo(m_texture, m_format, VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT, 0u),
		MemoryRequirement::Any));

	// This buffer will be used to pass constants to the shader

	const int numLayers = m_texture.numLayers();
	const VkDeviceSize constantsBufferSizeBytes = numLayers * m_constantsBufferChunkSizeBytes;
	m_constantsBuffer = de::MovePtr<Buffer>(new Buffer(
		vk, device, allocator,
		makeBufferCreateInfo(constantsBufferSizeBytes, VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT),
		MemoryRequirement::HostVisible));

	{
		const Allocation& alloc = m_constantsBuffer->getAllocation();
		deUint8* const basePtr = static_cast<deUint8*>(alloc.getHostPtr());

		deMemset(alloc.getHostPtr(), 0, static_cast<size_t>(constantsBufferSizeBytes));

		// Each chunk starts with the layer index it corresponds to.
		for (int layerNdx = 0; layerNdx < numLayers; ++layerNdx)
		{
			deUint32* valuePtr = reinterpret_cast<deUint32*>(basePtr + layerNdx * m_constantsBufferChunkSizeBytes);
			*valuePtr = static_cast<deUint32>(layerNdx);
		}

		flushAlloc(vk, device, alloc);
	}
}
634
//! Build layout/pool and allocate descriptor sets plus image views: one per layer
//! in single-layer-bind mode, otherwise a single set/view covering all layers.
VkDescriptorSetLayout ImageStoreTestInstance::prepareDescriptors (void)
{
	const DeviceInterface& vk = m_context.getDeviceInterface();
	const VkDevice device = m_context.getDevice();

	const int numLayers = m_texture.numLayers();
	// Binding 0: storage image; binding 1: constants UBO (layer index).
	m_descriptorSetLayout = DescriptorSetLayoutBuilder()
		.addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
		.addSingleBinding(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
		.build(vk, device);

	// Pool is sized for the worst case (one set per layer).
	m_descriptorPool = DescriptorPoolBuilder()
		.addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, numLayers)
		.addType(VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, numLayers)
		.build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, numLayers);

	if (m_singleLayerBind)
	{
		// One non-arrayed view and one set per layer.
		for (int layerNdx = 0; layerNdx < numLayers; ++layerNdx)
		{
			m_allDescriptorSets[layerNdx] = makeVkSharedPtr(makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout));
			m_allImageViews[layerNdx] = makeVkSharedPtr(makeImageView(
				vk, device, m_image->get(), mapImageViewType(getImageTypeForSingleLayer(m_texture.type())), m_format,
				makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, layerNdx, 1u)));
		}
	}
	else // bind all layers at once
	{
		m_allDescriptorSets[0] = makeVkSharedPtr(makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout));
		m_allImageViews[0] = makeVkSharedPtr(makeImageView(
			vk, device, m_image->get(), mapImageViewType(m_texture.type()), m_format,
			makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, numLayers)));
	}

	return *m_descriptorSetLayout; // not passing the ownership
}
671
commandBindDescriptorsForLayer(const VkCommandBuffer cmdBuffer,const VkPipelineLayout pipelineLayout,const int layerNdx)672 void ImageStoreTestInstance::commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer, const VkPipelineLayout pipelineLayout, const int layerNdx)
673 {
674 const DeviceInterface& vk = m_context.getDeviceInterface();
675 const VkDevice device = m_context.getDevice();
676
677 const VkDescriptorSet descriptorSet = **m_allDescriptorSets[layerNdx];
678 const VkImageView imageView = **m_allImageViews[layerNdx];
679
680 const VkDescriptorImageInfo descriptorImageInfo = makeDescriptorImageInfo(DE_NULL, imageView, VK_IMAGE_LAYOUT_GENERAL);
681
682 // Set the next chunk of the constants buffer. Each chunk begins with layer index that we've set before.
683 const VkDescriptorBufferInfo descriptorConstantsBufferInfo = makeDescriptorBufferInfo(
684 m_constantsBuffer->get(), layerNdx*m_constantsBufferChunkSizeBytes, m_constantsBufferChunkSizeBytes);
685
686 DescriptorSetUpdateBuilder()
687 .writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descriptorImageInfo)
688 .writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, &descriptorConstantsBufferInfo)
689 .update(vk, device);
690 vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipelineLayout, 0u, 1u, &descriptorSet, 0u, DE_NULL);
691 }
692
commandBeforeCompute(const VkCommandBuffer cmdBuffer)693 void ImageStoreTestInstance::commandBeforeCompute (const VkCommandBuffer cmdBuffer)
694 {
695 const DeviceInterface& vk = m_context.getDeviceInterface();
696
697 const VkImageSubresourceRange fullImageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, m_texture.numLayers());
698 const VkImageMemoryBarrier setImageLayoutBarrier = makeImageMemoryBarrier(
699 0u, 0u,
700 VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL,
701 m_image->get(), fullImageSubresourceRange);
702
703 const VkDeviceSize constantsBufferSize = m_texture.numLayers() * m_constantsBufferChunkSizeBytes;
704 const VkBufferMemoryBarrier writeConstantsBarrier = makeBufferMemoryBarrier(
705 VK_ACCESS_HOST_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT,
706 m_constantsBuffer->get(), 0ull, constantsBufferSize);
707
708 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &writeConstantsBarrier, 1, &setImageLayoutBarrier);
709 }
710
//! Order image writes between consecutive per-layer dispatches.
void ImageStoreTestInstance::commandBetweenShaderInvocations (const VkCommandBuffer cmdBuffer)
{
	commandImageWriteBarrierBetweenShaderInvocations(m_context, cmdBuffer, m_image->get(), m_texture);
}
715
//! Copy the shader-written image into the host-visible result buffer for verification.
void ImageStoreTestInstance::commandAfterCompute (const VkCommandBuffer cmdBuffer)
{
	commandCopyImageToBuffer(m_context, cmdBuffer, m_image->get(), m_imageBuffer->get(), m_imageSizeBytes, m_texture);
}
720
//! Store test for buffers. The shader stores through a storage texel buffer
//! view instead of a storage image, so there is exactly one "layer".
class BufferStoreTestInstance : public StoreTestInstance
{
public:
									BufferStoreTestInstance			(Context&				context,
																	 const Texture&			texture,
																	 const VkFormat			format,
																	 const bool				declareImageFormatInShader);

protected:
	//! Creates the descriptor set layout/pool/set and the texel buffer view over the result buffer.
	VkDescriptorSetLayout			prepareDescriptors				(void);
	//! Inserts a barrier making shader writes to the buffer visible to subsequent host reads.
	void							commandAfterCompute				(const VkCommandBuffer	cmdBuffer);

	//! Binds the single descriptor set; layerNdx must be 0 (buffers are never layered).
	void							commandBindDescriptorsForLayer	(const VkCommandBuffer	cmdBuffer,
																	 const VkPipelineLayout pipelineLayout,
																	 const int				layerNdx);

	Move<VkDescriptorSetLayout>		m_descriptorSetLayout;
	Move<VkDescriptorPool>			m_descriptorPool;
	Move<VkDescriptorSet>			m_descriptorSet;
	Move<VkBufferView>				m_bufferView;			//!< Texel buffer view over the result buffer
};
743
BufferStoreTestInstance::BufferStoreTestInstance (Context&			context,
												  const Texture&	texture,
												  const VkFormat	format,
												  const bool		declareImageFormatInShader)
	// Buffers have no layers, so single-layer bind is always disabled (false).
	: StoreTestInstance(context, texture, format, declareImageFormatInShader, false)
{
}
751
prepareDescriptors(void)752 VkDescriptorSetLayout BufferStoreTestInstance::prepareDescriptors (void)
753 {
754 const DeviceInterface& vk = m_context.getDeviceInterface();
755 const VkDevice device = m_context.getDevice();
756
757 m_descriptorSetLayout = DescriptorSetLayoutBuilder()
758 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
759 .build(vk, device);
760
761 m_descriptorPool = DescriptorPoolBuilder()
762 .addType(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER)
763 .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
764
765 m_descriptorSet = makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout);
766 m_bufferView = makeBufferView(vk, device, m_imageBuffer->get(), m_format, 0ull, m_imageSizeBytes);
767
768 return *m_descriptorSetLayout; // not passing the ownership
769 }
770
commandBindDescriptorsForLayer(const VkCommandBuffer cmdBuffer,const VkPipelineLayout pipelineLayout,const int layerNdx)771 void BufferStoreTestInstance::commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer, const VkPipelineLayout pipelineLayout, const int layerNdx)
772 {
773 DE_ASSERT(layerNdx == 0);
774 DE_UNREF(layerNdx);
775
776 const VkDevice device = m_context.getDevice();
777 const DeviceInterface& vk = m_context.getDeviceInterface();
778
779 DescriptorSetUpdateBuilder()
780 .writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, &m_bufferView.get())
781 .update(vk, device);
782 vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipelineLayout, 0u, 1u, &m_descriptorSet.get(), 0u, DE_NULL);
783 }
784
commandAfterCompute(const VkCommandBuffer cmdBuffer)785 void BufferStoreTestInstance::commandAfterCompute (const VkCommandBuffer cmdBuffer)
786 {
787 commandBufferWriteBarrierBeforeHostRead(m_context, cmdBuffer, m_imageBuffer->get(), m_imageSizeBytes);
788 }
789
//! Test case that generates a compute shader performing imageLoad() from one
//! image followed by imageStore() into another, optionally reinterpreting
//! the storage format through the image view.
class LoadStoreTest : public TestCase
{
public:
	enum TestFlags
	{
		FLAG_SINGLE_LAYER_BIND				= 1 << 0,	//!< Run the shader multiple times, each time binding a different layer.
		FLAG_RESTRICT_IMAGES				= 1 << 1,	//!< If given, images in the shader will be qualified with "restrict".
		FLAG_DECLARE_IMAGE_FORMAT_IN_SHADER	= 1 << 2,	//!< Declare the format of the images in the shader code
	};

							LoadStoreTest		(tcu::TestContext&		testCtx,
												 const std::string&		name,
												 const std::string&		description,
												 const Texture&			texture,
												 const VkFormat			format,
												 const VkFormat			imageFormat,
												 const deUint32			flags = FLAG_DECLARE_IMAGE_FORMAT_IN_SHADER);

	//! Generates the "comp" and "comp_fmt_unknown" compute shader variants.
	void					initPrograms		(SourceCollections&		programCollection) const;
	//! Picks a buffer- or image-based instance depending on the texture type.
	TestInstance*			createInstance		(Context&				context) const;

private:
	const Texture			m_texture;
	const VkFormat			m_format;						//!< Format as accessed in the shader
	const VkFormat			m_imageFormat;					//!< Storage format
	const bool				m_declareImageFormatInShader;	//!< Whether the shader will specify the format layout qualifier of the images
	const bool				m_singleLayerBind;
	const bool				m_restrictImages;
};
819
LoadStoreTest::LoadStoreTest (tcu::TestContext&		testCtx,
							  const std::string&	name,
							  const std::string&	description,
							  const Texture&		texture,
							  const VkFormat		format,
							  const VkFormat		imageFormat,
							  const deUint32		flags)
	: TestCase						(testCtx, name, description)
	, m_texture						(texture)
	, m_format						(format)
	, m_imageFormat					(imageFormat)
	, m_declareImageFormatInShader	((flags & FLAG_DECLARE_IMAGE_FORMAT_IN_SHADER) != 0)
	, m_singleLayerBind				((flags & FLAG_SINGLE_LAYER_BIND) != 0)
	, m_restrictImages				((flags & FLAG_RESTRICT_IMAGES) != 0)
{
	// Binding one layer at a time only makes sense for layered textures.
	if (m_singleLayerBind)
		DE_ASSERT(m_texture.numLayers() > 1);

	// Reinterpretation between shader and storage format requires compatible formats.
	DE_ASSERT(formatsAreCompatible(m_format, m_imageFormat));
}
840
void LoadStoreTest::initPrograms (SourceCollections& programCollection) const
{
	// When binding a single layer, the shader addresses the image with one
	// dimension less (e.g. a 2D array layer is accessed as a plain 2D image).
	const int dimension = (m_singleLayerBind ? m_texture.layerDimension() : m_texture.dimension());
	const ImageType usedImageType = (m_singleLayerBind ? getImageTypeForSingleLayer(m_texture.type()) : m_texture.type());
	const std::string formatQualifierStr = getShaderImageFormatQualifier(mapVkFormat(m_format));
	const std::string imageTypeStr = getShaderImageType(mapVkFormat(m_format), usedImageType);
	const std::string maybeRestrictStr = (m_restrictImages ? "restrict " : "");
	const std::string xMax = de::toString(m_texture.size().x() - 1);	// last valid x coordinate, used for the horizontal flip

	// Two variants are generated:
	//   variant 0 ("comp"):             read image declared with an explicit format qualifier
	//   variant 1 ("comp_fmt_unknown"): read image without a format qualifier;
	//                                   requires GL_EXT_shader_image_load_formatted
	for (deUint32 variant = 0; variant <= 1; variant++)
	{
		std::ostringstream src;
		src << glu::getGLSLVersionDeclaration(glu::GLSL_VERSION_440) << "\n"
			<< "\n";
		if (variant != 0)
		{
			src << "#extension GL_EXT_shader_image_load_formatted : require\n";
		}
		src << "layout (local_size_x = 1, local_size_y = 1, local_size_z = 1) in;\n";
		if (variant == 0)
			src << "layout (binding = 0, " << formatQualifierStr << ") " << maybeRestrictStr << "readonly uniform " << imageTypeStr << " u_image0;\n";
		else
			src << "layout (binding = 0) " << maybeRestrictStr << "readonly uniform " << imageTypeStr << " u_image0;\n";
		src << "layout (binding = 1, " << formatQualifierStr << ") " << maybeRestrictStr << "writeonly uniform " << imageTypeStr << " u_image1;\n"
			<< "\n"
			<< "void main (void)\n"
			<< "{\n"
			// The shader copies u_image0 into u_image1 mirrored along the x axis.
			<< (dimension == 1 ?
				" int pos = int(gl_GlobalInvocationID.x);\n"
				" imageStore(u_image1, pos, imageLoad(u_image0, " + xMax + "-pos));\n"
				: dimension == 2 ?
				" ivec2 pos = ivec2(gl_GlobalInvocationID.xy);\n"
				" imageStore(u_image1, pos, imageLoad(u_image0, ivec2(" + xMax + "-pos.x, pos.y)));\n"
				: dimension == 3 ?
				" ivec3 pos = ivec3(gl_GlobalInvocationID);\n"
				" imageStore(u_image1, pos, imageLoad(u_image0, ivec3(" + xMax + "-pos.x, pos.y, pos.z)));\n"
				: "")
			<< "}\n";

		programCollection.glslSources.add(variant == 0 ? "comp" : "comp_fmt_unknown") << glu::ComputeSource(src.str());
	}
}
883
//! Load/store test base implementation. Owns the host-visible helper buffer
//! holding the reference data and verifies the shader output against it.
class LoadStoreTestInstance : public BaseTestInstance
{
public:
									LoadStoreTestInstance				(Context&			context,
																		 const Texture&		texture,
																		 const VkFormat		format,
																		 const VkFormat		imageFormat,
																		 const bool			declareImageFormatInShader,
																		 const bool			singleLayerBind);

protected:
	virtual Buffer*					getResultBuffer						(void) const = 0;	//!< Get the buffer that contains the result image

	//! Compares the result buffer against the horizontally flipped reference image.
	tcu::TestStatus					verifyResult						(void);

	// Add empty implementations for functions that might be not needed
	void							commandBeforeCompute				(const VkCommandBuffer) {}
	void							commandBetweenShaderInvocations		(const VkCommandBuffer) {}
	void							commandAfterCompute					(const VkCommandBuffer) {}
	//! Throws NotSupportedError when required device features are missing.
	void							checkRequirements					(void);

	de::MovePtr<Buffer>				m_imageBuffer;		//!< Source data and helper buffer
	const VkDeviceSize				m_imageSizeBytes;
	const VkFormat					m_imageFormat;		//!< Image format (for storage, may be different than texture format)
	tcu::TextureLevel				m_referenceImage;	//!< Used as input data and later to verify result image
};
911
LoadStoreTestInstance(Context & context,const Texture & texture,const VkFormat format,const VkFormat imageFormat,const bool declareImageFormatInShader,const bool singleLayerBind)912 LoadStoreTestInstance::LoadStoreTestInstance (Context& context,
913 const Texture& texture,
914 const VkFormat format,
915 const VkFormat imageFormat,
916 const bool declareImageFormatInShader,
917 const bool singleLayerBind)
918 : BaseTestInstance (context, texture, format, declareImageFormatInShader, singleLayerBind)
919 , m_imageSizeBytes (getImageSizeBytes(texture.size(), format))
920 , m_imageFormat (imageFormat)
921 , m_referenceImage (generateReferenceImage(texture.size(), imageFormat, format))
922 {
923 const DeviceInterface& vk = m_context.getDeviceInterface();
924 const VkDevice device = m_context.getDevice();
925 Allocator& allocator = m_context.getDefaultAllocator();
926
927 // A helper buffer with enough space to hold the whole image.
928
929 m_imageBuffer = de::MovePtr<Buffer>(new Buffer(
930 vk, device, allocator,
931 makeBufferCreateInfo(m_imageSizeBytes, VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_TRANSFER_DST_BIT | VK_BUFFER_USAGE_TRANSFER_SRC_BIT),
932 MemoryRequirement::HostVisible));
933
934 // Copy reference data to buffer for subsequent upload to image.
935
936 const Allocation& alloc = m_imageBuffer->getAllocation();
937 deMemcpy(alloc.getHostPtr(), m_referenceImage.getAccess().getDataPtr(), static_cast<size_t>(m_imageSizeBytes));
938 flushAlloc(vk, device, alloc);
939 }
940
verifyResult(void)941 tcu::TestStatus LoadStoreTestInstance::verifyResult (void)
942 {
943 const DeviceInterface& vk = m_context.getDeviceInterface();
944 const VkDevice device = m_context.getDevice();
945
946 // Apply the same transformation as done in the shader
947 const tcu::PixelBufferAccess reference = m_referenceImage.getAccess();
948 flipHorizontally(reference);
949
950 const Allocation& alloc = getResultBuffer()->getAllocation();
951 invalidateAlloc(vk, device, alloc);
952 const tcu::ConstPixelBufferAccess result(mapVkFormat(m_imageFormat), m_texture.size(), alloc.getHostPtr());
953
954 if (comparePixelBuffers(m_context.getTestContext().getLog(), m_texture, m_imageFormat, reference, result))
955 return tcu::TestStatus::pass("Passed");
956 else
957 return tcu::TestStatus::fail("Image comparison failed");
958 }
959
checkRequirements(void)960 void LoadStoreTestInstance::checkRequirements (void)
961 {
962 const VkPhysicalDeviceFeatures features = m_context.getDeviceFeatures();
963
964 if (!m_declareImageFormatInShader && !features.shaderStorageImageReadWithoutFormat)
965 throw tcu::NotSupportedError("shaderStorageImageReadWithoutFormat feature not supported");
966
967 if (m_texture.type() == IMAGE_TYPE_CUBE_ARRAY && !features.imageCubeArray)
968 TCU_THROW(NotSupportedError, "imageCubeArray feature not supported");
969 }
970
971
//! Load/store test for images. Uses two storage images: the shader reads from
//! the source image and writes the mirrored result into the destination image.
class ImageLoadStoreTestInstance : public LoadStoreTestInstance
{
public:
										ImageLoadStoreTestInstance		(Context&				context,
																		 const Texture&			texture,
																		 const VkFormat			format,
																		 const VkFormat			imageFormat,
																		 const bool				declareImageFormatInShader,
																		 const bool				singleLayerBind);

protected:
	//! Creates layout/pool and per-layer (or whole-image) descriptor sets and views.
	VkDescriptorSetLayout				prepareDescriptors				(void);
	//! Uploads reference data to the source image and transitions both images.
	void								commandBeforeCompute			(const VkCommandBuffer	cmdBuffer);
	void								commandBetweenShaderInvocations	(const VkCommandBuffer	cmdBuffer);
	//! Copies the destination image back into the helper buffer.
	void								commandAfterCompute				(const VkCommandBuffer	cmdBuffer);

	void								commandBindDescriptorsForLayer	(const VkCommandBuffer	cmdBuffer,
																		 const VkPipelineLayout pipelineLayout,
																		 const int				layerNdx);

	Buffer*								getResultBuffer					(void) const { return m_imageBuffer.get(); }

	de::MovePtr<Image>					m_imageSrc;
	de::MovePtr<Image>					m_imageDst;
	Move<VkDescriptorSetLayout>			m_descriptorSetLayout;
	Move<VkDescriptorPool>				m_descriptorPool;
	std::vector<SharedVkDescriptorSet>	m_allDescriptorSets;	//!< One per layer in single-layer-bind mode; only index 0 used otherwise
	std::vector<SharedVkImageView>		m_allSrcImageViews;
	std::vector<SharedVkImageView>		m_allDstImageViews;
};
1003
ImageLoadStoreTestInstance(Context & context,const Texture & texture,const VkFormat format,const VkFormat imageFormat,const bool declareImageFormatInShader,const bool singleLayerBind)1004 ImageLoadStoreTestInstance::ImageLoadStoreTestInstance (Context& context,
1005 const Texture& texture,
1006 const VkFormat format,
1007 const VkFormat imageFormat,
1008 const bool declareImageFormatInShader,
1009 const bool singleLayerBind)
1010 : LoadStoreTestInstance (context, texture, format, imageFormat, declareImageFormatInShader, singleLayerBind)
1011 , m_allDescriptorSets (texture.numLayers())
1012 , m_allSrcImageViews (texture.numLayers())
1013 , m_allDstImageViews (texture.numLayers())
1014 {
1015 const DeviceInterface& vk = m_context.getDeviceInterface();
1016 const VkDevice device = m_context.getDevice();
1017 Allocator& allocator = m_context.getDefaultAllocator();
1018 const VkImageCreateFlags imageFlags = (m_format == m_imageFormat ? 0u : (VkImageCreateFlags)VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT);
1019
1020 m_imageSrc = de::MovePtr<Image>(new Image(
1021 vk, device, allocator,
1022 makeImageCreateInfo(m_texture, m_imageFormat, VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT, imageFlags),
1023 MemoryRequirement::Any));
1024
1025 m_imageDst = de::MovePtr<Image>(new Image(
1026 vk, device, allocator,
1027 makeImageCreateInfo(m_texture, m_imageFormat, VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_TRANSFER_SRC_BIT, imageFlags),
1028 MemoryRequirement::Any));
1029 }
1030
prepareDescriptors(void)1031 VkDescriptorSetLayout ImageLoadStoreTestInstance::prepareDescriptors (void)
1032 {
1033 const VkDevice device = m_context.getDevice();
1034 const DeviceInterface& vk = m_context.getDeviceInterface();
1035
1036 const int numLayers = m_texture.numLayers();
1037 m_descriptorSetLayout = DescriptorSetLayoutBuilder()
1038 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
1039 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, VK_SHADER_STAGE_COMPUTE_BIT)
1040 .build(vk, device);
1041
1042 m_descriptorPool = DescriptorPoolBuilder()
1043 .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, numLayers)
1044 .addType(VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, numLayers)
1045 .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, numLayers);
1046
1047 if (m_singleLayerBind)
1048 {
1049 for (int layerNdx = 0; layerNdx < numLayers; ++layerNdx)
1050 {
1051 const VkImageViewType viewType = mapImageViewType(getImageTypeForSingleLayer(m_texture.type()));
1052 const VkImageSubresourceRange subresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, layerNdx, 1u);
1053
1054 m_allDescriptorSets[layerNdx] = makeVkSharedPtr(makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout));
1055 m_allSrcImageViews[layerNdx] = makeVkSharedPtr(makeImageView(vk, device, m_imageSrc->get(), viewType, m_format, subresourceRange));
1056 m_allDstImageViews[layerNdx] = makeVkSharedPtr(makeImageView(vk, device, m_imageDst->get(), viewType, m_format, subresourceRange));
1057 }
1058 }
1059 else // bind all layers at once
1060 {
1061 const VkImageViewType viewType = mapImageViewType(m_texture.type());
1062 const VkImageSubresourceRange subresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, numLayers);
1063
1064 m_allDescriptorSets[0] = makeVkSharedPtr(makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout));
1065 m_allSrcImageViews[0] = makeVkSharedPtr(makeImageView(vk, device, m_imageSrc->get(), viewType, m_format, subresourceRange));
1066 m_allDstImageViews[0] = makeVkSharedPtr(makeImageView(vk, device, m_imageDst->get(), viewType, m_format, subresourceRange));
1067 }
1068
1069 return *m_descriptorSetLayout; // not passing the ownership
1070 }
1071
commandBindDescriptorsForLayer(const VkCommandBuffer cmdBuffer,const VkPipelineLayout pipelineLayout,const int layerNdx)1072 void ImageLoadStoreTestInstance::commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer, const VkPipelineLayout pipelineLayout, const int layerNdx)
1073 {
1074 const VkDevice device = m_context.getDevice();
1075 const DeviceInterface& vk = m_context.getDeviceInterface();
1076
1077 const VkDescriptorSet descriptorSet = **m_allDescriptorSets[layerNdx];
1078 const VkImageView srcImageView = **m_allSrcImageViews[layerNdx];
1079 const VkImageView dstImageView = **m_allDstImageViews[layerNdx];
1080
1081 const VkDescriptorImageInfo descriptorSrcImageInfo = makeDescriptorImageInfo(DE_NULL, srcImageView, VK_IMAGE_LAYOUT_GENERAL);
1082 const VkDescriptorImageInfo descriptorDstImageInfo = makeDescriptorImageInfo(DE_NULL, dstImageView, VK_IMAGE_LAYOUT_GENERAL);
1083
1084 DescriptorSetUpdateBuilder()
1085 .writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descriptorSrcImageInfo)
1086 .writeSingle(descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_IMAGE, &descriptorDstImageInfo)
1087 .update(vk, device);
1088 vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipelineLayout, 0u, 1u, &descriptorSet, 0u, DE_NULL);
1089 }
1090
commandBeforeCompute(const VkCommandBuffer cmdBuffer)1091 void ImageLoadStoreTestInstance::commandBeforeCompute (const VkCommandBuffer cmdBuffer)
1092 {
1093 const DeviceInterface& vk = m_context.getDeviceInterface();
1094
1095 const VkImageSubresourceRange fullImageSubresourceRange = makeImageSubresourceRange(VK_IMAGE_ASPECT_COLOR_BIT, 0u, 1u, 0u, m_texture.numLayers());
1096 {
1097 const VkImageMemoryBarrier preCopyImageBarriers[] =
1098 {
1099 makeImageMemoryBarrier(
1100 0u, VK_ACCESS_TRANSFER_WRITE_BIT,
1101 VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1102 m_imageSrc->get(), fullImageSubresourceRange),
1103 makeImageMemoryBarrier(
1104 0u, VK_ACCESS_SHADER_WRITE_BIT,
1105 VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL,
1106 m_imageDst->get(), fullImageSubresourceRange)
1107 };
1108
1109 const VkBufferMemoryBarrier barrierFlushHostWriteBeforeCopy = makeBufferMemoryBarrier(
1110 VK_ACCESS_HOST_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT,
1111 m_imageBuffer->get(), 0ull, m_imageSizeBytes);
1112
1113 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_HOST_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT,
1114 (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 1, &barrierFlushHostWriteBeforeCopy, DE_LENGTH_OF_ARRAY(preCopyImageBarriers), preCopyImageBarriers);
1115 }
1116 {
1117 const VkImageMemoryBarrier barrierAfterCopy = makeImageMemoryBarrier(
1118 VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_SHADER_READ_BIT,
1119 VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, VK_IMAGE_LAYOUT_GENERAL,
1120 m_imageSrc->get(), fullImageSubresourceRange);
1121
1122 const VkBufferImageCopy copyRegion = makeBufferImageCopy(m_texture);
1123
1124 vk.cmdCopyBufferToImage(cmdBuffer, m_imageBuffer->get(), m_imageSrc->get(), VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, 1u, ©Region);
1125 vk.cmdPipelineBarrier(cmdBuffer, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, (VkDependencyFlags)0, 0, (const VkMemoryBarrier*)DE_NULL, 0, (const VkBufferMemoryBarrier*)DE_NULL, 1, &barrierAfterCopy);
1126 }
1127 }
1128
commandBetweenShaderInvocations(const VkCommandBuffer cmdBuffer)1129 void ImageLoadStoreTestInstance::commandBetweenShaderInvocations (const VkCommandBuffer cmdBuffer)
1130 {
1131 commandImageWriteBarrierBetweenShaderInvocations(m_context, cmdBuffer, m_imageDst->get(), m_texture);
1132 }
1133
commandAfterCompute(const VkCommandBuffer cmdBuffer)1134 void ImageLoadStoreTestInstance::commandAfterCompute (const VkCommandBuffer cmdBuffer)
1135 {
1136 commandCopyImageToBuffer(m_context, cmdBuffer, m_imageDst->get(), m_imageBuffer->get(), m_imageSizeBytes, m_texture);
1137 }
1138
//! Load/store test for buffers. The shader reads from a source texel buffer
//! view over the helper buffer and writes into a separate destination buffer.
class BufferLoadStoreTestInstance : public LoadStoreTestInstance
{
public:
									BufferLoadStoreTestInstance		(Context&				context,
																	 const Texture&			texture,
																	 const VkFormat			format,
																	 const VkFormat			imageFormat,
																	 const bool				declareImageFormatInShader);

protected:
	//! Creates layout/pool/set and the source/destination texel buffer views.
	VkDescriptorSetLayout			prepareDescriptors				(void);
	//! Inserts a barrier making shader writes visible to subsequent host reads.
	void							commandAfterCompute				(const VkCommandBuffer	cmdBuffer);

	//! Binds the single descriptor set; layerNdx must be 0 (buffers are never layered).
	void							commandBindDescriptorsForLayer	(const VkCommandBuffer	cmdBuffer,
																	 const VkPipelineLayout pipelineLayout,
																	 const int				layerNdx);

	Buffer*							getResultBuffer					(void) const { return m_imageBufferDst.get(); }

	de::MovePtr<Buffer>				m_imageBufferDst;		//!< Destination buffer written by the shader
	Move<VkDescriptorSetLayout>		m_descriptorSetLayout;
	Move<VkDescriptorPool>			m_descriptorPool;
	Move<VkDescriptorSet>			m_descriptorSet;
	Move<VkBufferView>				m_bufferViewSrc;
	Move<VkBufferView>				m_bufferViewDst;
};
1166
BufferLoadStoreTestInstance(Context & context,const Texture & texture,const VkFormat format,const VkFormat imageFormat,const bool declareImageFormatInShader)1167 BufferLoadStoreTestInstance::BufferLoadStoreTestInstance (Context& context,
1168 const Texture& texture,
1169 const VkFormat format,
1170 const VkFormat imageFormat,
1171 const bool declareImageFormatInShader)
1172 : LoadStoreTestInstance(context, texture, format, imageFormat, declareImageFormatInShader, false)
1173 {
1174 const DeviceInterface& vk = m_context.getDeviceInterface();
1175 const VkDevice device = m_context.getDevice();
1176 Allocator& allocator = m_context.getDefaultAllocator();
1177
1178 // Create a destination buffer.
1179
1180 m_imageBufferDst = de::MovePtr<Buffer>(new Buffer(
1181 vk, device, allocator,
1182 makeBufferCreateInfo(m_imageSizeBytes, VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT),
1183 MemoryRequirement::HostVisible));
1184 }
1185
prepareDescriptors(void)1186 VkDescriptorSetLayout BufferLoadStoreTestInstance::prepareDescriptors (void)
1187 {
1188 const DeviceInterface& vk = m_context.getDeviceInterface();
1189 const VkDevice device = m_context.getDevice();
1190
1191 m_descriptorSetLayout = DescriptorSetLayoutBuilder()
1192 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
1193 .addSingleBinding(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, VK_SHADER_STAGE_COMPUTE_BIT)
1194 .build(vk, device);
1195
1196 m_descriptorPool = DescriptorPoolBuilder()
1197 .addType(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER)
1198 .addType(VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER)
1199 .build(vk, device, VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT, 1u);
1200
1201 m_descriptorSet = makeDescriptorSet(vk, device, *m_descriptorPool, *m_descriptorSetLayout);
1202 m_bufferViewSrc = makeBufferView(vk, device, m_imageBuffer->get(), m_format, 0ull, m_imageSizeBytes);
1203 m_bufferViewDst = makeBufferView(vk, device, m_imageBufferDst->get(), m_format, 0ull, m_imageSizeBytes);
1204
1205 return *m_descriptorSetLayout; // not passing the ownership
1206 }
1207
commandBindDescriptorsForLayer(const VkCommandBuffer cmdBuffer,const VkPipelineLayout pipelineLayout,const int layerNdx)1208 void BufferLoadStoreTestInstance::commandBindDescriptorsForLayer (const VkCommandBuffer cmdBuffer, const VkPipelineLayout pipelineLayout, const int layerNdx)
1209 {
1210 DE_ASSERT(layerNdx == 0);
1211 DE_UNREF(layerNdx);
1212
1213 const VkDevice device = m_context.getDevice();
1214 const DeviceInterface& vk = m_context.getDeviceInterface();
1215
1216 DescriptorSetUpdateBuilder()
1217 .writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(0u), VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, &m_bufferViewSrc.get())
1218 .writeSingle(*m_descriptorSet, DescriptorSetUpdateBuilder::Location::binding(1u), VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER, &m_bufferViewDst.get())
1219 .update(vk, device);
1220 vk.cmdBindDescriptorSets(cmdBuffer, VK_PIPELINE_BIND_POINT_COMPUTE, pipelineLayout, 0u, 1u, &m_descriptorSet.get(), 0u, DE_NULL);
1221 }
1222
commandAfterCompute(const VkCommandBuffer cmdBuffer)1223 void BufferLoadStoreTestInstance::commandAfterCompute (const VkCommandBuffer cmdBuffer)
1224 {
1225 commandBufferWriteBarrierBeforeHostRead(m_context, cmdBuffer, m_imageBufferDst->get(), m_imageSizeBytes);
1226 }
1227
createInstance(Context & context) const1228 TestInstance* StoreTest::createInstance (Context& context) const
1229 {
1230 if (m_texture.type() == IMAGE_TYPE_BUFFER)
1231 return new BufferStoreTestInstance(context, m_texture, m_format, m_declareImageFormatInShader);
1232 else
1233 return new ImageStoreTestInstance(context, m_texture, m_format, m_declareImageFormatInShader, m_singleLayerBind);
1234 }
1235
createInstance(Context & context) const1236 TestInstance* LoadStoreTest::createInstance (Context& context) const
1237 {
1238 if (m_texture.type() == IMAGE_TYPE_BUFFER)
1239 return new BufferLoadStoreTestInstance(context, m_texture, m_format, m_imageFormat, m_declareImageFormatInShader);
1240 else
1241 return new ImageLoadStoreTestInstance(context, m_texture, m_format, m_imageFormat, m_declareImageFormatInShader, m_singleLayerBind);
1242 }
1243
//! One texture per supported image type (size, then layer count).
static const Texture s_textures[] =
{
	Texture(IMAGE_TYPE_1D,			tcu::IVec3(64,  1, 1),	1),
	Texture(IMAGE_TYPE_1D_ARRAY,	tcu::IVec3(64,  1, 1),	8),
	Texture(IMAGE_TYPE_2D,			tcu::IVec3(64, 64, 1),	1),
	Texture(IMAGE_TYPE_2D_ARRAY,	tcu::IVec3(64, 64, 1),	8),
	Texture(IMAGE_TYPE_3D,			tcu::IVec3(64, 64, 8),	1),
	Texture(IMAGE_TYPE_CUBE,		tcu::IVec3(64, 64, 1),	6),
	Texture(IMAGE_TYPE_CUBE_ARRAY,	tcu::IVec3(64, 64, 1),	2*6),	// two cubes, 6 faces each
	Texture(IMAGE_TYPE_BUFFER,		tcu::IVec3(64,  1, 1),	1),
};
1255
getTestTexture(const ImageType imageType)1256 const Texture& getTestTexture (const ImageType imageType)
1257 {
1258 for (int textureNdx = 0; textureNdx < DE_LENGTH_OF_ARRAY(s_textures); ++textureNdx)
1259 if (s_textures[textureNdx].type() == imageType)
1260 return s_textures[textureNdx];
1261
1262 DE_FATAL("Internal error");
1263 return s_textures[0];
1264 }
1265
//! Formats exercised by the store and load/store cases.
static const VkFormat s_formats[] =
{
	// Floating-point formats
	VK_FORMAT_R32G32B32A32_SFLOAT,
	VK_FORMAT_R16G16B16A16_SFLOAT,
	VK_FORMAT_R32_SFLOAT,

	// Unsigned integer formats
	VK_FORMAT_R32G32B32A32_UINT,
	VK_FORMAT_R16G16B16A16_UINT,
	VK_FORMAT_R8G8B8A8_UINT,
	VK_FORMAT_R32_UINT,

	// Signed integer formats
	VK_FORMAT_R32G32B32A32_SINT,
	VK_FORMAT_R16G16B16A16_SINT,
	VK_FORMAT_R8G8B8A8_SINT,
	VK_FORMAT_R32_SINT,

	// Normalized formats
	VK_FORMAT_R8G8B8A8_UNORM,

	VK_FORMAT_R8G8B8A8_SNORM,
};
1286
1287 } // anonymous ns
1288
createImageStoreTests(tcu::TestContext & testCtx)1289 tcu::TestCaseGroup* createImageStoreTests (tcu::TestContext& testCtx)
1290 {
1291 de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "store", "Plain imageStore() cases"));
1292 de::MovePtr<tcu::TestCaseGroup> testGroupWithFormat(new tcu::TestCaseGroup(testCtx, "with_format", "Declare a format layout qualifier for write images"));
1293 de::MovePtr<tcu::TestCaseGroup> testGroupWithoutFormat(new tcu::TestCaseGroup(testCtx, "without_format", "Do not declare a format layout qualifier for write images"));
1294
1295 for (int textureNdx = 0; textureNdx < DE_LENGTH_OF_ARRAY(s_textures); ++textureNdx)
1296 {
1297 const Texture& texture = s_textures[textureNdx];
1298 de::MovePtr<tcu::TestCaseGroup> groupWithFormatByImageViewType (new tcu::TestCaseGroup(testCtx, getImageTypeName(texture.type()).c_str(), ""));
1299 de::MovePtr<tcu::TestCaseGroup> groupWithoutFormatByImageViewType (new tcu::TestCaseGroup(testCtx, getImageTypeName(texture.type()).c_str(), ""));
1300 const bool isLayered = (texture.numLayers() > 1);
1301
1302 for (int formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(s_formats); ++formatNdx)
1303 {
1304 groupWithFormatByImageViewType->addChild(new StoreTest(testCtx, getFormatShortString(s_formats[formatNdx]), "", texture, s_formats[formatNdx]));
1305 groupWithoutFormatByImageViewType->addChild(new StoreTest(testCtx, getFormatShortString(s_formats[formatNdx]), "", texture, s_formats[formatNdx], 0));
1306
1307 if (isLayered)
1308 groupWithFormatByImageViewType->addChild(new StoreTest(testCtx, getFormatShortString(s_formats[formatNdx]) + "_single_layer", "",
1309 texture, s_formats[formatNdx],
1310 StoreTest::FLAG_SINGLE_LAYER_BIND | StoreTest::FLAG_DECLARE_IMAGE_FORMAT_IN_SHADER));
1311 }
1312
1313 testGroupWithFormat->addChild(groupWithFormatByImageViewType.release());
1314 testGroupWithoutFormat->addChild(groupWithoutFormatByImageViewType.release());
1315 }
1316
1317 testGroup->addChild(testGroupWithFormat.release());
1318 testGroup->addChild(testGroupWithoutFormat.release());
1319
1320 return testGroup.release();
1321 }
1322
createImageLoadStoreTests(tcu::TestContext & testCtx)1323 tcu::TestCaseGroup* createImageLoadStoreTests (tcu::TestContext& testCtx)
1324 {
1325 de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "load_store", "Cases with imageLoad() followed by imageStore()"));
1326 de::MovePtr<tcu::TestCaseGroup> testGroupWithFormat(new tcu::TestCaseGroup(testCtx, "with_format", "Declare a format layout qualifier for read images"));
1327 de::MovePtr<tcu::TestCaseGroup> testGroupWithoutFormat(new tcu::TestCaseGroup(testCtx, "without_format", "Do not declare a format layout qualifier for read images"));
1328
1329 for (int textureNdx = 0; textureNdx < DE_LENGTH_OF_ARRAY(s_textures); ++textureNdx)
1330 {
1331 const Texture& texture = s_textures[textureNdx];
1332 de::MovePtr<tcu::TestCaseGroup> groupWithFormatByImageViewType (new tcu::TestCaseGroup(testCtx, getImageTypeName(texture.type()).c_str(), ""));
1333 de::MovePtr<tcu::TestCaseGroup> groupWithoutFormatByImageViewType (new tcu::TestCaseGroup(testCtx, getImageTypeName(texture.type()).c_str(), ""));
1334 const bool isLayered = (texture.numLayers() > 1);
1335
1336 for (int formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(s_formats); ++formatNdx)
1337 {
1338 groupWithFormatByImageViewType->addChild(new LoadStoreTest(testCtx, getFormatShortString(s_formats[formatNdx]), "", texture, s_formats[formatNdx], s_formats[formatNdx]));
1339 groupWithoutFormatByImageViewType->addChild(new LoadStoreTest(testCtx, getFormatShortString(s_formats[formatNdx]), "", texture, s_formats[formatNdx], s_formats[formatNdx], 0));
1340
1341 if (isLayered)
1342 groupWithFormatByImageViewType->addChild(new LoadStoreTest(testCtx, getFormatShortString(s_formats[formatNdx]) + "_single_layer", "",
1343 texture, s_formats[formatNdx], s_formats[formatNdx],
1344 LoadStoreTest::FLAG_SINGLE_LAYER_BIND | LoadStoreTest::FLAG_DECLARE_IMAGE_FORMAT_IN_SHADER));
1345 }
1346
1347 testGroupWithFormat->addChild(groupWithFormatByImageViewType.release());
1348 testGroupWithoutFormat->addChild(groupWithoutFormatByImageViewType.release());
1349 }
1350
1351 testGroup->addChild(testGroupWithFormat.release());
1352 testGroup->addChild(testGroupWithoutFormat.release());
1353
1354 return testGroup.release();
1355 }
1356
createImageFormatReinterpretTests(tcu::TestContext & testCtx)1357 tcu::TestCaseGroup* createImageFormatReinterpretTests (tcu::TestContext& testCtx)
1358 {
1359 de::MovePtr<tcu::TestCaseGroup> testGroup(new tcu::TestCaseGroup(testCtx, "format_reinterpret", "Cases with differing texture and image formats"));
1360
1361 for (int textureNdx = 0; textureNdx < DE_LENGTH_OF_ARRAY(s_textures); ++textureNdx)
1362 {
1363 const Texture& texture = s_textures[textureNdx];
1364 de::MovePtr<tcu::TestCaseGroup> groupByImageViewType (new tcu::TestCaseGroup(testCtx, getImageTypeName(texture.type()).c_str(), ""));
1365
1366 for (int imageFormatNdx = 0; imageFormatNdx < DE_LENGTH_OF_ARRAY(s_formats); ++imageFormatNdx)
1367 for (int formatNdx = 0; formatNdx < DE_LENGTH_OF_ARRAY(s_formats); ++formatNdx)
1368 {
1369 const std::string caseName = getFormatShortString(s_formats[imageFormatNdx]) + "_" + getFormatShortString(s_formats[formatNdx]);
1370 if (imageFormatNdx != formatNdx && formatsAreCompatible(s_formats[imageFormatNdx], s_formats[formatNdx]))
1371 groupByImageViewType->addChild(new LoadStoreTest(testCtx, caseName, "", texture, s_formats[formatNdx], s_formats[imageFormatNdx]));
1372 }
1373 testGroup->addChild(groupByImageViewType.release());
1374 }
1375
1376 return testGroup.release();
1377 }
1378
createImageQualifierRestrictCase(tcu::TestContext & testCtx,const ImageType imageType,const std::string & name)1379 de::MovePtr<TestCase> createImageQualifierRestrictCase (tcu::TestContext& testCtx, const ImageType imageType, const std::string& name)
1380 {
1381 const VkFormat format = VK_FORMAT_R32G32B32A32_UINT;
1382 const Texture& texture = getTestTexture(imageType);
1383 return de::MovePtr<TestCase>(new LoadStoreTest(testCtx, name, "", texture, format, format, LoadStoreTest::FLAG_RESTRICT_IMAGES | LoadStoreTest::FLAG_DECLARE_IMAGE_FORMAT_IN_SHADER));
1384 }
1385
1386 } // image
1387 } // vkt
1388