1 /*------------------------------------------------------------------------
2 * Vulkan Conformance Tests
3 * ------------------------
4 *
5 * Copyright (c) 2015 The Khronos Group Inc.
6 * Copyright (c) 2015 Intel Corporation
7 *
8 * Licensed under the Apache License, Version 2.0 (the "License");
9 * you may not use this file except in compliance with the License.
10 * You may obtain a copy of the License at
11 *
12 * http://www.apache.org/licenses/LICENSE-2.0
13 *
14 * Unless required by applicable law or agreed to in writing, software
15 * distributed under the License is distributed on an "AS IS" BASIS,
16 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
17 * See the License for the specific language governing permissions and
18 * limitations under the License.
19 *
20 *//*!
21 * \file
22 * \brief Vulkan Occlusion Query Tests
23 *//*--------------------------------------------------------------------*/
24
25 #include "vktQueryPoolOcclusionTests.hpp"
26
27 #include "vktTestCase.hpp"
28
29 #include "vktDrawImageObjectUtil.hpp"
30 #include "vktDrawBufferObjectUtil.hpp"
31 #include "vktDrawCreateInfoUtil.hpp"
32 #include "vkBuilderUtil.hpp"
33 #include "vkRefUtil.hpp"
34 #include "vkPrograms.hpp"
35 #include "vkTypeUtil.hpp"
36 #include "vkCmdUtil.hpp"
37
38 #include "tcuTestLog.hpp"
39 #include "tcuResource.hpp"
40 #include "tcuImageCompare.hpp"
41 #include "tcuCommandLine.hpp"
42
43 namespace vkt
44 {
45
46 namespace QueryPool
47 {
48
49 using namespace Draw;
50
51 namespace
52 {
53
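// Helper that bundles the static Vulkan state shared by all test variants:
// a WIDTH x HEIGHT color and depth attachment, a single-subpass render pass,
// a framebuffer, a graphics pipeline and a host-visible vertex buffer.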
54 struct StateObjects
55 {
	StateObjects (const vk::DeviceInterface& vk, vkt::Context& context, const int numVertices, vk::VkPrimitiveTopology primitive);
	void setVertices (const vk::DeviceInterface& vk, std::vector<tcu::Vec4> vertices);
58
59 enum
60 {
61 WIDTH = 128,
62 HEIGHT = 128
63 };
64
65 vkt::Context &m_context;
66
67 vk::Move<vk::VkPipeline> m_pipeline;
68 vk::Move<vk::VkPipelineLayout> m_pipelineLayout;
69
70 de::SharedPtr<Image> m_colorAttachmentImage, m_DepthImage;
71 vk::Move<vk::VkImageView> m_attachmentView;
	vk::Move<vk::VkImageView> m_depthView;
73
74 vk::Move<vk::VkRenderPass> m_renderPass;
75 vk::Move<vk::VkFramebuffer> m_framebuffer;
76
77 de::SharedPtr<Buffer> m_vertexBuffer;
78
79 vk::VkFormat m_colorAttachmentFormat;
80 };
81
StateObjects::StateObjects (const vk::DeviceInterface& vk, vkt::Context& context, const int numVertices, vk::VkPrimitiveTopology primitive)
83 : m_context(context)
84 , m_colorAttachmentFormat(vk::VK_FORMAT_R8G8B8A8_UNORM)
85
86 {
87 vk::VkFormat depthFormat = vk::VK_FORMAT_D16_UNORM;
88 const vk::VkDevice device = m_context.getDevice();
89
90 //attachment images and views
91 {
92 vk::VkExtent3D imageExtent =
93 {
94 WIDTH, // width;
95 HEIGHT, // height;
96 1 // depth;
97 };
98
99 const ImageCreateInfo colorImageCreateInfo(vk::VK_IMAGE_TYPE_2D, m_colorAttachmentFormat, imageExtent, 1, 1, vk::VK_SAMPLE_COUNT_1_BIT, vk::VK_IMAGE_TILING_OPTIMAL,
100 vk::VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | vk::VK_IMAGE_USAGE_TRANSFER_SRC_BIT);
101
102 m_colorAttachmentImage = Image::createAndAlloc(vk, device, colorImageCreateInfo, m_context.getDefaultAllocator(), m_context.getUniversalQueueFamilyIndex());
103
104 const ImageViewCreateInfo attachmentViewInfo(m_colorAttachmentImage->object(), vk::VK_IMAGE_VIEW_TYPE_2D, m_colorAttachmentFormat);
105 m_attachmentView = vk::createImageView(vk, device, &attachmentViewInfo);
106
107 ImageCreateInfo depthImageCreateInfo(vk::VK_IMAGE_TYPE_2D, depthFormat, imageExtent, 1, 1, vk::VK_SAMPLE_COUNT_1_BIT, vk::VK_IMAGE_TILING_OPTIMAL,
108 vk::VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT);
109
110 m_DepthImage = Image::createAndAlloc(vk, device, depthImageCreateInfo, m_context.getDefaultAllocator(), m_context.getUniversalQueueFamilyIndex());
111
112 // Construct a depth view from depth image
113 const ImageViewCreateInfo depthViewInfo(m_DepthImage->object(), vk::VK_IMAGE_VIEW_TYPE_2D, depthFormat);
	m_depthView = vk::createImageView(vk, device, &depthViewInfo);
115 }
116
117 {
118 // Renderpass and Framebuffer
119
120 RenderPassCreateInfo renderPassCreateInfo;
121 renderPassCreateInfo.addAttachment(AttachmentDescription(m_colorAttachmentFormat, // format
122 vk::VK_SAMPLE_COUNT_1_BIT, // samples
123 vk::VK_ATTACHMENT_LOAD_OP_CLEAR, // loadOp
124 vk::VK_ATTACHMENT_STORE_OP_DONT_CARE, // storeOp
125 vk::VK_ATTACHMENT_LOAD_OP_DONT_CARE, // stencilLoadOp
														 vk::VK_ATTACHMENT_STORE_OP_DONT_CARE,					// stencilStoreOp
														 vk::VK_IMAGE_LAYOUT_GENERAL,							// initialLayout
128 vk::VK_IMAGE_LAYOUT_GENERAL)); // finalLayout
129
130 renderPassCreateInfo.addAttachment(AttachmentDescription(depthFormat, // format
131 vk::VK_SAMPLE_COUNT_1_BIT, // samples
132 vk::VK_ATTACHMENT_LOAD_OP_CLEAR, // loadOp
133 vk::VK_ATTACHMENT_STORE_OP_DONT_CARE, // storeOp
134 vk::VK_ATTACHMENT_LOAD_OP_DONT_CARE, // stencilLoadOp
														 vk::VK_ATTACHMENT_STORE_OP_DONT_CARE,					// stencilStoreOp
														 vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,	// initialLayout
137 vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL)); // finalLayout
138
139 const vk::VkAttachmentReference colorAttachmentReference =
140 {
141 0, // attachment
142 vk::VK_IMAGE_LAYOUT_GENERAL // layout
143 };
144
145 const vk::VkAttachmentReference depthAttachmentReference =
146 {
147 1, // attachment
148 vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL // layout
149 };
150
151 renderPassCreateInfo.addSubpass(SubpassDescription(vk::VK_PIPELINE_BIND_POINT_GRAPHICS, // pipelineBindPoint
152 0, // flags
153 0, // inputCount
154 DE_NULL, // pInputAttachments
155 1, // colorCount
156 &colorAttachmentReference, // pColorAttachments
157 DE_NULL, // pResolveAttachments
158 depthAttachmentReference, // depthStencilAttachment
159 0, // preserveCount
160 DE_NULL)); // preserveAttachments
161
162 m_renderPass = vk::createRenderPass(vk, device, &renderPassCreateInfo);
163
164 std::vector<vk::VkImageView> attachments(2);
165 attachments[0] = *m_attachmentView;
		attachments[1] = *m_depthView;
167
168 FramebufferCreateInfo framebufferCreateInfo(*m_renderPass, attachments, WIDTH, HEIGHT, 1);
169 m_framebuffer = vk::createFramebuffer(vk, device, &framebufferCreateInfo);
170 }
171
172 {
173 // Pipeline
174
175 vk::Unique<vk::VkShaderModule> vs(vk::createShaderModule(vk, device, m_context.getBinaryCollection().get("vert"), 0));
176 vk::Unique<vk::VkShaderModule> fs(vk::createShaderModule(vk, device, m_context.getBinaryCollection().get("frag"), 0));
177
178 const PipelineCreateInfo::ColorBlendState::Attachment attachmentState;
179
180 const PipelineLayoutCreateInfo pipelineLayoutCreateInfo;
181 m_pipelineLayout = vk::createPipelineLayout(vk, device, &pipelineLayoutCreateInfo);
182
183 const vk::VkVertexInputBindingDescription vf_binding_desc =
184 {
185 0, // binding;
186 4 * (deUint32)sizeof(float), // stride;
187 vk::VK_VERTEX_INPUT_RATE_VERTEX // inputRate
188 };
189
190 const vk::VkVertexInputAttributeDescription vf_attribute_desc =
191 {
192 0, // location;
193 0, // binding;
194 vk::VK_FORMAT_R32G32B32A32_SFLOAT, // format;
195 0 // offset;
196 };
197
198 const vk::VkPipelineVertexInputStateCreateInfo vf_info =
	{
		vk::VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,	// sType
		DE_NULL,														// pNext
		0u,																// flags
		1u,																// vertexBindingDescriptionCount
		&vf_binding_desc,												// pVertexBindingDescriptions
		1u,																// vertexAttributeDescriptionCount
		&vf_attribute_desc												// pVertexAttributeDescriptions
	};
208
209 PipelineCreateInfo pipelineCreateInfo(*m_pipelineLayout, *m_renderPass, 0, 0);
210 pipelineCreateInfo.addShader(PipelineCreateInfo::PipelineShaderStage(*vs, "main", vk::VK_SHADER_STAGE_VERTEX_BIT));
211 pipelineCreateInfo.addShader(PipelineCreateInfo::PipelineShaderStage(*fs, "main", vk::VK_SHADER_STAGE_FRAGMENT_BIT));
212 pipelineCreateInfo.addState(PipelineCreateInfo::InputAssemblerState(primitive));
213 pipelineCreateInfo.addState(PipelineCreateInfo::ColorBlendState(1, &attachmentState));
214 const vk::VkViewport viewport = vk::makeViewport(WIDTH, HEIGHT);
215 const vk::VkRect2D scissor = vk::makeRect2D(WIDTH, HEIGHT);
216 pipelineCreateInfo.addState(PipelineCreateInfo::ViewportState(1, std::vector<vk::VkViewport>(1, viewport), std::vector<vk::VkRect2D>(1, scissor)));
217 pipelineCreateInfo.addState(PipelineCreateInfo::DepthStencilState(true, true, vk::VK_COMPARE_OP_GREATER_OR_EQUAL));
218 pipelineCreateInfo.addState(PipelineCreateInfo::RasterizerState());
219 pipelineCreateInfo.addState(PipelineCreateInfo::MultiSampleState());
220 pipelineCreateInfo.addState(vf_info);
221 m_pipeline = vk::createGraphicsPipeline(vk, device, DE_NULL, &pipelineCreateInfo);
222 }
223
224 {
225 // Vertex buffer
226 const size_t kBufferSize = numVertices * sizeof(tcu::Vec4);
227 m_vertexBuffer = Buffer::createAndAlloc(vk, device, BufferCreateInfo(kBufferSize, vk::VK_BUFFER_USAGE_VERTEX_BUFFER_BIT), m_context.getDefaultAllocator(), vk::MemoryRequirement::HostVisible);
228 }
229 }
230
void StateObjects::setVertices (const vk::DeviceInterface& vk, std::vector<tcu::Vec4> vertices)
232 {
233 const vk::VkDevice device = m_context.getDevice();
234
235 tcu::Vec4 *ptr = reinterpret_cast<tcu::Vec4*>(m_vertexBuffer->getBoundMemory().getHostPtr());
236 std::copy(vertices.begin(), vertices.end(), ptr);
237
238 vk::flushAlloc(vk, device, m_vertexBuffer->getBoundMemory());
239 }
240
241 enum OcclusionQueryResultSize
242 {
243 RESULT_SIZE_64_BIT,
244 RESULT_SIZE_32_BIT,
245 };
246
247 enum OcclusionQueryWait
248 {
249 WAIT_QUEUE,
250 WAIT_QUERY,
251 WAIT_NONE
252 };
253
254 enum OcclusionQueryResultsMode
255 {
256 RESULTS_MODE_GET,
257 RESULTS_MODE_COPY
258 };
259
260 struct OcclusionQueryTestVector
261 {
262 vk::VkQueryControlFlags queryControlFlags;
263 OcclusionQueryResultSize queryResultSize;
264 OcclusionQueryWait queryWait;
265 OcclusionQueryResultsMode queryResultsMode;
266 vk::VkDeviceSize queryResultsStride;
267 bool queryResultsAvailability;
268 vk::VkPrimitiveTopology primitiveTopology;
269 bool discardHalf;
270 };
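
// The test vector spans the parameter space exercised below: precise vs. conservative queries,
// 32- vs. 64-bit results, how (and whether) the test waits for results, vkGetQueryPoolResults vs.
// vkCmdCopyQueryPoolResults, result stride, the availability flag, primitive topology and an
// optional fragment-discard mode.
//
// Illustrative example only (not necessarily one of the generated cases verbatim): a 64-bit variant
// that waits on the query, copies results on the device and requests availability could look like
//   { 0, RESULT_SIZE_64_BIT, WAIT_QUERY, RESULTS_MODE_COPY, 2 * sizeof(deUint64), true,
//     vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST, false }
// where the stride is doubled because each query then stores both a result and an availability value.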
271
272 class BasicOcclusionQueryTestInstance : public vkt::TestInstance
273 {
274 public:
275 BasicOcclusionQueryTestInstance (vkt::Context &context, const OcclusionQueryTestVector& testVector);
276 ~BasicOcclusionQueryTestInstance (void);
277 private:
278 tcu::TestStatus iterate (void);
279
280 enum
281 {
282 NUM_QUERIES_IN_POOL = 2,
283 QUERY_INDEX_CAPTURE_EMPTY = 0,
284 QUERY_INDEX_CAPTURE_DRAWCALL = 1,
285 NUM_VERTICES_IN_DRAWCALL = 3
286 };
287
288 OcclusionQueryTestVector m_testVector;
289 StateObjects* m_stateObjects;
290 vk::VkQueryPool m_queryPool;
291 };
292
BasicOcclusionQueryTestInstance::BasicOcclusionQueryTestInstance (vkt::Context& context, const OcclusionQueryTestVector& testVector)
294 : TestInstance (context)
295 , m_testVector (testVector)
296 {
297 DE_ASSERT(testVector.queryResultSize == RESULT_SIZE_64_BIT
298 && testVector.queryWait == WAIT_QUEUE
299 && testVector.queryResultsMode == RESULTS_MODE_GET
300 && testVector.queryResultsStride == sizeof(deUint64)
301 && testVector.queryResultsAvailability == false
302 && testVector.primitiveTopology == vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST);
303
304 if ((m_testVector.queryControlFlags & vk::VK_QUERY_CONTROL_PRECISE_BIT) && !m_context.getDeviceFeatures().occlusionQueryPrecise)
305 throw tcu::NotSupportedError("Precise occlusion queries are not supported");
306
307 m_stateObjects = new StateObjects(m_context.getDeviceInterface(), m_context, NUM_VERTICES_IN_DRAWCALL, m_testVector.primitiveTopology);
308
309 const vk::VkDevice device = m_context.getDevice();
310 const vk::DeviceInterface& vk = m_context.getDeviceInterface();
311
312 const vk::VkQueryPoolCreateInfo queryPoolCreateInfo =
313 {
314 vk::VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO,
315 DE_NULL,
316 0u,
317 vk::VK_QUERY_TYPE_OCCLUSION,
318 NUM_QUERIES_IN_POOL,
319 0
320 };
321 VK_CHECK(vk.createQueryPool(device, &queryPoolCreateInfo, /*pAllocator*/ DE_NULL, &m_queryPool));
322
323 std::vector<tcu::Vec4> vertices(NUM_VERTICES_IN_DRAWCALL);
324 vertices[0] = tcu::Vec4(0.5, 0.5, 0.0, 1.0);
325 vertices[1] = tcu::Vec4(0.5, 0.0, 0.0, 1.0);
326 vertices[2] = tcu::Vec4(0.0, 0.5, 0.0, 1.0);
327 m_stateObjects->setVertices(vk, vertices);
328 }
329
BasicOcclusionQueryTestInstance::~BasicOcclusionQueryTestInstance (void)
331 {
332 if (m_stateObjects)
333 delete m_stateObjects;
334
335 if (m_queryPool != DE_NULL)
336 {
337 const vk::VkDevice device = m_context.getDevice();
338 const vk::DeviceInterface& vk = m_context.getDeviceInterface();
339
340 vk.destroyQueryPool(device, m_queryPool, /*pAllocator*/ DE_NULL);
341 }
342 }
343
tcu::TestStatus BasicOcclusionQueryTestInstance::iterate (void)
345 {
346 tcu::TestLog &log = m_context.getTestContext().getLog();
347 const vk::VkDevice device = m_context.getDevice();
348 const vk::VkQueue queue = m_context.getUniversalQueue();
349 const vk::DeviceInterface& vk = m_context.getDeviceInterface();
350
351 const CmdPoolCreateInfo cmdPoolCreateInfo (m_context.getUniversalQueueFamilyIndex());
352 vk::Move<vk::VkCommandPool> cmdPool = vk::createCommandPool(vk, device, &cmdPoolCreateInfo);
353
354 vk::Unique<vk::VkCommandBuffer> cmdBuffer (vk::allocateCommandBuffer(vk, device, *cmdPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
355
356 beginCommandBuffer(vk, *cmdBuffer);
357
358 initialTransitionColor2DImage(vk, *cmdBuffer, m_stateObjects->m_colorAttachmentImage->object(), vk::VK_IMAGE_LAYOUT_GENERAL,
359 vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT);
360 initialTransitionDepth2DImage(vk, *cmdBuffer, m_stateObjects->m_DepthImage->object(), vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
361 vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT, vk::VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | vk::VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT);
362
363 std::vector<vk::VkClearValue> renderPassClearValues(2);
364 deMemset(&renderPassClearValues[0], 0, static_cast<int>(renderPassClearValues.size()) * sizeof(vk::VkClearValue));
365
366 vk.cmdResetQueryPool(*cmdBuffer, m_queryPool, 0, NUM_QUERIES_IN_POOL);
367
368 beginRenderPass(vk, *cmdBuffer, *m_stateObjects->m_renderPass, *m_stateObjects->m_framebuffer, vk::makeRect2D(0, 0, StateObjects::WIDTH, StateObjects::HEIGHT), (deUint32)renderPassClearValues.size(), &renderPassClearValues[0]);
369
370 vk.cmdBindPipeline(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_stateObjects->m_pipeline);
371
372 vk::VkBuffer vertexBuffer = m_stateObjects->m_vertexBuffer->object();
373 const vk::VkDeviceSize vertexBufferOffset = 0;
374 vk.cmdBindVertexBuffers(*cmdBuffer, 0, 1, &vertexBuffer, &vertexBufferOffset);
375
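	// Query slot 0 wraps no draw call at all, so it must report exactly zero passed samples.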
376 vk.cmdBeginQuery(*cmdBuffer, m_queryPool, QUERY_INDEX_CAPTURE_EMPTY, m_testVector.queryControlFlags);
377 vk.cmdEndQuery(*cmdBuffer, m_queryPool, QUERY_INDEX_CAPTURE_EMPTY);
378
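	// Query slot 1 wraps a single draw of NUM_VERTICES_IN_DRAWCALL points; with a precise query it is
	// expected to report exactly that many passed samples, otherwise any non-zero count is accepted.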
379 vk.cmdBeginQuery(*cmdBuffer, m_queryPool, QUERY_INDEX_CAPTURE_DRAWCALL, m_testVector.queryControlFlags);
380 vk.cmdDraw(*cmdBuffer, NUM_VERTICES_IN_DRAWCALL, 1, 0, 0);
381 vk.cmdEndQuery(*cmdBuffer, m_queryPool, QUERY_INDEX_CAPTURE_DRAWCALL);
382
383 endRenderPass(vk, *cmdBuffer);
384
385 transition2DImage(vk, *cmdBuffer, m_stateObjects->m_colorAttachmentImage->object(), vk::VK_IMAGE_ASPECT_COLOR_BIT,
386 vk::VK_IMAGE_LAYOUT_GENERAL, vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT,
387 vk::VK_ACCESS_TRANSFER_READ_BIT, vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT);
388
389 endCommandBuffer(vk, *cmdBuffer);
390
391 submitCommandsAndWait(vk, device, queue, cmdBuffer.get());
392
393 deUint64 queryResults[NUM_QUERIES_IN_POOL] = { 0 };
394 size_t queryResultsSize = sizeof(queryResults);
395
396 vk::VkResult queryResult = vk.getQueryPoolResults(device, m_queryPool, 0, NUM_QUERIES_IN_POOL, queryResultsSize, queryResults, sizeof(queryResults[0]), vk::VK_QUERY_RESULT_64_BIT);
397
398 if (queryResult == vk::VK_NOT_READY)
399 {
400 TCU_FAIL("Query result not avaliable, but vkWaitIdle() was called.");
401 }
402
403 VK_CHECK(queryResult);
404
405 log << tcu::TestLog::Section("OcclusionQueryResults",
406 "Occlusion query results");
407 for (int ndx = 0; ndx < DE_LENGTH_OF_ARRAY(queryResults); ++ndx)
408 {
409 log << tcu::TestLog::Message << "query[ slot == " << ndx
410 << "] result == " << queryResults[ndx] << tcu::TestLog::EndMessage;
411 }
412
413 bool passed = true;
414
415 for (int queryNdx = 0; queryNdx < DE_LENGTH_OF_ARRAY(queryResults); ++queryNdx)
416 {
417
418 deUint64 expectedValue;
419
420 switch (queryNdx)
421 {
422 case QUERY_INDEX_CAPTURE_EMPTY:
423 expectedValue = 0;
424 break;
425 case QUERY_INDEX_CAPTURE_DRAWCALL:
426 expectedValue = NUM_VERTICES_IN_DRAWCALL;
427 break;
428 }
429
430 if ((m_testVector.queryControlFlags & vk::VK_QUERY_CONTROL_PRECISE_BIT) || expectedValue == 0)
431 {
432 // require precise value
433 if (queryResults[queryNdx] != expectedValue)
434 {
435 log << tcu::TestLog::Message << "vkGetQueryPoolResults returned "
436 "wrong value of query for index "
437 << queryNdx << ", expected " << expectedValue << ", got "
				<< queryResults[queryNdx] << "." << tcu::TestLog::EndMessage;
439 passed = false;
440 }
441 }
442 else
443 {
		// require imprecise value greater than 0
445 if (queryResults[queryNdx] == 0)
446 {
447 log << tcu::TestLog::Message << "vkGetQueryPoolResults returned "
448 "wrong value of query for index "
449 << queryNdx << ", expected any non-zero value, got "
				<< queryResults[queryNdx] << "." << tcu::TestLog::EndMessage;
451 passed = false;
452 }
453 }
454 }
455 log << tcu::TestLog::EndSection;
456
457 if (passed)
458 {
459 return tcu::TestStatus(QP_TEST_RESULT_PASS, "Query result verification passed");
460 }
461 return tcu::TestStatus(QP_TEST_RESULT_FAIL, "Query result verification failed");
462 }
463
464 class OcclusionQueryTestInstance : public vkt::TestInstance
465 {
466 public:
467 OcclusionQueryTestInstance (vkt::Context &context, const OcclusionQueryTestVector& testVector);
468 ~OcclusionQueryTestInstance (void);
469 private:
470 tcu::TestStatus iterate (void);
471
472 bool hasSeparateResetCmdBuf (void) const;
473 bool hasSeparateCopyCmdBuf (void) const;
474
475 vk::Move<vk::VkCommandBuffer> recordQueryPoolReset (vk::VkCommandPool commandPool);
476 vk::Move<vk::VkCommandBuffer> recordRender (vk::VkCommandPool commandPool);
477 vk::Move<vk::VkCommandBuffer> recordCopyResults (vk::VkCommandPool commandPool);
478
479 void captureResults (deUint64* retResults, deUint64* retAvailability, bool allowNotReady);
480 void logResults (const deUint64* results, const deUint64* availability);
481 bool validateResults (const deUint64* results, const deUint64* availability, bool allowUnavailable, vk::VkPrimitiveTopology primitiveTopology);
482
483 enum
484 {
485 NUM_QUERIES_IN_POOL = 3,
486 QUERY_INDEX_CAPTURE_ALL = 0,
487 QUERY_INDEX_CAPTURE_PARTIALLY_OCCLUDED = 1,
488 QUERY_INDEX_CAPTURE_OCCLUDED = 2
489 };
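	// Slot 0 captures the test triangle drawn with nothing in front of it, slot 1 captures it drawn
	// behind a partial occluder, and slot 2 captures it drawn behind a full occluder (zero samples expected).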
490 enum
491 {
492 NUM_VERTICES_IN_DRAWCALL = 3,
493 NUM_VERTICES_IN_PARTIALLY_OCCLUDED_DRAWCALL = 3,
494 NUM_VERTICES_IN_OCCLUDER_DRAWCALL = 3,
495 NUM_VERTICES = NUM_VERTICES_IN_DRAWCALL + NUM_VERTICES_IN_PARTIALLY_OCCLUDED_DRAWCALL + NUM_VERTICES_IN_OCCLUDER_DRAWCALL
496 };
497 enum
498 {
499 START_VERTEX = 0,
500 START_VERTEX_PARTIALLY_OCCLUDED = START_VERTEX + NUM_VERTICES_IN_DRAWCALL,
501 START_VERTEX_OCCLUDER = START_VERTEX_PARTIALLY_OCCLUDED + NUM_VERTICES_IN_PARTIALLY_OCCLUDED_DRAWCALL
502 };
503
504 OcclusionQueryTestVector m_testVector;
505
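	// Result flags derived from the test vector: WAIT_QUERY adds VK_QUERY_RESULT_WAIT_BIT, 64-bit
	// results add VK_QUERY_RESULT_64_BIT, and requesting availability adds VK_QUERY_RESULT_WITH_AVAILABILITY_BIT.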
506 const vk::VkQueryResultFlags m_queryResultFlags;
507
508 StateObjects* m_stateObjects;
509 vk::VkQueryPool m_queryPool;
510 de::SharedPtr<Buffer> m_queryPoolResultsBuffer;
511
512 vk::Move<vk::VkCommandPool> m_commandPool;
513 vk::Move<vk::VkCommandBuffer> m_queryPoolResetCommandBuffer;
514 vk::Move<vk::VkCommandBuffer> m_renderCommandBuffer;
515 vk::Move<vk::VkCommandBuffer> m_copyResultsCommandBuffer;
516 };
517
OcclusionQueryTestInstance::OcclusionQueryTestInstance (vkt::Context& context, const OcclusionQueryTestVector& testVector)
519 : vkt::TestInstance (context)
520 , m_testVector (testVector)
521 , m_queryResultFlags ((m_testVector.queryWait == WAIT_QUERY ? vk::VK_QUERY_RESULT_WAIT_BIT : 0)
522 | (m_testVector.queryResultSize == RESULT_SIZE_64_BIT ? vk::VK_QUERY_RESULT_64_BIT : 0)
523 | (m_testVector.queryResultsAvailability ? vk::VK_QUERY_RESULT_WITH_AVAILABILITY_BIT : 0))
524 {
525 const vk::VkDevice device = m_context.getDevice();
526 const vk::DeviceInterface& vk = m_context.getDeviceInterface();
527
528 if ((m_testVector.queryControlFlags & vk::VK_QUERY_CONTROL_PRECISE_BIT) && !m_context.getDeviceFeatures().occlusionQueryPrecise)
529 throw tcu::NotSupportedError("Precise occlusion queries are not supported");
530
531 m_stateObjects = new StateObjects(m_context.getDeviceInterface(), m_context, NUM_VERTICES_IN_DRAWCALL + NUM_VERTICES_IN_PARTIALLY_OCCLUDED_DRAWCALL + NUM_VERTICES_IN_OCCLUDER_DRAWCALL, m_testVector.primitiveTopology);
532
533 const vk::VkQueryPoolCreateInfo queryPoolCreateInfo =
534 {
535 vk::VK_STRUCTURE_TYPE_QUERY_POOL_CREATE_INFO,
536 DE_NULL,
537 0u,
538 vk::VK_QUERY_TYPE_OCCLUSION,
539 NUM_QUERIES_IN_POOL,
540 0
541 };
542
543 VK_CHECK(vk.createQueryPool(device, &queryPoolCreateInfo, /*pAllocator*/ DE_NULL, &m_queryPool));
544
545 if (m_testVector.queryResultsMode == RESULTS_MODE_COPY)
546 {
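		// Destination buffer for vkCmdCopyQueryPoolResults: each query occupies queryResultsStride bytes;
		// the counter is written at offset 0 within its slot and, when availability is requested, the
		// availability value follows it immediately (see captureResults()).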
547 const vk::VkDeviceSize resultsBufferSize = m_testVector.queryResultsStride * NUM_QUERIES_IN_POOL;
548 m_queryPoolResultsBuffer = Buffer::createAndAlloc(vk, device, BufferCreateInfo(resultsBufferSize, vk::VK_BUFFER_USAGE_TRANSFER_DST_BIT), m_context.getDefaultAllocator(), vk::MemoryRequirement::HostVisible);
549 }
550
551 const CmdPoolCreateInfo cmdPoolCreateInfo (m_context.getUniversalQueueFamilyIndex());
552 m_commandPool = vk::createCommandPool(vk, device, &cmdPoolCreateInfo);
553 m_renderCommandBuffer = recordRender(*m_commandPool);
554
555 if (hasSeparateResetCmdBuf())
556 {
557 m_queryPoolResetCommandBuffer = recordQueryPoolReset(*m_commandPool);
558 }
559
560 if (hasSeparateCopyCmdBuf())
561 {
562 m_copyResultsCommandBuffer = recordCopyResults(*m_commandPool);
563 }
564 }
565
OcclusionQueryTestInstance::~OcclusionQueryTestInstance (void)
567 {
568 const vk::VkDevice device = m_context.getDevice();
569
570 if (m_stateObjects)
571 delete m_stateObjects;
572
573 if (m_queryPool != DE_NULL)
574 {
575 const vk::DeviceInterface& vk = m_context.getDeviceInterface();
576 vk.destroyQueryPool(device, m_queryPool, /*pAllocator*/ DE_NULL);
577 }
578 }
579
tcu::TestStatus OcclusionQueryTestInstance::iterate (void)
581 {
582 const vk::VkQueue queue = m_context.getUniversalQueue();
583 const vk::DeviceInterface& vk = m_context.getDeviceInterface();
584 tcu::TestLog& log = m_context.getTestContext().getLog();
585 std::vector<tcu::Vec4> vertices (NUM_VERTICES);
586
587 // 1st triangle
588 vertices[START_VERTEX + 0] = tcu::Vec4( 0.5, 0.5, 0.5, 1.0);
589 vertices[START_VERTEX + 1] = tcu::Vec4( 0.5, -0.5, 0.5, 1.0);
590 vertices[START_VERTEX + 2] = tcu::Vec4(-0.5, 0.5, 0.5, 1.0);
591 // 2nd triangle - partially occluding the scene
592 vertices[START_VERTEX_PARTIALLY_OCCLUDED + 0] = tcu::Vec4(-0.5, -0.5, 1.0, 1.0);
593 vertices[START_VERTEX_PARTIALLY_OCCLUDED + 1] = tcu::Vec4( 0.5, -0.5, 1.0, 1.0);
594 vertices[START_VERTEX_PARTIALLY_OCCLUDED + 2] = tcu::Vec4(-0.5, 0.5, 1.0, 1.0);
	// 3rd triangle - fully occluding the scene
596 vertices[START_VERTEX_OCCLUDER + 0] = tcu::Vec4( 0.5, 0.5, 1.0, 1.0);
597 vertices[START_VERTEX_OCCLUDER + 1] = tcu::Vec4( 0.5, -0.5, 1.0, 1.0);
598 vertices[START_VERTEX_OCCLUDER + 2] = tcu::Vec4(-0.5, 0.5, 1.0, 1.0);
599
600 m_stateObjects->setVertices(vk, vertices);
601
602 if (hasSeparateResetCmdBuf())
603 {
604 const vk::VkSubmitInfo submitInfoReset =
605 {
606 vk::VK_STRUCTURE_TYPE_SUBMIT_INFO, // VkStructureType sType;
607 DE_NULL, // const void* pNext;
608 0u, // deUint32 waitSemaphoreCount;
609 DE_NULL, // const VkSemaphore* pWaitSemaphores;
610 (const vk::VkPipelineStageFlags*)DE_NULL,
611 1u, // deUint32 commandBufferCount;
612 &m_queryPoolResetCommandBuffer.get(), // const VkCommandBuffer* pCommandBuffers;
613 0u, // deUint32 signalSemaphoreCount;
614 DE_NULL // const VkSemaphore* pSignalSemaphores;
615 };
616
		VK_CHECK(vk.queueSubmit(queue, 1, &submitInfoReset, DE_NULL));
618
		// Wait for the reset to complete. This ensures the query pool is in the reset state before any
		// host access, so that no additional synchronization has to be inserted before capturing the
		// results in the WAIT_NONE variant of the test.
622 VK_CHECK(vk.queueWaitIdle(queue));
623 }
624
625 {
626 const vk::VkSubmitInfo submitInfoRender =
627 {
628 vk::VK_STRUCTURE_TYPE_SUBMIT_INFO, // VkStructureType sType;
629 DE_NULL, // const void* pNext;
630 0, // deUint32 waitSemaphoreCount;
631 DE_NULL, // const VkSemaphore* pWaitSemaphores;
632 (const vk::VkPipelineStageFlags*)DE_NULL,
633 1, // deUint32 commandBufferCount;
634 &m_renderCommandBuffer.get(), // const VkCommandBuffer* pCommandBuffers;
635 0, // deUint32 signalSemaphoreCount;
636 DE_NULL // const VkSemaphore* pSignalSemaphores;
637 };
		VK_CHECK(vk.queueSubmit(queue, 1, &submitInfoRender, DE_NULL));
639 }
640
641 if (m_testVector.queryWait == WAIT_QUEUE)
642 {
643 VK_CHECK(vk.queueWaitIdle(queue));
644 }
645
646 if (hasSeparateCopyCmdBuf())
647 {
		// In case of the WAIT_QUEUE test variant, the previously submitted m_renderCommandBuffer did not
		// contain vkCmdCopyQueryPoolResults, so an additional command buffer is needed.

		// In the case of WAIT_NONE or WAIT_QUERY, vkCmdCopyQueryPoolResults is recorded in m_renderCommandBuffer.
652
653 const vk::VkSubmitInfo submitInfo =
654 {
655 vk::VK_STRUCTURE_TYPE_SUBMIT_INFO, // VkStructureType sType;
656 DE_NULL, // const void* pNext;
657 0, // deUint32 waitSemaphoreCount;
658 DE_NULL, // const VkSemaphore* pWaitSemaphores;
659 (const vk::VkPipelineStageFlags*)DE_NULL,
660 1, // deUint32 commandBufferCount;
661 &m_copyResultsCommandBuffer.get(), // const VkCommandBuffer* pCommandBuffers;
662 0, // deUint32 signalSemaphoreCount;
663 DE_NULL // const VkSemaphore* pSignalSemaphores;
664 };
		VK_CHECK(vk.queueSubmit(queue, 1, &submitInfo, DE_NULL));
666 }
667
668 if (m_testVector.queryResultsMode == RESULTS_MODE_COPY)
669 {
		// When vkCmdCopyQueryPoolResults is used, the test must always wait for the copy
		// to complete before it can read the result buffer.
672
673 VK_CHECK(vk.queueWaitIdle(queue));
674 }
675
676 deUint64 queryResults [NUM_QUERIES_IN_POOL];
677 deUint64 queryAvailability [NUM_QUERIES_IN_POOL];
678
679 // Allow not ready results only if nobody waited before getting the query results
680 const bool allowNotReady = (m_testVector.queryWait == WAIT_NONE);
681
682 captureResults(queryResults, queryAvailability, allowNotReady);
683
684 log << tcu::TestLog::Section("OcclusionQueryResults", "Occlusion query results");
685
686 logResults(queryResults, queryAvailability);
687 bool passed = validateResults(queryResults, queryAvailability, allowNotReady, m_testVector.primitiveTopology);
688
689 log << tcu::TestLog::EndSection;
690
691 if (m_testVector.queryResultsMode != RESULTS_MODE_COPY)
692 {
693 VK_CHECK(vk.queueWaitIdle(queue));
694 }
695
696 if (passed)
697 {
698 return tcu::TestStatus(QP_TEST_RESULT_PASS, "Query result verification passed");
699 }
700 return tcu::TestStatus(QP_TEST_RESULT_FAIL, "Query result verification failed");
701 }
702
bool OcclusionQueryTestInstance::hasSeparateResetCmdBuf (void) const
704 {
705 // Determine if resetting query pool should be performed in separate command buffer
706 // to avoid race condition between host query access and device query reset.
707
708 if (m_testVector.queryResultsMode == RESULTS_MODE_COPY)
709 {
710 // We copy query results on device, so there is no race condition between
711 // host and device
712 return false;
713 }
714 if (m_testVector.queryWait == WAIT_QUEUE)
715 {
716 // We wait for queue to be complete before accessing query results
717 return false;
718 }
719
720 // Separate command buffer with reset must be submitted & completed before
721 // host accesses the query results
722 return true;
723 }
724
bool OcclusionQueryTestInstance::hasSeparateCopyCmdBuf (void) const
726 {
727 // Copy query results must go into separate command buffer, if we want to wait on queue before that
728 return (m_testVector.queryResultsMode == RESULTS_MODE_COPY && m_testVector.queryWait == WAIT_QUEUE);
729 }
730
vk::Move<vk::VkCommandBuffer> OcclusionQueryTestInstance::recordQueryPoolReset (vk::VkCommandPool cmdPool)
732 {
733 const vk::VkDevice device = m_context.getDevice();
734 const vk::DeviceInterface& vk = m_context.getDeviceInterface();
735
736 DE_ASSERT(hasSeparateResetCmdBuf());
737
738 vk::Move<vk::VkCommandBuffer> cmdBuffer (vk::allocateCommandBuffer(vk, device, cmdPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
739
740 beginCommandBuffer(vk, *cmdBuffer);
741 vk.cmdResetQueryPool(*cmdBuffer, m_queryPool, 0, NUM_QUERIES_IN_POOL);
742 endCommandBuffer(vk, *cmdBuffer);
743
744 return cmdBuffer;
745 }
746
vk::Move<vk::VkCommandBuffer> OcclusionQueryTestInstance::recordRender (vk::VkCommandPool cmdPool)
748 {
749 const vk::VkDevice device = m_context.getDevice();
750 const vk::DeviceInterface& vk = m_context.getDeviceInterface();
751
752 vk::Move<vk::VkCommandBuffer> cmdBuffer (vk::allocateCommandBuffer(vk, device, cmdPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
753
754 beginCommandBuffer(vk, *cmdBuffer);
755
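	// The scene is rendered three times, in three separate render passes: first the test triangle alone,
	// then the same triangle drawn after a partial occluder, and finally after a full occluder. In each
	// pass only the final draw of the test triangle is wrapped in its occlusion query.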
756 initialTransitionColor2DImage(vk, *cmdBuffer, m_stateObjects->m_colorAttachmentImage->object(), vk::VK_IMAGE_LAYOUT_GENERAL,
757 vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT);
758 initialTransitionDepth2DImage(vk, *cmdBuffer, m_stateObjects->m_DepthImage->object(), vk::VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL,
759 vk::VK_ACCESS_DEPTH_STENCIL_ATTACHMENT_WRITE_BIT, vk::VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | vk::VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT);
760
761 std::vector<vk::VkClearValue> renderPassClearValues(2);
762 deMemset(&renderPassClearValues[0], 0, static_cast<int>(renderPassClearValues.size()) * sizeof(vk::VkClearValue));
763
764 if (!hasSeparateResetCmdBuf())
765 {
766 vk.cmdResetQueryPool(*cmdBuffer, m_queryPool, 0, NUM_QUERIES_IN_POOL);
767 }
768
769 beginRenderPass(vk, *cmdBuffer, *m_stateObjects->m_renderPass, *m_stateObjects->m_framebuffer, vk::makeRect2D(0, 0, StateObjects::WIDTH, StateObjects::HEIGHT), (deUint32)renderPassClearValues.size(), &renderPassClearValues[0]);
770
771 vk.cmdBindPipeline(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_stateObjects->m_pipeline);
772
773 vk::VkBuffer vertexBuffer = m_stateObjects->m_vertexBuffer->object();
774 const vk::VkDeviceSize vertexBufferOffset = 0;
775 vk.cmdBindVertexBuffers(*cmdBuffer, 0, 1, &vertexBuffer, &vertexBufferOffset);
776
777 // Draw un-occluded geometry
778 vk.cmdBeginQuery(*cmdBuffer, m_queryPool, QUERY_INDEX_CAPTURE_ALL, m_testVector.queryControlFlags);
779 vk.cmdDraw(*cmdBuffer, NUM_VERTICES_IN_DRAWCALL, 1, START_VERTEX, 0);
780 vk.cmdEndQuery(*cmdBuffer, m_queryPool, QUERY_INDEX_CAPTURE_ALL);
781
782 endRenderPass(vk, *cmdBuffer);
783
784 beginRenderPass(vk, *cmdBuffer, *m_stateObjects->m_renderPass, *m_stateObjects->m_framebuffer, vk::makeRect2D(0, 0, StateObjects::WIDTH, StateObjects::HEIGHT), (deUint32)renderPassClearValues.size(), &renderPassClearValues[0]);
785
786 vk.cmdBindPipeline(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_stateObjects->m_pipeline);
787
788 // Draw un-occluded geometry
789 vk.cmdDraw(*cmdBuffer, NUM_VERTICES_IN_DRAWCALL, 1, START_VERTEX, 0);
790
791 // Partially occlude geometry
792 vk.cmdDraw(*cmdBuffer, NUM_VERTICES_IN_PARTIALLY_OCCLUDED_DRAWCALL, 1, START_VERTEX_PARTIALLY_OCCLUDED, 0);
793
794 // Draw partially-occluded geometry
795 vk.cmdBeginQuery(*cmdBuffer, m_queryPool, QUERY_INDEX_CAPTURE_PARTIALLY_OCCLUDED, m_testVector.queryControlFlags);
796 vk.cmdDraw(*cmdBuffer, NUM_VERTICES_IN_DRAWCALL, 1, START_VERTEX, 0);
797 vk.cmdEndQuery(*cmdBuffer, m_queryPool, QUERY_INDEX_CAPTURE_PARTIALLY_OCCLUDED);
798
799 endRenderPass(vk, *cmdBuffer);
800
801 beginRenderPass(vk, *cmdBuffer, *m_stateObjects->m_renderPass, *m_stateObjects->m_framebuffer, vk::makeRect2D(0, 0, StateObjects::WIDTH, StateObjects::HEIGHT), (deUint32)renderPassClearValues.size(), &renderPassClearValues[0]);
802
803 vk.cmdBindPipeline(*cmdBuffer, vk::VK_PIPELINE_BIND_POINT_GRAPHICS, *m_stateObjects->m_pipeline);
804
805 // Draw un-occluded geometry
806 vk.cmdDraw(*cmdBuffer, NUM_VERTICES_IN_DRAWCALL, 1, START_VERTEX, 0);
807
808 // Partially occlude geometry
809 vk.cmdDraw(*cmdBuffer, NUM_VERTICES_IN_PARTIALLY_OCCLUDED_DRAWCALL, 1, START_VERTEX_PARTIALLY_OCCLUDED, 0);
810
811 // Occlude geometry
812 vk.cmdDraw(*cmdBuffer, NUM_VERTICES_IN_OCCLUDER_DRAWCALL, 1, START_VERTEX_OCCLUDER, 0);
813
814 // Draw occluded geometry
815 vk.cmdBeginQuery(*cmdBuffer, m_queryPool, QUERY_INDEX_CAPTURE_OCCLUDED, m_testVector.queryControlFlags);
816 vk.cmdDraw(*cmdBuffer, NUM_VERTICES_IN_DRAWCALL, 1, START_VERTEX, 0);
817 vk.cmdEndQuery(*cmdBuffer, m_queryPool, QUERY_INDEX_CAPTURE_OCCLUDED);
818
819 endRenderPass(vk, *cmdBuffer);
820
821 if (m_testVector.queryResultsMode == RESULTS_MODE_COPY && !hasSeparateCopyCmdBuf())
822 {
823 vk.cmdCopyQueryPoolResults(*cmdBuffer, m_queryPool, 0, NUM_QUERIES_IN_POOL, m_queryPoolResultsBuffer->object(), /*dstOffset*/ 0, m_testVector.queryResultsStride, m_queryResultFlags);
824 }
825
826 transition2DImage(vk, *cmdBuffer, m_stateObjects->m_colorAttachmentImage->object(), vk::VK_IMAGE_ASPECT_COLOR_BIT, vk::VK_IMAGE_LAYOUT_GENERAL,
827 vk::VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, vk::VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT, vk::VK_ACCESS_TRANSFER_READ_BIT,
828 vk::VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT, vk::VK_PIPELINE_STAGE_TRANSFER_BIT);
829
830 endCommandBuffer(vk, *cmdBuffer);
831
832 return cmdBuffer;
833 }
834
vk::Move<vk::VkCommandBuffer> OcclusionQueryTestInstance::recordCopyResults (vk::VkCommandPool cmdPool)
836 {
837 const vk::VkDevice device = m_context.getDevice();
838 const vk::DeviceInterface& vk = m_context.getDeviceInterface();
839
840 vk::Move<vk::VkCommandBuffer> cmdBuffer (vk::allocateCommandBuffer(vk, device, cmdPool, vk::VK_COMMAND_BUFFER_LEVEL_PRIMARY));
841
842 beginCommandBuffer(vk, *cmdBuffer);
843 vk.cmdCopyQueryPoolResults(*cmdBuffer, m_queryPool, 0, NUM_QUERIES_IN_POOL, m_queryPoolResultsBuffer->object(), /*dstOffset*/ 0, m_testVector.queryResultsStride, m_queryResultFlags);
844 endCommandBuffer(vk, *cmdBuffer);
845
846 return cmdBuffer;
847 }
848
void OcclusionQueryTestInstance::captureResults (deUint64* retResults, deUint64* retAvailability, bool allowNotReady)
850 {
851
852 const vk::VkDevice device = m_context.getDevice();
853 const vk::DeviceInterface& vk = m_context.getDeviceInterface();
854 std::vector<deUint8> resultsBuffer (static_cast<size_t>(m_testVector.queryResultsStride) * NUM_QUERIES_IN_POOL);
855
856 if (m_testVector.queryResultsMode == RESULTS_MODE_GET)
857 {
858 const vk::VkResult queryResult = vk.getQueryPoolResults(device, m_queryPool, 0, NUM_QUERIES_IN_POOL, resultsBuffer.size(), &resultsBuffer[0], m_testVector.queryResultsStride, m_queryResultFlags);
859 if (queryResult == vk::VK_NOT_READY && !allowNotReady)
860 {
861 TCU_FAIL("getQueryPoolResults returned VK_NOT_READY, but results should be already available.");
862 }
863 else
864 {
865 VK_CHECK(queryResult);
866 }
867 }
868 else if (m_testVector.queryResultsMode == RESULTS_MODE_COPY)
869 {
870 const vk::Allocation& allocation = m_queryPoolResultsBuffer->getBoundMemory();
871 const void* allocationData = allocation.getHostPtr();
872
873 vk::invalidateAlloc(vk, device, allocation);
874
875 deMemcpy(&resultsBuffer[0], allocationData, resultsBuffer.size());
876 }
877
878 for (int queryNdx = 0; queryNdx < NUM_QUERIES_IN_POOL; queryNdx++)
879 {
880 const void* srcPtr = &resultsBuffer[queryNdx * static_cast<size_t>(m_testVector.queryResultsStride)];
881 if (m_testVector.queryResultSize == RESULT_SIZE_32_BIT)
882 {
883 const deUint32* srcPtrTyped = static_cast<const deUint32*>(srcPtr);
884 retResults[queryNdx] = *srcPtrTyped;
885 if (m_testVector.queryResultsAvailability)
886 {
				retAvailability[queryNdx] = *(srcPtrTyped + 1);
888 }
889 }
890 else if (m_testVector.queryResultSize == RESULT_SIZE_64_BIT)
891 {
892 const deUint64* srcPtrTyped = static_cast<const deUint64*>(srcPtr);
893 retResults[queryNdx] = *srcPtrTyped;
894
			if (m_testVector.queryResultsAvailability)
			{
				retAvailability[queryNdx] = *(srcPtrTyped + 1);
			}
902 }
903 else
904 {
905 TCU_FAIL("Wrong m_testVector.queryResultSize");
906 }
907 }
908 }
909
void OcclusionQueryTestInstance::logResults (const deUint64* results, const deUint64* availability)
911 {
912 tcu::TestLog& log = m_context.getTestContext().getLog();
913
914 for (int ndx = 0; ndx < NUM_QUERIES_IN_POOL; ++ndx)
915 {
916 if (!m_testVector.queryResultsAvailability)
917 {
918 log << tcu::TestLog::Message << "query[ slot == " << ndx << "] result == " << results[ndx] << tcu::TestLog::EndMessage;
919 }
920 else
921 {
922 log << tcu::TestLog::Message << "query[ slot == " << ndx << "] result == " << results[ndx] << ", availability == " << availability[ndx] << tcu::TestLog::EndMessage;
923 }
924 }
925 }
926
bool OcclusionQueryTestInstance::validateResults (const deUint64* results, const deUint64* availability, bool allowUnavailable, vk::VkPrimitiveTopology primitiveTopology)
928 {
929 bool passed = true;
930 tcu::TestLog& log = m_context.getTestContext().getLog();
931
932 for (int queryNdx = 0; queryNdx < NUM_QUERIES_IN_POOL; ++queryNdx)
933 {
934 deUint64 expectedValueMin = 0;
935 deUint64 expectedValueMax = 0;
936
937 if (m_testVector.queryResultsAvailability && availability[queryNdx] == 0)
938 {
939 // query result was not available
940 if (!allowUnavailable)
941 {
942 log << tcu::TestLog::Message << "query results availability was 0 for index "
943 << queryNdx << ", expected any value greater than 0." << tcu::TestLog::EndMessage;
944 passed = false;
945 continue;
946 }
947 }
948 else
949 {
950 // query is available, so expect proper result values
951 if (primitiveTopology == vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST)
952 {
953 switch (queryNdx)
954 {
955 case QUERY_INDEX_CAPTURE_OCCLUDED:
956 expectedValueMin = 0;
957 expectedValueMax = 0;
958 break;
959 case QUERY_INDEX_CAPTURE_PARTIALLY_OCCLUDED:
960 expectedValueMin = 1;
961 expectedValueMax = 1;
962 break;
963 case QUERY_INDEX_CAPTURE_ALL:
964 expectedValueMin = NUM_VERTICES_IN_DRAWCALL;
965 expectedValueMax = NUM_VERTICES_IN_DRAWCALL;
966 break;
967 }
968 }
969 else if (primitiveTopology == vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST)
970 {
971 switch (queryNdx)
972 {
973 case QUERY_INDEX_CAPTURE_OCCLUDED:
974 expectedValueMin = 0;
975 expectedValueMax = 0;
976 break;
977 case QUERY_INDEX_CAPTURE_PARTIALLY_OCCLUDED:
978 case QUERY_INDEX_CAPTURE_ALL:
979 {
980 const int primWidth = StateObjects::WIDTH / 2;
981 const int primHeight = StateObjects::HEIGHT / 2;
982 const int primArea = primWidth * primHeight / 2;
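						// The test triangle spans a WIDTH/2 x HEIGHT/2 quad and covers half of it, hence
						// primArea samples are expected; a few percent of tolerance is allowed, and in
						// discardHalf mode roughly half of those fragments are discarded by the shader.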
983
984 if (m_testVector.discardHalf)
985 {
986 expectedValueMin = (int)(0.95f * primArea * 0.5f);
987 expectedValueMax = (int)(1.05f * primArea * 0.5f);
988 }
989 else
990 {
991 expectedValueMin = (int)(0.97f * primArea);
992 expectedValueMax = (int)(1.03f * primArea);
993 }
994 }
995 }
996 }
997 else
998 {
999 TCU_FAIL("Unsupported primitive topology");
1000 }
1001 }
1002
1003 if ((m_testVector.queryControlFlags & vk::VK_QUERY_CONTROL_PRECISE_BIT) || (expectedValueMin == 0 && expectedValueMax == 0))
1004 {
1005 // require precise value
1006 if (results[queryNdx] < expectedValueMin || results[queryNdx] > expectedValueMax)
1007 {
				log << tcu::TestLog::Message << "wrong value of query for index "
					<< queryNdx << ", expected a value between " << expectedValueMin << " and " << expectedValueMax << ", got "
					<< results[queryNdx] << "." << tcu::TestLog::EndMessage;
1011 passed = false;
1012 }
1013 }
1014 else
1015 {
1016 // require imprecise value greater than 0
1017 if (results[queryNdx] == 0)
1018 {
1019 log << tcu::TestLog::Message << "wrong value of query for index "
1020 << queryNdx << ", expected any non-zero value, got "
1021 << results[queryNdx] << "." << tcu::TestLog::EndMessage;
1022 passed = false;
1023 }
1024 }
1025 }
1026 return passed;
1027 }
1028
1029 template<class Instance>
1030 class QueryPoolOcclusionTest : public vkt::TestCase
1031 {
1032 public:
	QueryPoolOcclusionTest (tcu::TestContext& context, const char* name, const char* description, const OcclusionQueryTestVector& testVector)
1034 : TestCase (context, name, description)
1035 , m_testVector (testVector)
1036 {
1037 }
1038 private:
	vkt::TestInstance* createInstance (vkt::Context& context) const
1040 {
1041 return new Instance(context, m_testVector);
1042 }
1043
	void initPrograms (vk::SourceCollections& programCollection) const
1045 {
1046 const char* const discard =
1047 " if ((int(gl_FragCoord.x) % 2) == (int(gl_FragCoord.y) % 2))\n"
1048 " discard;\n";
1049
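		// When discardHalf is set, the fragment shader discards fragments in a checkerboard pattern,
		// i.e. roughly every other fragment, which the expected sample counts account for.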
1050 const std::string fragSrc = std::string(
1051 "#version 400\n"
1052 "layout(location = 0) out vec4 out_FragColor;\n"
1053 "void main()\n"
1054 "{\n"
1055 " out_FragColor = vec4(0.07, 0.48, 0.75, 1.0);\n")
1056 + std::string(m_testVector.discardHalf ? discard : "")
1057 + "}\n";
1058
1059 programCollection.glslSources.add("frag") << glu::FragmentSource(fragSrc.c_str());
1060
1061 programCollection.glslSources.add("vert") << glu::VertexSource("#version 430\n"
1062 "layout(location = 0) in vec4 in_Position;\n"
1063 "out gl_PerVertex { vec4 gl_Position; float gl_PointSize; };\n"
1064 "void main() {\n"
1065 " gl_Position = in_Position;\n"
1066 " gl_PointSize = 1.0;\n"
1067 "}\n");
1068 }
1069
1070 OcclusionQueryTestVector m_testVector;
1071 };
1072
1073 } //anonymous
1074
QueryPoolOcclusionTests::QueryPoolOcclusionTests (tcu::TestContext& testCtx)
1076 : TestCaseGroup(testCtx, "occlusion_query", "Tests for occlusion queries")
1077 {
1078 /* Left blank on purpose */
1079 }
1080
QueryPoolOcclusionTests::~QueryPoolOcclusionTests (void)
1082 {
1083 /* Left blank on purpose */
1084 }
1085
void QueryPoolOcclusionTests::init (void)
1087 {
1088 OcclusionQueryTestVector baseTestVector;
1089 baseTestVector.queryControlFlags = 0;
1090 baseTestVector.queryResultSize = RESULT_SIZE_64_BIT;
1091 baseTestVector.queryWait = WAIT_QUEUE;
1092 baseTestVector.queryResultsMode = RESULTS_MODE_GET;
1093 baseTestVector.queryResultsStride = sizeof(deUint64);
1094 baseTestVector.queryResultsAvailability = false;
1095 baseTestVector.primitiveTopology = vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST;
1096 baseTestVector.discardHalf = false;
1097
1098 //Basic tests
1099 {
1100 OcclusionQueryTestVector testVector = baseTestVector;
1101 testVector.queryControlFlags = 0;
1102 addChild(new QueryPoolOcclusionTest<BasicOcclusionQueryTestInstance>(m_testCtx, "basic_conservative", "draw with conservative occlusion query", testVector));
1103 testVector.queryControlFlags = vk::VK_QUERY_CONTROL_PRECISE_BIT;
1104 addChild(new QueryPoolOcclusionTest<BasicOcclusionQueryTestInstance>(m_testCtx, "basic_precise", "draw with precise occlusion query", testVector));
1105 }
1106
1107 // Functional test
1108 {
1109 const vk::VkQueryControlFlags controlFlags[] = { 0, vk::VK_QUERY_CONTROL_PRECISE_BIT };
1110 const char* const controlFlagsStr[] = { "conservative", "precise" };
1111
1112 for (int controlFlagIdx = 0; controlFlagIdx < DE_LENGTH_OF_ARRAY(controlFlags); ++controlFlagIdx)
1113 {
1114
1115 const vk::VkPrimitiveTopology primitiveTopology[] = { vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST, vk::VK_PRIMITIVE_TOPOLOGY_TRIANGLE_LIST };
1116 const char* const primitiveTopologyStr[] = { "points", "triangles" };
1117 for (int primitiveTopologyIdx = 0; primitiveTopologyIdx < DE_LENGTH_OF_ARRAY(primitiveTopology); ++primitiveTopologyIdx)
1118 {
1119
1120 const OcclusionQueryResultSize resultSize[] = { RESULT_SIZE_32_BIT, RESULT_SIZE_64_BIT };
1121 const char* const resultSizeStr[] = { "32", "64" };
1122
1123 for (int resultSizeIdx = 0; resultSizeIdx < DE_LENGTH_OF_ARRAY(resultSize); ++resultSizeIdx)
1124 {
1125
1126 const OcclusionQueryWait wait[] = { WAIT_QUEUE, WAIT_QUERY };
1127 const char* const waitStr[] = { "queue", "query" };
1128
1129 for (int waitIdx = 0; waitIdx < DE_LENGTH_OF_ARRAY(wait); ++waitIdx)
1130 {
1131 const OcclusionQueryResultsMode resultsMode[] = { RESULTS_MODE_GET, RESULTS_MODE_COPY };
1132 const char* const resultsModeStr[] = { "get", "copy" };
1133
1134 for (int resultsModeIdx = 0; resultsModeIdx < DE_LENGTH_OF_ARRAY(resultsMode); ++resultsModeIdx)
1135 {
1136
1137 const bool testAvailability[] = { false, true };
1138 const char* const testAvailabilityStr[] = { "without", "with"};
1139
1140 for (int testAvailabilityIdx = 0; testAvailabilityIdx < DE_LENGTH_OF_ARRAY(testAvailability); ++testAvailabilityIdx)
1141 {
1142 const bool discardHalf[] = { false, true };
1143 const char* const discardHalfStr[] = { "", "_discard" };
1144
1145 for (int discardHalfIdx = 0; discardHalfIdx < DE_LENGTH_OF_ARRAY(discardHalf); ++discardHalfIdx)
1146 {
1147 OcclusionQueryTestVector testVector = baseTestVector;
1148 testVector.queryControlFlags = controlFlags[controlFlagIdx];
1149 testVector.queryResultSize = resultSize[resultSizeIdx];
1150 testVector.queryWait = wait[waitIdx];
1151 testVector.queryResultsMode = resultsMode[resultsModeIdx];
1152 testVector.queryResultsStride = (testVector.queryResultSize == RESULT_SIZE_32_BIT ? sizeof(deUint32) : sizeof(deUint64));
1153 testVector.queryResultsAvailability = testAvailability[testAvailabilityIdx];
1154 testVector.primitiveTopology = primitiveTopology[primitiveTopologyIdx];
1155 testVector.discardHalf = discardHalf[discardHalfIdx];
1156
1157 if (testVector.discardHalf && testVector.primitiveTopology == vk::VK_PRIMITIVE_TOPOLOGY_POINT_LIST)
1158 continue; // Discarding half of the pixels in fragment shader doesn't make sense with one-pixel-sized points.
1159
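							// With availability requested each query returns two values (result + availability),
							// so the stride has to be doubled.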
1160 if (testVector.queryResultsAvailability)
1161 {
1162 testVector.queryResultsStride *= 2;
1163 }
1164
1165 std::ostringstream testName;
1166 std::ostringstream testDescr;
1167
1168 testName << resultsModeStr[resultsModeIdx] << "_results"
1169 << "_" << controlFlagsStr[controlFlagIdx]
1170 << "_size_" << resultSizeStr[resultSizeIdx]
1171 << "_wait_" << waitStr[waitIdx]
1172 << "_" << testAvailabilityStr[testAvailabilityIdx] << "_availability"
1173 << "_draw_" << primitiveTopologyStr[primitiveTopologyIdx]
1174 << discardHalfStr[discardHalfIdx];
1175
1176 testDescr << "draw occluded " << primitiveTopologyStr[primitiveTopologyIdx]
1177 << "with " << controlFlagsStr[controlFlagIdx] << ", "
1178 << resultsModeStr[resultsModeIdx] << " results "
1179 << testAvailabilityStr[testAvailabilityIdx] << " availability bit as "
1180 << resultSizeStr[resultSizeIdx] << "bit variables,"
1181 << (testVector.discardHalf ? " discarding half of the fragments," : "")
1182 << "wait for results on" << waitStr[waitIdx];
1183
1184 addChild(new QueryPoolOcclusionTest<OcclusionQueryTestInstance>(m_testCtx, testName.str().c_str(), testDescr.str().c_str(), testVector));
1185 }
1186 }
1187 }
1188 }
1189 }
1190 }
1191 }
1192 }
1193 // Test different strides
1194 {
1195 const OcclusionQueryResultsMode resultsMode[] = { RESULTS_MODE_GET, RESULTS_MODE_COPY };
1196 const char* const resultsModeStr[] = { "get", "copy" };
1197
1198 for (int resultsModeIdx = 0; resultsModeIdx < DE_LENGTH_OF_ARRAY(resultsMode); ++resultsModeIdx)
1199 {
1200 const OcclusionQueryResultSize resultSizes[] = { RESULT_SIZE_32_BIT, RESULT_SIZE_64_BIT };
1201 const char* const resultSizeStr[] = { "32", "64" };
1202
1203 const bool testAvailability[] = { false, true };
1204 const char* const testAvailabilityStr[] = { "without", "with" };
1205
1206 for (int testAvailabilityIdx = 0; testAvailabilityIdx < DE_LENGTH_OF_ARRAY(testAvailability); ++testAvailabilityIdx)
1207 {
1208 for (int resultSizeIdx = 0; resultSizeIdx < DE_LENGTH_OF_ARRAY(resultSizes); ++resultSizeIdx)
1209 {
1210 const vk::VkDeviceSize resultSize = (resultSizes[resultSizeIdx] == RESULT_SIZE_32_BIT ? sizeof(deUint32) : sizeof(deUint64));
1211
1212 // \todo [2015-12-18 scygan] Ensure only stride values aligned to resultSize are allowed. Otherwise test should be extended.
1213 const vk::VkDeviceSize strides[] =
1214 {
1215 1 * resultSize,
1216 2 * resultSize,
1217 3 * resultSize,
1218 4 * resultSize,
1219 5 * resultSize,
1220 13 * resultSize,
1221 1024 * resultSize
1222 };
1223
1224 for (int strideIdx = 0; strideIdx < DE_LENGTH_OF_ARRAY(strides); strideIdx++)
1225 {
1226 OcclusionQueryTestVector testVector = baseTestVector;
1227 testVector.queryResultsMode = resultsMode[resultsModeIdx];
1228 testVector.queryResultSize = resultSizes[resultSizeIdx];
1229 testVector.queryResultsAvailability = testAvailability[testAvailabilityIdx];
1230 testVector.queryResultsStride = strides[strideIdx];
1231
1232 const vk::VkDeviceSize elementSize = (testVector.queryResultsAvailability ? resultSize * 2 : resultSize);
1233
1234 if (elementSize > testVector.queryResultsStride)
1235 {
1236 continue;
1237 }
1238
1239 std::ostringstream testName;
1240 std::ostringstream testDescr;
1241
1242 testName << resultsModeStr[resultsModeIdx]
1243 << "_results_size_" << resultSizeStr[resultSizeIdx]
1244 << "_stride_" << strides[strideIdx]
1245 << "_" << testAvailabilityStr[testAvailabilityIdx] << "_availability";
1246
1247 testDescr << resultsModeStr[resultsModeIdx] << " results "
1248 << testAvailabilityStr[testAvailabilityIdx] << " availability bit as "
							<< resultSizeStr[resultSizeIdx] << "-bit variables, with stride " << strides[strideIdx];
1250
1251 addChild(new QueryPoolOcclusionTest<OcclusionQueryTestInstance>(m_testCtx, testName.str().c_str(), testDescr.str().c_str(), testVector));
1252 }
1253 }
1254 }
1255 }
1256
1257 }
1258 }
1259
1260 } //QueryPool
1261 } //vkt
1262
1263