/*
 * Copyright (c) 2015-2016 The Khronos Group Inc.
 * Copyright (c) 2015-2016 Valve Corporation
 * Copyright (c) 2015-2016 LunarG, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and/or associated documentation files (the "Materials"), to
 * deal in the Materials without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Materials, and to permit persons to whom the Materials are
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice(s) and this permission notice shall be included in
 * all copies or substantial portions of the Materials.
 *
 * THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 *
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE MATERIALS OR THE
 * USE OR OTHER DEALINGS IN THE MATERIALS.
 *
 * Author: Courtney Goeltzenleuchter <courtney@LunarG.com>
 * Author: Tony Barbour <tony@LunarG.com>
 */

#include <iostream>
#include <string.h> // memset(), memcmp()
#include <assert.h>
#include <stdarg.h>
#include "vktestbinding.h"

namespace {

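// Helper macros for the non-dispatchable handle wrappers below. INIT calls
// the given vkCreate*/vkAllocate* entry point, EXPECTs VK_SUCCESS, and on
// success stores the device and new handle in the NonDispHandle base; DTOR
// generates a destructor that calls the matching vkDestroy* only when init
// actually succeeded.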
#define NON_DISPATCHABLE_HANDLE_INIT(create_func, dev, ...)                   \
    do {                                                                      \
        handle_type handle;                                                   \
        if (EXPECT(create_func(dev.handle(), __VA_ARGS__, NULL, &handle) ==   \
                   VK_SUCCESS))                                               \
            NonDispHandle::init(dev.handle(), handle);                        \
    } while (0)

#define NON_DISPATCHABLE_HANDLE_DTOR(cls, destroy_func)                       \
    cls::~cls() {                                                             \
        if (initialized())                                                    \
            destroy_func(device(), handle(), NULL);                          \
    }

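// EXPECT evaluates to true when expr holds; otherwise it reports the failed
// expectation (through the registered ErrorCallback, or stderr) and yields
// false, so callers can both log and branch on the result, e.g.
//   if (!EXPECT(err == VK_SUCCESS)) return;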
#define STRINGIFY(x) #x
#define EXPECT(expr)                                                          \
    ((expr) ? true : expect_failure(STRINGIFY(expr), __FILE__, __LINE__,      \
                                    __FUNCTION__))

vk_testing::ErrorCallback error_callback;

bool expect_failure(const char *expr, const char *file, unsigned int line,
                    const char *function) {
    if (error_callback) {
        error_callback(expr, file, line, function);
    } else {
        std::cerr << file << ":" << line << ": " << function
                  << ": Expectation `" << expr << "' failed.\n";
    }

    return false;
}

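// Collect the raw Vulkan handles out of a vector of wrapper-object pointers,
// e.g. std::vector<const Fence *> -> std::vector<VkFence>.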
template <class T, class S>
std::vector<T> make_handles(const std::vector<S> &v) {
    std::vector<T> handles;
    handles.reserve(v.size());
    for (typename std::vector<S>::const_iterator it = v.begin(); it != v.end();
         it++)
        handles.push_back((*it)->handle());
    return handles;
}

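// Build a VkMemoryAllocateInfo sized to the resource's requirements and pick
// a memory type that satisfies both the requirement's type bits and the
// requested property flags.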
VkMemoryAllocateInfo get_resource_alloc_info(const vk_testing::Device &dev,
                                             const VkMemoryRequirements &reqs,
                                             VkMemoryPropertyFlags mem_props) {
    VkMemoryAllocateInfo info =
        vk_testing::DeviceMemory::alloc_info(reqs.size, 0);
    dev.phy().set_memory_type(reqs.memoryTypeBits, &info, mem_props);

    return info;
}

} // namespace

namespace vk_testing {

void set_error_callback(ErrorCallback callback) { error_callback = callback; }

VkPhysicalDeviceProperties PhysicalDevice::properties() const {
    VkPhysicalDeviceProperties info;

    vkGetPhysicalDeviceProperties(handle(), &info);

    return info;
}

std::vector<VkQueueFamilyProperties> PhysicalDevice::queue_properties() const {
    std::vector<VkQueueFamilyProperties> info;
    uint32_t count;

    // Call once with NULL data to receive count
    vkGetPhysicalDeviceQueueFamilyProperties(handle(), &count, NULL);
    info.resize(count);
    vkGetPhysicalDeviceQueueFamilyProperties(handle(), &count, info.data());

    return info;
}

VkPhysicalDeviceMemoryProperties PhysicalDevice::memory_properties() const {
    VkPhysicalDeviceMemoryProperties info;

    vkGetPhysicalDeviceMemoryProperties(handle(), &info);

    return info;
}

/*
 * Return list of Global layers available
 */
std::vector<VkLayerProperties> GetGlobalLayers() {
    VkResult err;
    std::vector<VkLayerProperties> layers;
    uint32_t layer_count;

    do {
        layer_count = 0;
        err = vkEnumerateInstanceLayerProperties(&layer_count, NULL);

        if (err == VK_SUCCESS) {
            // resize() (not reserve()) so layers.data() points at
            // layer_count valid elements for the second call
            layers.resize(layer_count);
            err =
                vkEnumerateInstanceLayerProperties(&layer_count, layers.data());
        }
    } while (err == VK_INCOMPLETE);

    assert(err == VK_SUCCESS);

    return layers;
}

/*
 * Return list of Global extensions provided by the ICD / Loader
 */
std::vector<VkExtensionProperties> GetGlobalExtensions() {
    return GetGlobalExtensions(NULL);
}

/*
 * Return list of Global extensions provided by the specified layer
 * If pLayerName is NULL, will return extensions implemented by the loader /
 * ICDs
 */
std::vector<VkExtensionProperties> GetGlobalExtensions(const char *pLayerName) {
    std::vector<VkExtensionProperties> exts;
    uint32_t ext_count;
    VkResult err;

    do {
        ext_count = 0;
        err = vkEnumerateInstanceExtensionProperties(pLayerName, &ext_count,
                                                     NULL);

        if (err == VK_SUCCESS) {
            exts.resize(ext_count);
            err = vkEnumerateInstanceExtensionProperties(pLayerName, &ext_count,
                                                         exts.data());
        }
    } while (err == VK_INCOMPLETE);

    assert(err == VK_SUCCESS);

    return exts;
}

/*
 * Return list of PhysicalDevice extensions provided by the ICD / Loader
 */
std::vector<VkExtensionProperties> PhysicalDevice::extensions() const {
    return extensions(NULL);
}

/*
 * Return list of PhysicalDevice extensions provided by the specified layer
 * If pLayerName is NULL, will return extensions for ICD / loader.
 */
std::vector<VkExtensionProperties>
PhysicalDevice::extensions(const char *pLayerName) const {
    std::vector<VkExtensionProperties> exts;
    VkResult err;

    do {
        uint32_t extCount = 0;
        err = vkEnumerateDeviceExtensionProperties(handle(), pLayerName,
                                                   &extCount, NULL);

        if (err == VK_SUCCESS) {
            exts.resize(extCount);
            err = vkEnumerateDeviceExtensionProperties(handle(), pLayerName,
                                                       &extCount, exts.data());
        }
    } while (err == VK_INCOMPLETE);

    assert(err == VK_SUCCESS);

    return exts;
}

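// Find the first memory type allowed by type_bits that has every bit of
// `properties` set and no bit of `forbid` set; on success write its index
// into info->memoryTypeIndex.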
bool PhysicalDevice::set_memory_type(const uint32_t type_bits,
                                     VkMemoryAllocateInfo *info,
                                     const VkFlags properties,
                                     const VkFlags forbid) const {
    uint32_t type_mask = type_bits;
    // Search memtypes to find first index with those properties
    for (uint32_t i = 0; i < memory_properties_.memoryTypeCount; i++) {
        if ((type_mask & 1) == 1) {
            // Type is available, does it match user properties?
            if ((memory_properties_.memoryTypes[i].propertyFlags &
                 properties) == properties &&
                (memory_properties_.memoryTypes[i].propertyFlags & forbid) ==
                    0) {
                info->memoryTypeIndex = i;
                return true;
            }
        }
        type_mask >>= 1;
    }
    // No memory types matched, return failure
    return false;
}

/*
 * Return list of PhysicalDevice layers
 */
std::vector<VkLayerProperties> PhysicalDevice::layers() const {
    std::vector<VkLayerProperties> layer_props;
    VkResult err;

    do {
        uint32_t layer_count = 0;
        err = vkEnumerateDeviceLayerProperties(handle(), &layer_count, NULL);

        if (err == VK_SUCCESS) {
            // resize() so the buffer passed to the second call is valid
            layer_props.resize(layer_count);
            err = vkEnumerateDeviceLayerProperties(handle(), &layer_count,
                                                   layer_props.data());
        }
    } while (err == VK_INCOMPLETE);

    assert(err == VK_SUCCESS);

    return layer_props;
}

Device::~Device() {
    if (!initialized())
        return;

    for (int i = 0; i < QUEUE_COUNT; i++) {
        for (std::vector<Queue *>::iterator it = queues_[i].begin();
             it != queues_[i].end(); it++)
            delete *it;
        queues_[i].clear();
    }

    vkDestroyDevice(handle(), NULL);
}

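// Create a VkDevice that requests every queue of every queue family, all at
// priority 0.0, with the given layers and extensions enabled.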
void Device::init(std::vector<const char *> &layers,
                  std::vector<const char *> &extensions) {
    // request all queues
    const std::vector<VkQueueFamilyProperties> queue_props =
        phy_.queue_properties();
    std::vector<VkDeviceQueueCreateInfo> queue_info;
    queue_info.reserve(queue_props.size());

    std::vector<std::vector<float>> queue_priorities;

    for (uint32_t i = 0; i < (uint32_t)queue_props.size(); i++) {
        VkDeviceQueueCreateInfo qi = {};
        qi.sType = VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO;
        qi.pNext = NULL;
        qi.queueFamilyIndex = i;
        qi.queueCount = queue_props[i].queueCount;

        queue_priorities.emplace_back(qi.queueCount, 0.0f);

        qi.pQueuePriorities = queue_priorities[i].data();
        if (queue_props[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
            graphics_queue_node_index_ = i;
        }
        queue_info.push_back(qi);
    }

    VkDeviceCreateInfo dev_info = {};
    dev_info.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO;
    dev_info.pNext = NULL;
    dev_info.queueCreateInfoCount = queue_info.size();
    dev_info.pQueueCreateInfos = queue_info.data();
    dev_info.enabledLayerCount = layers.size();
    dev_info.ppEnabledLayerNames = layers.data();
    dev_info.enabledExtensionCount = extensions.size();
    dev_info.ppEnabledExtensionNames = extensions.data();

    init(dev_info);
}

void Device::init(const VkDeviceCreateInfo &info) {
    VkDevice dev;

    if (EXPECT(vkCreateDevice(phy_.handle(), &info, NULL, &dev) == VK_SUCCESS))
        Handle::init(dev);

    init_queues();
    init_formats();
}

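// Re-query the queue family properties and cache a Queue wrapper for each
// created queue, bucketed by capability (GRAPHICS / COMPUTE / DMA). A queue
// advertising several flags is wrapped once per matching bucket.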
void Device::init_queues() {
    uint32_t queue_node_count;

    // Call with NULL data to get count
    vkGetPhysicalDeviceQueueFamilyProperties(phy_.handle(), &queue_node_count,
                                             NULL);
    EXPECT(queue_node_count >= 1);

    VkQueueFamilyProperties *queue_props =
        new VkQueueFamilyProperties[queue_node_count];

    vkGetPhysicalDeviceQueueFamilyProperties(phy_.handle(), &queue_node_count,
                                             queue_props);

    for (uint32_t i = 0; i < queue_node_count; i++) {
        VkQueue queue;

        for (uint32_t j = 0; j < queue_props[i].queueCount; j++) {
            // TODO: Need to add support for separate MEMMGR and work queues,
            // including synchronization
            vkGetDeviceQueue(handle(), i, j, &queue);

            if (queue_props[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
                queues_[GRAPHICS].push_back(new Queue(queue, i));
            }

            if (queue_props[i].queueFlags & VK_QUEUE_COMPUTE_BIT) {
                queues_[COMPUTE].push_back(new Queue(queue, i));
            }

            if (queue_props[i].queueFlags & VK_QUEUE_TRANSFER_BIT) {
                queues_[DMA].push_back(new Queue(queue, i));
            }
        }
    }

    delete[] queue_props;

    EXPECT(!queues_[GRAPHICS].empty() || !queues_[COMPUTE].empty());
}

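// Record, for every core format, which tilings (linear / optimal) the
// physical device supports and with what features; Image::init_info() later
// consults this list.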
void Device::init_formats() {
    for (int f = VK_FORMAT_BEGIN_RANGE; f <= VK_FORMAT_END_RANGE; f++) {
        const VkFormat fmt = static_cast<VkFormat>(f);
        const VkFormatProperties props = format_properties(fmt);

        if (props.linearTilingFeatures) {
            const Format tmp = {fmt, VK_IMAGE_TILING_LINEAR,
                                props.linearTilingFeatures};
            formats_.push_back(tmp);
        }

        if (props.optimalTilingFeatures) {
            const Format tmp = {fmt, VK_IMAGE_TILING_OPTIMAL,
                                props.optimalTilingFeatures};
            formats_.push_back(tmp);
        }
    }

    EXPECT(!formats_.empty());
}

VkFormatProperties Device::format_properties(VkFormat format) {
    VkFormatProperties data;
    vkGetPhysicalDeviceFormatProperties(phy().handle(), format, &data);

    return data;
}

void Device::wait() { EXPECT(vkDeviceWaitIdle(handle()) == VK_SUCCESS); }

VkResult Device::wait(const std::vector<const Fence *> &fences, bool wait_all,
                      uint64_t timeout) {
    const std::vector<VkFence> fence_handles = make_handles<VkFence>(fences);
    VkResult err = vkWaitForFences(handle(), fence_handles.size(),
                                   fence_handles.data(), wait_all, timeout);
    EXPECT(err == VK_SUCCESS || err == VK_TIMEOUT);

    return err;
}

void Device::update_descriptor_sets(
    const std::vector<VkWriteDescriptorSet> &writes,
    const std::vector<VkCopyDescriptorSet> &copies) {
    vkUpdateDescriptorSets(handle(), writes.size(), writes.data(),
                           copies.size(), copies.data());
}

void Queue::submit(const std::vector<const CommandBuffer *> &cmds,
                   Fence &fence) {
    const std::vector<VkCommandBuffer> cmd_handles =
        make_handles<VkCommandBuffer>(cmds);
    VkSubmitInfo submit_info;
    submit_info.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
    submit_info.pNext = NULL;
    submit_info.waitSemaphoreCount = 0;
    submit_info.pWaitSemaphores = NULL;
    submit_info.pWaitDstStageMask = NULL;
    submit_info.commandBufferCount = (uint32_t)cmd_handles.size();
    submit_info.pCommandBuffers = cmd_handles.data();
    submit_info.signalSemaphoreCount = 0;
    submit_info.pSignalSemaphores = NULL;

    EXPECT(vkQueueSubmit(handle(), 1, &submit_info, fence.handle()) ==
           VK_SUCCESS);
}

void Queue::submit(const CommandBuffer &cmd, Fence &fence) {
    submit(std::vector<const CommandBuffer *>(1, &cmd), fence);
}

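// Fence-less convenience overload: `fence` is default-constructed and never
// init()ed, so its handle is expected to be VK_NULL_HANDLE when passed to
// vkQueueSubmit.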
void Queue::submit(const CommandBuffer &cmd) {
    Fence fence;
    submit(cmd, fence);
}

void Queue::wait() { EXPECT(vkQueueWaitIdle(handle()) == VK_SUCCESS); }

DeviceMemory::~DeviceMemory() {
    if (initialized())
        vkFreeMemory(device(), handle(), NULL);
}

void DeviceMemory::init(const Device &dev, const VkMemoryAllocateInfo &info) {
    NON_DISPATCHABLE_HANDLE_INIT(vkAllocateMemory, dev, &info);
}

const void *DeviceMemory::map(VkFlags flags) const {
    void *data;
    if (!EXPECT(vkMapMemory(device(), handle(), 0, VK_WHOLE_SIZE, flags,
                            &data) == VK_SUCCESS))
        data = NULL;

    return data;
}

void *DeviceMemory::map(VkFlags flags) {
    void *data;
    if (!EXPECT(vkMapMemory(device(), handle(), 0, VK_WHOLE_SIZE, flags,
                            &data) == VK_SUCCESS))
        data = NULL;

    return data;
}

void DeviceMemory::unmap() const { vkUnmapMemory(device(), handle()); }

NON_DISPATCHABLE_HANDLE_DTOR(Fence, vkDestroyFence)

void Fence::init(const Device &dev, const VkFenceCreateInfo &info) {
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateFence, dev, &info);
}

NON_DISPATCHABLE_HANDLE_DTOR(Semaphore, vkDestroySemaphore)

void Semaphore::init(const Device &dev, const VkSemaphoreCreateInfo &info) {
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateSemaphore, dev, &info);
}

NON_DISPATCHABLE_HANDLE_DTOR(Event, vkDestroyEvent)

void Event::init(const Device &dev, const VkEventCreateInfo &info) {
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateEvent, dev, &info);
}

void Event::set() { EXPECT(vkSetEvent(device(), handle()) == VK_SUCCESS); }

void Event::reset() { EXPECT(vkResetEvent(device(), handle()) == VK_SUCCESS); }

NON_DISPATCHABLE_HANDLE_DTOR(QueryPool, vkDestroyQueryPool)

void QueryPool::init(const Device &dev, const VkQueryPoolCreateInfo &info) {
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateQueryPool, dev, &info);
}

VkResult QueryPool::results(uint32_t first, uint32_t count, size_t size,
                            void *data, size_t stride) {
    VkResult err = vkGetQueryPoolResults(device(), handle(), first, count, size,
                                         data, stride, 0);
    EXPECT(err == VK_SUCCESS || err == VK_NOT_READY);

    return err;
}

NON_DISPATCHABLE_HANDLE_DTOR(Buffer, vkDestroyBuffer)

void Buffer::init(const Device &dev, const VkBufferCreateInfo &info,
                  VkMemoryPropertyFlags mem_props) {
    init_no_mem(dev, info);

    internal_mem_.init(
        dev, get_resource_alloc_info(dev, memory_requirements(), mem_props));
    bind_memory(internal_mem_, 0);
}

void Buffer::init_no_mem(const Device &dev, const VkBufferCreateInfo &info) {
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateBuffer, dev, &info);
    create_info_ = info;
}

VkMemoryRequirements Buffer::memory_requirements() const {
    VkMemoryRequirements reqs;

    vkGetBufferMemoryRequirements(device(), handle(), &reqs);

    return reqs;
}

void Buffer::bind_memory(const DeviceMemory &mem, VkDeviceSize mem_offset) {
    EXPECT(vkBindBufferMemory(device(), handle(), mem.handle(), mem_offset) ==
           VK_SUCCESS);
}

NON_DISPATCHABLE_HANDLE_DTOR(BufferView, vkDestroyBufferView)

void BufferView::init(const Device &dev, const VkBufferViewCreateInfo &info) {
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateBufferView, dev, &info);
}

NON_DISPATCHABLE_HANDLE_DTOR(Image, vkDestroyImage)

void Image::init(const Device &dev, const VkImageCreateInfo &info,
                 VkMemoryPropertyFlags mem_props) {
    init_no_mem(dev, info);

    internal_mem_.init(
        dev, get_resource_alloc_info(dev, memory_requirements(), mem_props));
    bind_memory(internal_mem_, 0);
}

void Image::init_no_mem(const Device &dev, const VkImageCreateInfo &info) {
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateImage, dev, &info);
    init_info(dev, info);
}

void Image::init_info(const Device &dev, const VkImageCreateInfo &info) {
    create_info_ = info;

    for (std::vector<Device::Format>::const_iterator it =
             dev.formats().begin();
         it != dev.formats().end(); it++) {
        if (memcmp(&it->format, &create_info_.format, sizeof(it->format)) ==
                0 &&
            it->tiling == create_info_.tiling) {
            format_features_ = it->features;
            break;
        }
    }
}

VkMemoryRequirements Image::memory_requirements() const {
    VkMemoryRequirements reqs;

    vkGetImageMemoryRequirements(device(), handle(), &reqs);

    return reqs;
}

void Image::bind_memory(const DeviceMemory &mem, VkDeviceSize mem_offset) {
    EXPECT(vkBindImageMemory(device(), handle(), mem.handle(), mem_offset) ==
           VK_SUCCESS);
}

VkSubresourceLayout
Image::subresource_layout(const VkImageSubresource &subres) const {
    VkSubresourceLayout data;
    vkGetImageSubresourceLayout(device(), handle(), &subres, &data);

    return data;
}

VkSubresourceLayout
Image::subresource_layout(const VkImageSubresourceLayers &subrescopy) const {
    VkImageSubresource subres =
        subresource(image_aspect(subrescopy.aspectMask), subrescopy.mipLevel,
                    subrescopy.baseArrayLayer);
    VkSubresourceLayout data;
    vkGetImageSubresourceLayout(device(), handle(), &subres, &data);

    return data;
}

bool Image::transparent() const {
    return (
        create_info_.tiling == VK_IMAGE_TILING_LINEAR &&
        create_info_.samples == VK_SAMPLE_COUNT_1_BIT &&
        !(create_info_.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
                                VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)));
}

NON_DISPATCHABLE_HANDLE_DTOR(ImageView, vkDestroyImageView)

void ImageView::init(const Device &dev, const VkImageViewCreateInfo &info) {
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateImageView, dev, &info);
}

NON_DISPATCHABLE_HANDLE_DTOR(ShaderModule, vkDestroyShaderModule)

void ShaderModule::init(const Device &dev,
                        const VkShaderModuleCreateInfo &info) {
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateShaderModule, dev, &info);
}

VkResult ShaderModule::init_try(const Device &dev,
                                const VkShaderModuleCreateInfo &info) {
    VkShaderModule mod;

    VkResult err = vkCreateShaderModule(dev.handle(), &info, NULL, &mod);
    if (err == VK_SUCCESS)
        NonDispHandle::init(dev.handle(), mod);

    return err;
}

NON_DISPATCHABLE_HANDLE_DTOR(Pipeline, vkDestroyPipeline)

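// The pipeline init helpers below route creation through a temporary, empty
// VkPipelineCache that is created and destroyed around the single
// vkCreate*Pipelines call (rather than passing VK_NULL_HANDLE for the cache,
// presumably so the cache code path is exercised as well).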
void Pipeline::init(const Device &dev,
                    const VkGraphicsPipelineCreateInfo &info) {
    VkPipelineCache cache;
    VkPipelineCacheCreateInfo ci;
    memset((void *)&ci, 0, sizeof(VkPipelineCacheCreateInfo));
    ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
    VkResult err = vkCreatePipelineCache(dev.handle(), &ci, NULL, &cache);
    if (err == VK_SUCCESS) {
        NON_DISPATCHABLE_HANDLE_INIT(vkCreateGraphicsPipelines, dev, cache, 1,
                                     &info);
        vkDestroyPipelineCache(dev.handle(), cache, NULL);
    }
}

VkResult Pipeline::init_try(const Device &dev,
                            const VkGraphicsPipelineCreateInfo &info) {
    VkPipeline pipe;
    VkPipelineCache cache;
    VkPipelineCacheCreateInfo ci;
    memset((void *)&ci, 0, sizeof(VkPipelineCacheCreateInfo));
    ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
    VkResult err = vkCreatePipelineCache(dev.handle(), &ci, NULL, &cache);
    EXPECT(err == VK_SUCCESS);
    if (err == VK_SUCCESS) {
        err = vkCreateGraphicsPipelines(dev.handle(), cache, 1, &info, NULL,
                                        &pipe);
        if (err == VK_SUCCESS) {
            NonDispHandle::init(dev.handle(), pipe);
        }
        vkDestroyPipelineCache(dev.handle(), cache, NULL);
    }

    return err;
}

void Pipeline::init(const Device &dev,
                    const VkComputePipelineCreateInfo &info) {
    VkPipelineCache cache;
    VkPipelineCacheCreateInfo ci;
    memset((void *)&ci, 0, sizeof(VkPipelineCacheCreateInfo));
    ci.sType = VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO;
    VkResult err = vkCreatePipelineCache(dev.handle(), &ci, NULL, &cache);
    if (err == VK_SUCCESS) {
        NON_DISPATCHABLE_HANDLE_INIT(vkCreateComputePipelines, dev, cache, 1,
                                     &info);
        vkDestroyPipelineCache(dev.handle(), cache, NULL);
    }
}

NON_DISPATCHABLE_HANDLE_DTOR(PipelineLayout, vkDestroyPipelineLayout)

void PipelineLayout::init(
    const Device &dev, VkPipelineLayoutCreateInfo &info,
    const std::vector<const DescriptorSetLayout *> &layouts) {
    const std::vector<VkDescriptorSetLayout> layout_handles =
        make_handles<VkDescriptorSetLayout>(layouts);
    info.pSetLayouts = layout_handles.data();

    NON_DISPATCHABLE_HANDLE_INIT(vkCreatePipelineLayout, dev, &info);
}

NON_DISPATCHABLE_HANDLE_DTOR(Sampler, vkDestroySampler)

void Sampler::init(const Device &dev, const VkSamplerCreateInfo &info) {
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateSampler, dev, &info);
}

NON_DISPATCHABLE_HANDLE_DTOR(DescriptorSetLayout, vkDestroyDescriptorSetLayout)

void DescriptorSetLayout::init(const Device &dev,
                               const VkDescriptorSetLayoutCreateInfo &info) {
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateDescriptorSetLayout, dev, &info);
}

NON_DISPATCHABLE_HANDLE_DTOR(DescriptorPool, vkDestroyDescriptorPool)

void DescriptorPool::init(const Device &dev,
                          const VkDescriptorPoolCreateInfo &info) {
    setDynamicUsage(info.flags &
                    VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT);
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateDescriptorPool, dev, &info);
}

void DescriptorPool::reset() {
    EXPECT(vkResetDescriptorPool(device(), handle(), 0) == VK_SUCCESS);
}

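// Allocate one descriptor set per entry in `layouts` from this pool. The
// returned DescriptorSet wrappers are heap-allocated and owned by the caller;
// their destructors free the sets only if the pool was created with
// VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT. A minimal usage sketch
// (hypothetical `pool`, `dev`, and layout objects):
//   std::vector<DescriptorSet *> sets = pool.alloc_sets(
//       dev, std::vector<const DescriptorSetLayout *>{&layout_a, &layout_b});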
std::vector<DescriptorSet *> DescriptorPool::alloc_sets(
    const Device &dev,
    const std::vector<const DescriptorSetLayout *> &layouts) {
    const std::vector<VkDescriptorSetLayout> layout_handles =
        make_handles<VkDescriptorSetLayout>(layouts);

    std::vector<VkDescriptorSet> set_handles;
    set_handles.resize(layout_handles.size());

    VkDescriptorSetAllocateInfo alloc_info = {};
    alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
    alloc_info.descriptorSetCount = layout_handles.size();
    alloc_info.descriptorPool = handle();
    alloc_info.pSetLayouts = layout_handles.data();
    VkResult err =
        vkAllocateDescriptorSets(device(), &alloc_info, set_handles.data());
    EXPECT(err == VK_SUCCESS);

    std::vector<DescriptorSet *> sets;
    for (std::vector<VkDescriptorSet>::const_iterator it = set_handles.begin();
         it != set_handles.end(); it++) {
        // do descriptor sets need memories bound?
        DescriptorSet *descriptorSet = new DescriptorSet(dev, this, *it);
        sets.push_back(descriptorSet);
    }
    return sets;
}

std::vector<DescriptorSet *>
DescriptorPool::alloc_sets(const Device &dev, const DescriptorSetLayout &layout,
                           uint32_t count) {
    return alloc_sets(dev,
                      std::vector<const DescriptorSetLayout *>(count, &layout));
}

DescriptorSet *DescriptorPool::alloc_sets(const Device &dev,
                                          const DescriptorSetLayout &layout) {
    std::vector<DescriptorSet *> set = alloc_sets(dev, layout, 1);
    return (set.empty()) ? NULL : set[0];
}

DescriptorSet::~DescriptorSet() {
    if (initialized()) {
        // Only call vkFree* on sets allocated from pool with usage *_DYNAMIC
        if (containing_pool_->getDynamicUsage()) {
            VkDescriptorSet sets[1] = {handle()};
            EXPECT(vkFreeDescriptorSets(device(), containing_pool_->GetObj(), 1,
                                        sets) == VK_SUCCESS);
        }
    }
}

NON_DISPATCHABLE_HANDLE_DTOR(CommandPool, vkDestroyCommandPool)

void CommandPool::init(const Device &dev, const VkCommandPoolCreateInfo &info) {
    NON_DISPATCHABLE_HANDLE_INIT(vkCreateCommandPool, dev, &info);
}

CommandBuffer::~CommandBuffer() {
    if (initialized()) {
        VkCommandBuffer cmds[] = {handle()};
        vkFreeCommandBuffers(dev_handle_, cmd_pool_, 1, cmds);
    }
}

void CommandBuffer::init(const Device &dev,
                         const VkCommandBufferAllocateInfo &info) {
    VkCommandBuffer cmd;

    // Make sure commandPool is set
    assert(info.commandPool);

    if (EXPECT(vkAllocateCommandBuffers(dev.handle(), &info, &cmd) ==
               VK_SUCCESS)) {
        Handle::init(cmd);
        dev_handle_ = dev.handle();
        cmd_pool_ = info.commandPool;
    }
}

void CommandBuffer::begin(const VkCommandBufferBeginInfo *info) {
    EXPECT(vkBeginCommandBuffer(handle(), info) == VK_SUCCESS);
}

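// Convenience begin(): record with ONE_TIME_SUBMIT and a zeroed-out
// inheritance info, which is ignored for primary command buffers but keeps
// the same helper usable for secondary ones.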
void CommandBuffer::begin() {
    VkCommandBufferBeginInfo info = {};
    VkCommandBufferInheritanceInfo hinfo = {};
    info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
    info.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO;
    info.pInheritanceInfo = &hinfo;
    hinfo.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_INHERITANCE_INFO;
    hinfo.pNext = NULL;
    hinfo.renderPass = VK_NULL_HANDLE;
    hinfo.subpass = 0;
    hinfo.framebuffer = VK_NULL_HANDLE;
    hinfo.occlusionQueryEnable = VK_FALSE;
    hinfo.queryFlags = 0;
    hinfo.pipelineStatistics = 0;

    begin(&info);
}

void CommandBuffer::end() {
    EXPECT(vkEndCommandBuffer(handle()) == VK_SUCCESS);
}

void CommandBuffer::reset(VkCommandBufferResetFlags flags) {
    EXPECT(vkResetCommandBuffer(handle(), flags) == VK_SUCCESS);
}

} // namespace vk_testing