/*
 * Copyright © 2021 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "vk_command_buffer.h"

#include "vk_command_pool.h"
#include "vk_common_entrypoints.h"
#include "vk_device.h"

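/* Initialize a vk_command_buffer, associate it with the given pool, and add
 * it to the pool's list of command buffers.
 */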
VkResult
vk_command_buffer_init(struct vk_command_pool *pool,
                       struct vk_command_buffer *command_buffer,
                       const struct vk_command_buffer_ops *ops,
                       VkCommandBufferLevel level)
{
   memset(command_buffer, 0, sizeof(*command_buffer));
   vk_object_base_init(pool->base.device, &command_buffer->base,
                       VK_OBJECT_TYPE_COMMAND_BUFFER);

   command_buffer->pool = pool;
   command_buffer->level = level;
   command_buffer->ops = ops;
   vk_dynamic_graphics_state_init(&command_buffer->dynamic_graphics_state);
   command_buffer->state = MESA_VK_COMMAND_BUFFER_STATE_INITIAL;
   command_buffer->record_result = VK_SUCCESS;
   vk_cmd_queue_init(&command_buffer->cmd_queue, &pool->alloc);
#ifndef VK_NO_NIR
   vk_meta_object_list_init(&command_buffer->meta_objects);
#endif
   util_dynarray_init(&command_buffer->labels, NULL);
   command_buffer->region_begin = true;

   list_add(&command_buffer->pool_link, &pool->command_buffers);

   return VK_SUCCESS;
}

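/* Return a command buffer to the initial state, clearing any recorded
 * dynamic state, queued commands, meta objects, and debug labels.
 */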
void
vk_command_buffer_reset(struct vk_command_buffer *command_buffer)
{
   vk_dynamic_graphics_state_clear(&command_buffer->dynamic_graphics_state);
   command_buffer->state = MESA_VK_COMMAND_BUFFER_STATE_INITIAL;
   command_buffer->record_result = VK_SUCCESS;
   vk_command_buffer_reset_render_pass(command_buffer);
   vk_cmd_queue_reset(&command_buffer->cmd_queue);
#ifndef VK_NO_NIR
   vk_meta_object_list_reset(command_buffer->base.device,
                             &command_buffer->meta_objects);
#endif
   util_dynarray_clear(&command_buffer->labels);
   command_buffer->region_begin = true;
}

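/* Transition a command buffer to the recording state, resetting it first via
 * the driver's reset hook if it is not already in the initial state.
 */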
void
vk_command_buffer_begin(struct vk_command_buffer *command_buffer,
                        const VkCommandBufferBeginInfo *pBeginInfo)
{
   if (command_buffer->state != MESA_VK_COMMAND_BUFFER_STATE_INITIAL &&
       command_buffer->ops->reset != NULL)
      command_buffer->ops->reset(command_buffer, 0);

   command_buffer->state = MESA_VK_COMMAND_BUFFER_STATE_RECORDING;
}

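/* End recording: move the command buffer to the executable state, or to the
 * invalid state if an error was recorded, and return the recording result.
 */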
VkResult
vk_command_buffer_end(struct vk_command_buffer *command_buffer)
{
   assert(command_buffer->state == MESA_VK_COMMAND_BUFFER_STATE_RECORDING);

   if (vk_command_buffer_has_error(command_buffer))
      command_buffer->state = MESA_VK_COMMAND_BUFFER_STATE_INVALID;
   else
      command_buffer->state = MESA_VK_COMMAND_BUFFER_STATE_EXECUTABLE;

   return vk_command_buffer_get_record_result(command_buffer);
}

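/* Tear down a vk_command_buffer: remove it from its pool's list and release
 * the resources owned by the common command buffer state.
 */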
void
vk_command_buffer_finish(struct vk_command_buffer *command_buffer)
{
   list_del(&command_buffer->pool_link);
   vk_command_buffer_reset_render_pass(command_buffer);
   vk_cmd_queue_finish(&command_buffer->cmd_queue);
   util_dynarray_fini(&command_buffer->labels);
#ifndef VK_NO_NIR
   vk_meta_object_list_finish(command_buffer->base.device,
                              &command_buffer->meta_objects);
#endif
   vk_object_base_finish(&command_buffer->base);
}

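/* Prepare a command buffer for reuse from the pool, releasing any pooled
 * resources the driver is holding on to.
 */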
void
vk_command_buffer_recycle(struct vk_command_buffer *cmd_buffer)
{
   /* Reset, returning resources to the pool.  The command buffer object
    * itself will be recycled but, if the driver supports returning other
    * resources such as batch buffers to the pool, it should do so now so
    * that they're not tied up in recycled command buffer objects.
    */
   cmd_buffer->ops->reset(cmd_buffer,
      VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT);

   vk_object_base_recycle(&cmd_buffer->base);
}

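/* Common implementation of vkResetCommandBuffer.  A command buffer that is
 * already in the initial state needs no reset.
 */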
VKAPI_ATTR VkResult VKAPI_CALL
vk_common_ResetCommandBuffer(VkCommandBuffer commandBuffer,
                             VkCommandBufferResetFlags flags)
{
   VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);

   if (cmd_buffer->state != MESA_VK_COMMAND_BUFFER_STATE_INITIAL)
      cmd_buffer->ops->reset(cmd_buffer, flags);

   return VK_SUCCESS;
}

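/* Common implementation of vkCmdExecuteCommands: replay each secondary
 * command buffer's recorded command queue into the primary command buffer
 * through the device's command dispatch table.
 */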
VKAPI_ATTR void VKAPI_CALL
vk_common_CmdExecuteCommands(VkCommandBuffer commandBuffer,
                             uint32_t commandBufferCount,
                             const VkCommandBuffer *pCommandBuffers)
{
   VK_FROM_HANDLE(vk_command_buffer, primary, commandBuffer);
   const struct vk_device_dispatch_table *disp =
      primary->base.device->command_dispatch_table;

   for (uint32_t i = 0; i < commandBufferCount; i++) {
      VK_FROM_HANDLE(vk_command_buffer, secondary, pCommandBuffers[i]);

      vk_cmd_queue_execute(&secondary->cmd_queue, commandBuffer, disp);
   }
}

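/* Common implementation of vkCmdBindVertexBuffers in terms of
 * vkCmdBindVertexBuffers2, with no per-binding sizes or strides.
 */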
VKAPI_ATTR void VKAPI_CALL
vk_common_CmdBindVertexBuffers(VkCommandBuffer commandBuffer,
                               uint32_t firstBinding,
                               uint32_t bindingCount,
                               const VkBuffer *pBuffers,
                               const VkDeviceSize *pOffsets)
{
   VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
   const struct vk_device_dispatch_table *disp =
      &cmd_buffer->base.device->dispatch_table;

   disp->CmdBindVertexBuffers2(commandBuffer, firstBinding, bindingCount,
                               pBuffers, pOffsets, NULL, NULL);
}

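/* Common implementation of vkCmdBindIndexBuffer in terms of
 * vkCmdBindIndexBuffer2KHR, using VK_WHOLE_SIZE for the bound range.
 */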
VKAPI_ATTR void VKAPI_CALL
vk_common_CmdBindIndexBuffer(VkCommandBuffer commandBuffer,
                             VkBuffer buffer,
                             VkDeviceSize offset,
                             VkIndexType indexType)
{
   VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
   const struct vk_device_dispatch_table *disp =
      &cmd_buffer->base.device->dispatch_table;

   disp->CmdBindIndexBuffer2KHR(commandBuffer, buffer, offset,
                                VK_WHOLE_SIZE, indexType);
}

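/* Common implementation of vkCmdDispatch in terms of vkCmdDispatchBase with
 * a zero base workgroup.
 */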
VKAPI_ATTR void VKAPI_CALL
vk_common_CmdDispatch(VkCommandBuffer commandBuffer,
                      uint32_t groupCountX,
                      uint32_t groupCountY,
                      uint32_t groupCountZ)
{
   VK_FROM_HANDLE(vk_command_buffer, cmd_buffer, commandBuffer);
   const struct vk_device_dispatch_table *disp =
      &cmd_buffer->base.device->dispatch_table;

   disp->CmdDispatchBase(commandBuffer, 0, 0, 0,
                         groupCountX, groupCountY, groupCountZ);
}

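/* Common implementation of vkCmdSetDeviceMask for implementations that
 * expose a single device.
 */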
VKAPI_ATTR void VKAPI_CALL
vk_common_CmdSetDeviceMask(VkCommandBuffer commandBuffer, uint32_t deviceMask)
{
   /* Nothing to do here since we only support a single device */
   assert(deviceMask == 0x1);
}