/*
 * Copyright © 2022 Collabora, LTD
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "vk_pipeline.h"

#include "vk_device.h"
#include "vk_log.h"
#include "vk_nir.h"
#include "vk_shader_module.h"
#include "vk_util.h"

#include "nir_serialize.h"

#include "util/mesa-sha1.h"
#include "util/mesa-blake3.h"

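/* Returns true if the stage carries no shader code at all: the module handle
 * is NULL and the pNext chain contains neither an inlined
 * VkShaderModuleCreateInfo nor a module identifier.
 */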
bool
vk_pipeline_shader_stage_is_null(const VkPipelineShaderStageCreateInfo *info)
{
   if (info->module != VK_NULL_HANDLE)
      return false;

   vk_foreach_struct_const(ext, info->pNext) {
      if (ext->sType == VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO ||
          ext->sType == VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_MODULE_IDENTIFIER_CREATE_INFO_EXT)
         return false;
   }

   return true;
}

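/* Returns the internal (built-in) NIR shader attached to the stage, if any:
 * either the NIR stored directly in the vk_shader_module or the one passed
 * via VkPipelineShaderStageNirCreateInfoMESA.  Returns NULL if the stage is
 * backed by SPIR-V instead.
 */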
static nir_shader *
get_builtin_nir(const VkPipelineShaderStageCreateInfo *info)
{
   VK_FROM_HANDLE(vk_shader_module, module, info->module);

   nir_shader *nir = NULL;
   if (module != NULL) {
      nir = module->nir;
   } else {
      const VkPipelineShaderStageNirCreateInfoMESA *nir_info =
         vk_find_struct_const(info->pNext, PIPELINE_SHADER_STAGE_NIR_CREATE_INFO_MESA);
      if (nir_info != NULL)
         nir = nir_info->nir;
   }

   if (nir == NULL)
      return NULL;

   assert(nir->info.stage == vk_to_mesa_shader_stage(info->stage));
   ASSERTED nir_function_impl *entrypoint = nir_shader_get_entrypoint(nir);
   assert(strcmp(entrypoint->function->name, info->pName) == 0);
   assert(info->pSpecializationInfo == NULL);

   return nir;
}

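/* Returns requiredSubgroupSize if the stage chains a
 * VkPipelineShaderStageRequiredSubgroupSizeCreateInfo, 0 otherwise.
 */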
static uint32_t
get_required_subgroup_size(const VkPipelineShaderStageCreateInfo *info)
{
   const VkPipelineShaderStageRequiredSubgroupSizeCreateInfo *rss_info =
      vk_find_struct_const(info->pNext,
                           PIPELINE_SHADER_STAGE_REQUIRED_SUBGROUP_SIZE_CREATE_INFO);
   return rss_info != NULL ? rss_info->requiredSubgroupSize : 0;
}

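/* Translates a pipeline shader stage into NIR.  Built-in NIR shaders are
 * cloned into mem_ctx as-is; otherwise the SPIR-V from the module (or from
 * an inlined VkShaderModuleCreateInfo) is run through vk_spirv_to_nir with
 * a subgroup-size mode derived from the stage flags, any required subgroup
 * size, and the SPIR-V version.
 *
 * A minimal usage sketch (the driver-side variable names here are
 * hypothetical):
 *
 *    nir_shader *nir = NULL;
 *    VkResult result =
 *       vk_pipeline_shader_stage_to_nir(device, stage_info,
 *                                       &spirv_options, nir_options,
 *                                       mem_ctx, &nir);
 *    if (result != VK_SUCCESS)
 *       return result;
 */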
VkResult
vk_pipeline_shader_stage_to_nir(struct vk_device *device,
                                const VkPipelineShaderStageCreateInfo *info,
                                const struct spirv_to_nir_options *spirv_options,
                                const struct nir_shader_compiler_options *nir_options,
                                void *mem_ctx, nir_shader **nir_out)
{
   VK_FROM_HANDLE(vk_shader_module, module, info->module);
   const gl_shader_stage stage = vk_to_mesa_shader_stage(info->stage);

   assert(info->sType == VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO);

   nir_shader *builtin_nir = get_builtin_nir(info);
   if (builtin_nir != NULL) {
      nir_validate_shader(builtin_nir, "internal shader");

      nir_shader *clone = nir_shader_clone(mem_ctx, builtin_nir);
      if (clone == NULL)
         return vk_error(device, VK_ERROR_OUT_OF_HOST_MEMORY);

      assert(clone->options == NULL || clone->options == nir_options);
      clone->options = nir_options;

      *nir_out = clone;
      return VK_SUCCESS;
   }

   const uint32_t *spirv_data;
   uint32_t spirv_size;
   if (module != NULL) {
      spirv_data = (uint32_t *)module->data;
      spirv_size = module->size;
   } else {
      const VkShaderModuleCreateInfo *minfo =
         vk_find_struct_const(info->pNext, SHADER_MODULE_CREATE_INFO);
      if (unlikely(minfo == NULL)) {
         return vk_errorf(device, VK_ERROR_UNKNOWN,
                          "No shader module provided");
      }
      spirv_data = minfo->pCode;
      spirv_size = minfo->codeSize;
   }

   enum gl_subgroup_size subgroup_size;
   uint32_t req_subgroup_size = get_required_subgroup_size(info);
   if (req_subgroup_size > 0) {
      assert(util_is_power_of_two_nonzero(req_subgroup_size));
      assert(req_subgroup_size >= 8 && req_subgroup_size <= 128);
      subgroup_size = req_subgroup_size;
   } else if (info->flags & VK_PIPELINE_SHADER_STAGE_CREATE_ALLOW_VARYING_SUBGROUP_SIZE_BIT ||
              vk_spirv_version(spirv_data, spirv_size) >= 0x10600) {
      /* Starting with SPIR-V 1.6, a varying subgroup size is the default */
      subgroup_size = SUBGROUP_SIZE_VARYING;
   } else if (info->flags & VK_PIPELINE_SHADER_STAGE_CREATE_REQUIRE_FULL_SUBGROUPS_BIT) {
      assert(stage == MESA_SHADER_COMPUTE);
      subgroup_size = SUBGROUP_SIZE_FULL_SUBGROUPS;
   } else {
      subgroup_size = SUBGROUP_SIZE_API_CONSTANT;
   }

   nir_shader *nir = vk_spirv_to_nir(device, spirv_data, spirv_size, stage,
                                     info->pName, subgroup_size,
                                     info->pSpecializationInfo,
                                     spirv_options, nir_options, mem_ctx);
   if (nir == NULL)
      return vk_errorf(device, VK_ERROR_UNKNOWN, "spirv_to_nir failed");

   *nir_out = nir;

   return VK_SUCCESS;
}

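/* Computes a SHA-1 over everything in the stage that can affect compilation,
 * for use as a pipeline/shader cache key: the shader code (the module hash,
 * a BLAKE3 of inlined SPIR-V, or the module identifier), stage flags, stage
 * bit, entrypoint name, specialization data, required subgroup size, and the
 * robustness state if one is provided.  For built-in NIR shaders the
 * serialized NIR alone is hashed.
 */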
void
vk_pipeline_hash_shader_stage(const VkPipelineShaderStageCreateInfo *info,
                              const struct vk_pipeline_robustness_state *rstate,
                              unsigned char *stage_sha1)
{
   VK_FROM_HANDLE(vk_shader_module, module, info->module);

   const nir_shader *builtin_nir = get_builtin_nir(info);
   if (builtin_nir != NULL) {
      /* Internal NIR module: serialize and hash the NIR shader.
       * We don't need to hash other info fields since they should match the
       * NIR data.
       */
      struct blob blob;

      blob_init(&blob);
      nir_serialize(&blob, builtin_nir, false);
      assert(!blob.out_of_memory);
      _mesa_sha1_compute(blob.data, blob.size, stage_sha1);
      blob_finish(&blob);
      return;
   }

   const VkShaderModuleCreateInfo *minfo =
      vk_find_struct_const(info->pNext, SHADER_MODULE_CREATE_INFO);
   const VkPipelineShaderStageModuleIdentifierCreateInfoEXT *iinfo =
      vk_find_struct_const(info->pNext, PIPELINE_SHADER_STAGE_MODULE_IDENTIFIER_CREATE_INFO_EXT);

   struct mesa_sha1 ctx;

   _mesa_sha1_init(&ctx);

   _mesa_sha1_update(&ctx, &info->flags, sizeof(info->flags));

   assert(util_bitcount(info->stage) == 1);
   _mesa_sha1_update(&ctx, &info->stage, sizeof(info->stage));

   if (module) {
      _mesa_sha1_update(&ctx, module->hash, sizeof(module->hash));
   } else if (minfo) {
      blake3_hash spirv_hash;

      _mesa_blake3_compute(minfo->pCode, minfo->codeSize, spirv_hash);
      _mesa_sha1_update(&ctx, spirv_hash, sizeof(spirv_hash));
   } else {
      /* It is legal to pass in arbitrary identifiers as long as they don't exceed
       * the limit. Shaders with bogus identifiers are more or less guaranteed to fail. */
      assert(iinfo);
      assert(iinfo->identifierSize <= VK_MAX_SHADER_MODULE_IDENTIFIER_SIZE_EXT);
      _mesa_sha1_update(&ctx, iinfo->pIdentifier, iinfo->identifierSize);
   }

   if (rstate) {
      _mesa_sha1_update(&ctx, &rstate->storage_buffers, sizeof(rstate->storage_buffers));
      _mesa_sha1_update(&ctx, &rstate->uniform_buffers, sizeof(rstate->uniform_buffers));
      _mesa_sha1_update(&ctx, &rstate->vertex_inputs, sizeof(rstate->vertex_inputs));
      _mesa_sha1_update(&ctx, &rstate->images, sizeof(rstate->images));
   }

   _mesa_sha1_update(&ctx, info->pName, strlen(info->pName));

   if (info->pSpecializationInfo) {
      _mesa_sha1_update(&ctx, info->pSpecializationInfo->pMapEntries,
                        info->pSpecializationInfo->mapEntryCount *
                        sizeof(*info->pSpecializationInfo->pMapEntries));
      _mesa_sha1_update(&ctx, info->pSpecializationInfo->pData,
                        info->pSpecializationInfo->dataSize);
   }

   uint32_t req_subgroup_size = get_required_subgroup_size(info);
   _mesa_sha1_update(&ctx, &req_subgroup_size, sizeof(req_subgroup_size));

   _mesa_sha1_final(&ctx, stage_sha1);
}

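/* Maps the device's enabled robustBufferAccess/robustBufferAccess2 features
 * to the VK_EXT_pipeline_robustness buffer behavior used to resolve
 * DEVICE_DEFAULT.
 */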
static VkPipelineRobustnessBufferBehaviorEXT
vk_device_default_robust_buffer_behavior(const struct vk_device *device)
{
   if (device->enabled_features.robustBufferAccess2) {
      return VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_2_EXT;
   } else if (device->enabled_features.robustBufferAccess) {
      return VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_ROBUST_BUFFER_ACCESS_EXT;
   } else {
      return VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DISABLED_EXT;
   }
}

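/* Same as above, but for robustImageAccess/robustImageAccess2. */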
static VkPipelineRobustnessImageBehaviorEXT
vk_device_default_robust_image_behavior(const struct vk_device *device)
{
   if (device->enabled_features.robustImageAccess2) {
      return VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_ROBUST_IMAGE_ACCESS_2_EXT;
   } else if (device->enabled_features.robustImageAccess) {
      return VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_ROBUST_IMAGE_ACCESS_EXT;
   } else {
      return VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_DISABLED_EXT;
   }
}

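/* Resolves the effective robustness state for one shader stage.  A
 * VkPipelineRobustnessCreateInfoEXT chained to the stage takes precedence
 * over one chained to the pipeline, and any value still left at
 * DEVICE_DEFAULT is resolved from the device's enabled robustness features.
 */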
void
vk_pipeline_robustness_state_fill(const struct vk_device *device,
                                  struct vk_pipeline_robustness_state *rs,
                                  const void *pipeline_pNext,
                                  const void *shader_stage_pNext)
{
   rs->uniform_buffers = VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DEVICE_DEFAULT_EXT;
   rs->storage_buffers = VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DEVICE_DEFAULT_EXT;
   rs->vertex_inputs = VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DEVICE_DEFAULT_EXT;
   rs->images = VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_DEVICE_DEFAULT_EXT;

   const VkPipelineRobustnessCreateInfoEXT *shader_info =
      vk_find_struct_const(shader_stage_pNext,
                           PIPELINE_ROBUSTNESS_CREATE_INFO_EXT);
   if (shader_info) {
      rs->storage_buffers = shader_info->storageBuffers;
      rs->uniform_buffers = shader_info->uniformBuffers;
      rs->vertex_inputs = shader_info->vertexInputs;
      rs->images = shader_info->images;
   } else {
      const VkPipelineRobustnessCreateInfoEXT *pipeline_info =
         vk_find_struct_const(pipeline_pNext,
                              PIPELINE_ROBUSTNESS_CREATE_INFO_EXT);
      if (pipeline_info) {
         rs->storage_buffers = pipeline_info->storageBuffers;
         rs->uniform_buffers = pipeline_info->uniformBuffers;
         rs->vertex_inputs = pipeline_info->vertexInputs;
         rs->images = pipeline_info->images;
      }
   }

   if (rs->storage_buffers ==
       VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DEVICE_DEFAULT_EXT)
      rs->storage_buffers = vk_device_default_robust_buffer_behavior(device);

   if (rs->uniform_buffers ==
       VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DEVICE_DEFAULT_EXT)
      rs->uniform_buffers = vk_device_default_robust_buffer_behavior(device);

   if (rs->vertex_inputs ==
       VK_PIPELINE_ROBUSTNESS_BUFFER_BEHAVIOR_DEVICE_DEFAULT_EXT)
      rs->vertex_inputs = vk_device_default_robust_buffer_behavior(device);

   if (rs->images == VK_PIPELINE_ROBUSTNESS_IMAGE_BEHAVIOR_DEVICE_DEFAULT_EXT)
      rs->images = vk_device_default_robust_image_behavior(device);
}