1 /*
2  * Copyright © 2015 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  *
23  * Authors:
24  *    Jason Ekstrand (jason@jlekstrand.net)
25  *
26  */
27 
28 #include "vtn_private.h"
29 #include "nir/nir_vla.h"
30 #include "nir/nir_control_flow.h"
31 #include "nir/nir_constant_expressions.h"
32 #include "nir/nir_deref.h"
33 #include "spirv_info.h"
34 
35 #include "util/format/u_format.h"
36 #include "util/u_math.h"
37 
38 #include <stdio.h>
39 
40 void
41 vtn_log(struct vtn_builder *b, enum nir_spirv_debug_level level,
42         size_t spirv_offset, const char *message)
43 {
44    if (b->options->debug.func) {
45       b->options->debug.func(b->options->debug.private_data,
46                              level, spirv_offset, message);
47    }
48 
49 #ifndef NDEBUG
50    if (level >= NIR_SPIRV_DEBUG_LEVEL_WARNING)
51       fprintf(stderr, "%s\n", message);
52 #endif
53 }
54 
55 void
56 vtn_logf(struct vtn_builder *b, enum nir_spirv_debug_level level,
57          size_t spirv_offset, const char *fmt, ...)
58 {
59    va_list args;
60    char *msg;
61 
62    va_start(args, fmt);
63    msg = ralloc_vasprintf(NULL, fmt, args);
64    va_end(args);
65 
66    vtn_log(b, level, spirv_offset, msg);
67 
68    ralloc_free(msg);
69 }
70 
71 static void
72 vtn_log_err(struct vtn_builder *b,
73             enum nir_spirv_debug_level level, const char *prefix,
74             const char *file, unsigned line,
75             const char *fmt, va_list args)
76 {
77    char *msg;
78 
79    msg = ralloc_strdup(NULL, prefix);
80 
81 #ifndef NDEBUG
82    ralloc_asprintf_append(&msg, "    In file %s:%u\n", file, line);
83 #endif
84 
85    ralloc_asprintf_append(&msg, "    ");
86 
87    ralloc_vasprintf_append(&msg, fmt, args);
88 
89    ralloc_asprintf_append(&msg, "\n    %zu bytes into the SPIR-V binary",
90                           b->spirv_offset);
91 
92    if (b->file) {
93       ralloc_asprintf_append(&msg,
94                              "\n    in SPIR-V source file %s, line %d, col %d",
95                              b->file, b->line, b->col);
96    }
97 
98    vtn_log(b, level, b->spirv_offset, msg);
99 
100    ralloc_free(msg);
101 }
102 
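/* Writes the raw SPIR-V words to "<path>/<prefix>-<N>.spirv" so a failing
 * shader can be inspected offline.  N comes from a static counter, so
 * successive dumps within one process get distinct file names.
 */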
103 static void
104 vtn_dump_shader(struct vtn_builder *b, const char *path, const char *prefix)
105 {
106    static int idx = 0;
107 
108    char filename[1024];
109    int len = snprintf(filename, sizeof(filename), "%s/%s-%d.spirv",
110                       path, prefix, idx++);
111    if (len < 0 || len >= sizeof(filename))
112       return;
113 
114    FILE *f = fopen(filename, "w");
115    if (f == NULL)
116       return;
117 
118    fwrite(b->spirv, sizeof(*b->spirv), b->spirv_word_count, f);
119    fclose(f);
120 
121    vtn_info("SPIR-V shader dumped to %s", filename);
122 }
123 
124 void
125 _vtn_warn(struct vtn_builder *b, const char *file, unsigned line,
126           const char *fmt, ...)
127 {
128    va_list args;
129 
130    va_start(args, fmt);
131    vtn_log_err(b, NIR_SPIRV_DEBUG_LEVEL_WARNING, "SPIR-V WARNING:\n",
132                file, line, fmt, args);
133    va_end(args);
134 }
135 
136 void
137 _vtn_err(struct vtn_builder *b, const char *file, unsigned line,
138           const char *fmt, ...)
139 {
140    va_list args;
141 
142    va_start(args, fmt);
143    vtn_log_err(b, NIR_SPIRV_DEBUG_LEVEL_ERROR, "SPIR-V ERROR:\n",
144                file, line, fmt, args);
145    va_end(args);
146 }
147 
148 void
149 _vtn_fail(struct vtn_builder *b, const char *file, unsigned line,
150           const char *fmt, ...)
151 {
152    va_list args;
153 
154    va_start(args, fmt);
155    vtn_log_err(b, NIR_SPIRV_DEBUG_LEVEL_ERROR, "SPIR-V parsing FAILED:\n",
156                file, line, fmt, args);
157    va_end(args);
158 
159    const char *dump_path = getenv("MESA_SPIRV_FAIL_DUMP_PATH");
160    if (dump_path)
161       vtn_dump_shader(b, dump_path, "fail");
162 
163    longjmp(b->fail_jump, 1);
164 }
165 
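/* Builds a vtn_ssa_value tree of the given type whose leaves are fresh NIR
 * undef definitions.  Vectors and scalars get a single nir_ssa_undef;
 * arrays, matrices, and structs recurse per element or member.
 */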
166 static struct vtn_ssa_value *
167 vtn_undef_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
168 {
169    struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
170    val->type = glsl_get_bare_type(type);
171 
172    if (glsl_type_is_vector_or_scalar(type)) {
173       unsigned num_components = glsl_get_vector_elements(val->type);
174       unsigned bit_size = glsl_get_bit_size(val->type);
175       val->def = nir_ssa_undef(&b->nb, num_components, bit_size);
176    } else {
177       unsigned elems = glsl_get_length(val->type);
178       val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
179       if (glsl_type_is_array_or_matrix(type)) {
180          const struct glsl_type *elem_type = glsl_get_array_element(type);
181          for (unsigned i = 0; i < elems; i++)
182             val->elems[i] = vtn_undef_ssa_value(b, elem_type);
183       } else {
184          vtn_assert(glsl_type_is_struct_or_ifc(type));
185          for (unsigned i = 0; i < elems; i++) {
186             const struct glsl_type *elem_type = glsl_get_struct_field(type, i);
187             val->elems[i] = vtn_undef_ssa_value(b, elem_type);
188          }
189       }
190    }
191 
192    return val;
193 }
194 
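/* Turns a nir_constant into a vtn_ssa_value of the given type.  Constants
 * that already have an entry in b->const_table are returned from the cache;
 * otherwise vector/scalar leaves become load_const instructions inserted at
 * the top of the current impl and aggregates recurse per element or member.
 */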
195 static struct vtn_ssa_value *
196 vtn_const_ssa_value(struct vtn_builder *b, nir_constant *constant,
197                     const struct glsl_type *type)
198 {
199    struct hash_entry *entry = _mesa_hash_table_search(b->const_table, constant);
200 
201    if (entry)
202       return entry->data;
203 
204    struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
205    val->type = glsl_get_bare_type(type);
206 
207    if (glsl_type_is_vector_or_scalar(type)) {
208       unsigned num_components = glsl_get_vector_elements(val->type);
209       unsigned bit_size = glsl_get_bit_size(type);
210       nir_load_const_instr *load =
211          nir_load_const_instr_create(b->shader, num_components, bit_size);
212 
213       memcpy(load->value, constant->values,
214              sizeof(nir_const_value) * num_components);
215 
216       nir_instr_insert_before_cf_list(&b->nb.impl->body, &load->instr);
217       val->def = &load->def;
218    } else {
219       unsigned elems = glsl_get_length(val->type);
220       val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
221       if (glsl_type_is_array_or_matrix(type)) {
222          const struct glsl_type *elem_type = glsl_get_array_element(type);
223          for (unsigned i = 0; i < elems; i++) {
224             val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
225                                                 elem_type);
226          }
227       } else {
228          vtn_assert(glsl_type_is_struct_or_ifc(type));
229          for (unsigned i = 0; i < elems; i++) {
230             const struct glsl_type *elem_type = glsl_get_struct_field(type, i);
231             val->elems[i] = vtn_const_ssa_value(b, constant->elements[i],
232                                                 elem_type);
233          }
234       }
235    }
236 
237    return val;
238 }
239 
240 struct vtn_ssa_value *
241 vtn_ssa_value(struct vtn_builder *b, uint32_t value_id)
242 {
243    struct vtn_value *val = vtn_untyped_value(b, value_id);
244    switch (val->value_type) {
245    case vtn_value_type_undef:
246       return vtn_undef_ssa_value(b, val->type->type);
247 
248    case vtn_value_type_constant:
249       return vtn_const_ssa_value(b, val->constant, val->type->type);
250 
251    case vtn_value_type_ssa:
252       return val->ssa;
253 
254    case vtn_value_type_pointer:
255       vtn_assert(val->pointer->ptr_type && val->pointer->ptr_type->type);
256       struct vtn_ssa_value *ssa =
257          vtn_create_ssa_value(b, val->pointer->ptr_type->type);
258       ssa->def = vtn_pointer_to_ssa(b, val->pointer);
259       return ssa;
260 
261    default:
262       vtn_fail("Invalid type for an SSA value");
263    }
264 }
265 
266 struct vtn_value *
267 vtn_push_ssa_value(struct vtn_builder *b, uint32_t value_id,
268                    struct vtn_ssa_value *ssa)
269 {
270    struct vtn_type *type = vtn_get_value_type(b, value_id);
271 
272    /* See vtn_create_ssa_value */
273    vtn_fail_if(ssa->type != glsl_get_bare_type(type->type),
274                "Type mismatch for SPIR-V SSA value");
275 
276    struct vtn_value *val;
277    if (type->base_type == vtn_base_type_pointer) {
278       val = vtn_push_pointer(b, value_id, vtn_pointer_from_ssa(b, ssa->def, type));
279    } else {
280       /* Don't trip the value_type_ssa check in vtn_push_value */
281       val = vtn_push_value(b, value_id, vtn_value_type_invalid);
282       val->value_type = vtn_value_type_ssa;
283       val->ssa = ssa;
284    }
285 
286    return val;
287 }
288 
289 nir_ssa_def *
290 vtn_get_nir_ssa(struct vtn_builder *b, uint32_t value_id)
291 {
292    struct vtn_ssa_value *ssa = vtn_ssa_value(b, value_id);
293    vtn_fail_if(!glsl_type_is_vector_or_scalar(ssa->type),
294                "Expected a vector or scalar type");
295    return ssa->def;
296 }
297 
298 struct vtn_value *
299 vtn_push_nir_ssa(struct vtn_builder *b, uint32_t value_id, nir_ssa_def *def)
300 {
301    /* Types for all SPIR-V SSA values are set as part of a pre-pass so the
302     * type will be valid by the time we get here.
303     */
304    struct vtn_type *type = vtn_get_value_type(b, value_id);
305    vtn_fail_if(def->num_components != glsl_get_vector_elements(type->type) ||
306                def->bit_size != glsl_get_bit_size(type->type),
307                "Mismatch between NIR and SPIR-V type.");
308    struct vtn_ssa_value *ssa = vtn_create_ssa_value(b, type->type);
309    ssa->def = def;
310    return vtn_push_ssa_value(b, value_id, ssa);
311 }
312 
313 static enum gl_access_qualifier
314 spirv_to_gl_access_qualifier(struct vtn_builder *b,
315                              SpvAccessQualifier access_qualifier)
316 {
317    switch (access_qualifier) {
318    case SpvAccessQualifierReadOnly:
319       return ACCESS_NON_WRITEABLE;
320    case SpvAccessQualifierWriteOnly:
321       return ACCESS_NON_READABLE;
322    case SpvAccessQualifierReadWrite:
323       return 0;
324    default:
325       vtn_fail("Invalid image access qualifier");
326    }
327 }
328 
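/* Returns the image for an OpTypeImage-typed value as a NIR deref, by
 * casting the value's SSA handle to a uniform-mode deref of the image's
 * GLSL type.  If access is non-NULL, the image's access qualifier is OR'd
 * into it.
 */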
329 static nir_deref_instr *
330 vtn_get_image(struct vtn_builder *b, uint32_t value_id,
331               enum gl_access_qualifier *access)
332 {
333    struct vtn_type *type = vtn_get_value_type(b, value_id);
334    vtn_assert(type->base_type == vtn_base_type_image);
335    if (access)
336       *access |= spirv_to_gl_access_qualifier(b, type->access_qualifier);
337    return nir_build_deref_cast(&b->nb, vtn_get_nir_ssa(b, value_id),
338                                nir_var_uniform, type->glsl_image, 0);
339 }
340 
341 static void
342 vtn_push_image(struct vtn_builder *b, uint32_t value_id,
343                nir_deref_instr *deref, bool propagate_non_uniform)
344 {
345    struct vtn_type *type = vtn_get_value_type(b, value_id);
346    vtn_assert(type->base_type == vtn_base_type_image);
347    struct vtn_value *value = vtn_push_nir_ssa(b, value_id, &deref->dest.ssa);
348    value->propagated_non_uniform = propagate_non_uniform;
349 }
350 
351 static nir_deref_instr *
352 vtn_get_sampler(struct vtn_builder *b, uint32_t value_id)
353 {
354    struct vtn_type *type = vtn_get_value_type(b, value_id);
355    vtn_assert(type->base_type == vtn_base_type_sampler);
356    return nir_build_deref_cast(&b->nb, vtn_get_nir_ssa(b, value_id),
357                                nir_var_uniform, glsl_bare_sampler_type(), 0);
358 }
359 
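/* Sampled images are carried through NIR as a vec2 whose two components are
 * the image deref and the sampler deref.  The helpers below pack and unpack
 * that representation.
 */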
360 nir_ssa_def *
361 vtn_sampled_image_to_nir_ssa(struct vtn_builder *b,
362                              struct vtn_sampled_image si)
363 {
364    return nir_vec2(&b->nb, &si.image->dest.ssa, &si.sampler->dest.ssa);
365 }
366 
367 static void
368 vtn_push_sampled_image(struct vtn_builder *b, uint32_t value_id,
369                        struct vtn_sampled_image si, bool propagate_non_uniform)
370 {
371    struct vtn_type *type = vtn_get_value_type(b, value_id);
372    vtn_assert(type->base_type == vtn_base_type_sampled_image);
373    struct vtn_value *value = vtn_push_nir_ssa(b, value_id,
374                                               vtn_sampled_image_to_nir_ssa(b, si));
375    value->propagated_non_uniform = propagate_non_uniform;
376 }
377 
378 static struct vtn_sampled_image
379 vtn_get_sampled_image(struct vtn_builder *b, uint32_t value_id)
380 {
381    struct vtn_type *type = vtn_get_value_type(b, value_id);
382    vtn_assert(type->base_type == vtn_base_type_sampled_image);
383    nir_ssa_def *si_vec2 = vtn_get_nir_ssa(b, value_id);
384 
385    struct vtn_sampled_image si = { NULL, };
386    si.image = nir_build_deref_cast(&b->nb, nir_channel(&b->nb, si_vec2, 0),
387                                    nir_var_uniform,
388                                    type->image->glsl_image, 0);
389    si.sampler = nir_build_deref_cast(&b->nb, nir_channel(&b->nb, si_vec2, 1),
390                                      nir_var_uniform,
391                                      glsl_bare_sampler_type(), 0);
392    return si;
393 }
394 
395 static const char *
396 vtn_string_literal(struct vtn_builder *b, const uint32_t *words,
397                    unsigned word_count, unsigned *words_used)
398 {
399    /* From the SPIR-V spec:
400     *
401     *    "A string is interpreted as a nul-terminated stream of characters.
402     *    The character set is Unicode in the UTF-8 encoding scheme. The UTF-8
403     *    octets (8-bit bytes) are packed four per word, following the
404     *    little-endian convention (i.e., the first octet is in the
405     *    lowest-order 8 bits of the word). The final word contains the
406     *    string’s nul-termination character (0), and all contents past the
407     *    end of the string in the final word are padded with 0."
408     *
409     * On big-endian, we need to byte-swap.
410     */
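   /* As an example of that layout: the 12-character literal "GLSL.std.450"
    * plus its terminating nul is 13 bytes, so it packs into four words and
    * words_used below comes out as DIV_ROUND_UP(13, 4) = 4.
    */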
411 #if UTIL_ARCH_BIG_ENDIAN
412    {
413       uint32_t *copy = ralloc_array(b, uint32_t, word_count);
414       for (unsigned i = 0; i < word_count; i++)
415          copy[i] = util_bswap32(words[i]);
416       words = copy;
417    }
418 #endif
419 
420    const char *str = (char *)words;
421    const char *end = memchr(str, 0, word_count * 4);
422    vtn_fail_if(end == NULL, "String is not null-terminated");
423 
424    if (words_used)
425       *words_used = DIV_ROUND_UP(end - str + 1, sizeof(*words));
426 
427    return str;
428 }
429 
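/* Walks the instruction stream from start to end.  Each instruction's first
 * word packs the opcode in the low bits (SpvOpCodeMask) and the total word
 * count in the high bits (SpvWordCountShift).  OpLine/OpNoLine update the
 * debug location, OpNop is skipped, and everything else goes to the handler;
 * if the handler returns false, iteration stops and the current word pointer
 * is returned.
 */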
430 const uint32_t *
431 vtn_foreach_instruction(struct vtn_builder *b, const uint32_t *start,
432                         const uint32_t *end, vtn_instruction_handler handler)
433 {
434    b->file = NULL;
435    b->line = -1;
436    b->col = -1;
437 
438    const uint32_t *w = start;
439    while (w < end) {
440       SpvOp opcode = w[0] & SpvOpCodeMask;
441       unsigned count = w[0] >> SpvWordCountShift;
442       vtn_assert(count >= 1 && w + count <= end);
443 
444       b->spirv_offset = (uint8_t *)w - (uint8_t *)b->spirv;
445 
446       switch (opcode) {
447       case SpvOpNop:
448          break; /* Do nothing */
449 
450       case SpvOpLine:
451          b->file = vtn_value(b, w[1], vtn_value_type_string)->str;
452          b->line = w[2];
453          b->col = w[3];
454          break;
455 
456       case SpvOpNoLine:
457          b->file = NULL;
458          b->line = -1;
459          b->col = -1;
460          break;
461 
462       default:
463          if (!handler(b, opcode, w, count))
464             return w;
465          break;
466       }
467 
468       w += count;
469    }
470 
471    b->spirv_offset = 0;
472    b->file = NULL;
473    b->line = -1;
474    b->col = -1;
475 
476    assert(w == end);
477    return w;
478 }
479 
480 static bool
481 vtn_handle_non_semantic_instruction(struct vtn_builder *b, SpvOp ext_opcode,
482                                     const uint32_t *w, unsigned count)
483 {
484    /* Do nothing. */
485    return true;
486 }
487 
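/* OpExtInstImport binds an extension handler to the result ID based on the
 * extended instruction set's name; OpExtInst then dispatches through that
 * handler.  Unrecognized (or not enabled) instruction sets are a hard
 * failure, except that any "NonSemantic." set is accepted and ignored.
 */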
488 static void
489 vtn_handle_extension(struct vtn_builder *b, SpvOp opcode,
490                      const uint32_t *w, unsigned count)
491 {
492    switch (opcode) {
493    case SpvOpExtInstImport: {
494       struct vtn_value *val = vtn_push_value(b, w[1], vtn_value_type_extension);
495       const char *ext = vtn_string_literal(b, &w[2], count - 2, NULL);
496       if (strcmp(ext, "GLSL.std.450") == 0) {
497          val->ext_handler = vtn_handle_glsl450_instruction;
498       } else if ((strcmp(ext, "SPV_AMD_gcn_shader") == 0)
499                 && (b->options && b->options->caps.amd_gcn_shader)) {
500          val->ext_handler = vtn_handle_amd_gcn_shader_instruction;
501       } else if ((strcmp(ext, "SPV_AMD_shader_ballot") == 0)
502                 && (b->options && b->options->caps.amd_shader_ballot)) {
503          val->ext_handler = vtn_handle_amd_shader_ballot_instruction;
504       } else if ((strcmp(ext, "SPV_AMD_shader_trinary_minmax") == 0)
505                 && (b->options && b->options->caps.amd_trinary_minmax)) {
506          val->ext_handler = vtn_handle_amd_shader_trinary_minmax_instruction;
507       } else if ((strcmp(ext, "SPV_AMD_shader_explicit_vertex_parameter") == 0)
508                 && (b->options && b->options->caps.amd_shader_explicit_vertex_parameter)) {
509          val->ext_handler = vtn_handle_amd_shader_explicit_vertex_parameter_instruction;
510       } else if (strcmp(ext, "OpenCL.std") == 0) {
511          val->ext_handler = vtn_handle_opencl_instruction;
512       } else if (strstr(ext, "NonSemantic.") == ext) {
513          val->ext_handler = vtn_handle_non_semantic_instruction;
514       } else {
515          vtn_fail("Unsupported extension: %s", ext);
516       }
517       break;
518    }
519 
520    case SpvOpExtInst: {
521       struct vtn_value *val = vtn_value(b, w[3], vtn_value_type_extension);
522       bool handled = val->ext_handler(b, w[4], w, count);
523       vtn_assert(handled);
524       break;
525    }
526 
527    default:
528       vtn_fail_with_opcode("Unhandled opcode", opcode);
529    }
530 }
531 
532 static void
533 _foreach_decoration_helper(struct vtn_builder *b,
534                            struct vtn_value *base_value,
535                            int parent_member,
536                            struct vtn_value *value,
537                            vtn_decoration_foreach_cb cb, void *data)
538 {
539    for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
540       int member;
541       if (dec->scope == VTN_DEC_DECORATION) {
542          member = parent_member;
543       } else if (dec->scope >= VTN_DEC_STRUCT_MEMBER0) {
544          vtn_fail_if(value->value_type != vtn_value_type_type ||
545                      value->type->base_type != vtn_base_type_struct,
546                      "OpMemberDecorate and OpGroupMemberDecorate are only "
547                      "allowed on OpTypeStruct");
548          /* This means we haven't recursed yet */
549          assert(value == base_value);
550 
551          member = dec->scope - VTN_DEC_STRUCT_MEMBER0;
552 
553          vtn_fail_if(member >= base_value->type->length,
554                      "OpMemberDecorate specifies member %d but the "
555                      "OpTypeStruct has only %u members",
556                      member, base_value->type->length);
557       } else {
558          /* Not a decoration */
559          assert(dec->scope == VTN_DEC_EXECUTION_MODE);
560          continue;
561       }
562 
563       if (dec->group) {
564          assert(dec->group->value_type == vtn_value_type_decoration_group);
565          _foreach_decoration_helper(b, base_value, member, dec->group,
566                                     cb, data);
567       } else {
568          cb(b, base_value, member, dec, data);
569       }
570    }
571 }
572 
573 /** Iterates (recursively if needed) over all of the decorations on a value
574  *
575  * This function iterates over all of the decorations applied to a given
576  * value.  If it encounters a decoration group, it recurses into the group
577  * and iterates over all of those decorations as well.
578  */
579 void
580 vtn_foreach_decoration(struct vtn_builder *b, struct vtn_value *value,
581                        vtn_decoration_foreach_cb cb, void *data)
582 {
583    _foreach_decoration_helper(b, value, -1, value, cb, data);
584 }
585 
586 void
587 vtn_foreach_execution_mode(struct vtn_builder *b, struct vtn_value *value,
588                            vtn_execution_mode_foreach_cb cb, void *data)
589 {
590    for (struct vtn_decoration *dec = value->decoration; dec; dec = dec->next) {
591       if (dec->scope != VTN_DEC_EXECUTION_MODE)
592          continue;
593 
594       assert(dec->group == NULL);
595       cb(b, value, dec, data);
596    }
597 }
598 
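/* Decorations are not applied immediately.  Each one is recorded as a
 * vtn_decoration prepended to the target value's decoration list: plain
 * decorations use the VTN_DEC_DECORATION scope, member decorations encode
 * the member index as VTN_DEC_STRUCT_MEMBER0 + member, and execution modes
 * use VTN_DEC_EXECUTION_MODE.  Group decorations record the group so that
 * vtn_foreach_decoration can expand them later.
 */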
599 void
600 vtn_handle_decoration(struct vtn_builder *b, SpvOp opcode,
601                       const uint32_t *w, unsigned count)
602 {
603    const uint32_t *w_end = w + count;
604    const uint32_t target = w[1];
605    w += 2;
606 
607    switch (opcode) {
608    case SpvOpDecorationGroup:
609       vtn_push_value(b, target, vtn_value_type_decoration_group);
610       break;
611 
612    case SpvOpDecorate:
613    case SpvOpDecorateId:
614    case SpvOpMemberDecorate:
615    case SpvOpDecorateString:
616    case SpvOpMemberDecorateString:
617    case SpvOpExecutionMode:
618    case SpvOpExecutionModeId: {
619       struct vtn_value *val = vtn_untyped_value(b, target);
620 
621       struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);
622       switch (opcode) {
623       case SpvOpDecorate:
624       case SpvOpDecorateId:
625       case SpvOpDecorateString:
626          dec->scope = VTN_DEC_DECORATION;
627          break;
628       case SpvOpMemberDecorate:
629       case SpvOpMemberDecorateString:
630          dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(w++);
631          vtn_fail_if(dec->scope < VTN_DEC_STRUCT_MEMBER0, /* overflow */
632                      "Member argument of OpMemberDecorate too large");
633          break;
634       case SpvOpExecutionMode:
635       case SpvOpExecutionModeId:
636          dec->scope = VTN_DEC_EXECUTION_MODE;
637          break;
638       default:
639          unreachable("Invalid decoration opcode");
640       }
641       dec->decoration = *(w++);
642       dec->operands = w;
643 
644       /* Link into the list */
645       dec->next = val->decoration;
646       val->decoration = dec;
647       break;
648    }
649 
650    case SpvOpGroupMemberDecorate:
651    case SpvOpGroupDecorate: {
652       struct vtn_value *group =
653          vtn_value(b, target, vtn_value_type_decoration_group);
654 
655       for (; w < w_end; w++) {
656          struct vtn_value *val = vtn_untyped_value(b, *w);
657          struct vtn_decoration *dec = rzalloc(b, struct vtn_decoration);
658 
659          dec->group = group;
660          if (opcode == SpvOpGroupDecorate) {
661             dec->scope = VTN_DEC_DECORATION;
662          } else {
663             dec->scope = VTN_DEC_STRUCT_MEMBER0 + *(++w);
664             vtn_fail_if(dec->scope < 0, /* Check for overflow */
665                         "Member argument of OpGroupMemberDecorate too large");
666          }
667 
668          /* Link into the list */
669          dec->next = val->decoration;
670          val->decoration = dec;
671       }
672       break;
673    }
674 
675    default:
676       unreachable("Unhandled opcode");
677    }
678 }
679 
680 struct member_decoration_ctx {
681    unsigned num_fields;
682    struct glsl_struct_field *fields;
683    struct vtn_type *type;
684 };
685 
686 /**
687  * Returns true if the given type contains a struct decorated Block or
688  * BufferBlock
689  */
690 bool
691 vtn_type_contains_block(struct vtn_builder *b, struct vtn_type *type)
692 {
693    switch (type->base_type) {
694    case vtn_base_type_array:
695       return vtn_type_contains_block(b, type->array_element);
696    case vtn_base_type_struct:
697       if (type->block || type->buffer_block)
698          return true;
699       for (unsigned i = 0; i < type->length; i++) {
700          if (vtn_type_contains_block(b, type->members[i]))
701             return true;
702       }
703       return false;
704    default:
705       return false;
706    }
707 }
708 
709 /** Returns true if two types are "compatible", i.e. you can do an OpLoad,
710  * OpStore, or OpCopyMemory between them without breaking anything.
711  * Technically, the SPIR-V rules require the exact same type ID but this lets
712  * us internally be a bit looser.
713  */
714 bool
715 vtn_types_compatible(struct vtn_builder *b,
716                      struct vtn_type *t1, struct vtn_type *t2)
717 {
718    if (t1->id == t2->id)
719       return true;
720 
721    if (t1->base_type != t2->base_type)
722       return false;
723 
724    switch (t1->base_type) {
725    case vtn_base_type_void:
726    case vtn_base_type_scalar:
727    case vtn_base_type_vector:
728    case vtn_base_type_matrix:
729    case vtn_base_type_image:
730    case vtn_base_type_sampler:
731    case vtn_base_type_sampled_image:
732    case vtn_base_type_event:
733       return t1->type == t2->type;
734 
735    case vtn_base_type_array:
736       return t1->length == t2->length &&
737              vtn_types_compatible(b, t1->array_element, t2->array_element);
738 
739    case vtn_base_type_pointer:
740       return vtn_types_compatible(b, t1->deref, t2->deref);
741 
742    case vtn_base_type_struct:
743       if (t1->length != t2->length)
744          return false;
745 
746       for (unsigned i = 0; i < t1->length; i++) {
747          if (!vtn_types_compatible(b, t1->members[i], t2->members[i]))
748             return false;
749       }
750       return true;
751 
752    case vtn_base_type_accel_struct:
753       return true;
754 
755    case vtn_base_type_function:
756       /* This case shouldn't get hit since you can't copy around function
757        * types.  Just require them to be identical.
758        */
759       return false;
760    }
761 
762    vtn_fail("Invalid base type");
763 }
764 
765 struct vtn_type *
766 vtn_type_without_array(struct vtn_type *type)
767 {
768    while (type->base_type == vtn_base_type_array)
769       type = type->array_element;
770    return type;
771 }
772 
773 /* does a shallow copy of a vtn_type */
774 
775 static struct vtn_type *
776 vtn_type_copy(struct vtn_builder *b, struct vtn_type *src)
777 {
778    struct vtn_type *dest = ralloc(b, struct vtn_type);
779    *dest = *src;
780 
781    switch (src->base_type) {
782    case vtn_base_type_void:
783    case vtn_base_type_scalar:
784    case vtn_base_type_vector:
785    case vtn_base_type_matrix:
786    case vtn_base_type_array:
787    case vtn_base_type_pointer:
788    case vtn_base_type_image:
789    case vtn_base_type_sampler:
790    case vtn_base_type_sampled_image:
791    case vtn_base_type_event:
792    case vtn_base_type_accel_struct:
793       /* Nothing more to do */
794       break;
795 
796    case vtn_base_type_struct:
797       dest->members = ralloc_array(b, struct vtn_type *, src->length);
798       memcpy(dest->members, src->members,
799              src->length * sizeof(src->members[0]));
800 
801       dest->offsets = ralloc_array(b, unsigned, src->length);
802       memcpy(dest->offsets, src->offsets,
803              src->length * sizeof(src->offsets[0]));
804       break;
805 
806    case vtn_base_type_function:
807       dest->params = ralloc_array(b, struct vtn_type *, src->length);
808       memcpy(dest->params, src->params, src->length * sizeof(src->params[0]));
809       break;
810    }
811 
812    return dest;
813 }
814 
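/* Re-wraps the given type in the same (possibly nested) array shape as
 * array_type, preserving each level's length and explicit stride.
 */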
815 static const struct glsl_type *
816 wrap_type_in_array(const struct glsl_type *type,
817                    const struct glsl_type *array_type)
818 {
819    if (!glsl_type_is_array(array_type))
820       return type;
821 
822    const struct glsl_type *elem_type =
823       wrap_type_in_array(type, glsl_get_array_element(array_type));
824    return glsl_array_type(elem_type, glsl_get_length(array_type),
825                           glsl_get_explicit_stride(array_type));
826 }
827 
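/* Decides whether the explicit layout (offsets and strides) encoded in a
 * type must be kept on the NIR type for variables in the given mode.
 */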
828 static bool
829 vtn_type_needs_explicit_layout(struct vtn_builder *b, enum vtn_variable_mode mode)
830 {
831    /* For OpenCL we never want to strip the info from the types, and it makes
832     * type comparisons easier in later stages.
833     */
834    if (b->options->environment == NIR_SPIRV_OPENCL)
835       return true;
836 
837    switch (mode) {
838    case vtn_variable_mode_input:
839    case vtn_variable_mode_output:
840       /* Layout decorations are kept because we need offsets for XFB arrays
841        * of blocks.
842        */
843       return b->shader->info.has_transform_feedback_varyings;
844 
845    case vtn_variable_mode_ssbo:
846    case vtn_variable_mode_phys_ssbo:
847    case vtn_variable_mode_ubo:
848    case vtn_variable_mode_push_constant:
849    case vtn_variable_mode_shader_record:
850       return true;
851 
852    default:
853       return false;
854    }
855 }
856 
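/* Returns the glsl_type to use for a NIR variable of this vtn_type in the
 * given storage mode.  AtomicCounter variables become (arrays of)
 * atomic_uint, uniform-mode images and samplers are lowered to their bare
 * GLSL image/sampler types (rebuilding arrays and structs around them as
 * needed), and explicit layout information is stripped for modes that do
 * not require it.
 */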
857 const struct glsl_type *
858 vtn_type_get_nir_type(struct vtn_builder *b, struct vtn_type *type,
859                       enum vtn_variable_mode mode)
860 {
861    if (mode == vtn_variable_mode_atomic_counter) {
862       vtn_fail_if(glsl_without_array(type->type) != glsl_uint_type(),
863                   "Variables in the AtomicCounter storage class should be "
864                   "(possibly arrays of arrays of) uint.");
865       return wrap_type_in_array(glsl_atomic_uint_type(), type->type);
866    }
867 
868    if (mode == vtn_variable_mode_uniform) {
869       switch (type->base_type) {
870       case vtn_base_type_array: {
871          const struct glsl_type *elem_type =
872             vtn_type_get_nir_type(b, type->array_element, mode);
873 
874          return glsl_array_type(elem_type, type->length,
875                                 glsl_get_explicit_stride(type->type));
876       }
877 
878       case vtn_base_type_struct: {
879          bool need_new_struct = false;
880          const uint32_t num_fields = type->length;
881          NIR_VLA(struct glsl_struct_field, fields, num_fields);
882          for (unsigned i = 0; i < num_fields; i++) {
883             fields[i] = *glsl_get_struct_field_data(type->type, i);
884             const struct glsl_type *field_nir_type =
885                vtn_type_get_nir_type(b, type->members[i], mode);
886             if (fields[i].type != field_nir_type) {
887                fields[i].type = field_nir_type;
888                need_new_struct = true;
889             }
890          }
891          if (need_new_struct) {
892             if (glsl_type_is_interface(type->type)) {
893                return glsl_interface_type(fields, num_fields,
894                                           /* packing */ 0, false,
895                                           glsl_get_type_name(type->type));
896             } else {
897                return glsl_struct_type(fields, num_fields,
898                                        glsl_get_type_name(type->type),
899                                        glsl_struct_type_is_packed(type->type));
900             }
901          } else {
902             /* No changes, just pass it on */
903             return type->type;
904          }
905       }
906 
907       case vtn_base_type_image:
908          return type->glsl_image;
909 
910       case vtn_base_type_sampler:
911          return glsl_bare_sampler_type();
912 
913       case vtn_base_type_sampled_image:
914          return type->image->glsl_image;
915 
916       default:
917          return type->type;
918       }
919    }
920 
921    /* Layout decorations are allowed but ignored in certain conditions,
922     * to allow SPIR-V generators to perform type deduplication.  Discard
923     * unnecessary ones when passing to NIR.
924     */
925    if (!vtn_type_needs_explicit_layout(b, mode))
926       return glsl_get_bare_type(type->type);
927 
928    return type->type;
929 }
930 
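/* Returns a mutable copy of the (possibly array-of-array-of-...) matrix type
 * at the given struct member, copying each level along the way so that
 * decorations such as RowMajor and MatrixStride only modify this struct's
 * version of the type.
 */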
931 static struct vtn_type *
932 mutable_matrix_member(struct vtn_builder *b, struct vtn_type *type, int member)
933 {
934    type->members[member] = vtn_type_copy(b, type->members[member]);
935    type = type->members[member];
936 
937    /* We may have an array of matrices.... Oh, joy! */
938    while (glsl_type_is_array(type->type)) {
939       type->array_element = vtn_type_copy(b, type->array_element);
940       type = type->array_element;
941    }
942 
943    vtn_assert(glsl_type_is_matrix(type->type));
944 
945    return type;
946 }
947 
948 static void
949 vtn_handle_access_qualifier(struct vtn_builder *b, struct vtn_type *type,
950                             int member, enum gl_access_qualifier access)
951 {
952    type->members[member] = vtn_type_copy(b, type->members[member]);
953    type = type->members[member];
954 
955    type->access |= access;
956 }
957 
958 static void
959 array_stride_decoration_cb(struct vtn_builder *b,
960                            struct vtn_value *val, int member,
961                            const struct vtn_decoration *dec, void *void_ctx)
962 {
963    struct vtn_type *type = val->type;
964 
965    if (dec->decoration == SpvDecorationArrayStride) {
966       if (vtn_type_contains_block(b, type)) {
967          vtn_warn("The ArrayStride decoration cannot be applied to an array "
968                   "type which contains a structure type decorated Block "
969                   "or BufferBlock");
970          /* Ignore the decoration */
971       } else {
972          vtn_fail_if(dec->operands[0] == 0, "ArrayStride must be non-zero");
973          type->stride = dec->operands[0];
974       }
975    }
976 }
977 
978 static void
979 struct_member_decoration_cb(struct vtn_builder *b,
980                             UNUSED struct vtn_value *val, int member,
981                             const struct vtn_decoration *dec, void *void_ctx)
982 {
983    struct member_decoration_ctx *ctx = void_ctx;
984 
985    if (member < 0)
986       return;
987 
988    assert(member < ctx->num_fields);
989 
990    switch (dec->decoration) {
991    case SpvDecorationRelaxedPrecision:
992    case SpvDecorationUniform:
993    case SpvDecorationUniformId:
994       break; /* FIXME: Do nothing with this for now. */
995    case SpvDecorationNonWritable:
996       vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_NON_WRITEABLE);
997       break;
998    case SpvDecorationNonReadable:
999       vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_NON_READABLE);
1000       break;
1001    case SpvDecorationVolatile:
1002       vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_VOLATILE);
1003       break;
1004    case SpvDecorationCoherent:
1005       vtn_handle_access_qualifier(b, ctx->type, member, ACCESS_COHERENT);
1006       break;
1007    case SpvDecorationNoPerspective:
1008       ctx->fields[member].interpolation = INTERP_MODE_NOPERSPECTIVE;
1009       break;
1010    case SpvDecorationFlat:
1011       ctx->fields[member].interpolation = INTERP_MODE_FLAT;
1012       break;
1013    case SpvDecorationExplicitInterpAMD:
1014       ctx->fields[member].interpolation = INTERP_MODE_EXPLICIT;
1015       break;
1016    case SpvDecorationCentroid:
1017       ctx->fields[member].centroid = true;
1018       break;
1019    case SpvDecorationSample:
1020       ctx->fields[member].sample = true;
1021       break;
1022    case SpvDecorationStream:
1023       /* This is handled later by var_decoration_cb in vtn_variables.c */
1024       break;
1025    case SpvDecorationLocation:
1026       ctx->fields[member].location = dec->operands[0];
1027       break;
1028    case SpvDecorationComponent:
1029       break; /* FIXME: What should we do with these? */
1030    case SpvDecorationBuiltIn:
1031       ctx->type->members[member] = vtn_type_copy(b, ctx->type->members[member]);
1032       ctx->type->members[member]->is_builtin = true;
1033       ctx->type->members[member]->builtin = dec->operands[0];
1034       ctx->type->builtin_block = true;
1035       break;
1036    case SpvDecorationOffset:
1037       ctx->type->offsets[member] = dec->operands[0];
1038       ctx->fields[member].offset = dec->operands[0];
1039       break;
1040    case SpvDecorationMatrixStride:
1041       /* Handled as a second pass */
1042       break;
1043    case SpvDecorationColMajor:
1044       break; /* Nothing to do here.  Column-major is the default. */
1045    case SpvDecorationRowMajor:
1046       mutable_matrix_member(b, ctx->type, member)->row_major = true;
1047       break;
1048 
1049    case SpvDecorationPatch:
1050       break;
1051 
1052    case SpvDecorationSpecId:
1053    case SpvDecorationBlock:
1054    case SpvDecorationBufferBlock:
1055    case SpvDecorationArrayStride:
1056    case SpvDecorationGLSLShared:
1057    case SpvDecorationGLSLPacked:
1058    case SpvDecorationInvariant:
1059    case SpvDecorationRestrict:
1060    case SpvDecorationAliased:
1061    case SpvDecorationConstant:
1062    case SpvDecorationIndex:
1063    case SpvDecorationBinding:
1064    case SpvDecorationDescriptorSet:
1065    case SpvDecorationLinkageAttributes:
1066    case SpvDecorationNoContraction:
1067    case SpvDecorationInputAttachmentIndex:
1068    case SpvDecorationCPacked:
1069       vtn_warn("Decoration not allowed on struct members: %s",
1070                spirv_decoration_to_string(dec->decoration));
1071       break;
1072 
1073    case SpvDecorationXfbBuffer:
1074    case SpvDecorationXfbStride:
1075       /* This is handled later by var_decoration_cb in vtn_variables.c */
1076       break;
1077 
1078    case SpvDecorationSaturatedConversion:
1079    case SpvDecorationFuncParamAttr:
1080    case SpvDecorationFPRoundingMode:
1081    case SpvDecorationFPFastMathMode:
1082    case SpvDecorationAlignment:
1083       if (b->shader->info.stage != MESA_SHADER_KERNEL) {
1084          vtn_warn("Decoration only allowed for CL-style kernels: %s",
1085                   spirv_decoration_to_string(dec->decoration));
1086       }
1087       break;
1088 
1089    case SpvDecorationUserSemantic:
1090    case SpvDecorationUserTypeGOOGLE:
1091       /* User semantic decorations can safely be ignored by the driver. */
1092       break;
1093 
1094    default:
1095       vtn_fail_with_decoration("Unhandled decoration", dec->decoration);
1096    }
1097 }
1098 
1099 /** Chases the array type all the way down to the tail and rewrites the
1100  * glsl_types to be based off the tail's glsl_type.
1101  */
1102 static void
1103 vtn_array_type_rewrite_glsl_type(struct vtn_type *type)
1104 {
1105    if (type->base_type != vtn_base_type_array)
1106       return;
1107 
1108    vtn_array_type_rewrite_glsl_type(type->array_element);
1109 
1110    type->type = glsl_array_type(type->array_element->type,
1111                                 type->length, type->stride);
1112 }
1113 
1114 /* Matrix strides are handled as a separate pass because we need to know
1115  * whether the matrix is row-major or not first.
1116  */
1117 static void
1118 struct_member_matrix_stride_cb(struct vtn_builder *b,
1119                                UNUSED struct vtn_value *val, int member,
1120                                const struct vtn_decoration *dec,
1121                                void *void_ctx)
1122 {
1123    if (dec->decoration != SpvDecorationMatrixStride)
1124       return;
1125 
1126    vtn_fail_if(member < 0,
1127                "The MatrixStride decoration is only allowed on members "
1128                "of OpTypeStruct");
1129    vtn_fail_if(dec->operands[0] == 0, "MatrixStride must be non-zero");
1130 
1131    struct member_decoration_ctx *ctx = void_ctx;
1132 
1133    struct vtn_type *mat_type = mutable_matrix_member(b, ctx->type, member);
1134    if (mat_type->row_major) {
1135       mat_type->array_element = vtn_type_copy(b, mat_type->array_element);
1136       mat_type->stride = mat_type->array_element->stride;
1137       mat_type->array_element->stride = dec->operands[0];
1138 
1139       mat_type->type = glsl_explicit_matrix_type(mat_type->type,
1140                                                  dec->operands[0], true);
1141       mat_type->array_element->type = glsl_get_column_type(mat_type->type);
1142    } else {
1143       vtn_assert(mat_type->array_element->stride > 0);
1144       mat_type->stride = dec->operands[0];
1145 
1146       mat_type->type = glsl_explicit_matrix_type(mat_type->type,
1147                                                  dec->operands[0], false);
1148    }
1149 
1150    /* Now that we've replaced the glsl_type with a properly strided matrix
1151     * type, rewrite the member type so that it's an array of the proper kind
1152     * of glsl_type.
1153     */
1154    vtn_array_type_rewrite_glsl_type(ctx->type->members[member]);
1155    ctx->fields[member].type = ctx->type->members[member]->type;
1156 }
1157 
1158 static void
1159 struct_packed_decoration_cb(struct vtn_builder *b,
1160                             struct vtn_value *val, int member,
1161                             const struct vtn_decoration *dec, void *void_ctx)
1162 {
1163    vtn_assert(val->type->base_type == vtn_base_type_struct);
1164    if (dec->decoration == SpvDecorationCPacked) {
1165       if (b->shader->info.stage != MESA_SHADER_KERNEL) {
1166          vtn_warn("Decoration only allowed for CL-style kernels: %s",
1167                   spirv_decoration_to_string(dec->decoration));
1168       }
1169       val->type->packed = true;
1170    }
1171 }
1172 
1173 static void
1174 struct_block_decoration_cb(struct vtn_builder *b,
1175                            struct vtn_value *val, int member,
1176                            const struct vtn_decoration *dec, void *ctx)
1177 {
1178    if (member != -1)
1179       return;
1180 
1181    struct vtn_type *type = val->type;
1182    if (dec->decoration == SpvDecorationBlock)
1183       type->block = true;
1184    else if (dec->decoration == SpvDecorationBufferBlock)
1185       type->buffer_block = true;
1186 }
1187 
1188 static void
1189 type_decoration_cb(struct vtn_builder *b,
1190                    struct vtn_value *val, int member,
1191                    const struct vtn_decoration *dec, UNUSED void *ctx)
1192 {
1193    struct vtn_type *type = val->type;
1194 
1195    if (member != -1) {
1196       /* This should have been handled by OpTypeStruct */
1197       assert(val->type->base_type == vtn_base_type_struct);
1198       assert(member >= 0 && member < val->type->length);
1199       return;
1200    }
1201 
1202    switch (dec->decoration) {
1203    case SpvDecorationArrayStride:
1204       vtn_assert(type->base_type == vtn_base_type_array ||
1205                  type->base_type == vtn_base_type_pointer);
1206       break;
1207    case SpvDecorationBlock:
1208       vtn_assert(type->base_type == vtn_base_type_struct);
1209       vtn_assert(type->block);
1210       break;
1211    case SpvDecorationBufferBlock:
1212       vtn_assert(type->base_type == vtn_base_type_struct);
1213       vtn_assert(type->buffer_block);
1214       break;
1215    case SpvDecorationGLSLShared:
1216    case SpvDecorationGLSLPacked:
1217       /* Ignore these, since we get explicit offsets anyways */
1218       break;
1219 
1220    case SpvDecorationRowMajor:
1221    case SpvDecorationColMajor:
1222    case SpvDecorationMatrixStride:
1223    case SpvDecorationBuiltIn:
1224    case SpvDecorationNoPerspective:
1225    case SpvDecorationFlat:
1226    case SpvDecorationPatch:
1227    case SpvDecorationCentroid:
1228    case SpvDecorationSample:
1229    case SpvDecorationExplicitInterpAMD:
1230    case SpvDecorationVolatile:
1231    case SpvDecorationCoherent:
1232    case SpvDecorationNonWritable:
1233    case SpvDecorationNonReadable:
1234    case SpvDecorationUniform:
1235    case SpvDecorationUniformId:
1236    case SpvDecorationLocation:
1237    case SpvDecorationComponent:
1238    case SpvDecorationOffset:
1239    case SpvDecorationXfbBuffer:
1240    case SpvDecorationXfbStride:
1241    case SpvDecorationUserSemantic:
1242       vtn_warn("Decoration only allowed for struct members: %s",
1243                spirv_decoration_to_string(dec->decoration));
1244       break;
1245 
1246    case SpvDecorationStream:
1247       /* We don't need to do anything here: the stream is filled in when
1248        * applying the decoration to a variable.  Just check that, when it is
1249        * not applied to a struct member, the target is a struct.
1250        */
1251       vtn_assert(type->base_type == vtn_base_type_struct);
1252       break;
1253 
1254    case SpvDecorationRelaxedPrecision:
1255    case SpvDecorationSpecId:
1256    case SpvDecorationInvariant:
1257    case SpvDecorationRestrict:
1258    case SpvDecorationAliased:
1259    case SpvDecorationConstant:
1260    case SpvDecorationIndex:
1261    case SpvDecorationBinding:
1262    case SpvDecorationDescriptorSet:
1263    case SpvDecorationLinkageAttributes:
1264    case SpvDecorationNoContraction:
1265    case SpvDecorationInputAttachmentIndex:
1266       vtn_warn("Decoration not allowed on types: %s",
1267                spirv_decoration_to_string(dec->decoration));
1268       break;
1269 
1270    case SpvDecorationCPacked:
1271       /* Handled when parsing a struct type, nothing to do here. */
1272       break;
1273 
1274    case SpvDecorationSaturatedConversion:
1275    case SpvDecorationFuncParamAttr:
1276    case SpvDecorationFPRoundingMode:
1277    case SpvDecorationFPFastMathMode:
1278    case SpvDecorationAlignment:
1279       vtn_warn("Decoration only allowed for CL-style kernels: %s",
1280                spirv_decoration_to_string(dec->decoration));
1281       break;
1282 
1283    case SpvDecorationUserTypeGOOGLE:
1284       /* User semantic decorations can safely be ignored by the driver. */
1285       break;
1286 
1287    default:
1288       vtn_fail_with_decoration("Unhandled decoration", dec->decoration);
1289    }
1290 }
1291 
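/* Translates a SpvImageFormat enum into the corresponding Gallium
 * PIPE_FORMAT.  Unknown maps to PIPE_FORMAT_NONE; any format we do not
 * handle is a hard failure.
 */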
1292 static unsigned
1293 translate_image_format(struct vtn_builder *b, SpvImageFormat format)
1294 {
1295    switch (format) {
1296    case SpvImageFormatUnknown:      return PIPE_FORMAT_NONE;
1297    case SpvImageFormatRgba32f:      return PIPE_FORMAT_R32G32B32A32_FLOAT;
1298    case SpvImageFormatRgba16f:      return PIPE_FORMAT_R16G16B16A16_FLOAT;
1299    case SpvImageFormatR32f:         return PIPE_FORMAT_R32_FLOAT;
1300    case SpvImageFormatRgba8:        return PIPE_FORMAT_R8G8B8A8_UNORM;
1301    case SpvImageFormatRgba8Snorm:   return PIPE_FORMAT_R8G8B8A8_SNORM;
1302    case SpvImageFormatRg32f:        return PIPE_FORMAT_R32G32_FLOAT;
1303    case SpvImageFormatRg16f:        return PIPE_FORMAT_R16G16_FLOAT;
1304    case SpvImageFormatR11fG11fB10f: return PIPE_FORMAT_R11G11B10_FLOAT;
1305    case SpvImageFormatR16f:         return PIPE_FORMAT_R16_FLOAT;
1306    case SpvImageFormatRgba16:       return PIPE_FORMAT_R16G16B16A16_UNORM;
1307    case SpvImageFormatRgb10A2:      return PIPE_FORMAT_R10G10B10A2_UNORM;
1308    case SpvImageFormatRg16:         return PIPE_FORMAT_R16G16_UNORM;
1309    case SpvImageFormatRg8:          return PIPE_FORMAT_R8G8_UNORM;
1310    case SpvImageFormatR16:          return PIPE_FORMAT_R16_UNORM;
1311    case SpvImageFormatR8:           return PIPE_FORMAT_R8_UNORM;
1312    case SpvImageFormatRgba16Snorm:  return PIPE_FORMAT_R16G16B16A16_SNORM;
1313    case SpvImageFormatRg16Snorm:    return PIPE_FORMAT_R16G16_SNORM;
1314    case SpvImageFormatRg8Snorm:     return PIPE_FORMAT_R8G8_SNORM;
1315    case SpvImageFormatR16Snorm:     return PIPE_FORMAT_R16_SNORM;
1316    case SpvImageFormatR8Snorm:      return PIPE_FORMAT_R8_SNORM;
1317    case SpvImageFormatRgba32i:      return PIPE_FORMAT_R32G32B32A32_SINT;
1318    case SpvImageFormatRgba16i:      return PIPE_FORMAT_R16G16B16A16_SINT;
1319    case SpvImageFormatRgba8i:       return PIPE_FORMAT_R8G8B8A8_SINT;
1320    case SpvImageFormatR32i:         return PIPE_FORMAT_R32_SINT;
1321    case SpvImageFormatRg32i:        return PIPE_FORMAT_R32G32_SINT;
1322    case SpvImageFormatRg16i:        return PIPE_FORMAT_R16G16_SINT;
1323    case SpvImageFormatRg8i:         return PIPE_FORMAT_R8G8_SINT;
1324    case SpvImageFormatR16i:         return PIPE_FORMAT_R16_SINT;
1325    case SpvImageFormatR8i:          return PIPE_FORMAT_R8_SINT;
1326    case SpvImageFormatRgba32ui:     return PIPE_FORMAT_R32G32B32A32_UINT;
1327    case SpvImageFormatRgba16ui:     return PIPE_FORMAT_R16G16B16A16_UINT;
1328    case SpvImageFormatRgba8ui:      return PIPE_FORMAT_R8G8B8A8_UINT;
1329    case SpvImageFormatR32ui:        return PIPE_FORMAT_R32_UINT;
1330    case SpvImageFormatRgb10a2ui:    return PIPE_FORMAT_R10G10B10A2_UINT;
1331    case SpvImageFormatRg32ui:       return PIPE_FORMAT_R32G32_UINT;
1332    case SpvImageFormatRg16ui:       return PIPE_FORMAT_R16G16_UINT;
1333    case SpvImageFormatRg8ui:        return PIPE_FORMAT_R8G8_UINT;
1334    case SpvImageFormatR16ui:        return PIPE_FORMAT_R16_UINT;
1335    case SpvImageFormatR8ui:         return PIPE_FORMAT_R8_UINT;
1336    case SpvImageFormatR64ui:        return PIPE_FORMAT_R64_UINT;
1337    case SpvImageFormatR64i:         return PIPE_FORMAT_R64_SINT;
1338    default:
1339       vtn_fail("Invalid image format: %s (%u)",
1340                spirv_imageformat_to_string(format), format);
1341    }
1342 }
1343 
1344 static void
1345 vtn_handle_type(struct vtn_builder *b, SpvOp opcode,
1346                 const uint32_t *w, unsigned count)
1347 {
1348    struct vtn_value *val = NULL;
1349 
1350    /* In order to properly handle forward declarations, we have to defer
1351     * allocation for pointer types.
1352     */
1353    if (opcode != SpvOpTypePointer && opcode != SpvOpTypeForwardPointer) {
1354       val = vtn_push_value(b, w[1], vtn_value_type_type);
1355       vtn_fail_if(val->type != NULL,
1356                   "Only pointers can have forward declarations");
1357       val->type = rzalloc(b, struct vtn_type);
1358       val->type->id = w[1];
1359    }
1360 
1361    switch (opcode) {
1362    case SpvOpTypeVoid:
1363       val->type->base_type = vtn_base_type_void;
1364       val->type->type = glsl_void_type();
1365       break;
1366    case SpvOpTypeBool:
1367       val->type->base_type = vtn_base_type_scalar;
1368       val->type->type = glsl_bool_type();
1369       val->type->length = 1;
1370       break;
1371    case SpvOpTypeInt: {
1372       int bit_size = w[2];
1373       const bool signedness = w[3];
1374       vtn_fail_if(bit_size != 8 && bit_size != 16 &&
1375                   bit_size != 32 && bit_size != 64,
1376                   "Invalid int bit size: %u", bit_size);
1377       val->type->base_type = vtn_base_type_scalar;
1378       val->type->type = signedness ? glsl_intN_t_type(bit_size) :
1379                                      glsl_uintN_t_type(bit_size);
1380       val->type->length = 1;
1381       break;
1382    }
1383 
1384    case SpvOpTypeFloat: {
1385       int bit_size = w[2];
1386       val->type->base_type = vtn_base_type_scalar;
1387       vtn_fail_if(bit_size != 16 && bit_size != 32 && bit_size != 64,
1388                   "Invalid float bit size: %u", bit_size);
1389       val->type->type = glsl_floatN_t_type(bit_size);
1390       val->type->length = 1;
1391       break;
1392    }
1393 
1394    case SpvOpTypeVector: {
1395       struct vtn_type *base = vtn_get_type(b, w[2]);
1396       unsigned elems = w[3];
1397 
1398       vtn_fail_if(base->base_type != vtn_base_type_scalar,
1399                   "Base type for OpTypeVector must be a scalar");
1400       vtn_fail_if((elems < 2 || elems > 4) && (elems != 8) && (elems != 16),
1401                   "Invalid component count for OpTypeVector");
1402 
1403       val->type->base_type = vtn_base_type_vector;
1404       val->type->type = glsl_vector_type(glsl_get_base_type(base->type), elems);
1405       val->type->length = elems;
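      /* Booleans are stored as 32-bit values in memory, hence the 4-byte
       * stride; all other scalars use their natural byte size.
       */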
1406       val->type->stride = glsl_type_is_boolean(val->type->type)
1407          ? 4 : glsl_get_bit_size(base->type) / 8;
1408       val->type->array_element = base;
1409       break;
1410    }
1411 
1412    case SpvOpTypeMatrix: {
1413       struct vtn_type *base = vtn_get_type(b, w[2]);
1414       unsigned columns = w[3];
1415 
1416       vtn_fail_if(base->base_type != vtn_base_type_vector,
1417                   "Base type for OpTypeMatrix must be a vector");
1418       vtn_fail_if(columns < 2 || columns > 4,
1419                   "Invalid column count for OpTypeMatrix");
1420 
1421       val->type->base_type = vtn_base_type_matrix;
1422       val->type->type = glsl_matrix_type(glsl_get_base_type(base->type),
1423                                          glsl_get_vector_elements(base->type),
1424                                          columns);
1425       vtn_fail_if(glsl_type_is_error(val->type->type),
1426                   "Unsupported base type for OpTypeMatrix");
1427       assert(!glsl_type_is_error(val->type->type));
1428       val->type->length = columns;
1429       val->type->array_element = base;
1430       val->type->row_major = false;
1431       val->type->stride = 0;
1432       break;
1433    }
1434 
1435    case SpvOpTypeRuntimeArray:
1436    case SpvOpTypeArray: {
1437       struct vtn_type *array_element = vtn_get_type(b, w[2]);
1438 
1439       if (opcode == SpvOpTypeRuntimeArray) {
1440          /* A length of 0 is used to denote unsized arrays */
1441          val->type->length = 0;
1442       } else {
1443          val->type->length = vtn_constant_uint(b, w[3]);
1444       }
1445 
1446       val->type->base_type = vtn_base_type_array;
1447       val->type->array_element = array_element;
1448 
1449       vtn_foreach_decoration(b, val, array_stride_decoration_cb, NULL);
1450       val->type->type = glsl_array_type(array_element->type, val->type->length,
1451                                         val->type->stride);
1452       break;
1453    }
1454 
1455    case SpvOpTypeStruct: {
1456       unsigned num_fields = count - 2;
1457       val->type->base_type = vtn_base_type_struct;
1458       val->type->length = num_fields;
1459       val->type->members = ralloc_array(b, struct vtn_type *, num_fields);
1460       val->type->offsets = ralloc_array(b, unsigned, num_fields);
1461       val->type->packed = false;
1462 
1463       NIR_VLA(struct glsl_struct_field, fields, count);
1464       for (unsigned i = 0; i < num_fields; i++) {
1465          val->type->members[i] = vtn_get_type(b, w[i + 2]);
1466          fields[i] = (struct glsl_struct_field) {
1467             .type = val->type->members[i]->type,
1468             .name = ralloc_asprintf(b, "field%d", i),
1469             .location = -1,
1470             .offset = -1,
1471          };
1472       }
1473 
1474       vtn_foreach_decoration(b, val, struct_packed_decoration_cb, NULL);
1475 
1476       struct member_decoration_ctx ctx = {
1477          .num_fields = num_fields,
1478          .fields = fields,
1479          .type = val->type
1480       };
1481 
1482       vtn_foreach_decoration(b, val, struct_member_decoration_cb, &ctx);
1483       vtn_foreach_decoration(b, val, struct_member_matrix_stride_cb, &ctx);
1484 
1485       vtn_foreach_decoration(b, val, struct_block_decoration_cb, NULL);
1486 
1487       const char *name = val->name;
1488 
1489       if (val->type->block || val->type->buffer_block) {
1490          /* Packing will be ignored since types coming from SPIR-V are
1491           * explicitly laid out.
1492           */
1493          val->type->type = glsl_interface_type(fields, num_fields,
1494                                                /* packing */ 0, false,
1495                                                name ? name : "block");
1496       } else {
1497          val->type->type = glsl_struct_type(fields, num_fields,
1498                                             name ? name : "struct",
1499                                             val->type->packed);
1500       }
1501       break;
1502    }
1503 
1504    case SpvOpTypeFunction: {
1505       val->type->base_type = vtn_base_type_function;
1506       val->type->type = NULL;
1507 
1508       val->type->return_type = vtn_get_type(b, w[2]);
1509 
1510       const unsigned num_params = count - 3;
1511       val->type->length = num_params;
1512       val->type->params = ralloc_array(b, struct vtn_type *, num_params);
1513       for (unsigned i = 0; i < num_params; i++) {
1514          val->type->params[i] = vtn_get_type(b, w[i + 3]);
1515       }
1516       break;
1517    }
1518 
1519    case SpvOpTypePointer:
1520    case SpvOpTypeForwardPointer: {
1521       /* We can't blindly push the value because it might be a forward
1522        * declaration.
1523        */
1524       val = vtn_untyped_value(b, w[1]);
1525 
1526       SpvStorageClass storage_class = w[2];
1527 
1528       vtn_fail_if(opcode == SpvOpTypeForwardPointer &&
1529                   b->shader->info.stage != MESA_SHADER_KERNEL &&
1530                   storage_class != SpvStorageClassPhysicalStorageBuffer,
1531                   "OpTypeForwardPointer is only allowed in Vulkan with "
1532                   "the PhysicalStorageBuffer storage class");
1533 
1534       struct vtn_type *deref_type = NULL;
1535       if (opcode == SpvOpTypePointer)
1536          deref_type = vtn_get_type(b, w[3]);
1537 
1538       if (val->value_type == vtn_value_type_invalid) {
1539          val->value_type = vtn_value_type_type;
1540          val->type = rzalloc(b, struct vtn_type);
1541          val->type->id = w[1];
1542          val->type->base_type = vtn_base_type_pointer;
1543          val->type->storage_class = storage_class;
1544 
1545          /* These can actually be stored to nir_variables and used as SSA
1546           * values so they need a real glsl_type.
1547           */
1548          enum vtn_variable_mode mode = vtn_storage_class_to_mode(
1549             b, storage_class, deref_type, NULL);
1550 
1551          /* The deref type should only matter for the UniformConstant storage
1552           * class.  In particular, it should never matter for any storage
1553           * classes that are allowed in combination with OpTypeForwardPointer.
1554           */
1555          if (storage_class != SpvStorageClassUniform &&
1556              storage_class != SpvStorageClassUniformConstant) {
1557             assert(mode == vtn_storage_class_to_mode(b, storage_class,
1558                                                      NULL, NULL));
1559          }
1560 
1561          val->type->type = nir_address_format_to_glsl_type(
1562             vtn_mode_to_address_format(b, mode));
1563       } else {
1564          vtn_fail_if(val->type->storage_class != storage_class,
1565                      "The storage classes of an OpTypePointer and any "
1566                      "OpTypeForwardPointers that provide forward "
1567                      "declarations of it must match.");
1568       }
1569 
1570       if (opcode == SpvOpTypePointer) {
1571          vtn_fail_if(val->type->deref != NULL,
1572                      "While OpTypeForwardPointer can be used to provide a "
1573                      "forward declaration of a pointer, OpTypePointer can "
1574                      "only be used once for a given id.");
1575 
1576          val->type->deref = deref_type;
1577 
1578          /* Only certain storage classes use ArrayStride.  The others (in
1579           * particular Workgroup) are expected to be laid out by the driver.
1580           */
1581          switch (storage_class) {
1582          case SpvStorageClassUniform:
1583          case SpvStorageClassPushConstant:
1584          case SpvStorageClassStorageBuffer:
1585          case SpvStorageClassPhysicalStorageBuffer:
1586             vtn_foreach_decoration(b, val, array_stride_decoration_cb, NULL);
1587             break;
1588          default:
1589             /* Nothing to do. */
1590             break;
1591          }
1592       }
1593       break;
1594    }
1595 
1596    case SpvOpTypeImage: {
1597       val->type->base_type = vtn_base_type_image;
1598 
1599       /* Images are represented in NIR as a scalar SSA value that is the
1600        * result of a deref instruction.  An OpLoad on an OpTypeImage pointer
1601        * from UniformConstant memory just takes the NIR deref from the pointer
1602        * and turns it into an SSA value.
1603        */
1604       val->type->type = nir_address_format_to_glsl_type(
1605          vtn_mode_to_address_format(b, vtn_variable_mode_function));
1606 
1607       const struct vtn_type *sampled_type = vtn_get_type(b, w[2]);
1608       if (b->shader->info.stage == MESA_SHADER_KERNEL) {
1609          vtn_fail_if(sampled_type->base_type != vtn_base_type_void,
1610                      "Sampled type of OpTypeImage must be void for kernels");
1611       } else {
1612          vtn_fail_if(sampled_type->base_type != vtn_base_type_scalar,
1613                      "Sampled type of OpTypeImage must be a scalar");
1614          if (b->options->caps.image_atomic_int64) {
1615             vtn_fail_if(glsl_get_bit_size(sampled_type->type) != 32 &&
1616                         glsl_get_bit_size(sampled_type->type) != 64,
1617                         "Sampled type of OpTypeImage must be a 32 or 64-bit "
1618                         "scalar");
1619          } else {
1620             vtn_fail_if(glsl_get_bit_size(sampled_type->type) != 32,
1621                         "Sampled type of OpTypeImage must be a 32-bit scalar");
1622          }
1623       }
1624 
1625       enum glsl_sampler_dim dim;
1626       switch ((SpvDim)w[3]) {
1627       case SpvDim1D:       dim = GLSL_SAMPLER_DIM_1D;    break;
1628       case SpvDim2D:       dim = GLSL_SAMPLER_DIM_2D;    break;
1629       case SpvDim3D:       dim = GLSL_SAMPLER_DIM_3D;    break;
1630       case SpvDimCube:     dim = GLSL_SAMPLER_DIM_CUBE;  break;
1631       case SpvDimRect:     dim = GLSL_SAMPLER_DIM_RECT;  break;
1632       case SpvDimBuffer:   dim = GLSL_SAMPLER_DIM_BUF;   break;
1633       case SpvDimSubpassData: dim = GLSL_SAMPLER_DIM_SUBPASS; break;
1634       default:
1635          vtn_fail("Invalid SPIR-V image dimensionality: %s (%u)",
1636                   spirv_dim_to_string((SpvDim)w[3]), w[3]);
1637       }
1638 
1639       /* w[4]: as per Vulkan spec "Validation Rules within a Module",
1640        *       The “Depth” operand of OpTypeImage is ignored.
1641        */
1642       bool is_array = w[5];
1643       bool multisampled = w[6];
1644       unsigned sampled = w[7];
1645       SpvImageFormat format = w[8];
1646 
1647       if (count > 9)
1648          val->type->access_qualifier = w[9];
1649       else if (b->shader->info.stage == MESA_SHADER_KERNEL)
1650          /* Per the CL C spec: If no qualifier is provided, read_only is assumed. */
1651          val->type->access_qualifier = SpvAccessQualifierReadOnly;
1652       else
1653          val->type->access_qualifier = SpvAccessQualifierReadWrite;
1654 
1655       if (multisampled) {
1656          if (dim == GLSL_SAMPLER_DIM_2D)
1657             dim = GLSL_SAMPLER_DIM_MS;
1658          else if (dim == GLSL_SAMPLER_DIM_SUBPASS)
1659             dim = GLSL_SAMPLER_DIM_SUBPASS_MS;
1660          else
1661             vtn_fail("Unsupported multisampled image type");
1662       }
1663 
1664       val->type->image_format = translate_image_format(b, format);
1665 
1666       enum glsl_base_type sampled_base_type =
1667          glsl_get_base_type(sampled_type->type);
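      /* The SPIR-V "Sampled" operand: 1 means the image is used with a
       * sampler, 2 means it is a storage image, and 0 (kernels only) means
       * the usage is known only at run time.
       */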
1668       if (sampled == 1) {
1669          val->type->glsl_image = glsl_sampler_type(dim, false, is_array,
1670                                                    sampled_base_type);
1671       } else if (sampled == 2) {
1672          val->type->glsl_image = glsl_image_type(dim, is_array,
1673                                                  sampled_base_type);
1674       } else if (b->shader->info.stage == MESA_SHADER_KERNEL) {
1675          val->type->glsl_image = glsl_image_type(dim, is_array,
1676                                                  GLSL_TYPE_VOID);
1677       } else {
1678          vtn_fail("We need to know if the image will be sampled");
1679       }
1680       break;
1681    }
1682 
1683    case SpvOpTypeSampledImage: {
1684       val->type->base_type = vtn_base_type_sampled_image;
1685       val->type->image = vtn_get_type(b, w[2]);
1686 
1687       /* Sampled images are represented in NIR as a vec2 SSA value where each
1688        * component is the result of a deref instruction.  The first component
1689        * is the image and the second is the sampler.  An OpLoad on an
1690        * OpTypeSampledImage pointer from UniformConstant memory just takes
1691        * the NIR deref from the pointer and duplicates it to both vector
1692        * components.
1693        */
1694       nir_address_format addr_format =
1695          vtn_mode_to_address_format(b, vtn_variable_mode_function);
1696       assert(nir_address_format_num_components(addr_format) == 1);
1697       unsigned bit_size = nir_address_format_bit_size(addr_format);
1698       assert(bit_size == 32 || bit_size == 64);
1699 
1700       enum glsl_base_type base_type =
1701          bit_size == 32 ? GLSL_TYPE_UINT : GLSL_TYPE_UINT64;
1702       val->type->type = glsl_vector_type(base_type, 2);
1703       break;
1704    }
1705 
1706    case SpvOpTypeSampler:
1707       val->type->base_type = vtn_base_type_sampler;
1708 
1709       /* Samplers are represented in NIR as a scalar SSA value that is the
1710        * result of a deref instruction.  An OpLoad on an OpTypeSampler pointer
1711        * from UniformConstant memory just takes the NIR deref from the pointer
1712        * and turns it into an SSA value.
1713        */
1714       val->type->type = nir_address_format_to_glsl_type(
1715          vtn_mode_to_address_format(b, vtn_variable_mode_function));
1716       break;
1717 
1718    case SpvOpTypeAccelerationStructureKHR:
1719       val->type->base_type = vtn_base_type_accel_struct;
1720       val->type->type = glsl_uint64_t_type();
1721       break;
1722 
1723    case SpvOpTypeOpaque:
1724       val->type->base_type = vtn_base_type_struct;
1725       const char *name = vtn_string_literal(b, &w[2], count - 2, NULL);
1726       val->type->type = glsl_struct_type(NULL, 0, name, false);
1727       break;
1728 
1729    case SpvOpTypeEvent:
1730       val->type->base_type = vtn_base_type_event;
1731       val->type->type = glsl_int_type();
1732       break;
1733 
1734    case SpvOpTypeDeviceEvent:
1735    case SpvOpTypeReserveId:
1736    case SpvOpTypeQueue:
1737    case SpvOpTypePipe:
1738    default:
1739       vtn_fail_with_opcode("Unhandled opcode", opcode);
1740    }
1741 
1742    vtn_foreach_decoration(b, val, type_decoration_cb, NULL);
1743 
1744    if (val->type->base_type == vtn_base_type_struct &&
1745        (val->type->block || val->type->buffer_block)) {
1746       for (unsigned i = 0; i < val->type->length; i++) {
1747          vtn_fail_if(vtn_type_contains_block(b, val->type->members[i]),
1748                      "Block and BufferBlock decorations cannot decorate a "
1749                      "structure type that is nested at any level inside "
1750                      "another structure type decorated with Block or "
1751                      "BufferBlock.");
1752       }
1753    }
1754 }
1755 
1756 static nir_constant *
1757 vtn_null_constant(struct vtn_builder *b, struct vtn_type *type)
1758 {
1759    nir_constant *c = rzalloc(b, nir_constant);
1760 
1761    switch (type->base_type) {
1762    case vtn_base_type_scalar:
1763    case vtn_base_type_vector:
1764       /* Nothing to do here.  It's already initialized to zero */
1765       break;
1766 
1767    case vtn_base_type_pointer: {
1768       enum vtn_variable_mode mode = vtn_storage_class_to_mode(
1769          b, type->storage_class, type->deref, NULL);
1770       nir_address_format addr_format = vtn_mode_to_address_format(b, mode);
1771 
1772       const nir_const_value *null_value = nir_address_format_null_value(addr_format);
1773       memcpy(c->values, null_value,
1774              sizeof(nir_const_value) * nir_address_format_num_components(addr_format));
1775       break;
1776    }
1777 
1778    case vtn_base_type_void:
1779    case vtn_base_type_image:
1780    case vtn_base_type_sampler:
1781    case vtn_base_type_sampled_image:
1782    case vtn_base_type_function:
1783    case vtn_base_type_event:
1784       /* For those we have to return something but it doesn't matter what. */
1785       break;
1786 
1787    case vtn_base_type_matrix:
1788    case vtn_base_type_array:
1789       vtn_assert(type->length > 0);
1790       c->num_elements = type->length;
1791       c->elements = ralloc_array(b, nir_constant *, c->num_elements);
1792 
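      /* Every element of a null array/matrix is the same null constant, so
       * build one element and share it across the whole array.
       */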
1793       c->elements[0] = vtn_null_constant(b, type->array_element);
1794       for (unsigned i = 1; i < c->num_elements; i++)
1795          c->elements[i] = c->elements[0];
1796       break;
1797 
1798    case vtn_base_type_struct:
1799       c->num_elements = type->length;
1800       c->elements = ralloc_array(b, nir_constant *, c->num_elements);
1801       for (unsigned i = 0; i < c->num_elements; i++)
1802          c->elements[i] = vtn_null_constant(b, type->members[i]);
1803       break;
1804 
1805    default:
1806       vtn_fail("Invalid type for null constant");
1807    }
1808 
1809    return c;
1810 }
1811 
1812 static void
1813 spec_constant_decoration_cb(struct vtn_builder *b, UNUSED struct vtn_value *val,
1814                             ASSERTED int member,
1815                             const struct vtn_decoration *dec, void *data)
1816 {
1817    vtn_assert(member == -1);
1818    if (dec->decoration != SpvDecorationSpecId)
1819       return;
1820 
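   /* If the client supplied a specialization with a matching SpecId,
    * override the default constant value with it.
    */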
1821    nir_const_value *value = data;
1822    for (unsigned i = 0; i < b->num_specializations; i++) {
1823       if (b->specializations[i].id == dec->operands[0]) {
1824          *value = b->specializations[i].value;
1825          return;
1826       }
1827    }
1828 }
1829 
1830 static void
1831 handle_workgroup_size_decoration_cb(struct vtn_builder *b,
1832                                     struct vtn_value *val,
1833                                     ASSERTED int member,
1834                                     const struct vtn_decoration *dec,
1835                                     UNUSED void *data)
1836 {
1837    vtn_assert(member == -1);
1838    if (dec->decoration != SpvDecorationBuiltIn ||
1839        dec->operands[0] != SpvBuiltInWorkgroupSize)
1840       return;
1841 
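   /* Stash the constant; the workgroup size is later taken from its
    * (possibly specialized) value once all constants have been parsed.
    */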
1842    vtn_assert(val->type->type == glsl_vector_type(GLSL_TYPE_UINT, 3));
1843    b->workgroup_size_builtin = val;
1844 }
1845 
1846 static void
1847 vtn_handle_constant(struct vtn_builder *b, SpvOp opcode,
1848                     const uint32_t *w, unsigned count)
1849 {
1850    struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_constant);
1851    val->constant = rzalloc(b, nir_constant);
1852    switch (opcode) {
1853    case SpvOpConstantTrue:
1854    case SpvOpConstantFalse:
1855    case SpvOpSpecConstantTrue:
1856    case SpvOpSpecConstantFalse: {
1857       vtn_fail_if(val->type->type != glsl_bool_type(),
1858                   "Result type of %s must be OpTypeBool",
1859                   spirv_op_to_string(opcode));
1860 
1861       bool bval = (opcode == SpvOpConstantTrue ||
1862                    opcode == SpvOpSpecConstantTrue);
1863 
1864       nir_const_value u32val = nir_const_value_for_uint(bval, 32);
1865 
1866       if (opcode == SpvOpSpecConstantTrue ||
1867           opcode == SpvOpSpecConstantFalse)
1868          vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &u32val);
1869 
1870       val->constant->values[0].b = u32val.u32 != 0;
1871       break;
1872    }
1873 
1874    case SpvOpConstant:
1875    case SpvOpSpecConstant: {
1876       vtn_fail_if(val->type->base_type != vtn_base_type_scalar,
1877                   "Result type of %s must be a scalar",
1878                   spirv_op_to_string(opcode));
1879       int bit_size = glsl_get_bit_size(val->type->type);
1880       switch (bit_size) {
1881       case 64:
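         /* Per SPIR-V, a 64-bit literal spans two consecutive words with the
          * low-order word first.
          */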
1882          val->constant->values[0].u64 = vtn_u64_literal(&w[3]);
1883          break;
1884       case 32:
1885          val->constant->values[0].u32 = w[3];
1886          break;
1887       case 16:
1888          val->constant->values[0].u16 = w[3];
1889          break;
1890       case 8:
1891          val->constant->values[0].u8 = w[3];
1892          break;
1893       default:
1894          vtn_fail("Unsupported SpvOpConstant bit size: %u", bit_size);
1895       }
1896 
1897       if (opcode == SpvOpSpecConstant)
1898          vtn_foreach_decoration(b, val, spec_constant_decoration_cb,
1899                                 &val->constant->values[0]);
1900       break;
1901    }
1902 
1903    case SpvOpSpecConstantComposite:
1904    case SpvOpConstantComposite: {
1905       unsigned elem_count = count - 3;
1906       vtn_fail_if(elem_count != val->type->length,
1907                   "%s has %u constituents, expected %u",
1908                   spirv_op_to_string(opcode), elem_count, val->type->length);
1909 
1910       nir_constant **elems = ralloc_array(b, nir_constant *, elem_count);
1911       for (unsigned i = 0; i < elem_count; i++) {
1912          struct vtn_value *val = vtn_untyped_value(b, w[i + 3]);
1913 
1914          if (val->value_type == vtn_value_type_constant) {
1915             elems[i] = val->constant;
1916          } else {
1917             vtn_fail_if(val->value_type != vtn_value_type_undef,
1918                         "only constants or undefs allowed for "
1919                         "SpvOpConstantComposite");
1920             /* to make it easier, just insert a NULL constant for now */
1921             elems[i] = vtn_null_constant(b, val->type);
1922          }
1923       }
1924 
1925       switch (val->type->base_type) {
1926       case vtn_base_type_vector: {
1927          assert(glsl_type_is_vector(val->type->type));
1928          for (unsigned i = 0; i < elem_count; i++)
1929             val->constant->values[i] = elems[i]->values[0];
1930          break;
1931       }
1932 
1933       case vtn_base_type_matrix:
1934       case vtn_base_type_struct:
1935       case vtn_base_type_array:
1936          ralloc_steal(val->constant, elems);
1937          val->constant->num_elements = elem_count;
1938          val->constant->elements = elems;
1939          break;
1940 
1941       default:
1942          vtn_fail("Result type of %s must be a composite type",
1943                   spirv_op_to_string(opcode));
1944       }
1945       break;
1946    }
1947 
1948    case SpvOpSpecConstantOp: {
1949       nir_const_value u32op = nir_const_value_for_uint(w[3], 32);
1950       vtn_foreach_decoration(b, val, spec_constant_decoration_cb, &u32op);
1951       SpvOp opcode = u32op.u32;
1952       switch (opcode) {
1953       case SpvOpVectorShuffle: {
1954          struct vtn_value *v0 = &b->values[w[4]];
1955          struct vtn_value *v1 = &b->values[w[5]];
1956 
1957          vtn_assert(v0->value_type == vtn_value_type_constant ||
1958                     v0->value_type == vtn_value_type_undef);
1959          vtn_assert(v1->value_type == vtn_value_type_constant ||
1960                     v1->value_type == vtn_value_type_undef);
1961 
1962          unsigned len0 = glsl_get_vector_elements(v0->type->type);
1963          unsigned len1 = glsl_get_vector_elements(v1->type->type);
1964 
1965          vtn_assert(len0 + len1 < 16);
1966 
1967          unsigned bit_size = glsl_get_bit_size(val->type->type);
1968          unsigned bit_size0 = glsl_get_bit_size(v0->type->type);
1969          unsigned bit_size1 = glsl_get_bit_size(v1->type->type);
1970 
1971          vtn_assert(bit_size == bit_size0 && bit_size == bit_size1);
1972          (void)bit_size0; (void)bit_size1;
1973 
1974          nir_const_value undef = { .u64 = 0xdeadbeefdeadbeef };
1975          nir_const_value combined[NIR_MAX_VEC_COMPONENTS * 2];
1976 
1977          if (v0->value_type == vtn_value_type_constant) {
1978             for (unsigned i = 0; i < len0; i++)
1979                combined[i] = v0->constant->values[i];
1980          }
1981          if (v1->value_type == vtn_value_type_constant) {
1982             for (unsigned i = 0; i < len1; i++)
1983                combined[len0 + i] = v1->constant->values[i];
1984          }
1985 
1986          for (unsigned i = 0, j = 0; i < count - 6; i++, j++) {
1987             uint32_t comp = w[i + 6];
1988             if (comp == (uint32_t)-1) {
1989                /* If component is not used, set the value to a known constant
1990                 * to detect if it is wrongly used.
1991                 */
1992                val->constant->values[j] = undef;
1993             } else {
1994                vtn_fail_if(comp >= len0 + len1,
1995                            "All Component literals must either be FFFFFFFF "
1996                            "or in [0, N - 1] (inclusive).");
1997                val->constant->values[j] = combined[comp];
1998             }
1999          }
2000          break;
2001       }
2002 
2003       case SpvOpCompositeExtract:
2004       case SpvOpCompositeInsert: {
2005          struct vtn_value *comp;
2006          unsigned deref_start;
2007          struct nir_constant **c;
2008          if (opcode == SpvOpCompositeExtract) {
2009             comp = vtn_value(b, w[4], vtn_value_type_constant);
2010             deref_start = 5;
2011             c = &comp->constant;
2012          } else {
2013             comp = vtn_value(b, w[5], vtn_value_type_constant);
2014             deref_start = 6;
2015             val->constant = nir_constant_clone(comp->constant,
2016                                                (nir_variable *)b);
2017             c = &val->constant;
2018          }
2019 
2020          int elem = -1;
2021          const struct vtn_type *type = comp->type;
2022          for (unsigned i = deref_start; i < count; i++) {
2023             vtn_fail_if(w[i] > type->length,
2024                         "%uth index of %s is %u but the type has only "
2025                         "%u elements", i - deref_start,
2026                         spirv_op_to_string(opcode), w[i], type->length);
2027 
2028             switch (type->base_type) {
2029             case vtn_base_type_vector:
2030                elem = w[i];
2031                type = type->array_element;
2032                break;
2033 
2034             case vtn_base_type_matrix:
2035             case vtn_base_type_array:
2036                c = &(*c)->elements[w[i]];
2037                type = type->array_element;
2038                break;
2039 
2040             case vtn_base_type_struct:
2041                c = &(*c)->elements[w[i]];
2042                type = type->members[w[i]];
2043                break;
2044 
2045             default:
2046                vtn_fail("%s must only index into composite types",
2047                         spirv_op_to_string(opcode));
2048             }
2049          }
2050 
2051          if (opcode == SpvOpCompositeExtract) {
2052             if (elem == -1) {
2053                val->constant = *c;
2054             } else {
2055                unsigned num_components = type->length;
2056                for (unsigned i = 0; i < num_components; i++)
2057                   val->constant->values[i] = (*c)->values[elem + i];
2058             }
2059          } else {
2060             struct vtn_value *insert =
2061                vtn_value(b, w[4], vtn_value_type_constant);
2062             vtn_assert(insert->type == type);
2063             if (elem == -1) {
2064                *c = insert->constant;
2065             } else {
2066                unsigned num_components = type->length;
2067                for (unsigned i = 0; i < num_components; i++)
2068                   (*c)->values[elem + i] = insert->constant->values[i];
2069             }
2070          }
2071          break;
2072       }
2073 
2074       default: {
2075          bool swap;
2076          nir_alu_type dst_alu_type = nir_get_nir_type_for_glsl_type(val->type->type);
2077          nir_alu_type src_alu_type = dst_alu_type;
2078          unsigned num_components = glsl_get_vector_elements(val->type->type);
2079          unsigned bit_size;
2080 
2081          vtn_assert(count <= 7);
2082 
2083          switch (opcode) {
2084          case SpvOpSConvert:
2085          case SpvOpFConvert:
2086          case SpvOpUConvert:
2087             /* We have a source in a conversion */
2088             src_alu_type =
2089                nir_get_nir_type_for_glsl_type(vtn_get_value_type(b, w[4])->type);
2090             /* We use the bitsize of the conversion source to evaluate the opcode later */
2091             bit_size = glsl_get_bit_size(vtn_get_value_type(b, w[4])->type);
2092             break;
2093          default:
2094             bit_size = glsl_get_bit_size(val->type->type);
2095          };
2096 
2097          bool exact;
2098          nir_op op = vtn_nir_alu_op_for_spirv_opcode(b, opcode, &swap, &exact,
2099                                                      nir_alu_type_get_type_size(src_alu_type),
2100                                                      nir_alu_type_get_type_size(dst_alu_type));
2101 
2102          /* No SPIR-V opcodes handled through this path should set exact.
2103           * Since it is ignored, assert on it.
2104           */
2105          assert(!exact);
2106 
2107          nir_const_value src[3][NIR_MAX_VEC_COMPONENTS];
2108 
2109          for (unsigned i = 0; i < count - 4; i++) {
2110             struct vtn_value *src_val =
2111                vtn_value(b, w[4 + i], vtn_value_type_constant);
2112 
2113             /* If this is an unsized source, pull the bit size from the
2114              * source; otherwise, we'll use the bit size from the destination.
2115              */
2116             if (!nir_alu_type_get_type_size(nir_op_infos[op].input_types[i]))
2117                bit_size = glsl_get_bit_size(src_val->type->type);
2118 
2119             unsigned src_comps = nir_op_infos[op].input_sizes[i] ?
2120                                  nir_op_infos[op].input_sizes[i] :
2121                                  num_components;
2122 
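            /* vtn_nir_alu_op_for_spirv_opcode() sets swap when the chosen NIR
             * opcode expects its first two sources in the opposite order from
             * the SPIR-V instruction.
             */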
2123             unsigned j = swap ? 1 - i : i;
2124             for (unsigned c = 0; c < src_comps; c++)
2125                src[j][c] = src_val->constant->values[c];
2126          }
2127 
2128          /* Shift opcodes in NIR always take a 32-bit shift count, so convert src[1] to 32 bits. */
2129          switch (op) {
2130          case nir_op_ishl:
2131          case nir_op_ishr:
2132          case nir_op_ushr: {
2133             if (bit_size == 32)
2134                break;
2135             for (unsigned i = 0; i < num_components; ++i) {
2136                switch (bit_size) {
2137                case 64: src[1][i].u32 = src[1][i].u64; break;
2138                case 16: src[1][i].u32 = src[1][i].u16; break;
2139                case  8: src[1][i].u32 = src[1][i].u8;  break;
2140                }
2141             }
2142             break;
2143          }
2144          default:
2145             break;
2146          }
2147 
2148          nir_const_value *srcs[3] = {
2149             src[0], src[1], src[2],
2150          };
2151          nir_eval_const_opcode(op, val->constant->values,
2152                                num_components, bit_size, srcs,
2153                                b->shader->info.float_controls_execution_mode);
2154          break;
2155       } /* default */
2156       }
2157       break;
2158    }
2159 
2160    case SpvOpConstantNull:
2161       val->constant = vtn_null_constant(b, val->type);
2162       break;
2163 
2164    default:
2165       vtn_fail_with_opcode("Unhandled opcode", opcode);
2166    }
2167 
2168    /* Now that we have the value, update the workgroup size if needed */
2169    vtn_foreach_decoration(b, val, handle_workgroup_size_decoration_cb, NULL);
2170 }
2171 
2172 static void
2173 vtn_split_barrier_semantics(struct vtn_builder *b,
2174                             SpvMemorySemanticsMask semantics,
2175                             SpvMemorySemanticsMask *before,
2176                             SpvMemorySemanticsMask *after)
2177 {
2178    /* For memory semantics embedded in operations, we split them into up to
2179     * two barriers, to be added before and after the operation.  This is less
2180     * strict than if we propagated them all the way to the final backend
2181     * stage, but it still results in correct execution.
2182     *
2183     * A further improvement would be to pipe this information (and use it!)
2184     * through to the later compiler layers, at the expense of making the
2185     * handling of barriers more complicated.
2186     */
2187 
2188    *before = SpvMemorySemanticsMaskNone;
2189    *after = SpvMemorySemanticsMaskNone;
2190 
2191    SpvMemorySemanticsMask order_semantics =
2192       semantics & (SpvMemorySemanticsAcquireMask |
2193                    SpvMemorySemanticsReleaseMask |
2194                    SpvMemorySemanticsAcquireReleaseMask |
2195                    SpvMemorySemanticsSequentiallyConsistentMask);
2196 
2197    if (util_bitcount(order_semantics) > 1) {
2198       /* Old GLSLang versions incorrectly set all the ordering bits.  This was
2199        * fixed in commit c51287d744fb6e7e9ccc09f6f8451e6c64b1dad6 of the glslang
2200        * repo and has been in GLSLang since revision "SPIRV99.1321" (Jul-2016).
2201        */
2202       vtn_warn("Multiple memory ordering semantics specified, "
2203                "assuming AcquireRelease.");
2204       order_semantics = SpvMemorySemanticsAcquireReleaseMask;
2205    }
2206 
2207    const SpvMemorySemanticsMask av_vis_semantics =
2208       semantics & (SpvMemorySemanticsMakeAvailableMask |
2209                    SpvMemorySemanticsMakeVisibleMask);
2210 
2211    const SpvMemorySemanticsMask storage_semantics =
2212       semantics & (SpvMemorySemanticsUniformMemoryMask |
2213                    SpvMemorySemanticsSubgroupMemoryMask |
2214                    SpvMemorySemanticsWorkgroupMemoryMask |
2215                    SpvMemorySemanticsCrossWorkgroupMemoryMask |
2216                    SpvMemorySemanticsAtomicCounterMemoryMask |
2217                    SpvMemorySemanticsImageMemoryMask |
2218                    SpvMemorySemanticsOutputMemoryMask);
2219 
2220    const SpvMemorySemanticsMask other_semantics =
2221       semantics & ~(order_semantics | av_vis_semantics | storage_semantics |
2222                     SpvMemorySemanticsVolatileMask);
2223 
2224    if (other_semantics)
2225       vtn_warn("Ignoring unhandled memory semantics: %u\n", other_semantics);
2226 
2227    /* SequentiallyConsistent is treated as AcquireRelease. */
2228 
2229    /* The RELEASE barrier happens BEFORE the operation, and it is usually
2230     * associated with a Store.  Write operations with matching semantics
2231     * will not be reordered after the Store.
2232     */
2233    if (order_semantics & (SpvMemorySemanticsReleaseMask |
2234                           SpvMemorySemanticsAcquireReleaseMask |
2235                           SpvMemorySemanticsSequentiallyConsistentMask)) {
2236       *before |= SpvMemorySemanticsReleaseMask | storage_semantics;
2237    }
2238 
2239    /* The ACQUIRE barrier happens AFTER the operation, and it is usually
2240     * associated with a Load.  Operations with matching semantics will not
2241     * be reordered before the Load.
2242     */
2243    if (order_semantics & (SpvMemorySemanticsAcquireMask |
2244                           SpvMemorySemanticsAcquireReleaseMask |
2245                           SpvMemorySemanticsSequentiallyConsistentMask)) {
2246       *after |= SpvMemorySemanticsAcquireMask | storage_semantics;
2247    }
2248 
2249    if (av_vis_semantics & SpvMemorySemanticsMakeVisibleMask)
2250       *before |= SpvMemorySemanticsMakeVisibleMask | storage_semantics;
2251 
2252    if (av_vis_semantics & SpvMemorySemanticsMakeAvailableMask)
2253       *after |= SpvMemorySemanticsMakeAvailableMask | storage_semantics;
2254 }
2255 
2256 static nir_memory_semantics
2257 vtn_mem_semantics_to_nir_mem_semantics(struct vtn_builder *b,
2258                                        SpvMemorySemanticsMask semantics)
2259 {
2260    nir_memory_semantics nir_semantics = 0;
2261 
2262    SpvMemorySemanticsMask order_semantics =
2263       semantics & (SpvMemorySemanticsAcquireMask |
2264                    SpvMemorySemanticsReleaseMask |
2265                    SpvMemorySemanticsAcquireReleaseMask |
2266                    SpvMemorySemanticsSequentiallyConsistentMask);
2267 
2268    if (util_bitcount(order_semantics) > 1) {
2269       /* Old GLSLang versions incorrectly set all the ordering bits.  This was
2270        * fixed in commit c51287d744fb6e7e9ccc09f6f8451e6c64b1dad6 of the glslang
2271        * repo and has been in GLSLang since revision "SPIRV99.1321" (Jul-2016).
2272        */
2273       vtn_warn("Multiple memory ordering semantics bits specified, "
2274                "assuming AcquireRelease.");
2275       order_semantics = SpvMemorySemanticsAcquireReleaseMask;
2276    }
2277 
2278    switch (order_semantics) {
2279    case 0:
2280       /* Not an ordering barrier. */
2281       break;
2282 
2283    case SpvMemorySemanticsAcquireMask:
2284       nir_semantics = NIR_MEMORY_ACQUIRE;
2285       break;
2286 
2287    case SpvMemorySemanticsReleaseMask:
2288       nir_semantics = NIR_MEMORY_RELEASE;
2289       break;
2290 
2291    case SpvMemorySemanticsSequentiallyConsistentMask:
2292       /* Fall through.  Treated as AcquireRelease in Vulkan. */
2293    case SpvMemorySemanticsAcquireReleaseMask:
2294       nir_semantics = NIR_MEMORY_ACQUIRE | NIR_MEMORY_RELEASE;
2295       break;
2296 
2297    default:
2298       unreachable("Invalid memory order semantics");
2299    }
2300 
2301    if (semantics & SpvMemorySemanticsMakeAvailableMask) {
2302       vtn_fail_if(!b->options->caps.vk_memory_model,
2303                   "To use MakeAvailable memory semantics the VulkanMemoryModel "
2304                   "capability must be declared.");
2305       nir_semantics |= NIR_MEMORY_MAKE_AVAILABLE;
2306    }
2307 
2308    if (semantics & SpvMemorySemanticsMakeVisibleMask) {
2309       vtn_fail_if(!b->options->caps.vk_memory_model,
2310                   "To use MakeVisible memory semantics the VulkanMemoryModel "
2311                   "capability must be declared.");
2312       nir_semantics |= NIR_MEMORY_MAKE_VISIBLE;
2313    }
2314 
2315    return nir_semantics;
2316 }
2317 
2318 static nir_variable_mode
2319 vtn_mem_semantics_to_nir_var_modes(struct vtn_builder *b,
2320                                    SpvMemorySemanticsMask semantics)
2321 {
2322    /* Vulkan Environment for SPIR-V says "SubgroupMemory, CrossWorkgroupMemory,
2323     * and AtomicCounterMemory are ignored".
2324     */
2325    semantics &= ~(SpvMemorySemanticsSubgroupMemoryMask |
2326                   SpvMemorySemanticsCrossWorkgroupMemoryMask |
2327                   SpvMemorySemanticsAtomicCounterMemoryMask);
2328 
2329    /* TODO: Consider adding nir_var_mem_image mode to NIR so it can be used
2330     * for SpvMemorySemanticsImageMemoryMask.
2331     */
2332 
2333    nir_variable_mode modes = 0;
2334    if (semantics & (SpvMemorySemanticsUniformMemoryMask |
2335                     SpvMemorySemanticsImageMemoryMask)) {
2336       modes |= nir_var_uniform |
2337                nir_var_mem_ubo |
2338                nir_var_mem_ssbo |
2339                nir_var_mem_global;
2340    }
2341    if (semantics & SpvMemorySemanticsWorkgroupMemoryMask)
2342       modes |= nir_var_mem_shared;
2343    if (semantics & SpvMemorySemanticsOutputMemoryMask) {
2344       modes |= nir_var_shader_out;
2345    }
2346 
2347    return modes;
2348 }
2349 
2350 static nir_scope
2351 vtn_scope_to_nir_scope(struct vtn_builder *b, SpvScope scope)
2352 {
2353    nir_scope nir_scope;
2354    switch (scope) {
2355    case SpvScopeDevice:
2356       vtn_fail_if(b->options->caps.vk_memory_model &&
2357                   !b->options->caps.vk_memory_model_device_scope,
2358                   "If the Vulkan memory model is declared and any instruction "
2359                   "uses Device scope, the VulkanMemoryModelDeviceScope "
2360                   "capability must be declared.");
2361       nir_scope = NIR_SCOPE_DEVICE;
2362       break;
2363 
2364    case SpvScopeQueueFamily:
2365       vtn_fail_if(!b->options->caps.vk_memory_model,
2366                   "To use Queue Family scope, the VulkanMemoryModel capability "
2367                   "must be declared.");
2368       nir_scope = NIR_SCOPE_QUEUE_FAMILY;
2369       break;
2370 
2371    case SpvScopeWorkgroup:
2372       nir_scope = NIR_SCOPE_WORKGROUP;
2373       break;
2374 
2375    case SpvScopeSubgroup:
2376       nir_scope = NIR_SCOPE_SUBGROUP;
2377       break;
2378 
2379    case SpvScopeInvocation:
2380       nir_scope = NIR_SCOPE_INVOCATION;
2381       break;
2382 
2383    case SpvScopeShaderCallKHR:
2384       nir_scope = NIR_SCOPE_SHADER_CALL;
2385       break;
2386 
2387    default:
2388       vtn_fail("Invalid memory scope");
2389    }
2390 
2391    return nir_scope;
2392 }
2393 
2394 static void
2395 vtn_emit_scoped_control_barrier(struct vtn_builder *b, SpvScope exec_scope,
2396                                 SpvScope mem_scope,
2397                                 SpvMemorySemanticsMask semantics)
2398 {
2399    nir_memory_semantics nir_semantics =
2400       vtn_mem_semantics_to_nir_mem_semantics(b, semantics);
2401    nir_variable_mode modes = vtn_mem_semantics_to_nir_var_modes(b, semantics);
2402    nir_scope nir_exec_scope = vtn_scope_to_nir_scope(b, exec_scope);
2403 
2404    /* Memory semantics is optional for OpControlBarrier. */
2405    nir_scope nir_mem_scope;
2406    if (nir_semantics == 0 || modes == 0)
2407       nir_mem_scope = NIR_SCOPE_NONE;
2408    else
2409       nir_mem_scope = vtn_scope_to_nir_scope(b, mem_scope);
2410 
2411    nir_scoped_barrier(&b->nb, nir_exec_scope, nir_mem_scope, nir_semantics, modes);
2412 }
2413 
2414 static void
2415 vtn_emit_scoped_memory_barrier(struct vtn_builder *b, SpvScope scope,
2416                                SpvMemorySemanticsMask semantics)
2417 {
2418    nir_variable_mode modes = vtn_mem_semantics_to_nir_var_modes(b, semantics);
2419    nir_memory_semantics nir_semantics =
2420       vtn_mem_semantics_to_nir_mem_semantics(b, semantics);
2421 
2422    /* No barrier to add. */
2423    if (nir_semantics == 0 || modes == 0)
2424       return;
2425 
2426    nir_scope nir_mem_scope = vtn_scope_to_nir_scope(b, scope);
2427    nir_scoped_barrier(&b->nb, NIR_SCOPE_NONE, nir_mem_scope, nir_semantics, modes);
2428 }
2429 
2430 struct vtn_ssa_value *
2431 vtn_create_ssa_value(struct vtn_builder *b, const struct glsl_type *type)
2432 {
2433    /* Always use bare types for SSA values for a couple of reasons:
2434     *
2435     *  1. Code which emits deref chains should never listen to the explicit
2436     *     layout information on the SSA value if any exists.  If we've
2437     *     accidentally been relying on this, we want to find those bugs.
2438     *
2439     *  2. We want to be able to quickly check that an SSA value being assigned
2440     *     to a SPIR-V value has the right type.  Using bare types everywhere
2441     *     ensures that we can pointer-compare.
2442     */
2443    struct vtn_ssa_value *val = rzalloc(b, struct vtn_ssa_value);
2444    val->type = glsl_get_bare_type(type);
2445 
2446 
2447    if (!glsl_type_is_vector_or_scalar(type)) {
2448       unsigned elems = glsl_get_length(val->type);
2449       val->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
2450       if (glsl_type_is_array_or_matrix(type)) {
2451          const struct glsl_type *elem_type = glsl_get_array_element(type);
2452          for (unsigned i = 0; i < elems; i++)
2453             val->elems[i] = vtn_create_ssa_value(b, elem_type);
2454       } else {
2455          vtn_assert(glsl_type_is_struct_or_ifc(type));
2456          for (unsigned i = 0; i < elems; i++) {
2457             const struct glsl_type *elem_type = glsl_get_struct_field(type, i);
2458             val->elems[i] = vtn_create_ssa_value(b, elem_type);
2459          }
2460       }
2461    }
2462 
2463    return val;
2464 }
2465 
2466 static nir_tex_src
2467 vtn_tex_src(struct vtn_builder *b, unsigned index, nir_tex_src_type type)
2468 {
2469    nir_tex_src src;
2470    src.src = nir_src_for_ssa(vtn_get_nir_ssa(b, index));
2471    src.src_type = type;
2472    return src;
2473 }
2474 
2475 static uint32_t
2476 image_operand_arg(struct vtn_builder *b, const uint32_t *w, uint32_t count,
2477                   uint32_t mask_idx, SpvImageOperandsMask op)
2478 {
2479    static const SpvImageOperandsMask ops_with_arg =
2480       SpvImageOperandsBiasMask |
2481       SpvImageOperandsLodMask |
2482       SpvImageOperandsGradMask |
2483       SpvImageOperandsConstOffsetMask |
2484       SpvImageOperandsOffsetMask |
2485       SpvImageOperandsConstOffsetsMask |
2486       SpvImageOperandsSampleMask |
2487       SpvImageOperandsMinLodMask |
2488       SpvImageOperandsMakeTexelAvailableMask |
2489       SpvImageOperandsMakeTexelVisibleMask;
2490 
2491    assert(util_bitcount(op) == 1);
2492    assert(w[mask_idx] & op);
2493    assert(op & ops_with_arg);
2494 
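   /* Count the argument words consumed by lower-numbered operands, then add
    * one to step past the mask word itself.  For example, with Bias and
    * ConstOffset both set and op == ConstOffset, the ConstOffset argument is
    * the second word after the mask.
    */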
2495    uint32_t idx = util_bitcount(w[mask_idx] & (op - 1) & ops_with_arg) + 1;
2496 
2497    /* Adjust indices for operands with two arguments. */
2498    static const SpvImageOperandsMask ops_with_two_args =
2499       SpvImageOperandsGradMask;
2500    idx += util_bitcount(w[mask_idx] & (op - 1) & ops_with_two_args);
2501 
2502    idx += mask_idx;
2503 
2504    vtn_fail_if(idx + (op & ops_with_two_args ? 1 : 0) >= count,
2505                "Image op claims to have %s but does not enough "
2506                "following operands", spirv_imageoperands_to_string(op));
2507 
2508    return idx;
2509 }
2510 
2511 static void
2512 non_uniform_decoration_cb(struct vtn_builder *b,
2513                           struct vtn_value *val, int member,
2514                           const struct vtn_decoration *dec, void *void_ctx)
2515 {
2516    enum gl_access_qualifier *access = void_ctx;
2517    switch (dec->decoration) {
2518    case SpvDecorationNonUniformEXT:
2519       *access |= ACCESS_NON_UNIFORM;
2520       break;
2521 
2522    default:
2523       break;
2524    }
2525 }
2526 
2527 static void
2528 vtn_handle_texture(struct vtn_builder *b, SpvOp opcode,
2529                    const uint32_t *w, unsigned count)
2530 {
2531    struct vtn_type *ret_type = vtn_get_type(b, w[1]);
2532 
2533    if (opcode == SpvOpSampledImage) {
2534       struct vtn_sampled_image si = {
2535          .image = vtn_get_image(b, w[3], NULL),
2536          .sampler = vtn_get_sampler(b, w[4]),
2537       };
2538 
2539       enum gl_access_qualifier access = 0;
2540       vtn_foreach_decoration(b, vtn_untyped_value(b, w[3]),
2541                              non_uniform_decoration_cb, &access);
2542       vtn_foreach_decoration(b, vtn_untyped_value(b, w[4]),
2543                              non_uniform_decoration_cb, &access);
2544 
2545       vtn_push_sampled_image(b, w[2], si, access & ACCESS_NON_UNIFORM);
2546       return;
2547    } else if (opcode == SpvOpImage) {
2548       struct vtn_sampled_image si = vtn_get_sampled_image(b, w[3]);
2549 
2550       enum gl_access_qualifier access = 0;
2551       vtn_foreach_decoration(b, vtn_untyped_value(b, w[3]),
2552                              non_uniform_decoration_cb, &access);
2553 
2554       vtn_push_image(b, w[2], si.image, access & ACCESS_NON_UNIFORM);
2555       return;
2556    }
2557 
2558    nir_deref_instr *image = NULL, *sampler = NULL;
2559    struct vtn_value *sampled_val = vtn_untyped_value(b, w[3]);
2560    if (sampled_val->type->base_type == vtn_base_type_sampled_image) {
2561       struct vtn_sampled_image si = vtn_get_sampled_image(b, w[3]);
2562       image = si.image;
2563       sampler = si.sampler;
2564    } else {
2565       image = vtn_get_image(b, w[3], NULL);
2566    }
2567 
2568    const enum glsl_sampler_dim sampler_dim = glsl_get_sampler_dim(image->type);
2569    const bool is_array = glsl_sampler_type_is_array(image->type);
2570    nir_alu_type dest_type = nir_type_invalid;
2571 
2572    /* Figure out the base texture operation */
2573    nir_texop texop;
2574    switch (opcode) {
2575    case SpvOpImageSampleImplicitLod:
2576    case SpvOpImageSampleDrefImplicitLod:
2577    case SpvOpImageSampleProjImplicitLod:
2578    case SpvOpImageSampleProjDrefImplicitLod:
2579       texop = nir_texop_tex;
2580       break;
2581 
2582    case SpvOpImageSampleExplicitLod:
2583    case SpvOpImageSampleDrefExplicitLod:
2584    case SpvOpImageSampleProjExplicitLod:
2585    case SpvOpImageSampleProjDrefExplicitLod:
2586       texop = nir_texop_txl;
2587       break;
2588 
2589    case SpvOpImageFetch:
2590       if (sampler_dim == GLSL_SAMPLER_DIM_MS) {
2591          texop = nir_texop_txf_ms;
2592       } else {
2593          texop = nir_texop_txf;
2594       }
2595       break;
2596 
2597    case SpvOpImageGather:
2598    case SpvOpImageDrefGather:
2599       texop = nir_texop_tg4;
2600       break;
2601 
2602    case SpvOpImageQuerySizeLod:
2603    case SpvOpImageQuerySize:
2604       texop = nir_texop_txs;
2605       dest_type = nir_type_int;
2606       break;
2607 
2608    case SpvOpImageQueryLod:
2609       texop = nir_texop_lod;
2610       dest_type = nir_type_float;
2611       break;
2612 
2613    case SpvOpImageQueryLevels:
2614       texop = nir_texop_query_levels;
2615       dest_type = nir_type_int;
2616       break;
2617 
2618    case SpvOpImageQuerySamples:
2619       texop = nir_texop_texture_samples;
2620       dest_type = nir_type_int;
2621       break;
2622 
2623    case SpvOpFragmentFetchAMD:
2624       texop = nir_texop_fragment_fetch;
2625       break;
2626 
2627    case SpvOpFragmentMaskFetchAMD:
2628       texop = nir_texop_fragment_mask_fetch;
2629       dest_type = nir_type_uint;
2630       break;
2631 
2632    default:
2633       vtn_fail_with_opcode("Unhandled opcode", opcode);
2634    }
2635 
2636    nir_tex_src srcs[10]; /* 10 should be enough */
2637    nir_tex_src *p = srcs;
2638 
2639    p->src = nir_src_for_ssa(&image->dest.ssa);
2640    p->src_type = nir_tex_src_texture_deref;
2641    p++;
2642 
2643    switch (texop) {
2644    case nir_texop_tex:
2645    case nir_texop_txb:
2646    case nir_texop_txl:
2647    case nir_texop_txd:
2648    case nir_texop_tg4:
2649    case nir_texop_lod:
2650       vtn_fail_if(sampler == NULL,
2651                   "%s requires an image of type OpTypeSampledImage",
2652                   spirv_op_to_string(opcode));
2653       p->src = nir_src_for_ssa(&sampler->dest.ssa);
2654       p->src_type = nir_tex_src_sampler_deref;
2655       p++;
2656       break;
2657    case nir_texop_txf:
2658    case nir_texop_txf_ms:
2659    case nir_texop_txs:
2660    case nir_texop_query_levels:
2661    case nir_texop_texture_samples:
2662    case nir_texop_samples_identical:
2663    case nir_texop_fragment_fetch:
2664    case nir_texop_fragment_mask_fetch:
2665       /* These don't take a sampler. */
2666       break;
2667    case nir_texop_txf_ms_fb:
2668       vtn_fail("unexpected nir_texop_txf_ms_fb");
2669       break;
2670    case nir_texop_txf_ms_mcs:
2671       vtn_fail("unexpected nir_texop_txf_ms_mcs");
2672    case nir_texop_tex_prefetch:
2673       vtn_fail("unexpected nir_texop_tex_prefetch");
2674    }
2675 
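   /* Words 1-3 are the result type, result id, and image/sampled image; the
    * remaining operands start at w[4].
    */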
2676    unsigned idx = 4;
2677 
2678    struct nir_ssa_def *coord;
2679    unsigned coord_components;
2680    switch (opcode) {
2681    case SpvOpImageSampleImplicitLod:
2682    case SpvOpImageSampleExplicitLod:
2683    case SpvOpImageSampleDrefImplicitLod:
2684    case SpvOpImageSampleDrefExplicitLod:
2685    case SpvOpImageSampleProjImplicitLod:
2686    case SpvOpImageSampleProjExplicitLod:
2687    case SpvOpImageSampleProjDrefImplicitLod:
2688    case SpvOpImageSampleProjDrefExplicitLod:
2689    case SpvOpImageFetch:
2690    case SpvOpImageGather:
2691    case SpvOpImageDrefGather:
2692    case SpvOpImageQueryLod:
2693    case SpvOpFragmentFetchAMD:
2694    case SpvOpFragmentMaskFetchAMD: {
2695       /* All these types have the coordinate as their first real argument */
2696       coord_components = glsl_get_sampler_dim_coordinate_components(sampler_dim);
2697 
2698       if (is_array && texop != nir_texop_lod)
2699          coord_components++;
2700 
2701       struct vtn_ssa_value *coord_val = vtn_ssa_value(b, w[idx++]);
2702       coord = coord_val->def;
2703       p->src = nir_src_for_ssa(nir_channels(&b->nb, coord,
2704                                             (1 << coord_components) - 1));
2705 
2706       /* OpenCL allows integer sampling coordinates */
2707       if (glsl_type_is_integer(coord_val->type) &&
2708           opcode == SpvOpImageSampleExplicitLod) {
2709          vtn_fail_if(b->shader->info.stage != MESA_SHADER_KERNEL,
2710                      "Unless the Kernel capability is being used, the coordinate parameter "
2711                      "OpImageSampleExplicitLod must be floating point.");
2712 
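         /* Convert the integer coordinate to a float pointing at the texel
          * center.
          */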
2713          p->src = nir_src_for_ssa(
2714             nir_fadd(&b->nb, nir_i2f32(&b->nb, p->src.ssa),
2715                              nir_imm_float(&b->nb, 0.5)));
2716       }
2717 
2718       p->src_type = nir_tex_src_coord;
2719       p++;
2720       break;
2721    }
2722 
2723    default:
2724       coord = NULL;
2725       coord_components = 0;
2726       break;
2727    }
2728 
2729    switch (opcode) {
2730    case SpvOpImageSampleProjImplicitLod:
2731    case SpvOpImageSampleProjExplicitLod:
2732    case SpvOpImageSampleProjDrefImplicitLod:
2733    case SpvOpImageSampleProjDrefExplicitLod:
2734       /* These have the projector as the last coordinate component */
2735       p->src = nir_src_for_ssa(nir_channel(&b->nb, coord, coord_components));
2736       p->src_type = nir_tex_src_projector;
2737       p++;
2738       break;
2739 
2740    default:
2741       break;
2742    }
2743 
2744    bool is_shadow = false;
2745    unsigned gather_component = 0;
2746    switch (opcode) {
2747    case SpvOpImageSampleDrefImplicitLod:
2748    case SpvOpImageSampleDrefExplicitLod:
2749    case SpvOpImageSampleProjDrefImplicitLod:
2750    case SpvOpImageSampleProjDrefExplicitLod:
2751    case SpvOpImageDrefGather:
2752       /* These all have an explicit depth value as their next source */
2753       is_shadow = true;
2754       (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_comparator);
2755       break;
2756 
2757    case SpvOpImageGather:
2758       /* This has a component as its next source */
2759       gather_component = vtn_constant_uint(b, w[idx++]);
2760       break;
2761 
2762    default:
2763       break;
2764    }
2765 
2766    /* For OpImageQuerySizeLod, we always have an LOD */
2767    if (opcode == SpvOpImageQuerySizeLod)
2768       (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_lod);
2769 
2770    /* For OpFragmentFetchAMD, we always have a multisample index */
2771    if (opcode == SpvOpFragmentFetchAMD)
2772       (*p++) = vtn_tex_src(b, w[idx++], nir_tex_src_ms_index);
2773 
2774    /* Now we need to handle some number of optional arguments */
2775    struct vtn_value *gather_offsets = NULL;
2776    if (idx < count) {
2777       uint32_t operands = w[idx];
2778 
2779       if (operands & SpvImageOperandsBiasMask) {
2780          vtn_assert(texop == nir_texop_tex ||
2781                     texop == nir_texop_tg4);
2782          if (texop == nir_texop_tex)
2783             texop = nir_texop_txb;
2784          uint32_t arg = image_operand_arg(b, w, count, idx,
2785                                           SpvImageOperandsBiasMask);
2786          (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_bias);
2787       }
2788 
2789       if (operands & SpvImageOperandsLodMask) {
2790          vtn_assert(texop == nir_texop_txl || texop == nir_texop_txf ||
2791                     texop == nir_texop_txs || texop == nir_texop_tg4);
2792          uint32_t arg = image_operand_arg(b, w, count, idx,
2793                                           SpvImageOperandsLodMask);
2794          (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_lod);
2795       }
2796 
2797       if (operands & SpvImageOperandsGradMask) {
2798          vtn_assert(texop == nir_texop_txl);
2799          texop = nir_texop_txd;
2800          uint32_t arg = image_operand_arg(b, w, count, idx,
2801                                           SpvImageOperandsGradMask);
2802          (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_ddx);
2803          (*p++) = vtn_tex_src(b, w[arg + 1], nir_tex_src_ddy);
2804       }
2805 
2806       vtn_fail_if(util_bitcount(operands & (SpvImageOperandsConstOffsetsMask |
2807                                             SpvImageOperandsOffsetMask |
2808                                             SpvImageOperandsConstOffsetMask)) > 1,
2809                   "At most one of the ConstOffset, Offset, and ConstOffsets "
2810                   "image operands can be used on a given instruction.");
2811 
2812       if (operands & SpvImageOperandsOffsetMask) {
2813          uint32_t arg = image_operand_arg(b, w, count, idx,
2814                                           SpvImageOperandsOffsetMask);
2815          (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_offset);
2816       }
2817 
2818       if (operands & SpvImageOperandsConstOffsetMask) {
2819          uint32_t arg = image_operand_arg(b, w, count, idx,
2820                                           SpvImageOperandsConstOffsetMask);
2821          (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_offset);
2822       }
2823 
2824       if (operands & SpvImageOperandsConstOffsetsMask) {
2825          vtn_assert(texop == nir_texop_tg4);
2826          uint32_t arg = image_operand_arg(b, w, count, idx,
2827                                           SpvImageOperandsConstOffsetsMask);
2828          gather_offsets = vtn_value(b, w[arg], vtn_value_type_constant);
2829       }
2830 
2831       if (operands & SpvImageOperandsSampleMask) {
2832          vtn_assert(texop == nir_texop_txf_ms);
2833          uint32_t arg = image_operand_arg(b, w, count, idx,
2834                                           SpvImageOperandsSampleMask);
2835          texop = nir_texop_txf_ms;
2836          (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_ms_index);
2837       }
2838 
2839       if (operands & SpvImageOperandsMinLodMask) {
2840          vtn_assert(texop == nir_texop_tex ||
2841                     texop == nir_texop_txb ||
2842                     texop == nir_texop_txd);
2843          uint32_t arg = image_operand_arg(b, w, count, idx,
2844                                           SpvImageOperandsMinLodMask);
2845          (*p++) = vtn_tex_src(b, w[arg], nir_tex_src_min_lod);
2846       }
2847    }
2848 
2849    nir_tex_instr *instr = nir_tex_instr_create(b->shader, p - srcs);
2850    instr->op = texop;
2851 
2852    memcpy(instr->src, srcs, instr->num_srcs * sizeof(*instr->src));
2853 
2854    instr->coord_components = coord_components;
2855    instr->sampler_dim = sampler_dim;
2856    instr->is_array = is_array;
2857    instr->is_shadow = is_shadow;
2858    instr->is_new_style_shadow =
2859       is_shadow && glsl_get_components(ret_type->type) == 1;
2860    instr->component = gather_component;
2861 
2862    /* The Vulkan spec says:
2863     *
2864     *    "If an instruction loads from or stores to a resource (including
2865     *    atomics and image instructions) and the resource descriptor being
2866     *    accessed is not dynamically uniform, then the operand corresponding
2867     *    to that resource (e.g. the pointer or sampled image operand) must be
2868     *    decorated with NonUniform."
2869     *
2870     * It's very careful to specify that the exact operand must be decorated
2871     * NonUniform.  The SPIR-V parser is not expected to chase through long
2872     * chains to find the NonUniform decoration.  It's either right there or we
2873     * can assume it doesn't exist.
2874     */
2875    enum gl_access_qualifier access = 0;
2876    vtn_foreach_decoration(b, sampled_val, non_uniform_decoration_cb, &access);
2877 
2878    if (sampled_val->propagated_non_uniform)
2879       access |= ACCESS_NON_UNIFORM;
2880 
2881    if (image && (access & ACCESS_NON_UNIFORM))
2882       instr->texture_non_uniform = true;
2883 
2884    if (sampler && (access & ACCESS_NON_UNIFORM))
2885       instr->sampler_non_uniform = true;
2886 
2887    /* for non-query ops, get dest_type from SPIR-V return type */
2888    if (dest_type == nir_type_invalid) {
2889       /* The return type should match the image type, unless the image type is
2890        * VOID (CL image), in which case the return type dictates the result type.
2891        */
2892       enum glsl_base_type sampler_base =
2893          glsl_get_sampler_result_type(image->type);
2894       enum glsl_base_type ret_base = glsl_get_base_type(ret_type->type);
2895       vtn_fail_if(sampler_base != ret_base && sampler_base != GLSL_TYPE_VOID,
2896                   "SPIR-V return type mismatches image type. This is only valid "
2897                   "for untyped images (OpenCL).");
2898       switch (ret_base) {
2899       case GLSL_TYPE_FLOAT:   dest_type = nir_type_float;   break;
2900       case GLSL_TYPE_INT:     dest_type = nir_type_int;     break;
2901       case GLSL_TYPE_UINT:    dest_type = nir_type_uint;    break;
2902       case GLSL_TYPE_BOOL:    dest_type = nir_type_bool;    break;
2903       default:
2904          vtn_fail("Invalid base type for sampler result");
2905       }
2906    }
2907 
2908    instr->dest_type = dest_type;
2909 
2910    nir_ssa_dest_init(&instr->instr, &instr->dest,
2911                      nir_tex_instr_dest_size(instr), 32, NULL);
2912 
2913    vtn_assert(glsl_get_vector_elements(ret_type->type) ==
2914               nir_tex_instr_dest_size(instr));
2915 
2916    if (gather_offsets) {
2917       vtn_fail_if(gather_offsets->type->base_type != vtn_base_type_array ||
2918                   gather_offsets->type->length != 4,
2919                   "ConstOffsets must be an array of size four of vectors "
2920                   "of two integer components");
2921 
2922       struct vtn_type *vec_type = gather_offsets->type->array_element;
2923       vtn_fail_if(vec_type->base_type != vtn_base_type_vector ||
2924                   vec_type->length != 2 ||
2925                   !glsl_type_is_integer(vec_type->type),
2926                   "ConstOffsets must be an array of size four of vectors "
2927                   "of two integer components");
2928 
2929       unsigned bit_size = glsl_get_bit_size(vec_type->type);
2930       for (uint32_t i = 0; i < 4; i++) {
2931          const nir_const_value *cvec =
2932             gather_offsets->constant->elements[i]->values;
2933          for (uint32_t j = 0; j < 2; j++) {
2934             switch (bit_size) {
2935             case 8:  instr->tg4_offsets[i][j] = cvec[j].i8;    break;
2936             case 16: instr->tg4_offsets[i][j] = cvec[j].i16;   break;
2937             case 32: instr->tg4_offsets[i][j] = cvec[j].i32;   break;
2938             case 64: instr->tg4_offsets[i][j] = cvec[j].i64;   break;
2939             default:
2940                vtn_fail("Unsupported bit size: %u", bit_size);
2941             }
2942          }
2943       }
2944    }
2945 
2946    nir_builder_instr_insert(&b->nb, &instr->instr);
2947 
2948    vtn_push_nir_ssa(b, w[2], &instr->dest.ssa);
2949 }
2950 
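     /* Fill the NIR source array shared by all value-carrying SPIR-V atomics.
      * IIncrement/IDecrement become an immediate +1/-1 and ISub becomes the
      * negated operand, which is why the opcode-mapping tables later in this
      * file send all four to the NIR atomic_add intrinsics.
      */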
2951 static void
2952 fill_common_atomic_sources(struct vtn_builder *b, SpvOp opcode,
2953                            const uint32_t *w, nir_src *src)
2954 {
2955    const struct glsl_type *type = vtn_get_type(b, w[1])->type;
2956    unsigned bit_size = glsl_get_bit_size(type);
2957 
2958    switch (opcode) {
2959    case SpvOpAtomicIIncrement:
2960       src[0] = nir_src_for_ssa(nir_imm_intN_t(&b->nb, 1, bit_size));
2961       break;
2962 
2963    case SpvOpAtomicIDecrement:
2964       src[0] = nir_src_for_ssa(nir_imm_intN_t(&b->nb, -1, bit_size));
2965       break;
2966 
2967    case SpvOpAtomicISub:
2968       src[0] =
2969          nir_src_for_ssa(nir_ineg(&b->nb, vtn_get_nir_ssa(b, w[6])));
2970       break;
2971 
2972    case SpvOpAtomicCompareExchange:
2973    case SpvOpAtomicCompareExchangeWeak:
2974       src[0] = nir_src_for_ssa(vtn_get_nir_ssa(b, w[8]));
2975       src[1] = nir_src_for_ssa(vtn_get_nir_ssa(b, w[7]));
2976       break;
2977 
2978    case SpvOpAtomicExchange:
2979    case SpvOpAtomicIAdd:
2980    case SpvOpAtomicSMin:
2981    case SpvOpAtomicUMin:
2982    case SpvOpAtomicSMax:
2983    case SpvOpAtomicUMax:
2984    case SpvOpAtomicAnd:
2985    case SpvOpAtomicOr:
2986    case SpvOpAtomicXor:
2987    case SpvOpAtomicFAddEXT:
2988       src[0] = nir_src_for_ssa(vtn_get_nir_ssa(b, w[6]));
2989       break;
2990 
2991    default:
2992       vtn_fail_with_opcode("Invalid SPIR-V atomic", opcode);
2993    }
2994 }
2995 
2996 static nir_ssa_def *
2997 get_image_coord(struct vtn_builder *b, uint32_t value)
2998 {
2999    nir_ssa_def *coord = vtn_get_nir_ssa(b, value);
3000 
3001    /* The image_load_store intrinsics assume a 4-dim coordinate */
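        /* e.g. a two-component coordinate (x, y) is swizzled to (x, y, y, y);
         * the replicated trailing components are ignored for lower-dimensional
         * images.
         */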
3002    unsigned swizzle[4];
3003    for (unsigned i = 0; i < 4; i++)
3004       swizzle[i] = MIN2(i, coord->num_components - 1);
3005 
3006    return nir_swizzle(&b->nb, coord, swizzle, 4);
3007 }
3008 
3009 static nir_ssa_def *
3010 expand_to_vec4(nir_builder *b, nir_ssa_def *value)
3011 {
3012    if (value->num_components == 4)
3013       return value;
3014 
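        /* Pad out to four components by repeating component 0,
         * e.g. (x, y) -> (x, y, x, x).
         */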
3015    unsigned swiz[4];
3016    for (unsigned i = 0; i < 4; i++)
3017       swiz[i] = i < value->num_components ? i : 0;
3018    return nir_swizzle(b, value, swiz, 4);
3019 }
3020 
3021 static void
3022 vtn_handle_image(struct vtn_builder *b, SpvOp opcode,
3023                  const uint32_t *w, unsigned count)
3024 {
3025    /* Just get this one out of the way */
3026    if (opcode == SpvOpImageTexelPointer) {
3027       struct vtn_value *val =
3028          vtn_push_value(b, w[2], vtn_value_type_image_pointer);
3029       val->image = ralloc(b, struct vtn_image_pointer);
3030 
3031       val->image->image = vtn_nir_deref(b, w[3]);
3032       val->image->coord = get_image_coord(b, w[4]);
3033       val->image->sample = vtn_get_nir_ssa(b, w[5]);
3034       val->image->lod = nir_imm_int(&b->nb, 0);
3035       return;
3036    }
3037 
3038    struct vtn_image_pointer image;
3039    SpvScope scope = SpvScopeInvocation;
3040    SpvMemorySemanticsMask semantics = 0;
3041 
3042    enum gl_access_qualifier access = 0;
3043 
3044    struct vtn_value *res_val;
3045    switch (opcode) {
3046    case SpvOpAtomicExchange:
3047    case SpvOpAtomicCompareExchange:
3048    case SpvOpAtomicCompareExchangeWeak:
3049    case SpvOpAtomicIIncrement:
3050    case SpvOpAtomicIDecrement:
3051    case SpvOpAtomicIAdd:
3052    case SpvOpAtomicISub:
3053    case SpvOpAtomicLoad:
3054    case SpvOpAtomicSMin:
3055    case SpvOpAtomicUMin:
3056    case SpvOpAtomicSMax:
3057    case SpvOpAtomicUMax:
3058    case SpvOpAtomicAnd:
3059    case SpvOpAtomicOr:
3060    case SpvOpAtomicXor:
3061    case SpvOpAtomicFAddEXT:
3062       res_val = vtn_value(b, w[3], vtn_value_type_image_pointer);
3063       image = *res_val->image;
3064       scope = vtn_constant_uint(b, w[4]);
3065       semantics = vtn_constant_uint(b, w[5]);
3066       access |= ACCESS_COHERENT;
3067       break;
3068 
3069    case SpvOpAtomicStore:
3070       res_val = vtn_value(b, w[1], vtn_value_type_image_pointer);
3071       image = *res_val->image;
3072       scope = vtn_constant_uint(b, w[2]);
3073       semantics = vtn_constant_uint(b, w[3]);
3074       access |= ACCESS_COHERENT;
3075       break;
3076 
3077    case SpvOpImageQuerySizeLod:
3078       res_val = vtn_untyped_value(b, w[3]);
3079       image.image = vtn_get_image(b, w[3], &access);
3080       image.coord = NULL;
3081       image.sample = NULL;
3082       image.lod = vtn_ssa_value(b, w[4])->def;
3083       break;
3084 
3085    case SpvOpImageQuerySize:
3086       res_val = vtn_untyped_value(b, w[3]);
3087       image.image = vtn_get_image(b, w[3], &access);
3088       image.coord = NULL;
3089       image.sample = NULL;
3090       image.lod = NULL;
3091       break;
3092 
3093    case SpvOpImageQueryFormat:
3094    case SpvOpImageQueryOrder:
3095       res_val = vtn_untyped_value(b, w[3]);
3096       image.image = vtn_get_image(b, w[3], &access);
3097       image.coord = NULL;
3098       image.sample = NULL;
3099       image.lod = NULL;
3100       break;
3101 
3102    case SpvOpImageRead: {
3103       res_val = vtn_untyped_value(b, w[3]);
3104       image.image = vtn_get_image(b, w[3], &access);
3105       image.coord = get_image_coord(b, w[4]);
3106 
3107       const SpvImageOperandsMask operands =
3108          count > 5 ? w[5] : SpvImageOperandsMaskNone;
3109 
3110       if (operands & SpvImageOperandsSampleMask) {
3111          uint32_t arg = image_operand_arg(b, w, count, 5,
3112                                           SpvImageOperandsSampleMask);
3113          image.sample = vtn_get_nir_ssa(b, w[arg]);
3114       } else {
3115          image.sample = nir_ssa_undef(&b->nb, 1, 32);
3116       }
3117 
3118       if (operands & SpvImageOperandsMakeTexelVisibleMask) {
3119          vtn_fail_if((operands & SpvImageOperandsNonPrivateTexelMask) == 0,
3120                      "MakeTexelVisible requires NonPrivateTexel to also be set.");
3121          uint32_t arg = image_operand_arg(b, w, count, 5,
3122                                           SpvImageOperandsMakeTexelVisibleMask);
3123          semantics = SpvMemorySemanticsMakeVisibleMask;
3124          scope = vtn_constant_uint(b, w[arg]);
3125       }
3126 
3127       if (operands & SpvImageOperandsLodMask) {
3128          uint32_t arg = image_operand_arg(b, w, count, 5,
3129                                           SpvImageOperandsLodMask);
3130          image.lod = vtn_get_nir_ssa(b, w[arg]);
3131       } else {
3132          image.lod = nir_imm_int(&b->nb, 0);
3133       }
3134 
3135       if (operands & SpvImageOperandsVolatileTexelMask)
3136          access |= ACCESS_VOLATILE;
3137 
3138       break;
3139    }
3140 
3141    case SpvOpImageWrite: {
3142       res_val = vtn_untyped_value(b, w[1]);
3143       image.image = vtn_get_image(b, w[1], &access);
3144       image.coord = get_image_coord(b, w[2]);
3145 
3146       /* texel = w[3] */
3147 
3148       const SpvImageOperandsMask operands =
3149          count > 4 ? w[4] : SpvImageOperandsMaskNone;
3150 
3151       if (operands & SpvImageOperandsSampleMask) {
3152          uint32_t arg = image_operand_arg(b, w, count, 4,
3153                                           SpvImageOperandsSampleMask);
3154          image.sample = vtn_get_nir_ssa(b, w[arg]);
3155       } else {
3156          image.sample = nir_ssa_undef(&b->nb, 1, 32);
3157       }
3158 
3159       if (operands & SpvImageOperandsMakeTexelAvailableMask) {
3160          vtn_fail_if((operands & SpvImageOperandsNonPrivateTexelMask) == 0,
3161                      "MakeTexelAvailable requires NonPrivateTexel to also be set.");
3162          uint32_t arg = image_operand_arg(b, w, count, 4,
3163                                           SpvImageOperandsMakeTexelAvailableMask);
3164          semantics = SpvMemorySemanticsMakeAvailableMask;
3165          scope = vtn_constant_uint(b, w[arg]);
3166       }
3167 
3168       if (operands & SpvImageOperandsLodMask) {
3169          uint32_t arg = image_operand_arg(b, w, count, 4,
3170                                           SpvImageOperandsLodMask);
3171          image.lod = vtn_get_nir_ssa(b, w[arg]);
3172       } else {
3173          image.lod = nir_imm_int(&b->nb, 0);
3174       }
3175 
3176       if (operands & SpvImageOperandsVolatileTexelMask)
3177          access |= ACCESS_VOLATILE;
3178 
3179       break;
3180    }
3181 
3182    default:
3183       vtn_fail_with_opcode("Invalid image opcode", opcode);
3184    }
3185 
3186    if (semantics & SpvMemorySemanticsVolatileMask)
3187       access |= ACCESS_VOLATILE;
3188 
3189    nir_intrinsic_op op;
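        /* Note that IIncrement, IDecrement and ISub all map to atomic_add;
         * fill_common_atomic_sources() supplies the +1/-1 or negated operand.
         */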
3190    switch (opcode) {
3191 #define OP(S, N) case SpvOp##S: op = nir_intrinsic_image_deref_##N; break;
3192    OP(ImageQuerySize,            size)
3193    OP(ImageQuerySizeLod,         size)
3194    OP(ImageRead,                 load)
3195    OP(ImageWrite,                store)
3196    OP(AtomicLoad,                load)
3197    OP(AtomicStore,               store)
3198    OP(AtomicExchange,            atomic_exchange)
3199    OP(AtomicCompareExchange,     atomic_comp_swap)
3200    OP(AtomicCompareExchangeWeak, atomic_comp_swap)
3201    OP(AtomicIIncrement,          atomic_add)
3202    OP(AtomicIDecrement,          atomic_add)
3203    OP(AtomicIAdd,                atomic_add)
3204    OP(AtomicISub,                atomic_add)
3205    OP(AtomicSMin,                atomic_imin)
3206    OP(AtomicUMin,                atomic_umin)
3207    OP(AtomicSMax,                atomic_imax)
3208    OP(AtomicUMax,                atomic_umax)
3209    OP(AtomicAnd,                 atomic_and)
3210    OP(AtomicOr,                  atomic_or)
3211    OP(AtomicXor,                 atomic_xor)
3212    OP(AtomicFAddEXT,             atomic_fadd)
3213    OP(ImageQueryFormat,          format)
3214    OP(ImageQueryOrder,           order)
3215 #undef OP
3216    default:
3217       vtn_fail_with_opcode("Invalid image opcode", opcode);
3218    }
3219 
3220    nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);
3221 
3222    intrin->src[0] = nir_src_for_ssa(&image.image->dest.ssa);
3223 
3224    switch (opcode) {
3225    case SpvOpImageQuerySize:
3226    case SpvOpImageQuerySizeLod:
3227    case SpvOpImageQueryFormat:
3228    case SpvOpImageQueryOrder:
3229       break;
3230    default:
3231       /* The image coordinate is always 4 components but we may not have that
3232        * many.  Swizzle to compensate.
3233        */
3234       intrin->src[1] = nir_src_for_ssa(expand_to_vec4(&b->nb, image.coord));
3235       intrin->src[2] = nir_src_for_ssa(image.sample);
3236       break;
3237    }
3238 
3239    /* The Vulkan spec says:
3240     *
3241     *    "If an instruction loads from or stores to a resource (including
3242     *    atomics and image instructions) and the resource descriptor being
3243     *    accessed is not dynamically uniform, then the operand corresponding
3244     *    to that resource (e.g. the pointer or sampled image operand) must be
3245     *    decorated with NonUniform."
3246     *
3247     * It's very careful to specify that the exact operand must be decorated
3248     * NonUniform.  The SPIR-V parser is not expected to chase through long
3249     * chains to find the NonUniform decoration.  It's either right there or we
3250     * can assume it doesn't exist.
3251     */
3252    vtn_foreach_decoration(b, res_val, non_uniform_decoration_cb, &access);
3253    nir_intrinsic_set_access(intrin, access);
3254 
3255    switch (opcode) {
3256    case SpvOpImageQueryFormat:
3257    case SpvOpImageQueryOrder:
3258       /* No additional sources */
3259       break;
3260    case SpvOpImageQuerySize:
3261       intrin->src[1] = nir_src_for_ssa(nir_imm_int(&b->nb, 0));
3262       break;
3263    case SpvOpImageQuerySizeLod:
3264       intrin->src[1] = nir_src_for_ssa(image.lod);
3265       break;
3266    case SpvOpAtomicLoad:
3267    case SpvOpImageRead:
3268       /* Only OpImageRead can support a lod parameter if
3269        * SPV_AMD_shader_image_load_store_lod is used, but the current NIR
3270        * intrinsics definition for atomics requires us to set it for
3271        * OpAtomicLoad.
3272        */
3273       intrin->src[3] = nir_src_for_ssa(image.lod);
3274       break;
3275    case SpvOpAtomicStore:
3276    case SpvOpImageWrite: {
3277       const uint32_t value_id = opcode == SpvOpAtomicStore ? w[4] : w[3];
3278       struct vtn_ssa_value *value = vtn_ssa_value(b, value_id);
3279       /* nir_intrinsic_image_deref_store always takes a vec4 value */
3280       assert(op == nir_intrinsic_image_deref_store);
3281       intrin->num_components = 4;
3282       intrin->src[3] = nir_src_for_ssa(expand_to_vec4(&b->nb, value->def));
3283       /* Only OpImageWrite can support a lod parameter if
3284        * SPV_AMD_shader_image_load_store_lod is used, but the current NIR
3285        * intrinsics definition for atomics requires us to set it for
3286        * OpAtomicStore.
3287        */
3288       intrin->src[4] = nir_src_for_ssa(image.lod);
3289 
3290       if (opcode == SpvOpImageWrite)
3291          nir_intrinsic_set_src_type(intrin, nir_get_nir_type_for_glsl_type(value->type));
3292       break;
3293    }
3294 
3295    case SpvOpAtomicCompareExchange:
3296    case SpvOpAtomicCompareExchangeWeak:
3297    case SpvOpAtomicIIncrement:
3298    case SpvOpAtomicIDecrement:
3299    case SpvOpAtomicExchange:
3300    case SpvOpAtomicIAdd:
3301    case SpvOpAtomicISub:
3302    case SpvOpAtomicSMin:
3303    case SpvOpAtomicUMin:
3304    case SpvOpAtomicSMax:
3305    case SpvOpAtomicUMax:
3306    case SpvOpAtomicAnd:
3307    case SpvOpAtomicOr:
3308    case SpvOpAtomicXor:
3309    case SpvOpAtomicFAddEXT:
3310       fill_common_atomic_sources(b, opcode, w, &intrin->src[3]);
3311       break;
3312 
3313    default:
3314       vtn_fail_with_opcode("Invalid image opcode", opcode);
3315    }
3316 
3317    /* Image operations implicitly have the Image storage memory semantics. */
3318    semantics |= SpvMemorySemanticsImageMemoryMask;
3319 
3320    SpvMemorySemanticsMask before_semantics;
3321    SpvMemorySemanticsMask after_semantics;
3322    vtn_split_barrier_semantics(b, semantics, &before_semantics, &after_semantics);
3323 
3324    if (before_semantics)
3325       vtn_emit_memory_barrier(b, scope, before_semantics);
3326 
3327    if (opcode != SpvOpImageWrite && opcode != SpvOpAtomicStore) {
3328       struct vtn_type *type = vtn_get_type(b, w[1]);
3329 
3330       unsigned dest_components = glsl_get_vector_elements(type->type);
3331       if (nir_intrinsic_infos[op].dest_components == 0)
3332          intrin->num_components = dest_components;
3333 
3334       nir_ssa_dest_init(&intrin->instr, &intrin->dest,
3335                         nir_intrinsic_dest_components(intrin),
3336                         glsl_get_bit_size(type->type), NULL);
3337 
3338       nir_builder_instr_insert(&b->nb, &intrin->instr);
3339 
3340       nir_ssa_def *result = &intrin->dest.ssa;
3341       if (nir_intrinsic_dest_components(intrin) != dest_components)
3342          result = nir_channels(&b->nb, result, (1 << dest_components) - 1);
3343 
3344       vtn_push_nir_ssa(b, w[2], result);
3345 
3346       if (opcode == SpvOpImageRead)
3347          nir_intrinsic_set_dest_type(intrin, nir_get_nir_type_for_glsl_type(type->type));
3348    } else {
3349       nir_builder_instr_insert(&b->nb, &intrin->instr);
3350    }
3351 
3352    if (after_semantics)
3353       vtn_emit_memory_barrier(b, scope, after_semantics);
3354 }
3355 
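     /* Map a SPIR-V atomic opcode to the atomic-counter NIR intrinsics.  This
      * path is taken when the pointer mode is vtn_variable_mode_atomic_counter
      * (see vtn_handle_atomics below).
      */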
3356 static nir_intrinsic_op
3357 get_uniform_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
3358 {
3359    switch (opcode) {
3360 #define OP(S, N) case SpvOp##S: return nir_intrinsic_atomic_counter_ ##N;
3361    OP(AtomicLoad,                read_deref)
3362    OP(AtomicExchange,            exchange)
3363    OP(AtomicCompareExchange,     comp_swap)
3364    OP(AtomicCompareExchangeWeak, comp_swap)
3365    OP(AtomicIIncrement,          inc_deref)
3366    OP(AtomicIDecrement,          post_dec_deref)
3367    OP(AtomicIAdd,                add_deref)
3368    OP(AtomicISub,                add_deref)
3369    OP(AtomicUMin,                min_deref)
3370    OP(AtomicUMax,                max_deref)
3371    OP(AtomicAnd,                 and_deref)
3372    OP(AtomicOr,                  or_deref)
3373    OP(AtomicXor,                 xor_deref)
3374 #undef OP
3375    default:
3376       /* We left the following out: AtomicStore, AtomicSMin and
3377        * AtomicSMax.  Right now there are no NIR intrinsics for them.  At
3378        * the moment, Atomic Counter support is only needed for ARB_spirv, so
3379        * we only need to support GLSL Atomic Counters, which are uints and
3380        * don't allow direct storage.
3381        */
3382       vtn_fail("Invalid uniform atomic");
3383    }
3384 }
3385 
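     /* Map a SPIR-V atomic opcode to the generic deref-based NIR atomics used
      * for SSBO, workgroup and other memory derefs.
      */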
3386 static nir_intrinsic_op
3387 get_deref_nir_atomic_op(struct vtn_builder *b, SpvOp opcode)
3388 {
3389    switch (opcode) {
3390    case SpvOpAtomicLoad:         return nir_intrinsic_load_deref;
3391    case SpvOpAtomicStore:        return nir_intrinsic_store_deref;
3392 #define OP(S, N) case SpvOp##S: return nir_intrinsic_deref_##N;
3393    OP(AtomicExchange,            atomic_exchange)
3394    OP(AtomicCompareExchange,     atomic_comp_swap)
3395    OP(AtomicCompareExchangeWeak, atomic_comp_swap)
3396    OP(AtomicIIncrement,          atomic_add)
3397    OP(AtomicIDecrement,          atomic_add)
3398    OP(AtomicIAdd,                atomic_add)
3399    OP(AtomicISub,                atomic_add)
3400    OP(AtomicSMin,                atomic_imin)
3401    OP(AtomicUMin,                atomic_umin)
3402    OP(AtomicSMax,                atomic_imax)
3403    OP(AtomicUMax,                atomic_umax)
3404    OP(AtomicAnd,                 atomic_and)
3405    OP(AtomicOr,                  atomic_or)
3406    OP(AtomicXor,                 atomic_xor)
3407    OP(AtomicFAddEXT,             atomic_fadd)
3408 #undef OP
3409    default:
3410       vtn_fail_with_opcode("Invalid shared atomic", opcode);
3411    }
3412 }
3413 
3414 /*
3415  * Handles shared atomics, ssbo atomics and atomic counters.
3416  */
3417 static void
3418 vtn_handle_atomics(struct vtn_builder *b, SpvOp opcode,
3419                    const uint32_t *w, UNUSED unsigned count)
3420 {
3421    struct vtn_pointer *ptr;
3422    nir_intrinsic_instr *atomic;
3423 
3424    SpvScope scope = SpvScopeInvocation;
3425    SpvMemorySemanticsMask semantics = 0;
3426    enum gl_access_qualifier access = 0;
3427 
3428    switch (opcode) {
3429    case SpvOpAtomicLoad:
3430    case SpvOpAtomicExchange:
3431    case SpvOpAtomicCompareExchange:
3432    case SpvOpAtomicCompareExchangeWeak:
3433    case SpvOpAtomicIIncrement:
3434    case SpvOpAtomicIDecrement:
3435    case SpvOpAtomicIAdd:
3436    case SpvOpAtomicISub:
3437    case SpvOpAtomicSMin:
3438    case SpvOpAtomicUMin:
3439    case SpvOpAtomicSMax:
3440    case SpvOpAtomicUMax:
3441    case SpvOpAtomicAnd:
3442    case SpvOpAtomicOr:
3443    case SpvOpAtomicXor:
3444    case SpvOpAtomicFAddEXT:
3445       ptr = vtn_value(b, w[3], vtn_value_type_pointer)->pointer;
3446       scope = vtn_constant_uint(b, w[4]);
3447       semantics = vtn_constant_uint(b, w[5]);
3448       break;
3449 
3450    case SpvOpAtomicStore:
3451       ptr = vtn_value(b, w[1], vtn_value_type_pointer)->pointer;
3452       scope = vtn_constant_uint(b, w[2]);
3453       semantics = vtn_constant_uint(b, w[3]);
3454       break;
3455 
3456    default:
3457       vtn_fail_with_opcode("Invalid SPIR-V atomic", opcode);
3458    }
3459 
3460    if (semantics & SpvMemorySemanticsVolatileMask)
3461       access |= ACCESS_VOLATILE;
3462 
3463    /* "uniform" here means an atomic counter uniform */
3464    if (ptr->mode == vtn_variable_mode_atomic_counter) {
3465       nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr);
3466       nir_intrinsic_op op = get_uniform_nir_atomic_op(b, opcode);
3467       atomic = nir_intrinsic_instr_create(b->nb.shader, op);
3468       atomic->src[0] = nir_src_for_ssa(&deref->dest.ssa);
3469 
3470       /* SSBO atomics need to initialize index/offset.  In this case we don't
3471        * need to, as that info is already stored on the ptr->var->var
3472        * nir_variable (see vtn_create_variable).
3473        */
3474 
3475       switch (opcode) {
3476       case SpvOpAtomicLoad:
3477       case SpvOpAtomicExchange:
3478       case SpvOpAtomicCompareExchange:
3479       case SpvOpAtomicCompareExchangeWeak:
3480       case SpvOpAtomicIIncrement:
3481       case SpvOpAtomicIDecrement:
3482       case SpvOpAtomicIAdd:
3483       case SpvOpAtomicISub:
3484       case SpvOpAtomicSMin:
3485       case SpvOpAtomicUMin:
3486       case SpvOpAtomicSMax:
3487       case SpvOpAtomicUMax:
3488       case SpvOpAtomicAnd:
3489       case SpvOpAtomicOr:
3490       case SpvOpAtomicXor:
3491          /* Nothing to do: we don't need to call fill_common_atomic_sources
3492           * here, as atomic counter uniforms don't have sources.
3493           */
3494          break;
3495 
3496       default:
3497          unreachable("Invalid SPIR-V atomic");
3498 
3499       }
3500    } else {
3501       nir_deref_instr *deref = vtn_pointer_to_deref(b, ptr);
3502       const struct glsl_type *deref_type = deref->type;
3503       nir_intrinsic_op op = get_deref_nir_atomic_op(b, opcode);
3504       atomic = nir_intrinsic_instr_create(b->nb.shader, op);
3505       atomic->src[0] = nir_src_for_ssa(&deref->dest.ssa);
3506 
3507       if (ptr->mode != vtn_variable_mode_workgroup)
3508          access |= ACCESS_COHERENT;
3509 
3510       nir_intrinsic_set_access(atomic, access);
3511 
3512       switch (opcode) {
3513       case SpvOpAtomicLoad:
3514          atomic->num_components = glsl_get_vector_elements(deref_type);
3515          break;
3516 
3517       case SpvOpAtomicStore:
3518          atomic->num_components = glsl_get_vector_elements(deref_type);
3519          nir_intrinsic_set_write_mask(atomic, (1 << atomic->num_components) - 1);
3520          atomic->src[1] = nir_src_for_ssa(vtn_get_nir_ssa(b, w[4]));
3521          break;
3522 
3523       case SpvOpAtomicExchange:
3524       case SpvOpAtomicCompareExchange:
3525       case SpvOpAtomicCompareExchangeWeak:
3526       case SpvOpAtomicIIncrement:
3527       case SpvOpAtomicIDecrement:
3528       case SpvOpAtomicIAdd:
3529       case SpvOpAtomicISub:
3530       case SpvOpAtomicSMin:
3531       case SpvOpAtomicUMin:
3532       case SpvOpAtomicSMax:
3533       case SpvOpAtomicUMax:
3534       case SpvOpAtomicAnd:
3535       case SpvOpAtomicOr:
3536       case SpvOpAtomicXor:
3537       case SpvOpAtomicFAddEXT:
3538          fill_common_atomic_sources(b, opcode, w, &atomic->src[1]);
3539          break;
3540 
3541       default:
3542          vtn_fail_with_opcode("Invalid SPIR-V atomic", opcode);
3543       }
3544    }
3545 
3546    /* Atomic ordering operations will implicitly apply to the atomic operation
3547     * storage class, so include that too.
3548     */
3549    semantics |= vtn_mode_to_memory_semantics(ptr->mode);
3550 
3551    SpvMemorySemanticsMask before_semantics;
3552    SpvMemorySemanticsMask after_semantics;
3553    vtn_split_barrier_semantics(b, semantics, &before_semantics, &after_semantics);
3554 
3555    if (before_semantics)
3556       vtn_emit_memory_barrier(b, scope, before_semantics);
3557 
3558    if (opcode != SpvOpAtomicStore) {
3559       struct vtn_type *type = vtn_get_type(b, w[1]);
3560 
3561       nir_ssa_dest_init(&atomic->instr, &atomic->dest,
3562                         glsl_get_vector_elements(type->type),
3563                         glsl_get_bit_size(type->type), NULL);
3564 
3565       vtn_push_nir_ssa(b, w[2], &atomic->dest.ssa);
3566    }
3567 
3568    nir_builder_instr_insert(&b->nb, &atomic->instr);
3569 
3570    if (after_semantics)
3571       vtn_emit_memory_barrier(b, scope, after_semantics);
3572 }
3573 
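     /* Create (but do not insert) a nir_op_vecN ALU instruction; the callers
      * below fill in its sources and then insert it themselves.
      */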
3574 static nir_alu_instr *
3575 create_vec(struct vtn_builder *b, unsigned num_components, unsigned bit_size)
3576 {
3577    nir_op op = nir_op_vec(num_components);
3578    nir_alu_instr *vec = nir_alu_instr_create(b->shader, op);
3579    nir_ssa_dest_init(&vec->instr, &vec->dest.dest, num_components,
3580                      bit_size, NULL);
3581    vec->dest.write_mask = (1 << num_components) - 1;
3582 
3583    return vec;
3584 }
3585 
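     /* Transpose a matrix value: result column i is built by gathering
      * component i from every source column.  The result caches a link back to
      * the source, so transposing twice returns the original value.
      */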
3586 struct vtn_ssa_value *
3587 vtn_ssa_transpose(struct vtn_builder *b, struct vtn_ssa_value *src)
3588 {
3589    if (src->transposed)
3590       return src->transposed;
3591 
3592    struct vtn_ssa_value *dest =
3593       vtn_create_ssa_value(b, glsl_transposed_type(src->type));
3594 
3595    for (unsigned i = 0; i < glsl_get_matrix_columns(dest->type); i++) {
3596       nir_alu_instr *vec = create_vec(b, glsl_get_matrix_columns(src->type),
3597                                          glsl_get_bit_size(src->type));
3598       if (glsl_type_is_vector_or_scalar(src->type)) {
3599           vec->src[0].src = nir_src_for_ssa(src->def);
3600           vec->src[0].swizzle[0] = i;
3601       } else {
3602          for (unsigned j = 0; j < glsl_get_matrix_columns(src->type); j++) {
3603             vec->src[j].src = nir_src_for_ssa(src->elems[j]->def);
3604             vec->src[j].swizzle[0] = i;
3605          }
3606       }
3607       nir_builder_instr_insert(&b->nb, &vec->instr);
3608       dest->elems[i]->def = &vec->dest.dest.ssa;
3609    }
3610 
3611    dest->transposed = src;
3612 
3613    return dest;
3614 }
3615 
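     /* Used to implement OpVectorShuffle: each result component selects one
      * component from the concatenation of src0 and src1.  For two vec4
      * sources, indices {0, 4} pick src0.x and src1.x; the index 0xffffffff
      * yields an undef component.
      */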
3616 static nir_ssa_def *
3617 vtn_vector_shuffle(struct vtn_builder *b, unsigned num_components,
3618                    nir_ssa_def *src0, nir_ssa_def *src1,
3619                    const uint32_t *indices)
3620 {
3621    nir_alu_instr *vec = create_vec(b, num_components, src0->bit_size);
3622 
3623    for (unsigned i = 0; i < num_components; i++) {
3624       uint32_t index = indices[i];
3625       if (index == 0xffffffff) {
3626          vec->src[i].src =
3627             nir_src_for_ssa(nir_ssa_undef(&b->nb, 1, src0->bit_size));
3628       } else if (index < src0->num_components) {
3629          vec->src[i].src = nir_src_for_ssa(src0);
3630          vec->src[i].swizzle[0] = index;
3631       } else {
3632          vec->src[i].src = nir_src_for_ssa(src1);
3633          vec->src[i].swizzle[0] = index - src0->num_components;
3634       }
3635    }
3636 
3637    nir_builder_instr_insert(&b->nb, &vec->instr);
3638 
3639    return &vec->dest.dest.ssa;
3640 }
3641 
3642 /*
3643  * Concatenates a number of vectors/scalars to produce a vector
3644  */
3645 static nir_ssa_def *
3646 vtn_vector_construct(struct vtn_builder *b, unsigned num_components,
3647                      unsigned num_srcs, nir_ssa_def **srcs)
3648 {
3649    nir_alu_instr *vec = create_vec(b, num_components, srcs[0]->bit_size);
3650 
3651    /* From the SPIR-V 1.1 spec for OpCompositeConstruct:
3652     *
3653     *    "When constructing a vector, there must be at least two Constituent
3654     *    operands."
3655     */
3656    vtn_assert(num_srcs >= 2);
3657 
3658    unsigned dest_idx = 0;
3659    for (unsigned i = 0; i < num_srcs; i++) {
3660       nir_ssa_def *src = srcs[i];
3661       vtn_assert(dest_idx + src->num_components <= num_components);
3662       for (unsigned j = 0; j < src->num_components; j++) {
3663          vec->src[dest_idx].src = nir_src_for_ssa(src);
3664          vec->src[dest_idx].swizzle[0] = j;
3665          dest_idx++;
3666       }
3667    }
3668 
3669    /* From the SPIR-V 1.1 spec for OpCompositeConstruct:
3670     *
3671     *    "When constructing a vector, the total number of components in all
3672     *    the operands must equal the number of components in Result Type."
3673     */
3674    vtn_assert(dest_idx == num_components);
3675 
3676    nir_builder_instr_insert(&b->nb, &vec->instr);
3677 
3678    return &vec->dest.dest.ssa;
3679 }
3680 
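     /* Make a structural copy of a vtn_ssa_value.  Vector/scalar leaves share
      * the same nir_ssa_def; only the aggregate wrapper tree is duplicated.
      */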
3681 static struct vtn_ssa_value *
3682 vtn_composite_copy(void *mem_ctx, struct vtn_ssa_value *src)
3683 {
3684    struct vtn_ssa_value *dest = rzalloc(mem_ctx, struct vtn_ssa_value);
3685    dest->type = src->type;
3686 
3687    if (glsl_type_is_vector_or_scalar(src->type)) {
3688       dest->def = src->def;
3689    } else {
3690       unsigned elems = glsl_get_length(src->type);
3691 
3692       dest->elems = ralloc_array(mem_ctx, struct vtn_ssa_value *, elems);
3693       for (unsigned i = 0; i < elems; i++)
3694          dest->elems[i] = vtn_composite_copy(mem_ctx, src->elems[i]);
3695    }
3696 
3697    return dest;
3698 }
3699 
3700 static struct vtn_ssa_value *
3701 vtn_composite_insert(struct vtn_builder *b, struct vtn_ssa_value *src,
3702                      struct vtn_ssa_value *insert, const uint32_t *indices,
3703                      unsigned num_indices)
3704 {
3705    struct vtn_ssa_value *dest = vtn_composite_copy(b, src);
3706 
3707    struct vtn_ssa_value *cur = dest;
3708    unsigned i;
3709    for (i = 0; i < num_indices - 1; i++) {
3710       /* If we got a vector here, that means the next index will be trying to
3711        * dereference a scalar.
3712        */
3713       vtn_fail_if(glsl_type_is_vector_or_scalar(cur->type),
3714                   "OpCompositeInsert has too many indices.");
3715       vtn_fail_if(indices[i] >= glsl_get_length(cur->type),
3716                   "All indices in an OpCompositeInsert must be in-bounds");
3717       cur = cur->elems[indices[i]];
3718    }
3719 
3720    if (glsl_type_is_vector_or_scalar(cur->type)) {
3721       vtn_fail_if(indices[i] >= glsl_get_vector_elements(cur->type),
3722                   "All indices in an OpCompositeInsert must be in-bounds");
3723 
3724       /* According to the SPIR-V spec, OpCompositeInsert may work down to
3725        * the component granularity. In that case, the last index will be
3726        * the index to insert the scalar into the vector.
3727        */
3728 
3729       cur->def = nir_vector_insert_imm(&b->nb, cur->def, insert->def, indices[i]);
3730    } else {
3731       vtn_fail_if(indices[i] >= glsl_get_length(cur->type),
3732                   "All indices in an OpCompositeInsert must be in-bounds");
3733       cur->elems[indices[i]] = insert;
3734    }
3735 
3736    return dest;
3737 }
3738 
3739 static struct vtn_ssa_value *
3740 vtn_composite_extract(struct vtn_builder *b, struct vtn_ssa_value *src,
3741                       const uint32_t *indices, unsigned num_indices)
3742 {
3743    struct vtn_ssa_value *cur = src;
3744    for (unsigned i = 0; i < num_indices; i++) {
3745       if (glsl_type_is_vector_or_scalar(cur->type)) {
3746          vtn_assert(i == num_indices - 1);
3747          vtn_fail_if(indices[i] >= glsl_get_vector_elements(cur->type),
3748                      "All indices in an OpCompositeExtract must be in-bounds");
3749 
3750          /* According to the SPIR-V spec, OpCompositeExtract may work down to
3751           * the component granularity. The last index will be the index of the
3752           * vector to extract.
3753           */
3754 
3755          const struct glsl_type *scalar_type =
3756             glsl_scalar_type(glsl_get_base_type(cur->type));
3757          struct vtn_ssa_value *ret = vtn_create_ssa_value(b, scalar_type);
3758          ret->def = nir_channel(&b->nb, cur->def, indices[i]);
3759          return ret;
3760       } else {
3761          vtn_fail_if(indices[i] >= glsl_get_length(cur->type),
3762                      "All indices in an OpCompositeExtract must be in-bounds");
3763          cur = cur->elems[indices[i]];
3764       }
3765    }
3766 
3767    return cur;
3768 }
3769 
3770 static void
3771 vtn_handle_composite(struct vtn_builder *b, SpvOp opcode,
3772                      const uint32_t *w, unsigned count)
3773 {
3774    struct vtn_type *type = vtn_get_type(b, w[1]);
3775    struct vtn_ssa_value *ssa = vtn_create_ssa_value(b, type->type);
3776 
3777    switch (opcode) {
3778    case SpvOpVectorExtractDynamic:
3779       ssa->def = nir_vector_extract(&b->nb, vtn_get_nir_ssa(b, w[3]),
3780                                     vtn_get_nir_ssa(b, w[4]));
3781       break;
3782 
3783    case SpvOpVectorInsertDynamic:
3784       ssa->def = nir_vector_insert(&b->nb, vtn_get_nir_ssa(b, w[3]),
3785                                    vtn_get_nir_ssa(b, w[4]),
3786                                    vtn_get_nir_ssa(b, w[5]));
3787       break;
3788 
3789    case SpvOpVectorShuffle:
3790       ssa->def = vtn_vector_shuffle(b, glsl_get_vector_elements(type->type),
3791                                     vtn_get_nir_ssa(b, w[3]),
3792                                     vtn_get_nir_ssa(b, w[4]),
3793                                     w + 5);
3794       break;
3795 
3796    case SpvOpCompositeConstruct: {
3797       unsigned elems = count - 3;
3798       assume(elems >= 1);
3799       if (glsl_type_is_vector_or_scalar(type->type)) {
3800          nir_ssa_def *srcs[NIR_MAX_VEC_COMPONENTS];
3801          for (unsigned i = 0; i < elems; i++)
3802             srcs[i] = vtn_get_nir_ssa(b, w[3 + i]);
3803          ssa->def =
3804             vtn_vector_construct(b, glsl_get_vector_elements(type->type),
3805                                  elems, srcs);
3806       } else {
3807          ssa->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
3808          for (unsigned i = 0; i < elems; i++)
3809             ssa->elems[i] = vtn_ssa_value(b, w[3 + i]);
3810       }
3811       break;
3812    }
3813    case SpvOpCompositeExtract:
3814       ssa = vtn_composite_extract(b, vtn_ssa_value(b, w[3]),
3815                                   w + 4, count - 4);
3816       break;
3817 
3818    case SpvOpCompositeInsert:
3819       ssa = vtn_composite_insert(b, vtn_ssa_value(b, w[4]),
3820                                  vtn_ssa_value(b, w[3]),
3821                                  w + 5, count - 5);
3822       break;
3823 
3824    case SpvOpCopyLogical:
3825       ssa = vtn_composite_copy(b, vtn_ssa_value(b, w[3]));
3826       break;
3827    case SpvOpCopyObject:
3828       vtn_copy_value(b, w[3], w[2]);
3829       return;
3830 
3831    default:
3832       vtn_fail_with_opcode("unknown composite operation", opcode);
3833    }
3834 
3835    vtn_push_ssa_value(b, w[2], ssa);
3836 }
3837 
3838 static void
3839 vtn_emit_barrier(struct vtn_builder *b, nir_intrinsic_op op)
3840 {
3841    nir_intrinsic_instr *intrin = nir_intrinsic_instr_create(b->shader, op);
3842    nir_builder_instr_insert(&b->nb, &intrin->instr);
3843 }
3844 
3845 void
3846 vtn_emit_memory_barrier(struct vtn_builder *b, SpvScope scope,
3847                         SpvMemorySemanticsMask semantics)
3848 {
3849    if (b->shader->options->use_scoped_barrier) {
3850       vtn_emit_scoped_memory_barrier(b, scope, semantics);
3851       return;
3852    }
3853 
3854    static const SpvMemorySemanticsMask all_memory_semantics =
3855       SpvMemorySemanticsUniformMemoryMask |
3856       SpvMemorySemanticsWorkgroupMemoryMask |
3857       SpvMemorySemanticsAtomicCounterMemoryMask |
3858       SpvMemorySemanticsImageMemoryMask |
3859       SpvMemorySemanticsOutputMemoryMask;
3860 
3861    /* If we're not actually doing a memory barrier, bail */
3862    if (!(semantics & all_memory_semantics))
3863       return;
3864 
3865    /* GL and Vulkan don't have these */
3866    vtn_assert(scope != SpvScopeCrossDevice);
3867 
3868    if (scope == SpvScopeSubgroup)
3869       return; /* Nothing to do here */
3870 
3871    if (scope == SpvScopeWorkgroup) {
3872       vtn_emit_barrier(b, nir_intrinsic_group_memory_barrier);
3873       return;
3874    }
3875 
3876    /* There are only two scopes left */
3877    vtn_assert(scope == SpvScopeInvocation || scope == SpvScopeDevice);
3878 
3879    /* Map the GLSL memoryBarrier() construct and any barriers with more than one
3880     * semantic to the corresponding NIR one.
3881     */
3882    if (util_bitcount(semantics & all_memory_semantics) > 1) {
3883       vtn_emit_barrier(b, nir_intrinsic_memory_barrier);
3884       if (semantics & SpvMemorySemanticsOutputMemoryMask) {
3885          /* GLSL memoryBarrier() (and the corresponding NIR one) doesn't include
3886           * TCS outputs, so we have to emit its own intrinsic for that. We
3887           * then need to emit another memory_barrier to prevent moving
3888           * non-output operations to before the tcs_patch barrier.
3889           */
3890          vtn_emit_barrier(b, nir_intrinsic_memory_barrier_tcs_patch);
3891          vtn_emit_barrier(b, nir_intrinsic_memory_barrier);
3892       }
3893       return;
3894    }
3895 
3896    /* Issue a more specific barrier */
3897    switch (semantics & all_memory_semantics) {
3898    case SpvMemorySemanticsUniformMemoryMask:
3899       vtn_emit_barrier(b, nir_intrinsic_memory_barrier_buffer);
3900       break;
3901    case SpvMemorySemanticsWorkgroupMemoryMask:
3902       vtn_emit_barrier(b, nir_intrinsic_memory_barrier_shared);
3903       break;
3904    case SpvMemorySemanticsAtomicCounterMemoryMask:
3905       vtn_emit_barrier(b, nir_intrinsic_memory_barrier_atomic_counter);
3906       break;
3907    case SpvMemorySemanticsImageMemoryMask:
3908       vtn_emit_barrier(b, nir_intrinsic_memory_barrier_image);
3909       break;
3910    case SpvMemorySemanticsOutputMemoryMask:
3911       if (b->nb.shader->info.stage == MESA_SHADER_TESS_CTRL)
3912          vtn_emit_barrier(b, nir_intrinsic_memory_barrier_tcs_patch);
3913       break;
3914    default:
3915       break;
3916    }
3917 }
3918 
3919 static void
3920 vtn_handle_barrier(struct vtn_builder *b, SpvOp opcode,
3921                    const uint32_t *w, UNUSED unsigned count)
3922 {
3923    switch (opcode) {
3924    case SpvOpEmitVertex:
3925    case SpvOpEmitStreamVertex:
3926    case SpvOpEndPrimitive:
3927    case SpvOpEndStreamPrimitive: {
3928       nir_intrinsic_op intrinsic_op;
3929       switch (opcode) {
3930       case SpvOpEmitVertex:
3931       case SpvOpEmitStreamVertex:
3932          intrinsic_op = nir_intrinsic_emit_vertex;
3933          break;
3934       case SpvOpEndPrimitive:
3935       case SpvOpEndStreamPrimitive:
3936          intrinsic_op = nir_intrinsic_end_primitive;
3937          break;
3938       default:
3939          unreachable("Invalid opcode");
3940       }
3941 
3942       nir_intrinsic_instr *intrin =
3943          nir_intrinsic_instr_create(b->shader, intrinsic_op);
3944 
3945       switch (opcode) {
3946       case SpvOpEmitStreamVertex:
3947       case SpvOpEndStreamPrimitive: {
3948          unsigned stream = vtn_constant_uint(b, w[1]);
3949          nir_intrinsic_set_stream_id(intrin, stream);
3950          break;
3951       }
3952 
3953       default:
3954          break;
3955       }
3956 
3957       nir_builder_instr_insert(&b->nb, &intrin->instr);
3958       break;
3959    }
3960 
3961    case SpvOpMemoryBarrier: {
3962       SpvScope scope = vtn_constant_uint(b, w[1]);
3963       SpvMemorySemanticsMask semantics = vtn_constant_uint(b, w[2]);
3964       vtn_emit_memory_barrier(b, scope, semantics);
3965       return;
3966    }
3967 
3968    case SpvOpControlBarrier: {
3969       SpvScope execution_scope = vtn_constant_uint(b, w[1]);
3970       SpvScope memory_scope = vtn_constant_uint(b, w[2]);
3971       SpvMemorySemanticsMask memory_semantics = vtn_constant_uint(b, w[3]);
3972 
3973       /* GLSLang, prior to commit 8297936dd6eb3, emitted OpControlBarrier with
3974        * memory semantics of None for GLSL barrier().
3975        * Before that, prior to c3f1cdfa, it emitted OpControlBarrier with
3976        * Device instead of Workgroup as the execution scope.
3977        */
3978       if (b->wa_glslang_cs_barrier &&
3979           b->nb.shader->info.stage == MESA_SHADER_COMPUTE &&
3980           (execution_scope == SpvScopeWorkgroup ||
3981            execution_scope == SpvScopeDevice) &&
3982           memory_semantics == SpvMemorySemanticsMaskNone) {
3983          execution_scope = SpvScopeWorkgroup;
3984          memory_scope = SpvScopeWorkgroup;
3985          memory_semantics = SpvMemorySemanticsAcquireReleaseMask |
3986                             SpvMemorySemanticsWorkgroupMemoryMask;
3987       }
3988 
3989       /* From the SPIR-V spec:
3990        *
3991        *    "When used with the TessellationControl execution model, it also
3992        *    implicitly synchronizes the Output Storage Class: Writes to Output
3993        *    variables performed by any invocation executed prior to a
3994        *    OpControlBarrier will be visible to any other invocation after
3995        *    return from that OpControlBarrier."
3996        */
3997       if (b->nb.shader->info.stage == MESA_SHADER_TESS_CTRL) {
3998          memory_semantics &= ~(SpvMemorySemanticsAcquireMask |
3999                                SpvMemorySemanticsReleaseMask |
4000                                SpvMemorySemanticsAcquireReleaseMask |
4001                                SpvMemorySemanticsSequentiallyConsistentMask);
4002          memory_semantics |= SpvMemorySemanticsAcquireReleaseMask |
4003                              SpvMemorySemanticsOutputMemoryMask;
4004       }
4005 
4006       if (b->shader->options->use_scoped_barrier) {
4007          vtn_emit_scoped_control_barrier(b, execution_scope, memory_scope,
4008                                          memory_semantics);
4009       } else {
4010          vtn_emit_memory_barrier(b, memory_scope, memory_semantics);
4011 
4012          if (execution_scope == SpvScopeWorkgroup)
4013             vtn_emit_barrier(b, nir_intrinsic_control_barrier);
4014       }
4015       break;
4016    }
4017 
4018    default:
4019       unreachable("unknown barrier instruction");
4020    }
4021 }
4022 
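     /* Translate a SPIR-V input/output primitive execution mode to the
      * corresponding GL primitive enum, returned as a raw numeric value (the
      * GL names are noted inline below).
      */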
4023 static unsigned
4024 gl_primitive_from_spv_execution_mode(struct vtn_builder *b,
4025                                      SpvExecutionMode mode)
4026 {
4027    switch (mode) {
4028    case SpvExecutionModeInputPoints:
4029    case SpvExecutionModeOutputPoints:
4030       return 0; /* GL_POINTS */
4031    case SpvExecutionModeInputLines:
4032       return 1; /* GL_LINES */
4033    case SpvExecutionModeInputLinesAdjacency:
4034       return 0x000A; /* GL_LINE_STRIP_ADJACENCY_ARB */
4035    case SpvExecutionModeTriangles:
4036       return 4; /* GL_TRIANGLES */
4037    case SpvExecutionModeInputTrianglesAdjacency:
4038       return 0x000C; /* GL_TRIANGLES_ADJACENCY_ARB */
4039    case SpvExecutionModeQuads:
4040       return 7; /* GL_QUADS */
4041    case SpvExecutionModeIsolines:
4042       return 0x8E7A; /* GL_ISOLINES */
4043    case SpvExecutionModeOutputLineStrip:
4044       return 3; /* GL_LINE_STRIP */
4045    case SpvExecutionModeOutputTriangleStrip:
4046       return 5; /* GL_TRIANGLE_STRIP */
4047    default:
4048       vtn_fail("Invalid primitive type: %s (%u)",
4049                spirv_executionmode_to_string(mode), mode);
4050    }
4051 }
4052 
4053 static unsigned
4054 vertices_in_from_spv_execution_mode(struct vtn_builder *b,
4055                                     SpvExecutionMode mode)
4056 {
4057    switch (mode) {
4058    case SpvExecutionModeInputPoints:
4059       return 1;
4060    case SpvExecutionModeInputLines:
4061       return 2;
4062    case SpvExecutionModeInputLinesAdjacency:
4063       return 4;
4064    case SpvExecutionModeTriangles:
4065       return 3;
4066    case SpvExecutionModeInputTrianglesAdjacency:
4067       return 6;
4068    default:
4069       vtn_fail("Invalid GS input mode: %s (%u)",
4070                spirv_executionmode_to_string(mode), mode);
4071    }
4072 }
4073 
4074 static gl_shader_stage
4075 stage_for_execution_model(struct vtn_builder *b, SpvExecutionModel model)
4076 {
4077    switch (model) {
4078    case SpvExecutionModelVertex:
4079       return MESA_SHADER_VERTEX;
4080    case SpvExecutionModelTessellationControl:
4081       return MESA_SHADER_TESS_CTRL;
4082    case SpvExecutionModelTessellationEvaluation:
4083       return MESA_SHADER_TESS_EVAL;
4084    case SpvExecutionModelGeometry:
4085       return MESA_SHADER_GEOMETRY;
4086    case SpvExecutionModelFragment:
4087       return MESA_SHADER_FRAGMENT;
4088    case SpvExecutionModelGLCompute:
4089       return MESA_SHADER_COMPUTE;
4090    case SpvExecutionModelKernel:
4091       return MESA_SHADER_KERNEL;
4092    case SpvExecutionModelRayGenerationKHR:
4093       return MESA_SHADER_RAYGEN;
4094    case SpvExecutionModelAnyHitKHR:
4095       return MESA_SHADER_ANY_HIT;
4096    case SpvExecutionModelClosestHitKHR:
4097       return MESA_SHADER_CLOSEST_HIT;
4098    case SpvExecutionModelMissKHR:
4099       return MESA_SHADER_MISS;
4100    case SpvExecutionModelIntersectionKHR:
4101       return MESA_SHADER_INTERSECTION;
4102    case SpvExecutionModelCallableKHR:
4103        return MESA_SHADER_CALLABLE;
4104    default:
4105       vtn_fail("Unsupported execution model: %s (%u)",
4106                spirv_executionmodel_to_string(model), model);
4107    }
4108 }
4109 
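     /* Warn (rather than fail) when the module declares a capability that is
      * not enabled in b->options->caps; "name" must be a field of that caps
      * struct.
      */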
4110 #define spv_check_supported(name, cap) do {                 \
4111       if (!(b->options && b->options->caps.name))           \
4112          vtn_warn("Unsupported SPIR-V capability: %s (%u)", \
4113                   spirv_capability_to_string(cap), cap);    \
4114    } while(0)
4115 
4116 
4117 void
4118 vtn_handle_entry_point(struct vtn_builder *b, const uint32_t *w,
4119                        unsigned count)
4120 {
4121    struct vtn_value *entry_point = &b->values[w[2]];
4122    /* Record the name regardless of whether this is the entry point we want */
4123    unsigned name_words;
4124    entry_point->name = vtn_string_literal(b, &w[3], count - 3, &name_words);
4125 
4126    if (strcmp(entry_point->name, b->entry_point_name) != 0 ||
4127        stage_for_execution_model(b, w[1]) != b->entry_point_stage)
4128       return;
4129 
4130    vtn_assert(b->entry_point == NULL);
4131    b->entry_point = entry_point;
4132 }
4133 
4134 static bool
4135 vtn_handle_preamble_instruction(struct vtn_builder *b, SpvOp opcode,
4136                                 const uint32_t *w, unsigned count)
4137 {
4138    switch (opcode) {
4139    case SpvOpSource: {
4140       const char *lang;
4141       switch (w[1]) {
4142       default:
4143       case SpvSourceLanguageUnknown:      lang = "unknown";    break;
4144       case SpvSourceLanguageESSL:         lang = "ESSL";       break;
4145       case SpvSourceLanguageGLSL:         lang = "GLSL";       break;
4146       case SpvSourceLanguageOpenCL_C:     lang = "OpenCL C";   break;
4147       case SpvSourceLanguageOpenCL_CPP:   lang = "OpenCL C++"; break;
4148       case SpvSourceLanguageHLSL:         lang = "HLSL";       break;
4149       }
4150 
4151       uint32_t version = w[2];
4152 
4153       const char *file =
4154          (count > 3) ? vtn_value(b, w[3], vtn_value_type_string)->str : "";
4155 
4156       vtn_info("Parsing SPIR-V from %s %u source file %s", lang, version, file);
4157 
4158       b->source_lang = w[1];
4159       break;
4160    }
4161 
4162    case SpvOpSourceExtension:
4163    case SpvOpSourceContinued:
4164    case SpvOpExtension:
4165    case SpvOpModuleProcessed:
4166       /* Unhandled, but these are for debug so that's ok. */
4167       break;
4168 
4169    case SpvOpCapability: {
4170       SpvCapability cap = w[1];
4171       switch (cap) {
4172       case SpvCapabilityMatrix:
4173       case SpvCapabilityShader:
4174       case SpvCapabilityGeometry:
4175       case SpvCapabilityGeometryPointSize:
4176       case SpvCapabilityUniformBufferArrayDynamicIndexing:
4177       case SpvCapabilitySampledImageArrayDynamicIndexing:
4178       case SpvCapabilityStorageBufferArrayDynamicIndexing:
4179       case SpvCapabilityStorageImageArrayDynamicIndexing:
4180       case SpvCapabilityImageRect:
4181       case SpvCapabilitySampledRect:
4182       case SpvCapabilitySampled1D:
4183       case SpvCapabilityImage1D:
4184       case SpvCapabilitySampledCubeArray:
4185       case SpvCapabilityImageCubeArray:
4186       case SpvCapabilitySampledBuffer:
4187       case SpvCapabilityImageBuffer:
4188       case SpvCapabilityImageQuery:
4189       case SpvCapabilityDerivativeControl:
4190       case SpvCapabilityInterpolationFunction:
4191       case SpvCapabilityMultiViewport:
4192       case SpvCapabilitySampleRateShading:
4193       case SpvCapabilityClipDistance:
4194       case SpvCapabilityCullDistance:
4195       case SpvCapabilityInputAttachment:
4196       case SpvCapabilityImageGatherExtended:
4197       case SpvCapabilityStorageImageExtendedFormats:
4198       case SpvCapabilityVector16:
4199          break;
4200 
4201       case SpvCapabilityLinkage:
4202       case SpvCapabilitySparseResidency:
4203          vtn_warn("Unsupported SPIR-V capability: %s",
4204                   spirv_capability_to_string(cap));
4205          break;
4206 
4207       case SpvCapabilityMinLod:
4208          spv_check_supported(min_lod, cap);
4209          break;
4210 
4211       case SpvCapabilityAtomicStorage:
4212          spv_check_supported(atomic_storage, cap);
4213          break;
4214 
4215       case SpvCapabilityFloat64:
4216          spv_check_supported(float64, cap);
4217          break;
4218       case SpvCapabilityInt64:
4219          spv_check_supported(int64, cap);
4220          break;
4221       case SpvCapabilityInt16:
4222          spv_check_supported(int16, cap);
4223          break;
4224       case SpvCapabilityInt8:
4225          spv_check_supported(int8, cap);
4226          break;
4227 
4228       case SpvCapabilityTransformFeedback:
4229          spv_check_supported(transform_feedback, cap);
4230          break;
4231 
4232       case SpvCapabilityGeometryStreams:
4233          spv_check_supported(geometry_streams, cap);
4234          break;
4235 
4236       case SpvCapabilityInt64Atomics:
4237          spv_check_supported(int64_atomics, cap);
4238          break;
4239 
4240       case SpvCapabilityStorageImageMultisample:
4241          spv_check_supported(storage_image_ms, cap);
4242          break;
4243 
4244       case SpvCapabilityAddresses:
4245          spv_check_supported(address, cap);
4246          break;
4247 
4248       case SpvCapabilityKernel:
4249       case SpvCapabilityFloat16Buffer:
4250          spv_check_supported(kernel, cap);
4251          break;
4252 
4253       case SpvCapabilityGenericPointer:
4254          spv_check_supported(generic_pointers, cap);
4255          break;
4256 
4257       case SpvCapabilityImageBasic:
4258          spv_check_supported(kernel_image, cap);
4259          break;
4260 
4261       case SpvCapabilityLiteralSampler:
4262          spv_check_supported(literal_sampler, cap);
4263          break;
4264 
4265       case SpvCapabilityImageReadWrite:
4266       case SpvCapabilityImageMipmap:
4267       case SpvCapabilityPipes:
4268       case SpvCapabilityDeviceEnqueue:
4269          vtn_warn("Unsupported OpenCL-style SPIR-V capability: %s",
4270                   spirv_capability_to_string(cap));
4271          break;
4272 
4273       case SpvCapabilityImageMSArray:
4274          spv_check_supported(image_ms_array, cap);
4275          break;
4276 
4277       case SpvCapabilityTessellation:
4278       case SpvCapabilityTessellationPointSize:
4279          spv_check_supported(tessellation, cap);
4280          break;
4281 
4282       case SpvCapabilityDrawParameters:
4283          spv_check_supported(draw_parameters, cap);
4284          break;
4285 
4286       case SpvCapabilityStorageImageReadWithoutFormat:
4287          spv_check_supported(image_read_without_format, cap);
4288          break;
4289 
4290       case SpvCapabilityStorageImageWriteWithoutFormat:
4291          spv_check_supported(image_write_without_format, cap);
4292          break;
4293 
4294       case SpvCapabilityDeviceGroup:
4295          spv_check_supported(device_group, cap);
4296          break;
4297 
4298       case SpvCapabilityMultiView:
4299          spv_check_supported(multiview, cap);
4300          break;
4301 
4302       case SpvCapabilityGroupNonUniform:
4303          spv_check_supported(subgroup_basic, cap);
4304          break;
4305 
4306       case SpvCapabilitySubgroupVoteKHR:
4307       case SpvCapabilityGroupNonUniformVote:
4308          spv_check_supported(subgroup_vote, cap);
4309          break;
4310 
4311       case SpvCapabilitySubgroupBallotKHR:
4312       case SpvCapabilityGroupNonUniformBallot:
4313          spv_check_supported(subgroup_ballot, cap);
4314          break;
4315 
4316       case SpvCapabilityGroupNonUniformShuffle:
4317       case SpvCapabilityGroupNonUniformShuffleRelative:
4318          spv_check_supported(subgroup_shuffle, cap);
4319          break;
4320 
4321       case SpvCapabilityGroupNonUniformQuad:
4322          spv_check_supported(subgroup_quad, cap);
4323          break;
4324 
4325       case SpvCapabilityGroupNonUniformArithmetic:
4326       case SpvCapabilityGroupNonUniformClustered:
4327          spv_check_supported(subgroup_arithmetic, cap);
4328          break;
4329 
4330       case SpvCapabilityGroups:
4331          spv_check_supported(amd_shader_ballot, cap);
4332          break;
4333 
4334       case SpvCapabilityVariablePointersStorageBuffer:
4335       case SpvCapabilityVariablePointers:
4336          spv_check_supported(variable_pointers, cap);
4337          b->variable_pointers = true;
4338          break;
4339 
4340       case SpvCapabilityStorageUniformBufferBlock16:
4341       case SpvCapabilityStorageUniform16:
4342       case SpvCapabilityStoragePushConstant16:
4343       case SpvCapabilityStorageInputOutput16:
4344          spv_check_supported(storage_16bit, cap);
4345          break;
4346 
4347       case SpvCapabilityShaderLayer:
4348       case SpvCapabilityShaderViewportIndex:
4349       case SpvCapabilityShaderViewportIndexLayerEXT:
4350          spv_check_supported(shader_viewport_index_layer, cap);
4351          break;
4352 
4353       case SpvCapabilityStorageBuffer8BitAccess:
4354       case SpvCapabilityUniformAndStorageBuffer8BitAccess:
4355       case SpvCapabilityStoragePushConstant8:
4356          spv_check_supported(storage_8bit, cap);
4357          break;
4358 
4359       case SpvCapabilityShaderNonUniformEXT:
4360          spv_check_supported(descriptor_indexing, cap);
4361          break;
4362 
4363       case SpvCapabilityInputAttachmentArrayDynamicIndexingEXT:
4364       case SpvCapabilityUniformTexelBufferArrayDynamicIndexingEXT:
4365       case SpvCapabilityStorageTexelBufferArrayDynamicIndexingEXT:
4366          spv_check_supported(descriptor_array_dynamic_indexing, cap);
4367          break;
4368 
4369       case SpvCapabilityUniformBufferArrayNonUniformIndexingEXT:
4370       case SpvCapabilitySampledImageArrayNonUniformIndexingEXT:
4371       case SpvCapabilityStorageBufferArrayNonUniformIndexingEXT:
4372       case SpvCapabilityStorageImageArrayNonUniformIndexingEXT:
4373       case SpvCapabilityInputAttachmentArrayNonUniformIndexingEXT:
4374       case SpvCapabilityUniformTexelBufferArrayNonUniformIndexingEXT:
4375       case SpvCapabilityStorageTexelBufferArrayNonUniformIndexingEXT:
4376          spv_check_supported(descriptor_array_non_uniform_indexing, cap);
4377          break;
4378 
4379       case SpvCapabilityRuntimeDescriptorArrayEXT:
4380          spv_check_supported(runtime_descriptor_array, cap);
4381          break;
4382 
4383       case SpvCapabilityStencilExportEXT:
4384          spv_check_supported(stencil_export, cap);
4385          break;
4386 
4387       case SpvCapabilitySampleMaskPostDepthCoverage:
4388          spv_check_supported(post_depth_coverage, cap);
4389          break;
4390 
4391       case SpvCapabilityDenormFlushToZero:
4392       case SpvCapabilityDenormPreserve:
4393       case SpvCapabilitySignedZeroInfNanPreserve:
4394       case SpvCapabilityRoundingModeRTE:
4395       case SpvCapabilityRoundingModeRTZ:
4396          spv_check_supported(float_controls, cap);
4397          break;
4398 
4399       case SpvCapabilityPhysicalStorageBufferAddresses:
4400          spv_check_supported(physical_storage_buffer_address, cap);
4401          break;
4402 
4403       case SpvCapabilityComputeDerivativeGroupQuadsNV:
4404       case SpvCapabilityComputeDerivativeGroupLinearNV:
4405          spv_check_supported(derivative_group, cap);
4406          break;
4407 
4408       case SpvCapabilityFloat16:
4409          spv_check_supported(float16, cap);
4410          break;
4411 
4412       case SpvCapabilityFragmentShaderSampleInterlockEXT:
4413          spv_check_supported(fragment_shader_sample_interlock, cap);
4414          break;
4415 
4416       case SpvCapabilityFragmentShaderPixelInterlockEXT:
4417          spv_check_supported(fragment_shader_pixel_interlock, cap);
4418          break;
4419 
4420       case SpvCapabilityDemoteToHelperInvocationEXT:
4421          spv_check_supported(demote_to_helper_invocation, cap);
4422          b->uses_demote_to_helper_invocation = true;
4423          break;
4424 
4425       case SpvCapabilityShaderClockKHR:
4426          spv_check_supported(shader_clock, cap);
4427          break;
4428 
4429       case SpvCapabilityVulkanMemoryModel:
4430          spv_check_supported(vk_memory_model, cap);
4431          break;
4432 
4433       case SpvCapabilityVulkanMemoryModelDeviceScope:
4434          spv_check_supported(vk_memory_model_device_scope, cap);
4435          break;
4436 
4437       case SpvCapabilityImageReadWriteLodAMD:
4438          spv_check_supported(amd_image_read_write_lod, cap);
4439          break;
4440 
4441       case SpvCapabilityIntegerFunctions2INTEL:
4442          spv_check_supported(integer_functions2, cap);
4443          break;
4444 
4445       case SpvCapabilityFragmentMaskAMD:
4446          spv_check_supported(amd_fragment_mask, cap);
4447          break;
4448 
4449       case SpvCapabilityImageGatherBiasLodAMD:
4450          spv_check_supported(amd_image_gather_bias_lod, cap);
4451          break;
4452 
4453       case SpvCapabilityAtomicFloat32AddEXT:
4454          spv_check_supported(float32_atomic_add, cap);
4455          break;
4456 
4457       case SpvCapabilityAtomicFloat64AddEXT:
4458          spv_check_supported(float64_atomic_add, cap);
4459          break;
4460 
4461       case SpvCapabilitySubgroupShuffleINTEL:
4462          spv_check_supported(intel_subgroup_shuffle, cap);
4463          break;
4464 
4465       case SpvCapabilitySubgroupBufferBlockIOINTEL:
4466          spv_check_supported(intel_subgroup_buffer_block_io, cap);
4467          break;
4468 
4469       case SpvCapabilityRayTracingProvisionalKHR:
4470          spv_check_supported(ray_tracing, cap);
4471          break;
4472 
4473       case SpvCapabilityRayQueryProvisionalKHR:
4474          spv_check_supported(ray_query, cap);
4475          break;
4476 
4477       case SpvCapabilityRayTraversalPrimitiveCullingProvisionalKHR:
4478          spv_check_supported(ray_traversal_primitive_culling, cap);
4479          break;
4480 
4481       case SpvCapabilityInt64ImageEXT:
4482          spv_check_supported(image_atomic_int64, cap);
4483          break;
4484 
4485       default:
4486          vtn_fail("Unhandled capability: %s (%u)",
4487                   spirv_capability_to_string(cap), cap);
4488       }
4489       break;
4490    }
4491 
4492    case SpvOpExtInstImport:
4493       vtn_handle_extension(b, opcode, w, count);
4494       break;
4495 
4496    case SpvOpMemoryModel:
4497       switch (w[1]) {
4498       case SpvAddressingModelPhysical32:
4499          vtn_fail_if(b->shader->info.stage != MESA_SHADER_KERNEL,
4500                      "AddressingModelPhysical32 only supported for kernels");
4501          b->shader->info.cs.ptr_size = 32;
4502          b->physical_ptrs = true;
4503          assert(nir_address_format_bit_size(b->options->global_addr_format) == 32);
4504          assert(nir_address_format_num_components(b->options->global_addr_format) == 1);
4505          assert(nir_address_format_bit_size(b->options->shared_addr_format) == 32);
4506          assert(nir_address_format_num_components(b->options->shared_addr_format) == 1);
4507          assert(nir_address_format_bit_size(b->options->constant_addr_format) == 32);
4508          assert(nir_address_format_num_components(b->options->constant_addr_format) == 1);
4509          break;
4510       case SpvAddressingModelPhysical64:
4511          vtn_fail_if(b->shader->info.stage != MESA_SHADER_KERNEL,
4512                      "AddressingModelPhysical64 only supported for kernels");
4513          b->shader->info.cs.ptr_size = 64;
4514          b->physical_ptrs = true;
4515          assert(nir_address_format_bit_size(b->options->global_addr_format) == 64);
4516          assert(nir_address_format_num_components(b->options->global_addr_format) == 1);
4517          assert(nir_address_format_bit_size(b->options->shared_addr_format) == 64);
4518          assert(nir_address_format_num_components(b->options->shared_addr_format) == 1);
4519          assert(nir_address_format_bit_size(b->options->constant_addr_format) == 64);
4520          assert(nir_address_format_num_components(b->options->constant_addr_format) == 1);
4521          break;
4522       case SpvAddressingModelLogical:
4523          vtn_fail_if(b->shader->info.stage == MESA_SHADER_KERNEL,
4524                      "AddressingModelLogical only supported for shaders");
4525          b->physical_ptrs = false;
4526          break;
4527       case SpvAddressingModelPhysicalStorageBuffer64:
4528          vtn_fail_if(!b->options ||
4529                      !b->options->caps.physical_storage_buffer_address,
4530                      "AddressingModelPhysicalStorageBuffer64 not supported");
4531          break;
4532       default:
4533          vtn_fail("Unknown addressing model: %s (%u)",
4534                   spirv_addressingmodel_to_string(w[1]), w[1]);
4535          break;
4536       }
4537 
4538       b->mem_model = w[2];
4539       switch (w[2]) {
4540       case SpvMemoryModelSimple:
4541       case SpvMemoryModelGLSL450:
4542       case SpvMemoryModelOpenCL:
4543          break;
4544       case SpvMemoryModelVulkan:
4545          vtn_fail_if(!b->options->caps.vk_memory_model,
4546                      "Vulkan memory model is unsupported by this driver");
4547          break;
4548       default:
4549          vtn_fail("Unsupported memory model: %s",
4550                   spirv_memorymodel_to_string(w[2]));
4551          break;
4552       }
4553       break;
4554 
4555    case SpvOpEntryPoint:
4556       vtn_handle_entry_point(b, w, count);
4557       break;
4558 
4559    case SpvOpString:
4560       vtn_push_value(b, w[1], vtn_value_type_string)->str =
4561          vtn_string_literal(b, &w[2], count - 2, NULL);
4562       break;
4563 
4564    case SpvOpName:
4565       b->values[w[1]].name = vtn_string_literal(b, &w[2], count - 2, NULL);
4566       break;
4567 
4568    case SpvOpMemberName:
4569       /* TODO */
4570       break;
4571 
4572    case SpvOpExecutionMode:
4573    case SpvOpExecutionModeId:
4574    case SpvOpDecorationGroup:
4575    case SpvOpDecorate:
4576    case SpvOpDecorateId:
4577    case SpvOpMemberDecorate:
4578    case SpvOpGroupDecorate:
4579    case SpvOpGroupMemberDecorate:
4580    case SpvOpDecorateString:
4581    case SpvOpMemberDecorateString:
4582       vtn_handle_decoration(b, opcode, w, count);
4583       break;
4584 
4585    case SpvOpExtInst: {
4586       struct vtn_value *val = vtn_value(b, w[3], vtn_value_type_extension);
4587       if (val->ext_handler == vtn_handle_non_semantic_instruction) {
4588          /* NonSemantic extended instructions are acceptable in preamble. */
4589          vtn_handle_non_semantic_instruction(b, w[4], w, count);
4590          return true;
4591       } else {
4592          return false; /* End of preamble. */
4593       }
4594    }
4595 
4596    default:
4597       return false; /* End of preamble */
4598    }
4599 
4600    return true;
4601 }
4602 
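/* Decoration callback that applies the entry point's execution modes with
 * literal operands to the NIR shader_info.  Modes whose operands are <id>s
 * (LocalSizeId, LocalSizeHintId) are deferred to
 * vtn_handle_execution_mode_id() below.
 */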
4603 static void
4604 vtn_handle_execution_mode(struct vtn_builder *b, struct vtn_value *entry_point,
4605                           const struct vtn_decoration *mode, UNUSED void *data)
4606 {
4607    vtn_assert(b->entry_point == entry_point);
4608 
4609    switch(mode->exec_mode) {
4610    case SpvExecutionModeOriginUpperLeft:
4611    case SpvExecutionModeOriginLowerLeft:
4612       vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
4613       b->shader->info.fs.origin_upper_left =
4614          (mode->exec_mode == SpvExecutionModeOriginUpperLeft);
4615       break;
4616 
4617    case SpvExecutionModeEarlyFragmentTests:
4618       vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
4619       b->shader->info.fs.early_fragment_tests = true;
4620       break;
4621 
4622    case SpvExecutionModePostDepthCoverage:
4623       vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
4624       b->shader->info.fs.post_depth_coverage = true;
4625       break;
4626 
4627    case SpvExecutionModeInvocations:
4628       vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
4629       b->shader->info.gs.invocations = MAX2(1, mode->operands[0]);
4630       break;
4631 
4632    case SpvExecutionModeDepthReplacing:
4633       vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
4634       b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_ANY;
4635       break;
4636    case SpvExecutionModeDepthGreater:
4637       vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
4638       b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_GREATER;
4639       break;
4640    case SpvExecutionModeDepthLess:
4641       vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
4642       b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_LESS;
4643       break;
4644    case SpvExecutionModeDepthUnchanged:
4645       vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
4646       b->shader->info.fs.depth_layout = FRAG_DEPTH_LAYOUT_UNCHANGED;
4647       break;
4648 
4649    case SpvExecutionModeLocalSize:
4650       vtn_assert(gl_shader_stage_is_compute(b->shader->info.stage));
4651       b->shader->info.cs.local_size[0] = mode->operands[0];
4652       b->shader->info.cs.local_size[1] = mode->operands[1];
4653       b->shader->info.cs.local_size[2] = mode->operands[2];
4654       break;
4655 
4656    case SpvExecutionModeLocalSizeHint:
4657       break; /* Nothing to do with this */
4658 
4659    case SpvExecutionModeOutputVertices:
4660       if (b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
4661           b->shader->info.stage == MESA_SHADER_TESS_EVAL) {
4662          b->shader->info.tess.tcs_vertices_out = mode->operands[0];
4663       } else {
4664          vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
4665          b->shader->info.gs.vertices_out = mode->operands[0];
4666       }
4667       break;
4668 
4669    case SpvExecutionModeInputPoints:
4670    case SpvExecutionModeInputLines:
4671    case SpvExecutionModeInputLinesAdjacency:
4672    case SpvExecutionModeTriangles:
4673    case SpvExecutionModeInputTrianglesAdjacency:
4674    case SpvExecutionModeQuads:
4675    case SpvExecutionModeIsolines:
4676       if (b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
4677           b->shader->info.stage == MESA_SHADER_TESS_EVAL) {
4678          b->shader->info.tess.primitive_mode =
4679             gl_primitive_from_spv_execution_mode(b, mode->exec_mode);
4680       } else {
4681          vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
4682          b->shader->info.gs.vertices_in =
4683             vertices_in_from_spv_execution_mode(b, mode->exec_mode);
4684          b->shader->info.gs.input_primitive =
4685             gl_primitive_from_spv_execution_mode(b, mode->exec_mode);
4686       }
4687       break;
4688 
4689    case SpvExecutionModeOutputPoints:
4690    case SpvExecutionModeOutputLineStrip:
4691    case SpvExecutionModeOutputTriangleStrip:
4692       vtn_assert(b->shader->info.stage == MESA_SHADER_GEOMETRY);
4693       b->shader->info.gs.output_primitive =
4694          gl_primitive_from_spv_execution_mode(b, mode->exec_mode);
4695       break;
4696 
4697    case SpvExecutionModeSpacingEqual:
4698       vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
4699                  b->shader->info.stage == MESA_SHADER_TESS_EVAL);
4700       b->shader->info.tess.spacing = TESS_SPACING_EQUAL;
4701       break;
4702    case SpvExecutionModeSpacingFractionalEven:
4703       vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
4704                  b->shader->info.stage == MESA_SHADER_TESS_EVAL);
4705       b->shader->info.tess.spacing = TESS_SPACING_FRACTIONAL_EVEN;
4706       break;
4707    case SpvExecutionModeSpacingFractionalOdd:
4708       vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
4709                  b->shader->info.stage == MESA_SHADER_TESS_EVAL);
4710       b->shader->info.tess.spacing = TESS_SPACING_FRACTIONAL_ODD;
4711       break;
4712    case SpvExecutionModeVertexOrderCw:
4713       vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
4714                  b->shader->info.stage == MESA_SHADER_TESS_EVAL);
4715       b->shader->info.tess.ccw = false;
4716       break;
4717    case SpvExecutionModeVertexOrderCcw:
4718       vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
4719                  b->shader->info.stage == MESA_SHADER_TESS_EVAL);
4720       b->shader->info.tess.ccw = true;
4721       break;
4722    case SpvExecutionModePointMode:
4723       vtn_assert(b->shader->info.stage == MESA_SHADER_TESS_CTRL ||
4724                  b->shader->info.stage == MESA_SHADER_TESS_EVAL);
4725       b->shader->info.tess.point_mode = true;
4726       break;
4727 
4728    case SpvExecutionModePixelCenterInteger:
4729       vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
4730       b->shader->info.fs.pixel_center_integer = true;
4731       break;
4732 
4733    case SpvExecutionModeXfb:
4734       b->shader->info.has_transform_feedback_varyings = true;
4735       break;
4736 
4737    case SpvExecutionModeVecTypeHint:
4738       break; /* OpenCL */
4739 
4740    case SpvExecutionModeContractionOff:
4741       if (b->shader->info.stage != MESA_SHADER_KERNEL)
4742          vtn_warn("ExecutionMode only allowed for CL-style kernels: %s",
4743                   spirv_executionmode_to_string(mode->exec_mode));
4744       else
4745          b->exact = true;
4746       break;
4747 
4748    case SpvExecutionModeStencilRefReplacingEXT:
4749       vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
4750       break;
4751 
4752    case SpvExecutionModeDerivativeGroupQuadsNV:
4753       vtn_assert(b->shader->info.stage == MESA_SHADER_COMPUTE);
4754       b->shader->info.cs.derivative_group = DERIVATIVE_GROUP_QUADS;
4755       break;
4756 
4757    case SpvExecutionModeDerivativeGroupLinearNV:
4758       vtn_assert(b->shader->info.stage == MESA_SHADER_COMPUTE);
4759       b->shader->info.cs.derivative_group = DERIVATIVE_GROUP_LINEAR;
4760       break;
4761 
4762    case SpvExecutionModePixelInterlockOrderedEXT:
4763       vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
4764       b->shader->info.fs.pixel_interlock_ordered = true;
4765       break;
4766 
4767    case SpvExecutionModePixelInterlockUnorderedEXT:
4768       vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
4769       b->shader->info.fs.pixel_interlock_unordered = true;
4770       break;
4771 
4772    case SpvExecutionModeSampleInterlockOrderedEXT:
4773       vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
4774       b->shader->info.fs.sample_interlock_ordered = true;
4775       break;
4776 
4777    case SpvExecutionModeSampleInterlockUnorderedEXT:
4778       vtn_assert(b->shader->info.stage == MESA_SHADER_FRAGMENT);
4779       b->shader->info.fs.sample_interlock_unordered = true;
4780       break;
4781 
4782    case SpvExecutionModeDenormPreserve:
4783    case SpvExecutionModeDenormFlushToZero:
4784    case SpvExecutionModeSignedZeroInfNanPreserve:
4785    case SpvExecutionModeRoundingModeRTE:
4786    case SpvExecutionModeRoundingModeRTZ: {
4787       unsigned execution_mode = 0;
4788       switch (mode->exec_mode) {
4789       case SpvExecutionModeDenormPreserve:
4790          switch (mode->operands[0]) {
4791          case 16: execution_mode = FLOAT_CONTROLS_DENORM_PRESERVE_FP16; break;
4792          case 32: execution_mode = FLOAT_CONTROLS_DENORM_PRESERVE_FP32; break;
4793          case 64: execution_mode = FLOAT_CONTROLS_DENORM_PRESERVE_FP64; break;
4794          default: vtn_fail("Floating point type not supported");
4795          }
4796          break;
4797       case SpvExecutionModeDenormFlushToZero:
4798          switch (mode->operands[0]) {
4799          case 16: execution_mode = FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP16; break;
4800          case 32: execution_mode = FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP32; break;
4801          case 64: execution_mode = FLOAT_CONTROLS_DENORM_FLUSH_TO_ZERO_FP64; break;
4802          default: vtn_fail("Floating point type not supported");
4803          }
4804          break;
4805       case SpvExecutionModeSignedZeroInfNanPreserve:
4806          switch (mode->operands[0]) {
4807          case 16: execution_mode = FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP16; break;
4808          case 32: execution_mode = FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP32; break;
4809          case 64: execution_mode = FLOAT_CONTROLS_SIGNED_ZERO_INF_NAN_PRESERVE_FP64; break;
4810          default: vtn_fail("Floating point type not supported");
4811          }
4812          break;
4813       case SpvExecutionModeRoundingModeRTE:
4814          switch (mode->operands[0]) {
4815          case 16: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP16; break;
4816          case 32: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP32; break;
4817          case 64: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTE_FP64; break;
4818          default: vtn_fail("Floating point type not supported");
4819          }
4820          break;
4821       case SpvExecutionModeRoundingModeRTZ:
4822          switch (mode->operands[0]) {
4823          case 16: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP16; break;
4824          case 32: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP32; break;
4825          case 64: execution_mode = FLOAT_CONTROLS_ROUNDING_MODE_RTZ_FP64; break;
4826          default: vtn_fail("Floating point type not supported");
4827          }
4828          break;
4829       default:
4830          break;
4831       }
4832 
4833       b->shader->info.float_controls_execution_mode |= execution_mode;
4834       break;
4835    }
4836 
4837    case SpvExecutionModeLocalSizeId:
4838    case SpvExecutionModeLocalSizeHintId:
4839       /* Handled later by vtn_handle_execution_mode_id(). */
4840       break;
4841 
4842    default:
4843       vtn_fail("Unhandled execution mode: %s (%u)",
4844                spirv_executionmode_to_string(mode->exec_mode),
4845                mode->exec_mode);
4846    }
4847 }
4848 
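/* Companion to vtn_handle_execution_mode() for execution modes whose
 * operands are constant <id>s rather than literals; all other modes were
 * already handled in the first pass.
 */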
4849 static void
4850 vtn_handle_execution_mode_id(struct vtn_builder *b, struct vtn_value *entry_point,
4851                              const struct vtn_decoration *mode, UNUSED void *data)
4852 {
4853 
4854    vtn_assert(b->entry_point == entry_point);
4855 
4856    switch (mode->exec_mode) {
4857    case SpvExecutionModeLocalSizeId:
4858       b->shader->info.cs.local_size[0] = vtn_constant_uint(b, mode->operands[0]);
4859       b->shader->info.cs.local_size[1] = vtn_constant_uint(b, mode->operands[1]);
4860       b->shader->info.cs.local_size[2] = vtn_constant_uint(b, mode->operands[2]);
4861       break;
4862 
4863    case SpvExecutionModeLocalSizeHintId:
4864       /* Nothing to do with this hint. */
4865       break;
4866 
4867    default:
4868       /* Nothing to do.  Literal execution modes already handled by
4869        * vtn_handle_execution_mode(). */
4870       break;
4871    }
4872 }
4873 
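/* Instruction handler for the types, constants, and global variables
 * section: preamble opcodes are rejected, type/constant/variable opcodes are
 * translated, and anything unexpected (other than NonSemantic OpExtInst)
 * ends the section.
 */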
4874 static bool
4875 vtn_handle_variable_or_type_instruction(struct vtn_builder *b, SpvOp opcode,
4876                                         const uint32_t *w, unsigned count)
4877 {
4878    vtn_set_instruction_result_type(b, opcode, w, count);
4879 
4880    switch (opcode) {
4881    case SpvOpSource:
4882    case SpvOpSourceContinued:
4883    case SpvOpSourceExtension:
4884    case SpvOpExtension:
4885    case SpvOpCapability:
4886    case SpvOpExtInstImport:
4887    case SpvOpMemoryModel:
4888    case SpvOpEntryPoint:
4889    case SpvOpExecutionMode:
4890    case SpvOpString:
4891    case SpvOpName:
4892    case SpvOpMemberName:
4893    case SpvOpDecorationGroup:
4894    case SpvOpDecorate:
4895    case SpvOpDecorateId:
4896    case SpvOpMemberDecorate:
4897    case SpvOpGroupDecorate:
4898    case SpvOpGroupMemberDecorate:
4899    case SpvOpDecorateString:
4900    case SpvOpMemberDecorateString:
4901       vtn_fail("Invalid opcode in types and variables section");
4902       break;
4903 
4904    case SpvOpTypeVoid:
4905    case SpvOpTypeBool:
4906    case SpvOpTypeInt:
4907    case SpvOpTypeFloat:
4908    case SpvOpTypeVector:
4909    case SpvOpTypeMatrix:
4910    case SpvOpTypeImage:
4911    case SpvOpTypeSampler:
4912    case SpvOpTypeSampledImage:
4913    case SpvOpTypeArray:
4914    case SpvOpTypeRuntimeArray:
4915    case SpvOpTypeStruct:
4916    case SpvOpTypeOpaque:
4917    case SpvOpTypePointer:
4918    case SpvOpTypeForwardPointer:
4919    case SpvOpTypeFunction:
4920    case SpvOpTypeEvent:
4921    case SpvOpTypeDeviceEvent:
4922    case SpvOpTypeReserveId:
4923    case SpvOpTypeQueue:
4924    case SpvOpTypePipe:
4925    case SpvOpTypeAccelerationStructureKHR:
4926       vtn_handle_type(b, opcode, w, count);
4927       break;
4928 
4929    case SpvOpConstantTrue:
4930    case SpvOpConstantFalse:
4931    case SpvOpConstant:
4932    case SpvOpConstantComposite:
4933    case SpvOpConstantNull:
4934    case SpvOpSpecConstantTrue:
4935    case SpvOpSpecConstantFalse:
4936    case SpvOpSpecConstant:
4937    case SpvOpSpecConstantComposite:
4938    case SpvOpSpecConstantOp:
4939       vtn_handle_constant(b, opcode, w, count);
4940       break;
4941 
4942    case SpvOpUndef:
4943    case SpvOpVariable:
4944    case SpvOpConstantSampler:
4945       vtn_handle_variables(b, opcode, w, count);
4946       break;
4947 
4948    case SpvOpExtInst: {
4949       struct vtn_value *val = vtn_value(b, w[3], vtn_value_type_extension);
4950       /* NonSemantic extended instructions are acceptable in preamble, others
4951        * will indicate the end of preamble.
4952        */
4953       return val->ext_handler == vtn_handle_non_semantic_instruction;
4954    }
4955 
4956    default:
4957       return false; /* End of preamble */
4958    }
4959 
4960    return true;
4961 }
4962 
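/* Builds the result of OpSelect: a single bcsel for vectors and scalars, or
 * a recursive element-wise select for composites, reusing the same condition
 * for every element.
 */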
4963 static struct vtn_ssa_value *
4964 vtn_nir_select(struct vtn_builder *b, struct vtn_ssa_value *src0,
4965                struct vtn_ssa_value *src1, struct vtn_ssa_value *src2)
4966 {
4967    struct vtn_ssa_value *dest = rzalloc(b, struct vtn_ssa_value);
4968    dest->type = src1->type;
4969 
4970    if (glsl_type_is_vector_or_scalar(src1->type)) {
4971       dest->def = nir_bcsel(&b->nb, src0->def, src1->def, src2->def);
4972    } else {
4973       unsigned elems = glsl_get_length(src1->type);
4974 
4975       dest->elems = ralloc_array(b, struct vtn_ssa_value *, elems);
4976       for (unsigned i = 0; i < elems; i++) {
4977          dest->elems[i] = vtn_nir_select(b, src0,
4978                                          src1->elems[i], src2->elems[i]);
4979       }
4980    }
4981 
4982    return dest;
4983 }
4984 
4985 static void
4986 vtn_handle_select(struct vtn_builder *b, SpvOp opcode,
4987                   const uint32_t *w, unsigned count)
4988 {
4989    /* Handle OpSelect up-front here because it needs to be able to handle
4990     * pointers and not just regular vectors and scalars.
4991     */
4992    struct vtn_value *res_val = vtn_untyped_value(b, w[2]);
4993    struct vtn_value *cond_val = vtn_untyped_value(b, w[3]);
4994    struct vtn_value *obj1_val = vtn_untyped_value(b, w[4]);
4995    struct vtn_value *obj2_val = vtn_untyped_value(b, w[5]);
4996 
4997    vtn_fail_if(obj1_val->type != res_val->type ||
4998                obj2_val->type != res_val->type,
4999                "Object types must match the result type in OpSelect");
5000 
5001    vtn_fail_if((cond_val->type->base_type != vtn_base_type_scalar &&
5002                 cond_val->type->base_type != vtn_base_type_vector) ||
5003                !glsl_type_is_boolean(cond_val->type->type),
5004                "OpSelect must have either a vector of booleans or "
5005                "a boolean as Condition type");
5006 
5007    vtn_fail_if(cond_val->type->base_type == vtn_base_type_vector &&
5008                (res_val->type->base_type != vtn_base_type_vector ||
5009                 res_val->type->length != cond_val->type->length),
5010                "When Condition type in OpSelect is a vector, the Result "
5011                "type must be a vector of the same length");
5012 
5013    switch (res_val->type->base_type) {
5014    case vtn_base_type_scalar:
5015    case vtn_base_type_vector:
5016    case vtn_base_type_matrix:
5017    case vtn_base_type_array:
5018    case vtn_base_type_struct:
5019       /* OK. */
5020       break;
5021    case vtn_base_type_pointer:
5022       /* We need to have actual storage for pointer types. */
5023       vtn_fail_if(res_val->type->type == NULL,
5024                   "Invalid pointer result type for OpSelect");
5025       break;
5026    default:
5027       vtn_fail("Result type of OpSelect must be a scalar, composite, or pointer");
5028    }
5029 
5030    vtn_push_ssa_value(b, w[2],
5031       vtn_nir_select(b, vtn_ssa_value(b, w[3]),
5032                         vtn_ssa_value(b, w[4]),
5033                         vtn_ssa_value(b, w[5])));
5034 }
5035 
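/* Handles OpPtrDiff, OpPtrEqual, and OpPtrNotEqual by lowering both pointer
 * operands to their NIR address format and performing the subtraction or
 * comparison on the resulting address values.
 */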
5036 static void
5037 vtn_handle_ptr(struct vtn_builder *b, SpvOp opcode,
5038                const uint32_t *w, unsigned count)
5039 {
5040    struct vtn_type *type1 = vtn_get_value_type(b, w[3]);
5041    struct vtn_type *type2 = vtn_get_value_type(b, w[4]);
5042    vtn_fail_if(type1->base_type != vtn_base_type_pointer ||
5043                type2->base_type != vtn_base_type_pointer,
5044                "%s operands must have pointer types",
5045                spirv_op_to_string(opcode));
5046    vtn_fail_if(type1->storage_class != type2->storage_class,
5047                "%s operands must have the same storage class",
5048                spirv_op_to_string(opcode));
5049 
5050    struct vtn_type *vtn_type = vtn_get_type(b, w[1]);
5051    const struct glsl_type *type = vtn_type->type;
5052 
5053    nir_address_format addr_format = vtn_mode_to_address_format(
5054       b, vtn_storage_class_to_mode(b, type1->storage_class, NULL, NULL));
5055 
5056    nir_ssa_def *def;
5057 
5058    switch (opcode) {
5059    case SpvOpPtrDiff: {
5060       /* OpPtrDiff returns the difference in number of elements (not byte offset). */
5061       unsigned elem_size, elem_align;
5062       glsl_get_natural_size_align_bytes(type1->deref->type,
5063                                         &elem_size, &elem_align);
5064 
5065       def = nir_build_addr_isub(&b->nb,
5066                                 vtn_get_nir_ssa(b, w[3]),
5067                                 vtn_get_nir_ssa(b, w[4]),
5068                                 addr_format);
5069       def = nir_idiv(&b->nb, def, nir_imm_intN_t(&b->nb, elem_size, def->bit_size));
5070       def = nir_i2i(&b->nb, def, glsl_get_bit_size(type));
5071       break;
5072    }
5073 
5074    case SpvOpPtrEqual:
5075    case SpvOpPtrNotEqual: {
5076       def = nir_build_addr_ieq(&b->nb,
5077                                vtn_get_nir_ssa(b, w[3]),
5078                                vtn_get_nir_ssa(b, w[4]),
5079                                addr_format);
5080       if (opcode == SpvOpPtrNotEqual)
5081          def = nir_inot(&b->nb, def);
5082       break;
5083    }
5084 
5085    default:
5086       unreachable("Invalid ptr operation");
5087    }
5088 
5089    vtn_push_nir_ssa(b, w[2], def);
5090 }
5091 
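/* Translates the provisional KHR ray-tracing opcodes into the matching NIR
 * intrinsics; OpTraceRayKHR and OpExecuteCallableKHR additionally pass their
 * payload variable as a deref source.
 */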
5092 static void
5093 vtn_handle_ray_intrinsic(struct vtn_builder *b, SpvOp opcode,
5094                          const uint32_t *w, unsigned count)
5095 {
5096    nir_intrinsic_instr *intrin;
5097 
5098    switch (opcode) {
5099    case SpvOpTraceRayKHR: {
5100       intrin = nir_intrinsic_instr_create(b->nb.shader,
5101                                           nir_intrinsic_trace_ray);
5102 
5103       /* The sources are in the same order in the NIR intrinsic */
5104       for (unsigned i = 0; i < 10; i++)
5105          intrin->src[i] = nir_src_for_ssa(vtn_ssa_value(b, w[i + 1])->def);
5106 
5107       nir_deref_instr *payload = vtn_get_call_payload_for_location(b, w[11]);
5108       intrin->src[10] = nir_src_for_ssa(&payload->dest.ssa);
5109       nir_builder_instr_insert(&b->nb, &intrin->instr);
5110       break;
5111    }
5112 
5113    case SpvOpReportIntersectionKHR: {
5114       intrin = nir_intrinsic_instr_create(b->nb.shader,
5115                                           nir_intrinsic_report_ray_intersection);
5116       intrin->src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[3])->def);
5117       intrin->src[1] = nir_src_for_ssa(vtn_ssa_value(b, w[4])->def);
5118       nir_ssa_dest_init(&intrin->instr, &intrin->dest, 1, 1, NULL);
5119       nir_builder_instr_insert(&b->nb, &intrin->instr);
5120       vtn_push_nir_ssa(b, w[2], &intrin->dest.ssa);
5121       break;
5122    }
5123 
5124    case SpvOpIgnoreIntersectionKHR:
5125       intrin = nir_intrinsic_instr_create(b->nb.shader,
5126                                           nir_intrinsic_ignore_ray_intersection);
5127       nir_builder_instr_insert(&b->nb, &intrin->instr);
5128       break;
5129 
5130    case SpvOpTerminateRayKHR:
5131       intrin = nir_intrinsic_instr_create(b->nb.shader,
5132                                           nir_intrinsic_terminate_ray);
5133       nir_builder_instr_insert(&b->nb, &intrin->instr);
5134       break;
5135 
5136    case SpvOpExecuteCallableKHR: {
5137       intrin = nir_intrinsic_instr_create(b->nb.shader,
5138                                           nir_intrinsic_execute_callable);
5139       intrin->src[0] = nir_src_for_ssa(vtn_ssa_value(b, w[1])->def);
5140       nir_deref_instr *payload = vtn_get_call_payload_for_location(b, w[2]);
5141       intrin->src[1] = nir_src_for_ssa(&payload->dest.ssa);
5142       nir_builder_instr_insert(&b->nb, &intrin->instr);
5143       break;
5144    }
5145 
5146    default:
5147       vtn_fail_with_opcode("Unhandled opcode", opcode);
5148    }
5149 }
5150 
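/* Main opcode dispatcher for instructions inside a function body.  Each
 * family of opcodes (variables, textures, images, atomics, ALU, composites,
 * barriers, subgroups, ray tracing, ...) is forwarded to its dedicated
 * handler.  Always returns true unless a vtn_fail() aborts translation.
 */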
5151 static bool
5152 vtn_handle_body_instruction(struct vtn_builder *b, SpvOp opcode,
5153                             const uint32_t *w, unsigned count)
5154 {
5155    switch (opcode) {
5156    case SpvOpLabel:
5157       break;
5158 
5159    case SpvOpLoopMerge:
5160    case SpvOpSelectionMerge:
5161       /* This is handled by cfg pre-pass and walk_blocks */
5162       break;
5163 
5164    case SpvOpUndef: {
5165       struct vtn_value *val = vtn_push_value(b, w[2], vtn_value_type_undef);
5166       val->type = vtn_get_type(b, w[1]);
5167       break;
5168    }
5169 
5170    case SpvOpExtInst:
5171       vtn_handle_extension(b, opcode, w, count);
5172       break;
5173 
5174    case SpvOpVariable:
5175    case SpvOpLoad:
5176    case SpvOpStore:
5177    case SpvOpCopyMemory:
5178    case SpvOpCopyMemorySized:
5179    case SpvOpAccessChain:
5180    case SpvOpPtrAccessChain:
5181    case SpvOpInBoundsAccessChain:
5182    case SpvOpInBoundsPtrAccessChain:
5183    case SpvOpArrayLength:
5184    case SpvOpConvertPtrToU:
5185    case SpvOpConvertUToPtr:
5186    case SpvOpGenericCastToPtrExplicit:
5187    case SpvOpGenericPtrMemSemantics:
5188    case SpvOpSubgroupBlockReadINTEL:
5189    case SpvOpSubgroupBlockWriteINTEL:
5190       vtn_handle_variables(b, opcode, w, count);
5191       break;
5192 
5193    case SpvOpFunctionCall:
5194       vtn_handle_function_call(b, opcode, w, count);
5195       break;
5196 
5197    case SpvOpSampledImage:
5198    case SpvOpImage:
5199    case SpvOpImageSampleImplicitLod:
5200    case SpvOpImageSampleExplicitLod:
5201    case SpvOpImageSampleDrefImplicitLod:
5202    case SpvOpImageSampleDrefExplicitLod:
5203    case SpvOpImageSampleProjImplicitLod:
5204    case SpvOpImageSampleProjExplicitLod:
5205    case SpvOpImageSampleProjDrefImplicitLod:
5206    case SpvOpImageSampleProjDrefExplicitLod:
5207    case SpvOpImageFetch:
5208    case SpvOpImageGather:
5209    case SpvOpImageDrefGather:
5210    case SpvOpImageQueryLod:
5211    case SpvOpImageQueryLevels:
5212    case SpvOpImageQuerySamples:
5213       vtn_handle_texture(b, opcode, w, count);
5214       break;
5215 
5216    case SpvOpImageRead:
5217    case SpvOpImageWrite:
5218    case SpvOpImageTexelPointer:
5219    case SpvOpImageQueryFormat:
5220    case SpvOpImageQueryOrder:
5221       vtn_handle_image(b, opcode, w, count);
5222       break;
5223 
5224    case SpvOpImageQuerySizeLod:
5225    case SpvOpImageQuerySize: {
5226       struct vtn_type *image_type = vtn_get_value_type(b, w[3]);
5227       vtn_assert(image_type->base_type == vtn_base_type_image);
5228       if (glsl_type_is_image(image_type->glsl_image)) {
5229          vtn_handle_image(b, opcode, w, count);
5230       } else {
5231          vtn_assert(glsl_type_is_sampler(image_type->glsl_image));
5232          vtn_handle_texture(b, opcode, w, count);
5233       }
5234       break;
5235    }
5236 
5237    case SpvOpFragmentMaskFetchAMD:
5238    case SpvOpFragmentFetchAMD:
5239       vtn_handle_texture(b, opcode, w, count);
5240       break;
5241 
5242    case SpvOpAtomicLoad:
5243    case SpvOpAtomicExchange:
5244    case SpvOpAtomicCompareExchange:
5245    case SpvOpAtomicCompareExchangeWeak:
5246    case SpvOpAtomicIIncrement:
5247    case SpvOpAtomicIDecrement:
5248    case SpvOpAtomicIAdd:
5249    case SpvOpAtomicISub:
5250    case SpvOpAtomicSMin:
5251    case SpvOpAtomicUMin:
5252    case SpvOpAtomicSMax:
5253    case SpvOpAtomicUMax:
5254    case SpvOpAtomicAnd:
5255    case SpvOpAtomicOr:
5256    case SpvOpAtomicXor:
5257    case SpvOpAtomicFAddEXT: {
5258       struct vtn_value *pointer = vtn_untyped_value(b, w[3]);
5259       if (pointer->value_type == vtn_value_type_image_pointer) {
5260          vtn_handle_image(b, opcode, w, count);
5261       } else {
5262          vtn_assert(pointer->value_type == vtn_value_type_pointer);
5263          vtn_handle_atomics(b, opcode, w, count);
5264       }
5265       break;
5266    }
5267 
5268    case SpvOpAtomicStore: {
5269       struct vtn_value *pointer = vtn_untyped_value(b, w[1]);
5270       if (pointer->value_type == vtn_value_type_image_pointer) {
5271          vtn_handle_image(b, opcode, w, count);
5272       } else {
5273          vtn_assert(pointer->value_type == vtn_value_type_pointer);
5274          vtn_handle_atomics(b, opcode, w, count);
5275       }
5276       break;
5277    }
5278 
5279    case SpvOpSelect:
5280       vtn_handle_select(b, opcode, w, count);
5281       break;
5282 
5283    case SpvOpSNegate:
5284    case SpvOpFNegate:
5285    case SpvOpNot:
5286    case SpvOpAny:
5287    case SpvOpAll:
5288    case SpvOpConvertFToU:
5289    case SpvOpConvertFToS:
5290    case SpvOpConvertSToF:
5291    case SpvOpConvertUToF:
5292    case SpvOpUConvert:
5293    case SpvOpSConvert:
5294    case SpvOpFConvert:
5295    case SpvOpQuantizeToF16:
5296    case SpvOpSatConvertSToU:
5297    case SpvOpSatConvertUToS:
5298    case SpvOpPtrCastToGeneric:
5299    case SpvOpGenericCastToPtr:
5300    case SpvOpIsNan:
5301    case SpvOpIsInf:
5302    case SpvOpIsFinite:
5303    case SpvOpIsNormal:
5304    case SpvOpSignBitSet:
5305    case SpvOpLessOrGreater:
5306    case SpvOpOrdered:
5307    case SpvOpUnordered:
5308    case SpvOpIAdd:
5309    case SpvOpFAdd:
5310    case SpvOpISub:
5311    case SpvOpFSub:
5312    case SpvOpIMul:
5313    case SpvOpFMul:
5314    case SpvOpUDiv:
5315    case SpvOpSDiv:
5316    case SpvOpFDiv:
5317    case SpvOpUMod:
5318    case SpvOpSRem:
5319    case SpvOpSMod:
5320    case SpvOpFRem:
5321    case SpvOpFMod:
5322    case SpvOpVectorTimesScalar:
5323    case SpvOpDot:
5324    case SpvOpIAddCarry:
5325    case SpvOpISubBorrow:
5326    case SpvOpUMulExtended:
5327    case SpvOpSMulExtended:
5328    case SpvOpShiftRightLogical:
5329    case SpvOpShiftRightArithmetic:
5330    case SpvOpShiftLeftLogical:
5331    case SpvOpLogicalEqual:
5332    case SpvOpLogicalNotEqual:
5333    case SpvOpLogicalOr:
5334    case SpvOpLogicalAnd:
5335    case SpvOpLogicalNot:
5336    case SpvOpBitwiseOr:
5337    case SpvOpBitwiseXor:
5338    case SpvOpBitwiseAnd:
5339    case SpvOpIEqual:
5340    case SpvOpFOrdEqual:
5341    case SpvOpFUnordEqual:
5342    case SpvOpINotEqual:
5343    case SpvOpFOrdNotEqual:
5344    case SpvOpFUnordNotEqual:
5345    case SpvOpULessThan:
5346    case SpvOpSLessThan:
5347    case SpvOpFOrdLessThan:
5348    case SpvOpFUnordLessThan:
5349    case SpvOpUGreaterThan:
5350    case SpvOpSGreaterThan:
5351    case SpvOpFOrdGreaterThan:
5352    case SpvOpFUnordGreaterThan:
5353    case SpvOpULessThanEqual:
5354    case SpvOpSLessThanEqual:
5355    case SpvOpFOrdLessThanEqual:
5356    case SpvOpFUnordLessThanEqual:
5357    case SpvOpUGreaterThanEqual:
5358    case SpvOpSGreaterThanEqual:
5359    case SpvOpFOrdGreaterThanEqual:
5360    case SpvOpFUnordGreaterThanEqual:
5361    case SpvOpDPdx:
5362    case SpvOpDPdy:
5363    case SpvOpFwidth:
5364    case SpvOpDPdxFine:
5365    case SpvOpDPdyFine:
5366    case SpvOpFwidthFine:
5367    case SpvOpDPdxCoarse:
5368    case SpvOpDPdyCoarse:
5369    case SpvOpFwidthCoarse:
5370    case SpvOpBitFieldInsert:
5371    case SpvOpBitFieldSExtract:
5372    case SpvOpBitFieldUExtract:
5373    case SpvOpBitReverse:
5374    case SpvOpBitCount:
5375    case SpvOpTranspose:
5376    case SpvOpOuterProduct:
5377    case SpvOpMatrixTimesScalar:
5378    case SpvOpVectorTimesMatrix:
5379    case SpvOpMatrixTimesVector:
5380    case SpvOpMatrixTimesMatrix:
5381    case SpvOpUCountLeadingZerosINTEL:
5382    case SpvOpUCountTrailingZerosINTEL:
5383    case SpvOpAbsISubINTEL:
5384    case SpvOpAbsUSubINTEL:
5385    case SpvOpIAddSatINTEL:
5386    case SpvOpUAddSatINTEL:
5387    case SpvOpIAverageINTEL:
5388    case SpvOpUAverageINTEL:
5389    case SpvOpIAverageRoundedINTEL:
5390    case SpvOpUAverageRoundedINTEL:
5391    case SpvOpISubSatINTEL:
5392    case SpvOpUSubSatINTEL:
5393    case SpvOpIMul32x16INTEL:
5394    case SpvOpUMul32x16INTEL:
5395       vtn_handle_alu(b, opcode, w, count);
5396       break;
5397 
5398    case SpvOpBitcast:
5399       vtn_handle_bitcast(b, w, count);
5400       break;
5401 
5402    case SpvOpVectorExtractDynamic:
5403    case SpvOpVectorInsertDynamic:
5404    case SpvOpVectorShuffle:
5405    case SpvOpCompositeConstruct:
5406    case SpvOpCompositeExtract:
5407    case SpvOpCompositeInsert:
5408    case SpvOpCopyLogical:
5409    case SpvOpCopyObject:
5410       vtn_handle_composite(b, opcode, w, count);
5411       break;
5412 
5413    case SpvOpEmitVertex:
5414    case SpvOpEndPrimitive:
5415    case SpvOpEmitStreamVertex:
5416    case SpvOpEndStreamPrimitive:
5417    case SpvOpControlBarrier:
5418    case SpvOpMemoryBarrier:
5419       vtn_handle_barrier(b, opcode, w, count);
5420       break;
5421 
5422    case SpvOpGroupNonUniformElect:
5423    case SpvOpGroupNonUniformAll:
5424    case SpvOpGroupNonUniformAny:
5425    case SpvOpGroupNonUniformAllEqual:
5426    case SpvOpGroupNonUniformBroadcast:
5427    case SpvOpGroupNonUniformBroadcastFirst:
5428    case SpvOpGroupNonUniformBallot:
5429    case SpvOpGroupNonUniformInverseBallot:
5430    case SpvOpGroupNonUniformBallotBitExtract:
5431    case SpvOpGroupNonUniformBallotBitCount:
5432    case SpvOpGroupNonUniformBallotFindLSB:
5433    case SpvOpGroupNonUniformBallotFindMSB:
5434    case SpvOpGroupNonUniformShuffle:
5435    case SpvOpGroupNonUniformShuffleXor:
5436    case SpvOpGroupNonUniformShuffleUp:
5437    case SpvOpGroupNonUniformShuffleDown:
5438    case SpvOpGroupNonUniformIAdd:
5439    case SpvOpGroupNonUniformFAdd:
5440    case SpvOpGroupNonUniformIMul:
5441    case SpvOpGroupNonUniformFMul:
5442    case SpvOpGroupNonUniformSMin:
5443    case SpvOpGroupNonUniformUMin:
5444    case SpvOpGroupNonUniformFMin:
5445    case SpvOpGroupNonUniformSMax:
5446    case SpvOpGroupNonUniformUMax:
5447    case SpvOpGroupNonUniformFMax:
5448    case SpvOpGroupNonUniformBitwiseAnd:
5449    case SpvOpGroupNonUniformBitwiseOr:
5450    case SpvOpGroupNonUniformBitwiseXor:
5451    case SpvOpGroupNonUniformLogicalAnd:
5452    case SpvOpGroupNonUniformLogicalOr:
5453    case SpvOpGroupNonUniformLogicalXor:
5454    case SpvOpGroupNonUniformQuadBroadcast:
5455    case SpvOpGroupNonUniformQuadSwap:
5456    case SpvOpGroupAll:
5457    case SpvOpGroupAny:
5458    case SpvOpGroupBroadcast:
5459    case SpvOpGroupIAdd:
5460    case SpvOpGroupFAdd:
5461    case SpvOpGroupFMin:
5462    case SpvOpGroupUMin:
5463    case SpvOpGroupSMin:
5464    case SpvOpGroupFMax:
5465    case SpvOpGroupUMax:
5466    case SpvOpGroupSMax:
5467    case SpvOpSubgroupBallotKHR:
5468    case SpvOpSubgroupFirstInvocationKHR:
5469    case SpvOpSubgroupReadInvocationKHR:
5470    case SpvOpSubgroupAllKHR:
5471    case SpvOpSubgroupAnyKHR:
5472    case SpvOpSubgroupAllEqualKHR:
5473    case SpvOpGroupIAddNonUniformAMD:
5474    case SpvOpGroupFAddNonUniformAMD:
5475    case SpvOpGroupFMinNonUniformAMD:
5476    case SpvOpGroupUMinNonUniformAMD:
5477    case SpvOpGroupSMinNonUniformAMD:
5478    case SpvOpGroupFMaxNonUniformAMD:
5479    case SpvOpGroupUMaxNonUniformAMD:
5480    case SpvOpGroupSMaxNonUniformAMD:
5481    case SpvOpSubgroupShuffleINTEL:
5482    case SpvOpSubgroupShuffleDownINTEL:
5483    case SpvOpSubgroupShuffleUpINTEL:
5484    case SpvOpSubgroupShuffleXorINTEL:
5485       vtn_handle_subgroup(b, opcode, w, count);
5486       break;
5487 
5488    case SpvOpPtrDiff:
5489    case SpvOpPtrEqual:
5490    case SpvOpPtrNotEqual:
5491       vtn_handle_ptr(b, opcode, w, count);
5492       break;
5493 
5494    case SpvOpBeginInvocationInterlockEXT:
5495       vtn_emit_barrier(b, nir_intrinsic_begin_invocation_interlock);
5496       break;
5497 
5498    case SpvOpEndInvocationInterlockEXT:
5499       vtn_emit_barrier(b, nir_intrinsic_end_invocation_interlock);
5500       break;
5501 
5502    case SpvOpDemoteToHelperInvocationEXT: {
5503       nir_intrinsic_instr *intrin =
5504          nir_intrinsic_instr_create(b->shader, nir_intrinsic_demote);
5505       nir_builder_instr_insert(&b->nb, &intrin->instr);
5506       break;
5507    }
5508 
5509    case SpvOpIsHelperInvocationEXT: {
5510       nir_intrinsic_instr *intrin =
5511          nir_intrinsic_instr_create(b->shader, nir_intrinsic_is_helper_invocation);
5512       nir_ssa_dest_init(&intrin->instr, &intrin->dest, 1, 1, NULL);
5513       nir_builder_instr_insert(&b->nb, &intrin->instr);
5514 
5515       vtn_push_nir_ssa(b, w[2], &intrin->dest.ssa);
5516       break;
5517    }
5518 
5519    case SpvOpReadClockKHR: {
5520       SpvScope scope = vtn_constant_uint(b, w[3]);
5521       nir_scope nir_scope;
5522 
5523       switch (scope) {
5524       case SpvScopeDevice:
5525          nir_scope = NIR_SCOPE_DEVICE;
5526          break;
5527       case SpvScopeSubgroup:
5528          nir_scope = NIR_SCOPE_SUBGROUP;
5529          break;
5530       default:
5531          vtn_fail("invalid read clock scope");
5532       }
5533 
5534       /* Operation supports two result types: uvec2 and uint64_t.  The NIR
5535        * intrinsic gives uvec2, so pack the result for the other case.
5536        */
5537       nir_intrinsic_instr *intrin =
5538          nir_intrinsic_instr_create(b->nb.shader, nir_intrinsic_shader_clock);
5539       nir_ssa_dest_init(&intrin->instr, &intrin->dest, 2, 32, NULL);
5540       nir_intrinsic_set_memory_scope(intrin, nir_scope);
5541       nir_builder_instr_insert(&b->nb, &intrin->instr);
5542 
5543       struct vtn_type *type = vtn_get_type(b, w[1]);
5544       const struct glsl_type *dest_type = type->type;
5545       nir_ssa_def *result;
5546 
5547       if (glsl_type_is_vector(dest_type)) {
5548          assert(dest_type == glsl_vector_type(GLSL_TYPE_UINT, 2));
5549          result = &intrin->dest.ssa;
5550       } else {
5551          assert(glsl_type_is_scalar(dest_type));
5552          assert(glsl_get_base_type(dest_type) == GLSL_TYPE_UINT64);
5553          result = nir_pack_64_2x32(&b->nb, &intrin->dest.ssa);
5554       }
5555 
5556       vtn_push_nir_ssa(b, w[2], result);
5557       break;
5558    }
5559 
5560    case SpvOpTraceRayKHR:
5561    case SpvOpReportIntersectionKHR:
5562    case SpvOpIgnoreIntersectionKHR:
5563    case SpvOpTerminateRayKHR:
5564    case SpvOpExecuteCallableKHR:
5565       vtn_handle_ray_intrinsic(b, opcode, w, count);
5566       break;
5567 
5568    case SpvOpLifetimeStart:
5569    case SpvOpLifetimeStop:
5570       break;
5571 
5572    case SpvOpGroupAsyncCopy:
5573    case SpvOpGroupWaitEvents:
5574       vtn_handle_opencl_core_instruction(b, opcode, w, count);
5575       break;
5576 
5577    default:
5578       vtn_fail_with_opcode("Unhandled opcode", opcode);
5579    }
5580 
5581    return true;
5582 }
5583 
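/* Allocates a vtn_builder for the given SPIR-V binary: copies the caller's
 * options, validates the five-word header (magic, version, ID bound), and
 * records generator information used for workarounds such as the glslang
 * barrier fix-up.  Returns NULL on a malformed header.
 */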
5584 struct vtn_builder*
5585 vtn_create_builder(const uint32_t *words, size_t word_count,
5586                    gl_shader_stage stage, const char *entry_point_name,
5587                    const struct spirv_to_nir_options *options)
5588 {
5589    /* Initialize the vtn_builder object */
5590    struct vtn_builder *b = rzalloc(NULL, struct vtn_builder);
5591    struct spirv_to_nir_options *dup_options =
5592       ralloc(b, struct spirv_to_nir_options);
5593    *dup_options = *options;
5594 
5595    b->spirv = words;
5596    b->spirv_word_count = word_count;
5597    b->file = NULL;
5598    b->line = -1;
5599    b->col = -1;
5600    list_inithead(&b->functions);
5601    b->entry_point_stage = stage;
5602    b->entry_point_name = entry_point_name;
5603    b->options = dup_options;
5604 
5605    /*
5606     * Handle the SPIR-V header (first 5 dwords).
5607     * Can't use vtn_assert() as the setjmp(3) target isn't initialized yet.
5608     */
5609    if (word_count <= 5)
5610       goto fail;
5611 
5612    if (words[0] != SpvMagicNumber) {
5613       vtn_err("words[0] was 0x%x, want 0x%x", words[0], SpvMagicNumber);
5614       goto fail;
5615    }
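   /* words[1] holds the SPIR-V version: major in bits 16..23 and minor in
    * bits 8..15, so 0x10000 corresponds to version 1.0.
    */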
5616    if (words[1] < 0x10000) {
5617       vtn_err("words[1] was 0x%x, want >= 0x10000", words[1]);
5618       goto fail;
5619    }
5620 
5621    b->generator_id = words[2] >> 16;
5622    uint16_t generator_version = words[2];
5623 
5624    /* In GLSLang commit 8297936dd6eb3, their handling of barrier() was fixed
5625     * to provide correct memory semantics on compute shader barrier()
5626     * commands.  Prior to that, we need to fix them up ourselves.  This
5627     * GLSLang fix caused them to bump to generator version 3.
5628     */
5629    b->wa_glslang_cs_barrier =
5630       (b->generator_id == vtn_generator_glslang_reference_front_end &&
5631        generator_version < 3);
5632 
5633    /* words[2] == generator magic */
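   /* words[3] is the ID bound (every result ID is less than it) and words[4]
    * is the reserved schema word, which must be zero.
    */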
5634    unsigned value_id_bound = words[3];
5635    if (words[4] != 0) {
5636       vtn_err("words[4] was %u, want 0", words[4]);
5637       goto fail;
5638    }
5639 
5640    b->value_id_bound = value_id_bound;
5641    b->values = rzalloc_array(b, struct vtn_value, value_id_bound);
5642 
5643    if (b->options->environment == NIR_SPIRV_VULKAN)
5644       b->vars_used_indirectly = _mesa_pointer_set_create(b);
5645 
5646    return b;
5647  fail:
5648    ralloc_free(b);
5649    return NULL;
5650 }
5651 
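/* OpenCL kernel entry points take parameters, which the rest of the stack
 * does not expect on an entry point.  Wrap the real entry point in a new
 * parameterless function that exposes each kernel argument as a read-only
 * uniform variable, copies by-value (Function storage class) arguments into
 * function temporaries, and calls the original entry point.
 */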
5652 static nir_function *
5653 vtn_emit_kernel_entry_point_wrapper(struct vtn_builder *b,
5654                                     nir_function *entry_point)
5655 {
5656    vtn_assert(entry_point == b->entry_point->func->impl->function);
5657    vtn_fail_if(!entry_point->name, "entry points are required to have a name");
5658    const char *func_name =
5659       ralloc_asprintf(b->shader, "__wrapped_%s", entry_point->name);
5660 
5661    vtn_assert(b->shader->info.stage == MESA_SHADER_KERNEL);
5662 
5663    nir_function *main_entry_point = nir_function_create(b->shader, func_name);
5664    main_entry_point->impl = nir_function_impl_create(main_entry_point);
5665    nir_builder_init(&b->nb, main_entry_point->impl);
5666    b->nb.cursor = nir_after_cf_list(&main_entry_point->impl->body);
5667    b->func_param_idx = 0;
5668 
5669    nir_call_instr *call = nir_call_instr_create(b->nb.shader, entry_point);
5670 
5671    for (unsigned i = 0; i < entry_point->num_params; ++i) {
5672       struct vtn_type *param_type = b->entry_point->func->type->params[i];
5673 
5674       /* consider all pointers to function memory to be parameters passed
5675        * by value
5676        */
5677       bool is_by_val = param_type->base_type == vtn_base_type_pointer &&
5678          param_type->storage_class == SpvStorageClassFunction;
5679 
5680       /* input variable */
5681       nir_variable *in_var = rzalloc(b->nb.shader, nir_variable);
5682       in_var->data.mode = nir_var_uniform;
5683       in_var->data.read_only = true;
5684       in_var->data.location = i;
5685       if (param_type->base_type == vtn_base_type_image) {
5686          in_var->data.access =
5687             spirv_to_gl_access_qualifier(b, param_type->access_qualifier);
5688       }
5689 
5690       if (is_by_val)
5691          in_var->type = param_type->deref->type;
5692       else if (param_type->base_type == vtn_base_type_image)
5693          in_var->type = param_type->glsl_image;
5694       else if (param_type->base_type == vtn_base_type_sampler)
5695          in_var->type = glsl_bare_sampler_type();
5696       else
5697          in_var->type = param_type->type;
5698 
5699       nir_shader_add_variable(b->nb.shader, in_var);
5700 
5701       /* we have to copy the entire variable into function memory */
5702       if (is_by_val) {
5703          nir_variable *copy_var =
5704             nir_local_variable_create(main_entry_point->impl, in_var->type,
5705                                       "copy_in");
5706          nir_copy_var(&b->nb, copy_var, in_var);
5707          call->params[i] =
5708             nir_src_for_ssa(&nir_build_deref_var(&b->nb, copy_var)->dest.ssa);
5709       } else if (param_type->base_type == vtn_base_type_image ||
5710                  param_type->base_type == vtn_base_type_sampler) {
5711          /* Don't load the var, just pass a deref of it */
5712          call->params[i] = nir_src_for_ssa(&nir_build_deref_var(&b->nb, in_var)->dest.ssa);
5713       } else {
5714          call->params[i] = nir_src_for_ssa(nir_load_var(&b->nb, in_var));
5715       }
5716    }
5717 
5718    nir_builder_instr_insert(&b->nb, &call->instr);
5719 
5720    return main_entry_point;
5721 }
5722 
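/* Callback for nir_remove_dead_variables(): a variable may only be removed
 * if it is not in the vars_used_indirectly set built up while parsing.
 */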
5723 static bool
5724 can_remove(nir_variable *var, void *data)
5725 {
5726    const struct set *vars_used_indirectly = data;
5727    return !_mesa_set_search(vars_used_indirectly, var);
5728 }
5729 
5730 nir_shader *
5731 spirv_to_nir(const uint32_t *words, size_t word_count,
5732              struct nir_spirv_specialization *spec, unsigned num_spec,
5733              gl_shader_stage stage, const char *entry_point_name,
5734              const struct spirv_to_nir_options *options,
5735              const nir_shader_compiler_options *nir_options)
5736 
5737 {
5738    const uint32_t *word_end = words + word_count;
5739 
5740    struct vtn_builder *b = vtn_create_builder(words, word_count,
5741                                               stage, entry_point_name,
5742                                               options);
5743 
5744    if (b == NULL)
5745       return NULL;
5746 
5747    /* See also _vtn_fail(): errors longjmp back here so that a single ralloc_free(b) cleans up everything allocated under the builder. */
5748    if (setjmp(b->fail_jump)) {
5749       ralloc_free(b);
5750       return NULL;
5751    }
5752 
5753    /* Skip the SPIR-V header; it was already handled in vtn_create_builder(). */
5754    words += 5;
5755 
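   /* The nir_shader is created as a ralloc child of the builder; it is
    * stolen away (ralloc_steal) before the builder is freed below.
    */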
5756    b->shader = nir_shader_create(b, stage, nir_options, NULL);
5757 
5758    /* Handle all the preamble instructions */
5759    words = vtn_foreach_instruction(b, words, word_end,
5760                                    vtn_handle_preamble_instruction);
5761 
5762    /* DirectXShaderCompiler and glslang/shaderc both create OpKill from HLSL's
5763     * discard/clip, which uses demote semantics. DirectXShaderCompiler will use
5764     * demote if the extension is enabled, so we disable this workaround in that
5765     * case.
5766     *
5767     * Related glslang issue: https://github.com/KhronosGroup/glslang/issues/2416
5768     */
5769    bool glslang = b->generator_id == vtn_generator_glslang_reference_front_end ||
5770                   b->generator_id == vtn_generator_shaderc_over_glslang;
5771    bool dxsc = b->generator_id == vtn_generator_spiregg;
5772    b->convert_discard_to_demote = ((dxsc && !b->uses_demote_to_helper_invocation) ||
5773                                    (glslang && b->source_lang == SpvSourceLanguageHLSL)) &&
5774                                   options->caps.demote_to_helper_invocation;
5775 
5776    if (!options->create_library && b->entry_point == NULL) {
5777       vtn_fail("Entry point not found for %s shader \"%s\"",
5778                _mesa_shader_stage_to_string(stage), entry_point_name);
5779       ralloc_free(b);
5780       return NULL;
5781    }
5782 
5783    /* Ensure a sane address mode is being used for function temps */
5784    assert(nir_address_format_bit_size(b->options->temp_addr_format) == nir_get_ptr_bitsize(b->shader));
5785    assert(nir_address_format_num_components(b->options->temp_addr_format) == 1);
5786 
5787    /* Set shader info defaults */
5788    if (stage == MESA_SHADER_GEOMETRY)
5789       b->shader->info.gs.invocations = 1;
5790 
5791    /* Parse execution modes. */
5792    if (!options->create_library)
5793       vtn_foreach_execution_mode(b, b->entry_point,
5794                                  vtn_handle_execution_mode, NULL);
5795 
5796    b->specializations = spec;
5797    b->num_specializations = num_spec;
5798 
5799    /* Handle all variable, type, and constant instructions */
5800    words = vtn_foreach_instruction(b, words, word_end,
5801                                    vtn_handle_variable_or_type_instruction);
5802 
5803    /* Parse execution modes that depend on IDs.  This must happen after the
5804     * constants have been parsed.
5805     */
5806    if (!options->create_library)
5807       vtn_foreach_execution_mode(b, b->entry_point,
5808                                  vtn_handle_execution_mode_id, NULL);
5809 
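   /* A constant decorated with the WorkgroupSize built-in overrides any
    * LocalSize execution mode, so copy its components into the shader info.
    */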
5810    if (b->workgroup_size_builtin) {
5811       vtn_assert(b->workgroup_size_builtin->type->type ==
5812                  glsl_vector_type(GLSL_TYPE_UINT, 3));
5813 
5814       nir_const_value *const_size =
5815          b->workgroup_size_builtin->constant->values;
5816 
5817       b->shader->info.cs.local_size[0] = const_size[0].u32;
5818       b->shader->info.cs.local_size[1] = const_size[1].u32;
5819       b->shader->info.cs.local_size[2] = const_size[2].u32;
5820    }
5821 
5822    /* Set types on all vtn_values */
5823    vtn_foreach_instruction(b, words, word_end, vtn_set_instruction_result_type);
5824 
5825    vtn_build_cfg(b, words, word_end);
5826 
5827    if (!options->create_library) {
5828       assert(b->entry_point->value_type == vtn_value_type_function);
5829       b->entry_point->func->referenced = true;
5830    }
5831 
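   /* Emit function bodies until we reach a fixed point: emitting a body can
    * hit OpFunctionCall, which marks the callee as referenced and may require
    * another pass over the function list.
    */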
5832    bool progress;
5833    do {
5834       progress = false;
5835       vtn_foreach_cf_node(node, &b->functions) {
5836          struct vtn_function *func = vtn_cf_node_as_function(node);
5837          if ((options->create_library || func->referenced) && !func->emitted) {
5838             b->const_table = _mesa_pointer_hash_table_create(b);
5839 
5840             vtn_function_emit(b, func, vtn_handle_body_instruction);
5841             progress = true;
5842          }
5843       }
5844    } while (progress);
5845 
5846    if (!options->create_library) {
5847       vtn_assert(b->entry_point->value_type == vtn_value_type_function);
5848       nir_function *entry_point = b->entry_point->func->impl->function;
5849       vtn_assert(entry_point);
5850 
5851       /* Post-process entry points that have input parameters. */
5852       if (entry_point->num_params && b->shader->info.stage == MESA_SHADER_KERNEL)
5853          entry_point = vtn_emit_kernel_entry_point_wrapper(b, entry_point);
5854 
5855       entry_point->is_entrypoint = true;
5856    }
5857 
5858    /* structurize the CFG */
5859    nir_lower_goto_ifs(b->shader);
5860 
5861    /* A SPIR-V module can have multiple shader stages and also multiple
5862     * shaders of the same stage.  Global variables are declared per-module, so
5863     * they are all collected when parsing a single shader.  These dead
5864     * variables can result in invalid NIR, e.g.
5865     *
5866     * - TCS outputs must be per-vertex arrays (or decorated 'patch'), while VS
5867     *   output variables wouldn't be;
5868     * - Two vertex shaders have two different typed blocks associated with the
5869     *   same Binding.
5870     *
5871     * Before cleaning the dead variables, we must lower any constant
5872     * initializers on outputs so nir_remove_dead_variables sees that they're
5873     * written to.
5874     */
5875    nir_lower_variable_initializers(b->shader, nir_var_shader_out |
5876                                               nir_var_system_value);
5877    const nir_remove_dead_variables_options dead_opts = {
5878       .can_remove_var = can_remove,
5879       .can_remove_var_data = b->vars_used_indirectly,
5880    };
5881    nir_remove_dead_variables(b->shader, ~nir_var_function_temp,
5882                              b->vars_used_indirectly ? &dead_opts : NULL);
5883 
5884    /* We sometimes generate bogus derefs that, while never used, give the
5885     * validator a bit of heartburn.  Run dead code to get rid of them.
5886     */
5887    nir_opt_dce(b->shader);
5888 
5889    /* Unparent the shader from the vtn_builder before we delete the builder */
5890    ralloc_steal(NULL, b->shader);
5891 
5892    nir_shader *shader = b->shader;
5893    ralloc_free(b);
5894 
5895    return shader;
5896 }
5897