/*
 * Copyright © 2014-2015 Broadcom
 * Copyright (C) 2014 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "util/blob.h"
#include "util/disk_cache.h"
#include "util/u_memory.h"
#include "util/ralloc.h"
#include "pipe/p_screen.h"

#include "compiler/nir/nir.h"
#include "compiler/nir/nir_control_flow.h"
#include "compiler/nir/nir_builder.h"
#include "compiler/nir/nir_serialize.h"
#include "compiler/shader_enums.h"

#include "tgsi_to_nir.h"
#include "tgsi/tgsi_parse.h"
#include "tgsi/tgsi_dump.h"
#include "tgsi/tgsi_info.h"
#include "tgsi/tgsi_scan.h"
#include "tgsi/tgsi_from_mesa.h"

#define SWIZ(X, Y, Z, W) (unsigned[4]){      \
      TGSI_SWIZZLE_##X,                      \
      TGSI_SWIZZLE_##Y,                      \
      TGSI_SWIZZLE_##Z,                      \
      TGSI_SWIZZLE_##W,                      \
   }

struct ttn_reg_info {
   /** nir register containing this TGSI index. */
   nir_register *reg;
   nir_variable *var;
   /** Offset (in vec4s) from the start of var for this TGSI index. */
   int offset;
};

struct ttn_compile {
   union tgsi_full_token *token;
   nir_builder build;
   struct tgsi_shader_info *scan;

   struct ttn_reg_info *output_regs;
   struct ttn_reg_info *temp_regs;
   nir_ssa_def **imm_defs;

   unsigned num_samp_types;
   nir_alu_type *samp_types;

   nir_register *addr_reg;

   nir_variable **inputs;
   nir_variable **outputs;
   nir_variable *samplers[PIPE_MAX_SAMPLERS];
   nir_variable *images[PIPE_MAX_SHADER_IMAGES];
   nir_variable *ssbo[PIPE_MAX_SHADER_BUFFERS];
   uint32_t ubo_sizes[PIPE_MAX_CONSTANT_BUFFERS];

   unsigned num_samplers;
   unsigned num_images;
   unsigned num_msaa_images;

   nir_variable *input_var_face;
   nir_variable *input_var_position;
   nir_variable *input_var_point;

   /* How many TGSI_FILE_IMMEDIATE vec4s have been parsed so far. */
   unsigned next_imm;

   bool cap_face_is_sysval;
   bool cap_position_is_sysval;
   bool cap_point_is_sysval;
   bool cap_samplers_as_deref;
};

#define ttn_swizzle(b, src, x, y, z, w) \
   nir_swizzle(b, src, SWIZ(x, y, z, w), 4)
#define ttn_channel(b, src, swiz) \
   nir_channel(b, src, TGSI_SWIZZLE_##swiz)
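
/* For example, ttn_swizzle(b, src, W, Z, Y, X) reverses the components of a
 * vec4 (TGSI_SWIZZLE_X..W map to component indices 0..3), and
 * ttn_channel(b, src, X) extracts just the scalar x component.
 */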

gl_varying_slot
tgsi_varying_semantic_to_slot(unsigned semantic, unsigned index)
{
   switch (semantic) {
   case TGSI_SEMANTIC_POSITION:
      return VARYING_SLOT_POS;
   case TGSI_SEMANTIC_COLOR:
      if (index == 0)
         return VARYING_SLOT_COL0;
      else
         return VARYING_SLOT_COL1;
   case TGSI_SEMANTIC_BCOLOR:
      if (index == 0)
         return VARYING_SLOT_BFC0;
      else
         return VARYING_SLOT_BFC1;
   case TGSI_SEMANTIC_FOG:
      return VARYING_SLOT_FOGC;
   case TGSI_SEMANTIC_PSIZE:
      return VARYING_SLOT_PSIZ;
   case TGSI_SEMANTIC_GENERIC:
      assert(index < 32);
      return VARYING_SLOT_VAR0 + index;
   case TGSI_SEMANTIC_FACE:
      return VARYING_SLOT_FACE;
   case TGSI_SEMANTIC_EDGEFLAG:
      return VARYING_SLOT_EDGE;
   case TGSI_SEMANTIC_PRIMID:
      return VARYING_SLOT_PRIMITIVE_ID;
   case TGSI_SEMANTIC_CLIPDIST:
      if (index == 0)
         return VARYING_SLOT_CLIP_DIST0;
      else
         return VARYING_SLOT_CLIP_DIST1;
   case TGSI_SEMANTIC_CLIPVERTEX:
      return VARYING_SLOT_CLIP_VERTEX;
   case TGSI_SEMANTIC_TEXCOORD:
      assert(index < 8);
      return VARYING_SLOT_TEX0 + index;
   case TGSI_SEMANTIC_PCOORD:
      return VARYING_SLOT_PNTC;
   case TGSI_SEMANTIC_VIEWPORT_INDEX:
      return VARYING_SLOT_VIEWPORT;
   case TGSI_SEMANTIC_LAYER:
      return VARYING_SLOT_LAYER;
   case TGSI_SEMANTIC_TESSINNER:
      return VARYING_SLOT_TESS_LEVEL_INNER;
   case TGSI_SEMANTIC_TESSOUTER:
      return VARYING_SLOT_TESS_LEVEL_OUTER;
   default:
      fprintf(stderr, "Bad TGSI semantic: %d/%d\n", semantic, index);
      abort();
   }
}

static enum gl_frag_depth_layout
ttn_get_depth_layout(unsigned tgsi_fs_depth_layout)
{
   switch (tgsi_fs_depth_layout) {
   case TGSI_FS_DEPTH_LAYOUT_NONE:
      return FRAG_DEPTH_LAYOUT_NONE;
   case TGSI_FS_DEPTH_LAYOUT_ANY:
      return FRAG_DEPTH_LAYOUT_ANY;
   case TGSI_FS_DEPTH_LAYOUT_GREATER:
      return FRAG_DEPTH_LAYOUT_GREATER;
   case TGSI_FS_DEPTH_LAYOUT_LESS:
      return FRAG_DEPTH_LAYOUT_LESS;
   case TGSI_FS_DEPTH_LAYOUT_UNCHANGED:
      return FRAG_DEPTH_LAYOUT_UNCHANGED;
   default:
      unreachable("bad TGSI FS depth layout");
   }
}

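/* Build an ALU source that reads back the SSA def or register referenced by
 * an ALU destination (with an identity swizzle), emitted as a plain mov.
 */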
static nir_ssa_def *
ttn_src_for_dest(nir_builder *b, nir_alu_dest *dest)
{
   nir_alu_src src;
   memset(&src, 0, sizeof(src));

   if (dest->dest.is_ssa)
      src.src = nir_src_for_ssa(&dest->dest.ssa);
   else {
      assert(!dest->dest.reg.indirect);
      src.src = nir_src_for_reg(dest->dest.reg.reg);
      src.src.reg.base_offset = dest->dest.reg.base_offset;
   }

   for (int i = 0; i < 4; i++)
      src.swizzle[i] = i;

   return nir_mov_alu(b, src, 4);
}

static enum glsl_interp_mode
ttn_translate_interp_mode(unsigned tgsi_interp)
{
   switch (tgsi_interp) {
   case TGSI_INTERPOLATE_CONSTANT:
      return INTERP_MODE_FLAT;
   case TGSI_INTERPOLATE_LINEAR:
      return INTERP_MODE_NOPERSPECTIVE;
   case TGSI_INTERPOLATE_PERSPECTIVE:
      return INTERP_MODE_SMOOTH;
   case TGSI_INTERPOLATE_COLOR:
      return INTERP_MODE_NONE;
   default:
      unreachable("bad TGSI interpolation mode");
   }
}

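/* Translate one TGSI declaration: temporaries become NIR local registers (or
 * a vec4-array variable when declared as a TGSI array), the address file gets
 * its own register, sampler views record their return type, and
 * inputs/outputs/constants become NIR variables.
 */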
static void
ttn_emit_declaration(struct ttn_compile *c)
{
   nir_builder *b = &c->build;
   struct tgsi_full_declaration *decl = &c->token->FullDeclaration;
   unsigned array_size = decl->Range.Last - decl->Range.First + 1;
   unsigned file = decl->Declaration.File;
   unsigned i;

   if (file == TGSI_FILE_TEMPORARY) {
      if (decl->Declaration.Array) {
         /* for arrays, we create variables instead of registers: */
         nir_variable *var =
            nir_variable_create(b->shader, nir_var_shader_temp,
                                glsl_array_type(glsl_vec4_type(), array_size, 0),
                                ralloc_asprintf(b->shader, "arr_%d",
                                                decl->Array.ArrayID));

         for (i = 0; i < array_size; i++) {
            /* point all the matching slots to the same var,
             * with appropriate offset set, mostly just so
             * we know what to do when tgsi does a non-indirect
             * access
             */
            c->temp_regs[decl->Range.First + i].reg = NULL;
            c->temp_regs[decl->Range.First + i].var = var;
            c->temp_regs[decl->Range.First + i].offset = i;
         }
      } else {
         for (i = 0; i < array_size; i++) {
            nir_register *reg = nir_local_reg_create(b->impl);
            reg->num_components = 4;
            c->temp_regs[decl->Range.First + i].reg = reg;
            c->temp_regs[decl->Range.First + i].var = NULL;
            c->temp_regs[decl->Range.First + i].offset = 0;
         }
      }
   } else if (file == TGSI_FILE_ADDRESS) {
      c->addr_reg = nir_local_reg_create(b->impl);
      c->addr_reg->num_components = 4;
   } else if (file == TGSI_FILE_SYSTEM_VALUE) {
      /* Nothing to record for system values. */
   } else if (file == TGSI_FILE_BUFFER) {
      /* Nothing to record for buffers. */
   } else if (file == TGSI_FILE_IMAGE) {
      /* Nothing to record for images. */
   } else if (file == TGSI_FILE_SAMPLER) {
      /* Nothing to record for samplers. */
   } else if (file == TGSI_FILE_SAMPLER_VIEW) {
      struct tgsi_declaration_sampler_view *sview = &decl->SamplerView;
      nir_alu_type type;

      assert((sview->ReturnTypeX == sview->ReturnTypeY) &&
             (sview->ReturnTypeX == sview->ReturnTypeZ) &&
             (sview->ReturnTypeX == sview->ReturnTypeW));

      switch (sview->ReturnTypeX) {
      case TGSI_RETURN_TYPE_SINT:
         type = nir_type_int;
         break;
      case TGSI_RETURN_TYPE_UINT:
         type = nir_type_uint;
         break;
      case TGSI_RETURN_TYPE_FLOAT:
      default:
         type = nir_type_float;
         break;
      }

      for (i = 0; i < array_size; i++) {
         c->samp_types[decl->Range.First + i] = type;
      }
   } else {
      bool is_array = (array_size > 1);

      assert(file == TGSI_FILE_INPUT ||
             file == TGSI_FILE_OUTPUT ||
             file == TGSI_FILE_CONSTANT);

      /* nothing to do for UBOs: */
      if ((file == TGSI_FILE_CONSTANT) && decl->Declaration.Dimension &&
          decl->Dim.Index2D != 0) {
         b->shader->info.num_ubos =
            MAX2(b->shader->info.num_ubos, decl->Dim.Index2D);
         c->ubo_sizes[decl->Dim.Index2D] =
            MAX2(c->ubo_sizes[decl->Dim.Index2D], decl->Range.Last * 16);
         return;
      }

      if ((file == TGSI_FILE_INPUT) || (file == TGSI_FILE_OUTPUT)) {
         is_array = (is_array && decl->Declaration.Array &&
                     (decl->Array.ArrayID != 0));
      }

      for (i = 0; i < array_size; i++) {
         unsigned idx = decl->Range.First + i;
         nir_variable *var = rzalloc(b->shader, nir_variable);

         var->data.driver_location = idx;

         var->type = glsl_vec4_type();
         if (is_array)
            var->type = glsl_array_type(var->type, array_size, 0);

         switch (file) {
         case TGSI_FILE_INPUT:
            var->data.read_only = true;
            var->data.mode = nir_var_shader_in;
            var->name = ralloc_asprintf(var, "in_%d", idx);

            if (c->scan->processor == PIPE_SHADER_FRAGMENT) {
               if (decl->Semantic.Name == TGSI_SEMANTIC_FACE) {
                  var->type = glsl_bool_type();
                  if (c->cap_face_is_sysval) {
                     var->data.mode = nir_var_system_value;
                     var->data.location = SYSTEM_VALUE_FRONT_FACE;
                  } else {
                     var->data.location = VARYING_SLOT_FACE;
                  }
                  c->input_var_face = var;
               } else if (decl->Semantic.Name == TGSI_SEMANTIC_POSITION) {
                  if (c->cap_position_is_sysval) {
                     var->data.mode = nir_var_system_value;
                     var->data.location = SYSTEM_VALUE_FRAG_COORD;
                  } else {
                     var->data.location = VARYING_SLOT_POS;
                  }
                  c->input_var_position = var;
               } else if (decl->Semantic.Name == TGSI_SEMANTIC_PCOORD) {
                  if (c->cap_point_is_sysval) {
                     var->data.mode = nir_var_system_value;
                     var->data.location = SYSTEM_VALUE_POINT_COORD;
                  } else {
                     var->data.location = VARYING_SLOT_PNTC;
                  }
                  c->input_var_point = var;
               } else {
                  var->data.location =
                     tgsi_varying_semantic_to_slot(decl->Semantic.Name,
                                                   decl->Semantic.Index);
               }
            } else {
               assert(!decl->Declaration.Semantic);
               var->data.location = VERT_ATTRIB_GENERIC0 + idx;
            }
            var->data.index = 0;
            var->data.interpolation =
               ttn_translate_interp_mode(decl->Interp.Interpolate);

            c->inputs[idx] = var;

            for (int i = 0; i < array_size; i++)
               b->shader->info.inputs_read |= 1ull << (var->data.location + i);

            break;
         case TGSI_FILE_OUTPUT: {
            int semantic_name = decl->Semantic.Name;
            int semantic_index = decl->Semantic.Index;
            /* Since we can't load from outputs in the IR, we make temporaries
             * for the outputs and emit stores to the real outputs at the end of
             * the shader.
             */
            nir_register *reg = nir_local_reg_create(b->impl);
            reg->num_components = 4;
            if (is_array)
               reg->num_array_elems = array_size;

            var->data.mode = nir_var_shader_out;
            var->name = ralloc_asprintf(var, "out_%d", idx);
            var->data.index = 0;
            var->data.interpolation =
               ttn_translate_interp_mode(decl->Interp.Interpolate);
            var->data.patch = semantic_name == TGSI_SEMANTIC_TESSINNER ||
                              semantic_name == TGSI_SEMANTIC_TESSOUTER ||
                              semantic_name == TGSI_SEMANTIC_PATCH;

            if (c->scan->processor == PIPE_SHADER_FRAGMENT) {
               switch (semantic_name) {
               case TGSI_SEMANTIC_COLOR: {
                  /* TODO tgsi loses some information, so we cannot
                   * actually differentiate here between DSB and MRT
                   * at this point.  But so far no drivers using tgsi-
                   * to-nir support dual source blend:
                   */
                  bool dual_src_blend = false;
                  if (dual_src_blend && (semantic_index == 1)) {
                     var->data.location = FRAG_RESULT_DATA0;
                     var->data.index = 1;
                  } else {
                     if (c->scan->properties[TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS])
                        var->data.location = FRAG_RESULT_COLOR;
                     else
                        var->data.location = FRAG_RESULT_DATA0 + semantic_index;
                  }
                  break;
               }
               case TGSI_SEMANTIC_POSITION:
                  var->data.location = FRAG_RESULT_DEPTH;
                  var->type = glsl_float_type();
                  break;
               case TGSI_SEMANTIC_STENCIL:
                  var->data.location = FRAG_RESULT_STENCIL;
                  var->type = glsl_int_type();
                  break;
               default:
                  fprintf(stderr, "Bad TGSI semantic: %d/%d\n",
                          decl->Semantic.Name, decl->Semantic.Index);
                  abort();
               }
            } else {
               var->data.location =
                  tgsi_varying_semantic_to_slot(semantic_name, semantic_index);
               if (var->data.location == VARYING_SLOT_FOGC ||
                   var->data.location == VARYING_SLOT_PSIZ) {
                  var->type = glsl_float_type();
               }
            }

            if (is_array) {
               unsigned j;
               for (j = 0; j < array_size; j++) {
                  c->output_regs[idx + j].offset = i + j;
                  c->output_regs[idx + j].reg = reg;
               }
            } else {
               c->output_regs[idx].offset = i;
               c->output_regs[idx].reg = reg;
            }

            c->outputs[idx] = var;

            for (int i = 0; i < array_size; i++)
               b->shader->info.outputs_written |= 1ull << (var->data.location + i);
         }
            break;
         case TGSI_FILE_CONSTANT:
            var->data.mode = nir_var_uniform;
            var->name = ralloc_asprintf(var, "uniform_%d", idx);
            var->data.location = idx;
            break;
         default:
            unreachable("bad declaration file");
            return;
         }

         nir_shader_add_variable(b->shader, var);

         if (is_array)
            break;
      }

   }
}

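/* Each TGSI immediate is materialized as a single 32-bit vec4 load_const and
 * remembered in imm_defs so later source operands can reference it by index.
 */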
static void
ttn_emit_immediate(struct ttn_compile *c)
{
   nir_builder *b = &c->build;
   struct tgsi_full_immediate *tgsi_imm = &c->token->FullImmediate;
   nir_load_const_instr *load_const;
   int i;

   load_const = nir_load_const_instr_create(b->shader, 4, 32);
   c->imm_defs[c->next_imm] = &load_const->def;
   c->next_imm++;

   for (i = 0; i < load_const->def.num_components; i++)
      load_const->value[i].u32 = tgsi_imm->u[i].Uint;

   nir_builder_instr_insert(b, &load_const->instr);
}

static nir_ssa_def *
ttn_src_for_indirect(struct ttn_compile *c, struct tgsi_ind_register *indirect);

/* generate either a constant or indirect deref chain for accessing an
 * array variable.
 */
static nir_deref_instr *
ttn_array_deref(struct ttn_compile *c, nir_variable *var, unsigned offset,
                struct tgsi_ind_register *indirect)
{
   nir_deref_instr *deref = nir_build_deref_var(&c->build, var);
   nir_ssa_def *index = nir_imm_int(&c->build, offset);
   if (indirect)
      index = nir_iadd(&c->build, index, ttn_src_for_indirect(c, indirect));
   return nir_build_deref_array(&c->build, deref, index);
}

/* Special case: Turn the frontface varying into a load of the
 * frontface variable, and create the vector as required by TGSI.
 */
static nir_ssa_def *
ttn_emulate_tgsi_front_face(struct ttn_compile *c)
{
   nir_ssa_def *tgsi_frontface[4];

   if (c->cap_face_is_sysval) {
      /* When it's a system value, it should be an integer vector: (F, 0, 0, 1)
       * F is 0xffffffff if front-facing, 0 if not.
       */

      nir_ssa_def *frontface = nir_load_front_face(&c->build, 1);

      tgsi_frontface[0] = nir_bcsel(&c->build,
                             frontface,
                             nir_imm_int(&c->build, 0xffffffff),
                             nir_imm_int(&c->build, 0));
      tgsi_frontface[1] = nir_imm_int(&c->build, 0);
      tgsi_frontface[2] = nir_imm_int(&c->build, 0);
      tgsi_frontface[3] = nir_imm_int(&c->build, 1);
   } else {
      /* When it's an input, it should be a float vector: (F, 0.0, 0.0, 1.0)
       * F is positive if front-facing, negative if not.
       */

      assert(c->input_var_face);
      nir_ssa_def *frontface = nir_load_var(&c->build, c->input_var_face);

      tgsi_frontface[0] = nir_bcsel(&c->build,
                             frontface,
                             nir_imm_float(&c->build, 1.0),
                             nir_imm_float(&c->build, -1.0));
      tgsi_frontface[1] = nir_imm_float(&c->build, 0.0);
      tgsi_frontface[2] = nir_imm_float(&c->build, 0.0);
      tgsi_frontface[3] = nir_imm_float(&c->build, 1.0);
   }

   return nir_vec(&c->build, tgsi_frontface, 4);
}

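/* Translate a TGSI source register file and index (plus any indirect
 * addressing and 2D dimension info) into a nir_src: temporaries and the
 * address register map to NIR registers or array derefs, immediates to the
 * saved load_const defs, system values and inputs to load intrinsics or
 * variable loads, and constants to load_uniform/load_ubo intrinsics.
 */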
static nir_src
ttn_src_for_file_and_index(struct ttn_compile *c, unsigned file, unsigned index,
                           struct tgsi_ind_register *indirect,
                           struct tgsi_dimension *dim,
                           struct tgsi_ind_register *dimind,
                           bool src_is_float)
{
   nir_builder *b = &c->build;
   nir_src src;

   memset(&src, 0, sizeof(src));

   switch (file) {
   case TGSI_FILE_TEMPORARY:
      if (c->temp_regs[index].var) {
         unsigned offset = c->temp_regs[index].offset;
         nir_variable *var = c->temp_regs[index].var;
         nir_ssa_def *load = nir_load_deref(&c->build,
               ttn_array_deref(c, var, offset, indirect));

         src = nir_src_for_ssa(load);
      } else {
         assert(!indirect);
         src.reg.reg = c->temp_regs[index].reg;
      }
      assert(!dim);
      break;

   case TGSI_FILE_ADDRESS:
      src.reg.reg = c->addr_reg;
      assert(!dim);
      break;

   case TGSI_FILE_IMMEDIATE:
      src = nir_src_for_ssa(c->imm_defs[index]);
      assert(!indirect);
      assert(!dim);
      break;

   case TGSI_FILE_SYSTEM_VALUE: {
      nir_intrinsic_op op;
      nir_ssa_def *load;

      assert(!indirect);
      assert(!dim);

      switch (c->scan->system_value_semantic_name[index]) {
      case TGSI_SEMANTIC_VERTEXID_NOBASE:
         op = nir_intrinsic_load_vertex_id_zero_base;
         load = nir_load_vertex_id_zero_base(b);
         break;
      case TGSI_SEMANTIC_VERTEXID:
         op = nir_intrinsic_load_vertex_id;
         load = nir_load_vertex_id(b);
         break;
      case TGSI_SEMANTIC_BASEVERTEX:
         op = nir_intrinsic_load_base_vertex;
         load = nir_load_base_vertex(b);
         break;
      case TGSI_SEMANTIC_INSTANCEID:
         op = nir_intrinsic_load_instance_id;
         load = nir_load_instance_id(b);
         break;
      case TGSI_SEMANTIC_FACE:
         assert(c->cap_face_is_sysval);
         op = nir_intrinsic_load_front_face;
         load = ttn_emulate_tgsi_front_face(c);
         break;
      case TGSI_SEMANTIC_POSITION:
         assert(c->cap_position_is_sysval);
         op = nir_intrinsic_load_frag_coord;
         load = nir_load_frag_coord(b);
         break;
      case TGSI_SEMANTIC_PCOORD:
         assert(c->cap_point_is_sysval);
         op = nir_intrinsic_load_point_coord;
         load = nir_load_point_coord(b);
         break;
      case TGSI_SEMANTIC_THREAD_ID:
         op = nir_intrinsic_load_local_invocation_id;
         load = nir_load_local_invocation_id(b);
         break;
      case TGSI_SEMANTIC_BLOCK_ID:
         op = nir_intrinsic_load_work_group_id;
         load = nir_load_work_group_id(b, 32);
         break;
      case TGSI_SEMANTIC_BLOCK_SIZE:
         op = nir_intrinsic_load_local_group_size;
         load = nir_load_local_group_size(b);
         break;
      case TGSI_SEMANTIC_CS_USER_DATA_AMD:
         op = nir_intrinsic_load_user_data_amd;
         load = nir_load_user_data_amd(b);
         break;
      case TGSI_SEMANTIC_TESS_DEFAULT_INNER_LEVEL:
         op = nir_intrinsic_load_tess_level_inner_default;
         load = nir_load_tess_level_inner_default(b);
         break;
      case TGSI_SEMANTIC_TESS_DEFAULT_OUTER_LEVEL:
         op = nir_intrinsic_load_tess_level_outer_default;
         load = nir_load_tess_level_outer_default(b);
         break;
      default:
         unreachable("bad system value");
      }

      if (load->num_components == 2)
         load = nir_swizzle(b, load, SWIZ(X, Y, Y, Y), 4);
      else if (load->num_components == 3)
         load = nir_swizzle(b, load, SWIZ(X, Y, Z, Z), 4);

      src = nir_src_for_ssa(load);
      BITSET_SET(b->shader->info.system_values_read,
                 nir_system_value_from_intrinsic(op));

      break;
   }

   case TGSI_FILE_INPUT:
      if (c->scan->processor == PIPE_SHADER_FRAGMENT &&
          c->scan->input_semantic_name[index] == TGSI_SEMANTIC_FACE) {
         assert(!c->cap_face_is_sysval && c->input_var_face);
         return nir_src_for_ssa(ttn_emulate_tgsi_front_face(c));
      } else if (c->scan->processor == PIPE_SHADER_FRAGMENT &&
          c->scan->input_semantic_name[index] == TGSI_SEMANTIC_POSITION) {
         assert(!c->cap_position_is_sysval && c->input_var_position);
         return nir_src_for_ssa(nir_load_var(&c->build, c->input_var_position));
      } else if (c->scan->processor == PIPE_SHADER_FRAGMENT &&
          c->scan->input_semantic_name[index] == TGSI_SEMANTIC_PCOORD) {
         assert(!c->cap_point_is_sysval && c->input_var_point);
         return nir_src_for_ssa(nir_load_var(&c->build, c->input_var_point));
      } else {
         /* Indirection on input arrays isn't supported by TTN. */
         assert(!dim);
         nir_deref_instr *deref = nir_build_deref_var(&c->build,
                                                      c->inputs[index]);
         return nir_src_for_ssa(nir_load_deref(&c->build, deref));
      }
      break;

   case TGSI_FILE_CONSTANT: {
      nir_intrinsic_instr *load;
      nir_intrinsic_op op;
      unsigned srcn = 0;

      if (dim && (dim->Index > 0 || dim->Indirect)) {
         op = nir_intrinsic_load_ubo;
      } else {
         op = nir_intrinsic_load_uniform;
      }

      load = nir_intrinsic_instr_create(b->shader, op);
      if (op == nir_intrinsic_load_uniform) {
         nir_intrinsic_set_dest_type(load, src_is_float ? nir_type_float :
                                                          nir_type_int);
      }

      load->num_components = 4;
      if (dim && (dim->Index > 0 || dim->Indirect)) {
         if (dimind) {
            load->src[srcn] =
               ttn_src_for_file_and_index(c, dimind->File, dimind->Index,
                                          NULL, NULL, NULL, false);
         } else {
            /* UBOs start at index 1 in TGSI: */
            load->src[srcn] =
               nir_src_for_ssa(nir_imm_int(b, dim->Index - 1));
         }
         srcn++;
      }

      nir_ssa_def *offset;
      if (op == nir_intrinsic_load_ubo) {
         /* UBO loads don't have a base offset. */
         offset = nir_imm_int(b, index);
         if (indirect) {
            offset = nir_iadd(b, offset, ttn_src_for_indirect(c, indirect));
         }
         /* UBO offsets are in bytes, but TGSI gives them to us in vec4's */
         offset = nir_ishl(b, offset, nir_imm_int(b, 4));
         nir_intrinsic_set_align(load, 16, 0);

         /* Set a very conservative base/range of the access: 16 bytes if not
          * indirect at all, offset to the end of the UBO if the offset is
          * indirect, and totally unknown if the block number is indirect.
          */
         uint32_t base = index * 16;
         nir_intrinsic_set_range_base(load, base);
         if (dimind)
            nir_intrinsic_set_range(load, ~0);
         else if (indirect)
            nir_intrinsic_set_range(load, c->ubo_sizes[dim->Index] - base);
         else
            nir_intrinsic_set_range(load, base + 16);
      } else {
         nir_intrinsic_set_base(load, index);
         if (indirect) {
            offset = ttn_src_for_indirect(c, indirect);
            nir_intrinsic_set_range(load, c->ubo_sizes[0] - index);
         } else {
            offset = nir_imm_int(b, 0);
            nir_intrinsic_set_range(load, 1);
         }
      }
      load->src[srcn++] = nir_src_for_ssa(offset);

      nir_ssa_dest_init(&load->instr, &load->dest, 4, 32, NULL);
      nir_builder_instr_insert(b, &load->instr);

      src = nir_src_for_ssa(&load->dest.ssa);
      break;
   }

   default:
      unreachable("bad src file");
   }


   return src;
}

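/* A TGSI indirect register selects a single channel of an address or
 * temporary register; build a scalar mov of that channel so it can be added
 * to a constant index or offset.
 */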
static nir_ssa_def *
ttn_src_for_indirect(struct ttn_compile *c, struct tgsi_ind_register *indirect)
{
   nir_builder *b = &c->build;
   nir_alu_src src;
   memset(&src, 0, sizeof(src));
   for (int i = 0; i < 4; i++)
      src.swizzle[i] = indirect->Swizzle;
   src.src = ttn_src_for_file_and_index(c,
                                        indirect->File,
                                        indirect->Index,
                                        NULL, NULL, NULL,
                                        false);
   return nir_mov_alu(b, src, 1);
}

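/* Translate a TGSI destination register into a nir_alu_dest: outputs and the
 * address register map to their pre-created NIR registers, while writes to
 * array temporaries go through a one-shot register that is stored back to the
 * array variable after the instruction is emitted.
 */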
static nir_alu_dest
ttn_get_dest(struct ttn_compile *c, struct tgsi_full_dst_register *tgsi_fdst)
{
   struct tgsi_dst_register *tgsi_dst = &tgsi_fdst->Register;
   nir_alu_dest dest;
   unsigned index = tgsi_dst->Index;

   memset(&dest, 0, sizeof(dest));

   if (tgsi_dst->File == TGSI_FILE_TEMPORARY) {
      if (c->temp_regs[index].var) {
         nir_register *reg;

         /* this works, because TGSI will give us a base offset
          * (in case of indirect index) that points back into
          * the array.  Access can be direct or indirect, we
          * don't really care.  Just create a one-shot dst reg
          * that will get store_var'd back into the array var
          * at the end of ttn_emit_instruction()
          */
         reg = nir_local_reg_create(c->build.impl);
         reg->num_components = 4;
         dest.dest.reg.reg = reg;
         dest.dest.reg.base_offset = 0;
      } else {
         assert(!tgsi_dst->Indirect);
         dest.dest.reg.reg = c->temp_regs[index].reg;
         dest.dest.reg.base_offset = c->temp_regs[index].offset;
      }
   } else if (tgsi_dst->File == TGSI_FILE_OUTPUT) {
      dest.dest.reg.reg = c->output_regs[index].reg;
      dest.dest.reg.base_offset = c->output_regs[index].offset;
   } else if (tgsi_dst->File == TGSI_FILE_ADDRESS) {
      assert(index == 0);
      dest.dest.reg.reg = c->addr_reg;
   }

   dest.write_mask = tgsi_dst->WriteMask;
   dest.saturate = false;

   if (tgsi_dst->Indirect && (tgsi_dst->File != TGSI_FILE_TEMPORARY)) {
      nir_src *indirect = ralloc(c->build.shader, nir_src);
      *indirect = nir_src_for_ssa(ttn_src_for_indirect(c, &tgsi_fdst->Indirect));
      dest.dest.reg.indirect = indirect;
   }

   return dest;
}

static nir_variable *
ttn_get_var(struct ttn_compile *c, struct tgsi_full_dst_register *tgsi_fdst)
{
   struct tgsi_dst_register *tgsi_dst = &tgsi_fdst->Register;
   unsigned index = tgsi_dst->Index;

   if (tgsi_dst->File == TGSI_FILE_TEMPORARY) {
      /* we should not have an indirect when there is no var! */
      if (!c->temp_regs[index].var)
         assert(!tgsi_dst->Indirect);
      return c->temp_regs[index].var;
   }

   return NULL;
}

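/* Fetch a TGSI source operand as a vec4 SSA value, applying the operand's
 * swizzle, absolute-value and negate modifiers.  The float or integer
 * variants of abs/neg are chosen from the opcode's inferred source type.
 */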
static nir_ssa_def *
ttn_get_src(struct ttn_compile *c, struct tgsi_full_src_register *tgsi_fsrc,
            int src_idx)
{
   nir_builder *b = &c->build;
   struct tgsi_src_register *tgsi_src = &tgsi_fsrc->Register;
   enum tgsi_opcode opcode = c->token->FullInstruction.Instruction.Opcode;
   unsigned tgsi_src_type = tgsi_opcode_infer_src_type(opcode, src_idx);
   bool src_is_float = (tgsi_src_type == TGSI_TYPE_FLOAT ||
                        tgsi_src_type == TGSI_TYPE_DOUBLE ||
                        tgsi_src_type == TGSI_TYPE_UNTYPED);
   nir_alu_src src;

   memset(&src, 0, sizeof(src));

   if (tgsi_src->File == TGSI_FILE_NULL) {
      return nir_imm_float(b, 0.0);
   } else if (tgsi_src->File == TGSI_FILE_SAMPLER ||
              tgsi_src->File == TGSI_FILE_IMAGE ||
              tgsi_src->File == TGSI_FILE_BUFFER) {
      /* Only the index of the resource gets used in texturing, and it will
       * handle looking that up on its own instead of using the nir_alu_src.
       */
      assert(!tgsi_src->Indirect);
      return NULL;
   } else {
      struct tgsi_ind_register *ind = NULL;
      struct tgsi_dimension *dim = NULL;
      struct tgsi_ind_register *dimind = NULL;
      if (tgsi_src->Indirect)
         ind = &tgsi_fsrc->Indirect;
      if (tgsi_src->Dimension) {
         dim = &tgsi_fsrc->Dimension;
         if (dim->Indirect)
            dimind = &tgsi_fsrc->DimIndirect;
      }
      src.src = ttn_src_for_file_and_index(c,
                                           tgsi_src->File,
                                           tgsi_src->Index,
                                           ind, dim, dimind,
                                           src_is_float);
   }

   src.swizzle[0] = tgsi_src->SwizzleX;
   src.swizzle[1] = tgsi_src->SwizzleY;
   src.swizzle[2] = tgsi_src->SwizzleZ;
   src.swizzle[3] = tgsi_src->SwizzleW;

   nir_ssa_def *def = nir_mov_alu(b, src, 4);

   if (tgsi_type_is_64bit(tgsi_src_type))
      def = nir_bitcast_vector(b, def, 64);

   if (tgsi_src->Absolute) {
      if (src_is_float)
         def = nir_fabs(b, def);
      else
         def = nir_iabs(b, def);
   }

   if (tgsi_src->Negate) {
      if (src_is_float)
         def = nir_fneg(b, def);
      else
         def = nir_ineg(b, def);
   }

   return def;
}

static void
ttn_move_dest_masked(nir_builder *b, nir_alu_dest dest,
                     nir_ssa_def *def, unsigned write_mask)
{
   if (!(dest.write_mask & write_mask))
      return;

   nir_alu_instr *mov = nir_alu_instr_create(b->shader, nir_op_mov);
   mov->dest = dest;
   mov->dest.write_mask &= write_mask;
   mov->src[0].src = nir_src_for_ssa(def);
   for (unsigned i = def->num_components; i < 4; i++)
      mov->src[0].swizzle[i] = def->num_components - 1;
   nir_builder_instr_insert(b, &mov->instr);
}

static void
ttn_move_dest(nir_builder *b, nir_alu_dest dest, nir_ssa_def *def)
{
   ttn_move_dest_masked(b, dest, def, TGSI_WRITEMASK_XYZW);
}

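/* Emit a NIR ALU op for a TGSI instruction and move the result into the TGSI
 * destination.  1-bit boolean results are converted to the TGSI convention of
 * 0/~0 integers, and 64-bit results are packed back into 32-bit vec4 form.
 */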
static void
ttn_alu(nir_builder *b, nir_op op, nir_alu_dest dest, unsigned dest_bitsize,
        nir_ssa_def **src)
{
   nir_ssa_def *def = nir_build_alu_src_arr(b, op, src);
   if (def->bit_size == 1)
      def = nir_ineg(b, nir_b2i(b, def, dest_bitsize));
   assert(def->bit_size == dest_bitsize);
   if (dest_bitsize == 64) {
      if (def->num_components > 2) {
         /* 32 -> 64 bit conversion ops are supposed to only convert the first
          * two components, and we need to truncate here to avoid creating a
          * vec8 after bitcasting the destination.
          */
         def = nir_channels(b, def, 0x3);
      }
      def = nir_bitcast_vector(b, def, 32);
   }
   ttn_move_dest(b, dest, def);
}

static void
ttn_arl(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
{
   ttn_move_dest(b, dest, nir_f2i32(b, nir_ffloor(b, src[0])));
}

/* EXP - Approximate Exponential Base 2
 *  dst.x = 2^{\lfloor src.x\rfloor}
 *  dst.y = src.x - \lfloor src.x\rfloor
 *  dst.z = 2^{src.x}
 *  dst.w = 1.0
 */
static void
ttn_exp(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
{
   nir_ssa_def *srcx = ttn_channel(b, src[0], X);

   ttn_move_dest_masked(b, dest, nir_fexp2(b, nir_ffloor(b, srcx)),
                        TGSI_WRITEMASK_X);
   ttn_move_dest_masked(b, dest, nir_fsub(b, srcx, nir_ffloor(b, srcx)),
                        TGSI_WRITEMASK_Y);
   ttn_move_dest_masked(b, dest, nir_fexp2(b, srcx), TGSI_WRITEMASK_Z);
   ttn_move_dest_masked(b, dest, nir_imm_float(b, 1.0), TGSI_WRITEMASK_W);
}

/* LOG - Approximate Logarithm Base 2
 *  dst.x = \lfloor\log_2{|src.x|}\rfloor
 *  dst.y = \frac{|src.x|}{2^{\lfloor\log_2{|src.x|}\rfloor}}
 *  dst.z = \log_2{|src.x|}
 *  dst.w = 1.0
 */
static void
ttn_log(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
{
   nir_ssa_def *abs_srcx = nir_fabs(b, ttn_channel(b, src[0], X));
   nir_ssa_def *log2 = nir_flog2(b, abs_srcx);

   ttn_move_dest_masked(b, dest, nir_ffloor(b, log2), TGSI_WRITEMASK_X);
   ttn_move_dest_masked(b, dest,
                        nir_fdiv(b, abs_srcx, nir_fexp2(b, nir_ffloor(b, log2))),
                        TGSI_WRITEMASK_Y);
   ttn_move_dest_masked(b, dest, nir_flog2(b, abs_srcx), TGSI_WRITEMASK_Z);
   ttn_move_dest_masked(b, dest, nir_imm_float(b, 1.0), TGSI_WRITEMASK_W);
}

/* DST - Distance Vector
 *   dst.x = 1.0
 *   dst.y = src0.y \times src1.y
 *   dst.z = src0.z
 *   dst.w = src1.w
 */
static void
ttn_dst(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
{
   ttn_move_dest_masked(b, dest, nir_imm_float(b, 1.0), TGSI_WRITEMASK_X);
   ttn_move_dest_masked(b, dest, nir_fmul(b, src[0], src[1]), TGSI_WRITEMASK_Y);
   ttn_move_dest_masked(b, dest, nir_mov(b, src[0]), TGSI_WRITEMASK_Z);
   ttn_move_dest_masked(b, dest, nir_mov(b, src[1]), TGSI_WRITEMASK_W);
}

/* LIT - Light Coefficients
 *  dst.x = 1.0
 *  dst.y = max(src.x, 0.0)
 *  dst.z = (src.x > 0.0) ? max(src.y, 0.0)^{clamp(src.w, -128.0, 128.0)} : 0.0
 *  dst.w = 1.0
 */
static void
ttn_lit(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
{
   ttn_move_dest_masked(b, dest, nir_imm_float(b, 1.0), TGSI_WRITEMASK_XW);

   ttn_move_dest_masked(b, dest, nir_fmax(b, ttn_channel(b, src[0], X),
                                          nir_imm_float(b, 0.0)), TGSI_WRITEMASK_Y);

   if (dest.write_mask & TGSI_WRITEMASK_Z) {
      nir_ssa_def *src0_y = ttn_channel(b, src[0], Y);
      nir_ssa_def *wclamp = nir_fmax(b, nir_fmin(b, ttn_channel(b, src[0], W),
                                                 nir_imm_float(b, 128.0)),
                                     nir_imm_float(b, -128.0));
      nir_ssa_def *pow = nir_fpow(b, nir_fmax(b, src0_y, nir_imm_float(b, 0.0)),
                                  wclamp);

      ttn_move_dest_masked(b, dest,
                           nir_bcsel(b,
                                     nir_flt(b,
                                             ttn_channel(b, src[0], X),
                                             nir_imm_float(b, 0.0)),
                                     nir_imm_float(b, 0.0),
                                     pow),
                           TGSI_WRITEMASK_Z);
   }
}

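/* SLE and SGT have no direct NIR equivalents; they are implemented by
 * swapping the operands of nir_sge/nir_slt.
 */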
static void
ttn_sle(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
{
   ttn_move_dest(b, dest, nir_sge(b, src[1], src[0]));
}

static void
ttn_sgt(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
{
   ttn_move_dest(b, dest, nir_slt(b, src[1], src[0]));
}

static void
ttn_dp2(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
{
   ttn_move_dest(b, dest, nir_fdot2(b, src[0], src[1]));
}

static void
ttn_dp3(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
{
   ttn_move_dest(b, dest, nir_fdot3(b, src[0], src[1]));
}

static void
ttn_dp4(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
{
   ttn_move_dest(b, dest, nir_fdot4(b, src[0], src[1]));
}

static void
ttn_umad(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
{
   ttn_move_dest(b, dest, nir_iadd(b, nir_imul(b, src[0], src[1]), src[2]));
}

static void
ttn_arr(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
{
   ttn_move_dest(b, dest, nir_f2i32(b, nir_fround_even(b, src[0])));
}

static void
ttn_cmp(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
{
   ttn_move_dest(b, dest, nir_bcsel(b,
                                    nir_flt(b, src[0], nir_imm_float(b, 0.0)),
                                    src[1], src[2]));
}

static void
ttn_ucmp(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
{
   ttn_move_dest(b, dest, nir_bcsel(b,
                                    nir_ine(b, src[0], nir_imm_int(b, 0)),
                                    src[1], src[2]));
}

static void
ttn_barrier(nir_builder *b)
{
   nir_intrinsic_instr *barrier =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_control_barrier);
   nir_builder_instr_insert(b, &barrier->instr);
}

static void
ttn_kill(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
{
   nir_intrinsic_instr *discard =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_discard);
   nir_builder_instr_insert(b, &discard->instr);
   b->shader->info.fs.uses_discard = true;
}

static void
ttn_kill_if(nir_builder *b, nir_op op, nir_alu_dest dest, nir_ssa_def **src)
{
   /* flt must be exact, because NaN shouldn't discard. (apps rely on this) */
   b->exact = true;
   nir_ssa_def *cmp = nir_bany(b, nir_flt(b, src[0], nir_imm_float(b, 0.0)));
   b->exact = false;

   nir_intrinsic_instr *discard =
      nir_intrinsic_instr_create(b->shader, nir_intrinsic_discard_if);
   discard->src[0] = nir_src_for_ssa(cmp);
   nir_builder_instr_insert(b, &discard->instr);
   b->shader->info.fs.uses_discard = true;
}

static void
get_texture_info(unsigned texture,
                 enum glsl_sampler_dim *dim,
                 bool *is_shadow,
                 bool *is_array)
{
   assert(is_array);
   *is_array = false;

   if (is_shadow)
      *is_shadow = false;

   switch (texture) {
   case TGSI_TEXTURE_BUFFER:
      *dim = GLSL_SAMPLER_DIM_BUF;
      break;
   case TGSI_TEXTURE_1D:
      *dim = GLSL_SAMPLER_DIM_1D;
      break;
   case TGSI_TEXTURE_1D_ARRAY:
      *dim = GLSL_SAMPLER_DIM_1D;
      *is_array = true;
      break;
   case TGSI_TEXTURE_SHADOW1D:
      *dim = GLSL_SAMPLER_DIM_1D;
      *is_shadow = true;
      break;
   case TGSI_TEXTURE_SHADOW1D_ARRAY:
      *dim = GLSL_SAMPLER_DIM_1D;
      *is_shadow = true;
      *is_array = true;
      break;
   case TGSI_TEXTURE_2D:
      *dim = GLSL_SAMPLER_DIM_2D;
      break;
   case TGSI_TEXTURE_2D_ARRAY:
      *dim = GLSL_SAMPLER_DIM_2D;
      *is_array = true;
      break;
   case TGSI_TEXTURE_2D_MSAA:
      *dim = GLSL_SAMPLER_DIM_MS;
      break;
   case TGSI_TEXTURE_2D_ARRAY_MSAA:
      *dim = GLSL_SAMPLER_DIM_MS;
      *is_array = true;
      break;
   case TGSI_TEXTURE_SHADOW2D:
      *dim = GLSL_SAMPLER_DIM_2D;
      *is_shadow = true;
      break;
   case TGSI_TEXTURE_SHADOW2D_ARRAY:
      *dim = GLSL_SAMPLER_DIM_2D;
      *is_shadow = true;
      *is_array = true;
      break;
   case TGSI_TEXTURE_3D:
      *dim = GLSL_SAMPLER_DIM_3D;
      break;
   case TGSI_TEXTURE_CUBE:
      *dim = GLSL_SAMPLER_DIM_CUBE;
      break;
   case TGSI_TEXTURE_CUBE_ARRAY:
      *dim = GLSL_SAMPLER_DIM_CUBE;
      *is_array = true;
      break;
   case TGSI_TEXTURE_SHADOWCUBE:
      *dim = GLSL_SAMPLER_DIM_CUBE;
      *is_shadow = true;
      break;
   case TGSI_TEXTURE_SHADOWCUBE_ARRAY:
      *dim = GLSL_SAMPLER_DIM_CUBE;
      *is_shadow = true;
      *is_array = true;
      break;
   case TGSI_TEXTURE_RECT:
      *dim = GLSL_SAMPLER_DIM_RECT;
      break;
   case TGSI_TEXTURE_SHADOWRECT:
      *dim = GLSL_SAMPLER_DIM_RECT;
      *is_shadow = true;
      break;
   default:
      fprintf(stderr, "Unknown TGSI texture target %d\n", texture);
      abort();
   }
}

static enum glsl_base_type
base_type_for_alu_type(nir_alu_type type)
{
   type = nir_alu_type_get_base_type(type);

   switch (type) {
   case nir_type_float:
      return GLSL_TYPE_FLOAT;
   case nir_type_int:
      return GLSL_TYPE_INT;
   case nir_type_uint:
      return GLSL_TYPE_UINT;
   default:
      unreachable("invalid type");
   }
}

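/* Lazily create (and cache by binding point) the sampler variable for a given
 * texture unit, and record it in the shader info's textures_used masks.
 */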
static nir_variable *
get_sampler_var(struct ttn_compile *c, int binding,
                enum glsl_sampler_dim dim,
                bool is_shadow,
                bool is_array,
                enum glsl_base_type base_type,
                nir_texop op)
{
   nir_variable *var = c->samplers[binding];
   if (!var) {
      const struct glsl_type *type =
         glsl_sampler_type(dim, is_shadow, is_array, base_type);
      var = nir_variable_create(c->build.shader, nir_var_uniform, type,
                                "sampler");
      var->data.binding = binding;
      var->data.explicit_binding = true;

      c->samplers[binding] = var;
      c->num_samplers = MAX2(c->num_samplers, binding + 1);

      /* Record textures used */
      unsigned mask = 1 << binding;
      c->build.shader->info.textures_used |= mask;
      if (op == nir_texop_txf ||
          op == nir_texop_txf_ms ||
          op == nir_texop_txf_ms_mcs)
         c->build.shader->info.textures_used_by_txf |= mask;
   }

   return var;
}

static nir_variable *
get_image_var(struct ttn_compile *c, int binding,
              enum glsl_sampler_dim dim,
              bool is_array,
              enum glsl_base_type base_type,
              enum gl_access_qualifier access,
              enum pipe_format format)
{
   nir_variable *var = c->images[binding];

   if (!var) {
      const struct glsl_type *type = glsl_image_type(dim, is_array, base_type);

      var = nir_variable_create(c->build.shader, nir_var_uniform, type, "image");
      var->data.binding = binding;
      var->data.explicit_binding = true;
      var->data.access = access;
      var->data.image.format = format;

      c->images[binding] = var;
      c->num_images = MAX2(c->num_images, binding + 1);
      if (dim == GLSL_SAMPLER_DIM_MS)
         c->num_msaa_images = c->num_images;
   }

   return var;
}

static void
add_ssbo_var(struct ttn_compile *c, int binding)
{
   nir_variable *var = c->ssbo[binding];

   if (!var) {
      /* A length of 0 is used to denote unsized arrays */
      const struct glsl_type *type = glsl_array_type(glsl_uint_type(), 0, 0);

      struct glsl_struct_field field = {
            .type = type,
            .name = "data",
            .location = -1,
      };

      var = nir_variable_create(c->build.shader, nir_var_mem_ssbo, type, "ssbo");
      var->data.binding = binding;
      var->interface_type =
         glsl_interface_type(&field, 1, GLSL_INTERFACE_PACKING_STD430,
                             false, "data");
      c->ssbo[binding] = var;
   }
}

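/* Translate a TGSI texture instruction into a nir_tex_instr: pick the NIR
 * texop and source count from the TGSI opcode, then fill in the deref,
 * coordinate, projector, bias/LOD, gradient and comparator sources as the
 * opcode and texture target require.
 */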
1330 static void
ttn_tex(struct ttn_compile * c,nir_alu_dest dest,nir_ssa_def ** src)1331 ttn_tex(struct ttn_compile *c, nir_alu_dest dest, nir_ssa_def **src)
1332 {
1333    nir_builder *b = &c->build;
1334    struct tgsi_full_instruction *tgsi_inst = &c->token->FullInstruction;
1335    nir_tex_instr *instr;
1336    nir_texop op;
1337    unsigned num_srcs, samp = 1, sview, i;
1338 
1339    switch (tgsi_inst->Instruction.Opcode) {
1340    case TGSI_OPCODE_TEX:
1341       op = nir_texop_tex;
1342       num_srcs = 1;
1343       break;
1344    case TGSI_OPCODE_TEX2:
1345       op = nir_texop_tex;
1346       num_srcs = 1;
1347       samp = 2;
1348       break;
1349    case TGSI_OPCODE_TXP:
1350       op = nir_texop_tex;
1351       num_srcs = 2;
1352       break;
1353    case TGSI_OPCODE_TXB:
1354       op = nir_texop_txb;
1355       num_srcs = 2;
1356       break;
1357    case TGSI_OPCODE_TXB2:
1358       op = nir_texop_txb;
1359       num_srcs = 2;
1360       samp = 2;
1361       break;
1362    case TGSI_OPCODE_TXL:
1363    case TGSI_OPCODE_TEX_LZ:
1364       op = nir_texop_txl;
1365       num_srcs = 2;
1366       break;
1367    case TGSI_OPCODE_TXL2:
1368       op = nir_texop_txl;
1369       num_srcs = 2;
1370       samp = 2;
1371       break;
1372    case TGSI_OPCODE_TXF:
1373    case TGSI_OPCODE_TXF_LZ:
1374       if (tgsi_inst->Texture.Texture == TGSI_TEXTURE_2D_MSAA ||
1375           tgsi_inst->Texture.Texture == TGSI_TEXTURE_2D_ARRAY_MSAA) {
1376          op = nir_texop_txf_ms;
1377       } else {
1378          op = nir_texop_txf;
1379       }
1380       num_srcs = 2;
1381       break;
1382    case TGSI_OPCODE_TXD:
1383       op = nir_texop_txd;
1384       num_srcs = 3;
1385       samp = 3;
1386       break;
1387    case TGSI_OPCODE_LODQ:
1388       op = nir_texop_lod;
1389       num_srcs = 1;
1390       break;
1391 
1392    default:
1393       fprintf(stderr, "unknown TGSI tex op %d\n", tgsi_inst->Instruction.Opcode);
1394       abort();
1395    }
1396 
1397    if (tgsi_inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D ||
1398        tgsi_inst->Texture.Texture == TGSI_TEXTURE_SHADOW1D_ARRAY ||
1399        tgsi_inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D ||
1400        tgsi_inst->Texture.Texture == TGSI_TEXTURE_SHADOW2D_ARRAY ||
1401        tgsi_inst->Texture.Texture == TGSI_TEXTURE_SHADOWRECT ||
1402        tgsi_inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE ||
1403        tgsi_inst->Texture.Texture == TGSI_TEXTURE_SHADOWCUBE_ARRAY) {
1404       num_srcs++;
1405    }
1406 
1407    /* Deref sources */
1408    num_srcs += 2;
1409 
1410    num_srcs += tgsi_inst->Texture.NumOffsets;
1411 
1412    instr = nir_tex_instr_create(b->shader, num_srcs);
1413    instr->op = op;
1414 
1415    get_texture_info(tgsi_inst->Texture.Texture,
1416                     &instr->sampler_dim, &instr->is_shadow, &instr->is_array);
1417 
1418    instr->coord_components =
1419       glsl_get_sampler_dim_coordinate_components(instr->sampler_dim);
1420 
1421    if (instr->is_array)
1422       instr->coord_components++;
1423 
1424    assert(tgsi_inst->Src[samp].Register.File == TGSI_FILE_SAMPLER);
1425 
1426    /* TODO if we supported any opc's which take an explicit SVIEW
1427     * src, we would use that here instead.  But for the "legacy"
1428     * texture opc's the SVIEW index is same as SAMP index:
1429     */
1430    sview = tgsi_inst->Src[samp].Register.Index;
1431 
1432    if (op == nir_texop_lod) {
1433       instr->dest_type = nir_type_float;
1434    } else if (sview < c->num_samp_types) {
1435       instr->dest_type = c->samp_types[sview];
1436    } else {
1437       instr->dest_type = nir_type_float;
1438    }
1439 
1440    nir_variable *var =
1441       get_sampler_var(c, sview, instr->sampler_dim,
1442                       instr->is_shadow,
1443                       instr->is_array,
1444                       base_type_for_alu_type(instr->dest_type),
1445                       op);
1446 
1447    nir_deref_instr *deref = nir_build_deref_var(b, var);
1448 
1449    unsigned src_number = 0;
1450 
1451    instr->src[src_number].src = nir_src_for_ssa(&deref->dest.ssa);
1452    instr->src[src_number].src_type = nir_tex_src_texture_deref;
1453    src_number++;
1454    instr->src[src_number].src = nir_src_for_ssa(&deref->dest.ssa);
1455    instr->src[src_number].src_type = nir_tex_src_sampler_deref;
1456    src_number++;
1457 
1458    instr->src[src_number].src =
1459       nir_src_for_ssa(nir_swizzle(b, src[0], SWIZ(X, Y, Z, W),
1460                                   instr->coord_components));
1461    instr->src[src_number].src_type = nir_tex_src_coord;
1462    src_number++;
1463 
1464    if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_TXP) {
1465       instr->src[src_number].src = nir_src_for_ssa(ttn_channel(b, src[0], W));
1466       instr->src[src_number].src_type = nir_tex_src_projector;
1467       src_number++;
1468    }
1469 
1470    if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_TXB) {
1471       instr->src[src_number].src = nir_src_for_ssa(ttn_channel(b, src[0], W));
1472       instr->src[src_number].src_type = nir_tex_src_bias;
1473       src_number++;
1474    }
1475 
1476    if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_TXB2) {
1477       instr->src[src_number].src = nir_src_for_ssa(ttn_channel(b, src[1], X));
1478       instr->src[src_number].src_type = nir_tex_src_bias;
1479       src_number++;
1480    }
1481 
1482    if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_TXL ||
1483        tgsi_inst->Instruction.Opcode == TGSI_OPCODE_TEX_LZ) {
1484       if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_TEX_LZ)
1485          instr->src[src_number].src = nir_src_for_ssa(nir_imm_int(b, 0));
1486       else
1487          instr->src[src_number].src = nir_src_for_ssa(ttn_channel(b, src[0], W));
1488       instr->src[src_number].src_type = nir_tex_src_lod;
1489       src_number++;
1490    }
1491 
1492    if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_TXL2) {
1493       instr->src[src_number].src = nir_src_for_ssa(ttn_channel(b, src[1], X));
1494       instr->src[src_number].src_type = nir_tex_src_lod;
1495       src_number++;
1496    }
1497 
1498    if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_TXF ||
1499        tgsi_inst->Instruction.Opcode == TGSI_OPCODE_TXF_LZ) {
1500       if (op == nir_texop_txf_ms) {
1501          instr->src[src_number].src = nir_src_for_ssa(ttn_channel(b, src[0], W));
1502          instr->src[src_number].src_type = nir_tex_src_ms_index;
1503       } else {
1504          if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_TXF_LZ)
1505             instr->src[src_number].src = nir_src_for_ssa(nir_imm_int(b, 0));
1506          else
1507             instr->src[src_number].src = nir_src_for_ssa(ttn_channel(b, src[0], W));
1508          instr->src[src_number].src_type = nir_tex_src_lod;
1509       }
1510       src_number++;
1511    }
1512 
1513    if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_TXD) {
1514       instr->src[src_number].src_type = nir_tex_src_ddx;
1515       instr->src[src_number].src =
1516          nir_src_for_ssa(nir_swizzle(b, src[1], SWIZ(X, Y, Z, W),
1517 				     nir_tex_instr_src_size(instr, src_number)));
1518       src_number++;
1519       instr->src[src_number].src_type = nir_tex_src_ddy;
1520       instr->src[src_number].src =
1521          nir_src_for_ssa(nir_swizzle(b, src[2], SWIZ(X, Y, Z, W),
1522 				     nir_tex_instr_src_size(instr, src_number)));
1523       src_number++;
1524    }
1525 
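   /* The shadow comparator rides along in the coordinate register: it sits in
    * .z or .w of the first source depending on how many coordinate components
    * are in use, and moves to the second source's .x once the coordinate
    * already occupies all four channels (e.g. shadow cube arrays via TEX2).
    */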
1526    if (instr->is_shadow) {
1527       if (instr->coord_components == 4)
1528          instr->src[src_number].src = nir_src_for_ssa(ttn_channel(b, src[1], X));
1529       else if (instr->coord_components == 3)
1530          instr->src[src_number].src = nir_src_for_ssa(ttn_channel(b, src[0], W));
1531       else
1532          instr->src[src_number].src = nir_src_for_ssa(ttn_channel(b, src[0], Z));
1533 
1534       instr->src[src_number].src_type = nir_tex_src_comparator;
1535       src_number++;
1536    }
1537 
1538    for (i = 0; i < tgsi_inst->Texture.NumOffsets; i++) {
1539       struct tgsi_texture_offset *tex_offset = &tgsi_inst->TexOffsets[i];
1540       /* since TexOffset isn't using tgsi_full_src_register we get to
1541        * do some extra gymnastics:
1542        */
1543       nir_alu_src src;
1544 
1545       memset(&src, 0, sizeof(src));
1546 
1547       src.src = ttn_src_for_file_and_index(c,
1548                                            tex_offset->File,
1549                                            tex_offset->Index,
1550                                            NULL, NULL, NULL,
1551                                            true);
1552 
1553       src.swizzle[0] = tex_offset->SwizzleX;
1554       src.swizzle[1] = tex_offset->SwizzleY;
1555       src.swizzle[2] = tex_offset->SwizzleZ;
1556       src.swizzle[3] = TGSI_SWIZZLE_W;
1557 
1558       instr->src[src_number].src_type = nir_tex_src_offset;
1559       instr->src[src_number].src = nir_src_for_ssa(
1560          nir_mov_alu(b, src, nir_tex_instr_src_size(instr, src_number)));
1561       src_number++;
1562    }
1563 
1564    assert(src_number == num_srcs);
1565    assert(src_number == instr->num_srcs);
1566 
1567    nir_ssa_dest_init(&instr->instr, &instr->dest,
1568 		     nir_tex_instr_dest_size(instr),
1569 		     32, NULL);
1570    nir_builder_instr_insert(b, &instr->instr);
1571 
1572    /* Resolve the writemask on the texture op. */
1573    ttn_move_dest(b, dest, &instr->dest.ssa);
1574 }
1575 
1576 /* TGSI_OPCODE_TXQ is actually two distinct operations:
1577  *
1578  *     dst.x = texture_width(unit, lod)
1579  *     dst.y = texture_height(unit, lod)
1580  *     dst.z = texture_depth(unit, lod)
1581  *     dst.w = texture_levels(unit)
1582  *
1583  * dst.xyz maps to the NIR txs opcode, and dst.w maps to query_levels.
1584  */
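/* As a rough illustration (not literal NIR syntax), "TXQ dst, lod, unit"
 * therefore becomes something like:
 *
 *     vec3 size   = txs(unit, lod.x);
 *     int  levels = query_levels(unit);
 *     dst.xyz = size;
 *     dst.w   = levels;
 */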
1585 static void
1586 ttn_txq(struct ttn_compile *c, nir_alu_dest dest, nir_ssa_def **src)
1587 {
1588    nir_builder *b = &c->build;
1589    struct tgsi_full_instruction *tgsi_inst = &c->token->FullInstruction;
1590    nir_tex_instr *txs, *qlv;
1591 
1592    txs = nir_tex_instr_create(b->shader, 2);
1593    txs->op = nir_texop_txs;
1594    get_texture_info(tgsi_inst->Texture.Texture,
1595                     &txs->sampler_dim, &txs->is_shadow, &txs->is_array);
1596 
1597    qlv = nir_tex_instr_create(b->shader, 1);
1598    qlv->op = nir_texop_query_levels;
1599    get_texture_info(tgsi_inst->Texture.Texture,
1600                     &qlv->sampler_dim, &qlv->is_shadow, &qlv->is_array);
1601 
1602    assert(tgsi_inst->Src[1].Register.File == TGSI_FILE_SAMPLER);
1603    int tex_index = tgsi_inst->Src[1].Register.Index;
1604 
1605    nir_variable *var =
1606       get_sampler_var(c, tex_index, txs->sampler_dim,
1607                       txs->is_shadow,
1608                       txs->is_array,
1609                       base_type_for_alu_type(txs->dest_type),
1610                       nir_texop_txs);
1611 
1612    nir_deref_instr *deref = nir_build_deref_var(b, var);
1613 
1614    txs->src[0].src = nir_src_for_ssa(&deref->dest.ssa);
1615    txs->src[0].src_type = nir_tex_src_texture_deref;
1616 
1617    qlv->src[0].src = nir_src_for_ssa(&deref->dest.ssa);
1618    qlv->src[0].src_type = nir_tex_src_texture_deref;
1619 
1620    /* lod: */
1621    txs->src[1].src = nir_src_for_ssa(ttn_channel(b, src[0], X));
1622    txs->src[1].src_type = nir_tex_src_lod;
1623 
1624    nir_ssa_dest_init(&txs->instr, &txs->dest,
1625 		     nir_tex_instr_dest_size(txs), 32, NULL);
1626    nir_builder_instr_insert(b, &txs->instr);
1627 
1628    nir_ssa_dest_init(&qlv->instr, &qlv->dest, 1, 32, NULL);
1629    nir_builder_instr_insert(b, &qlv->instr);
1630 
1631    ttn_move_dest_masked(b, dest, &txs->dest.ssa, TGSI_WRITEMASK_XYZ);
1632    ttn_move_dest_masked(b, dest, &qlv->dest.ssa, TGSI_WRITEMASK_W);
1633 }
1634 
1635 static enum glsl_base_type
1636 get_image_base_type(struct tgsi_full_instruction *tgsi_inst)
1637 {
1638    const struct util_format_description *desc =
1639       util_format_description(tgsi_inst->Memory.Format);
1640 
1641    if (desc->channel[0].pure_integer) {
1642       if (desc->channel[0].type == UTIL_FORMAT_TYPE_SIGNED)
1643          return GLSL_TYPE_INT;
1644       else
1645          return GLSL_TYPE_UINT;
1646    }
1647    return GLSL_TYPE_FLOAT;
1648 }
1649 
1650 static enum gl_access_qualifier
1651 get_mem_qualifier(struct tgsi_full_instruction *tgsi_inst)
1652 {
1653    enum gl_access_qualifier access = 0;
1654 
1655    if (tgsi_inst->Memory.Qualifier & TGSI_MEMORY_COHERENT)
1656       access |= ACCESS_COHERENT;
1657    if (tgsi_inst->Memory.Qualifier & TGSI_MEMORY_RESTRICT)
1658       access |= ACCESS_RESTRICT;
1659    if (tgsi_inst->Memory.Qualifier & TGSI_MEMORY_VOLATILE)
1660       access |= ACCESS_VOLATILE;
1661    if (tgsi_inst->Memory.Qualifier & TGSI_MEMORY_STREAM_CACHE_POLICY)
1662       access |= ACCESS_STREAM_CACHE_POLICY;
1663 
1664    return access;
1665 }
1666 
1667 static void
1668 ttn_mem(struct ttn_compile *c, nir_alu_dest dest, nir_ssa_def **src)
1669 {
1670    nir_builder *b = &c->build;
1671    struct tgsi_full_instruction *tgsi_inst = &c->token->FullInstruction;
1672    nir_intrinsic_instr *instr = NULL;
1673    unsigned resource_index, addr_src_index, file;
1674 
1675    switch (tgsi_inst->Instruction.Opcode) {
1676    case TGSI_OPCODE_LOAD:
1677       assert(!tgsi_inst->Src[0].Register.Indirect);
1678       resource_index = tgsi_inst->Src[0].Register.Index;
1679       file = tgsi_inst->Src[0].Register.File;
1680       addr_src_index = 1;
1681       break;
1682    case TGSI_OPCODE_STORE:
1683       assert(!tgsi_inst->Dst[0].Register.Indirect);
1684       resource_index = tgsi_inst->Dst[0].Register.Index;
1685       file = tgsi_inst->Dst[0].Register.File;
1686       addr_src_index = 0;
1687       break;
1688    default:
1689       unreachable("unexpected memory opcode");
1690    }
1691 
1692    if (file == TGSI_FILE_BUFFER) {
1693       nir_intrinsic_op op;
1694 
1695       switch (tgsi_inst->Instruction.Opcode) {
1696       case TGSI_OPCODE_LOAD:
1697          op = nir_intrinsic_load_ssbo;
1698          break;
1699       case TGSI_OPCODE_STORE:
1700          op = nir_intrinsic_store_ssbo;
1701          break;
1702       }
1703 
1704       add_ssbo_var(c, resource_index);
1705 
1706       instr = nir_intrinsic_instr_create(b->shader, op);
1707       instr->num_components = util_last_bit(tgsi_inst->Dst[0].Register.WriteMask);
1708       nir_intrinsic_set_access(instr, get_mem_qualifier(tgsi_inst));
1709       nir_intrinsic_set_align(instr, 4, 0);
1710 
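      /* Source order differs between the two intrinsics: store_ssbo takes
       * (value, block index, offset) while load_ssbo takes (block index,
       * offset), so the value source is only prepended for stores.
       */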
1711       unsigned i = 0;
1712       if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_STORE)
1713          instr->src[i++] = nir_src_for_ssa(nir_swizzle(b, src[1], SWIZ(X, Y, Z, W),
1714                                                        instr->num_components));
1715       instr->src[i++] = nir_src_for_ssa(nir_imm_int(b, resource_index));
1716       instr->src[i++] = nir_src_for_ssa(ttn_channel(b, src[addr_src_index], X));
1717 
1718       if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_STORE)
1719          nir_intrinsic_set_write_mask(instr, tgsi_inst->Dst[0].Register.WriteMask);
1720 
1721    } else if (file == TGSI_FILE_IMAGE) {
1722       nir_intrinsic_op op;
1723 
1724       switch (tgsi_inst->Instruction.Opcode) {
1725       case TGSI_OPCODE_LOAD:
1726          op = nir_intrinsic_image_deref_load;
1727          break;
1728       case TGSI_OPCODE_STORE:
1729          op = nir_intrinsic_image_deref_store;
1730          break;
1731       }
1732 
1733       instr = nir_intrinsic_instr_create(b->shader, op);
1734 
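      /* Source layout set up below: src[0] = image deref, src[1] = coord,
       * src[2] = sample index, then src[3] = LOD for loads, or src[3] = value
       * and src[4] = LOD for stores.
       */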
1735       /* Set the image variable dereference. */
1736       enum glsl_sampler_dim dim;
1737       bool is_array;
1738       get_texture_info(tgsi_inst->Memory.Texture, &dim, NULL, &is_array);
1739 
1740       enum glsl_base_type base_type = get_image_base_type(tgsi_inst);
1741       enum gl_access_qualifier access = get_mem_qualifier(tgsi_inst);
1742 
1743       nir_variable *image =
1744          get_image_var(c, resource_index,
1745                        dim, is_array, base_type, access,
1746                        tgsi_inst->Memory.Format);
1747       nir_deref_instr *image_deref = nir_build_deref_var(b, image);
1748       const struct glsl_type *type = image_deref->type;
1749 
1750       nir_intrinsic_set_access(instr, image_deref->var->data.access);
1751 
1752       instr->src[0] = nir_src_for_ssa(&image_deref->dest.ssa);
1753       instr->src[1] = nir_src_for_ssa(src[addr_src_index]);
1754 
1755       /* Set the sample argument, which is undefined for single-sample images. */
1756       if (glsl_get_sampler_dim(type) == GLSL_SAMPLER_DIM_MS) {
1757          instr->src[2] = nir_src_for_ssa(ttn_channel(b, src[addr_src_index], W));
1758       } else {
1759          instr->src[2] = nir_src_for_ssa(nir_ssa_undef(b, 1, 32));
1760       }
1761 
1762       if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_LOAD) {
1763          instr->src[3] = nir_src_for_ssa(nir_imm_int(b, 0)); /* LOD */
1764       }
1765 
1766       unsigned num_components = util_last_bit(tgsi_inst->Dst[0].Register.WriteMask);
1767 
1768       if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_STORE) {
1769          instr->src[3] = nir_src_for_ssa(nir_swizzle(b, src[1], SWIZ(X, Y, Z, W),
1770                                                      num_components));
1771          instr->src[4] = nir_src_for_ssa(nir_imm_int(b, 0)); /* LOD */
1772       }
1773 
1774       instr->num_components = num_components;
1775    } else {
1776       unreachable("unexpected file");
1777    }
1778 
1779 
1780    if (tgsi_inst->Instruction.Opcode == TGSI_OPCODE_LOAD) {
1781       nir_ssa_dest_init(&instr->instr, &instr->dest, instr->num_components,
1782                         32, NULL);
1783       nir_builder_instr_insert(b, &instr->instr);
1784       ttn_move_dest(b, dest, &instr->dest.ssa);
1785    } else {
1786       nir_builder_instr_insert(b, &instr->instr);
1787    }
1788 }
1789 
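/* Direct TGSI-to-NIR ALU opcode mapping.  A zero entry means the opcode is
 * either handled by a dedicated case in ttn_emit_instruction() or is not
 * supported; TGSI_OPCODE_MOV is additionally special-cased there in case
 * nir_op_mov happens to be enum value 0.
 */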
1790 static const nir_op op_trans[TGSI_OPCODE_LAST] = {
1791    [TGSI_OPCODE_ARL] = 0,
1792    [TGSI_OPCODE_MOV] = nir_op_mov,
1793    [TGSI_OPCODE_LIT] = 0,
1794    [TGSI_OPCODE_RCP] = nir_op_frcp,
1795    [TGSI_OPCODE_RSQ] = nir_op_frsq,
1796    [TGSI_OPCODE_EXP] = 0,
1797    [TGSI_OPCODE_LOG] = 0,
1798    [TGSI_OPCODE_MUL] = nir_op_fmul,
1799    [TGSI_OPCODE_ADD] = nir_op_fadd,
1800    [TGSI_OPCODE_DP3] = 0,
1801    [TGSI_OPCODE_DP4] = 0,
1802    [TGSI_OPCODE_DST] = 0,
1803    [TGSI_OPCODE_MIN] = nir_op_fmin,
1804    [TGSI_OPCODE_MAX] = nir_op_fmax,
1805    [TGSI_OPCODE_SLT] = nir_op_slt,
1806    [TGSI_OPCODE_SGE] = nir_op_sge,
1807    [TGSI_OPCODE_MAD] = nir_op_ffma,
1808    [TGSI_OPCODE_TEX_LZ] = 0,
1809    [TGSI_OPCODE_LRP] = 0,
1810    [TGSI_OPCODE_SQRT] = nir_op_fsqrt,
1811    [TGSI_OPCODE_FRC] = nir_op_ffract,
1812    [TGSI_OPCODE_TXF_LZ] = 0,
1813    [TGSI_OPCODE_FLR] = nir_op_ffloor,
1814    [TGSI_OPCODE_ROUND] = nir_op_fround_even,
1815    [TGSI_OPCODE_EX2] = nir_op_fexp2,
1816    [TGSI_OPCODE_LG2] = nir_op_flog2,
1817    [TGSI_OPCODE_POW] = nir_op_fpow,
1818    [TGSI_OPCODE_COS] = nir_op_fcos,
1819    [TGSI_OPCODE_DDX] = nir_op_fddx,
1820    [TGSI_OPCODE_DDY] = nir_op_fddy,
1821    [TGSI_OPCODE_KILL] = 0,
1822    [TGSI_OPCODE_PK2H] = 0, /* XXX */
1823    [TGSI_OPCODE_PK2US] = 0, /* XXX */
1824    [TGSI_OPCODE_PK4B] = 0, /* XXX */
1825    [TGSI_OPCODE_PK4UB] = 0, /* XXX */
1826    [TGSI_OPCODE_SEQ] = nir_op_seq,
1827    [TGSI_OPCODE_SGT] = 0,
1828    [TGSI_OPCODE_SIN] = nir_op_fsin,
1829    [TGSI_OPCODE_SNE] = nir_op_sne,
1830    [TGSI_OPCODE_SLE] = 0,
1831    [TGSI_OPCODE_TEX] = 0,
1832    [TGSI_OPCODE_TXD] = 0,
1833    [TGSI_OPCODE_TXP] = 0,
1834    [TGSI_OPCODE_UP2H] = 0, /* XXX */
1835    [TGSI_OPCODE_UP2US] = 0, /* XXX */
1836    [TGSI_OPCODE_UP4B] = 0, /* XXX */
1837    [TGSI_OPCODE_UP4UB] = 0, /* XXX */
1838    [TGSI_OPCODE_ARR] = 0,
1839 
1840    /* No function calls, yet. */
1841    [TGSI_OPCODE_CAL] = 0, /* XXX */
1842    [TGSI_OPCODE_RET] = 0, /* XXX */
1843 
1844    [TGSI_OPCODE_SSG] = nir_op_fsign,
1845    [TGSI_OPCODE_CMP] = 0,
1846    [TGSI_OPCODE_TXB] = 0,
1847    [TGSI_OPCODE_DIV] = nir_op_fdiv,
1848    [TGSI_OPCODE_DP2] = 0,
1849    [TGSI_OPCODE_TXL] = 0,
1850 
1851    [TGSI_OPCODE_BRK] = 0,
1852    [TGSI_OPCODE_IF] = 0,
1853    [TGSI_OPCODE_UIF] = 0,
1854    [TGSI_OPCODE_ELSE] = 0,
1855    [TGSI_OPCODE_ENDIF] = 0,
1856 
1857    [TGSI_OPCODE_DDX_FINE] = nir_op_fddx_fine,
1858    [TGSI_OPCODE_DDY_FINE] = nir_op_fddy_fine,
1859 
1860    [TGSI_OPCODE_CEIL] = nir_op_fceil,
1861    [TGSI_OPCODE_I2F] = nir_op_i2f32,
1862    [TGSI_OPCODE_NOT] = nir_op_inot,
1863    [TGSI_OPCODE_TRUNC] = nir_op_ftrunc,
1864    [TGSI_OPCODE_SHL] = nir_op_ishl,
1865    [TGSI_OPCODE_AND] = nir_op_iand,
1866    [TGSI_OPCODE_OR] = nir_op_ior,
1867    [TGSI_OPCODE_MOD] = nir_op_umod,
1868    [TGSI_OPCODE_XOR] = nir_op_ixor,
1869    [TGSI_OPCODE_TXF] = 0,
1870    [TGSI_OPCODE_TXQ] = 0,
1871 
1872    [TGSI_OPCODE_CONT] = 0,
1873 
1874    [TGSI_OPCODE_EMIT] = 0, /* XXX */
1875    [TGSI_OPCODE_ENDPRIM] = 0, /* XXX */
1876 
1877    [TGSI_OPCODE_BGNLOOP] = 0,
1878    [TGSI_OPCODE_BGNSUB] = 0, /* XXX: no function calls */
1879    [TGSI_OPCODE_ENDLOOP] = 0,
1880    [TGSI_OPCODE_ENDSUB] = 0, /* XXX: no function calls */
1881 
1882    [TGSI_OPCODE_NOP] = 0,
1883    [TGSI_OPCODE_FSEQ] = nir_op_feq,
1884    [TGSI_OPCODE_FSGE] = nir_op_fge,
1885    [TGSI_OPCODE_FSLT] = nir_op_flt,
1886    [TGSI_OPCODE_FSNE] = nir_op_fneu,
1887 
1888    [TGSI_OPCODE_KILL_IF] = 0,
1889 
1890    [TGSI_OPCODE_END] = 0,
1891 
1892    [TGSI_OPCODE_F2I] = nir_op_f2i32,
1893    [TGSI_OPCODE_IDIV] = nir_op_idiv,
1894    [TGSI_OPCODE_IMAX] = nir_op_imax,
1895    [TGSI_OPCODE_IMIN] = nir_op_imin,
1896    [TGSI_OPCODE_INEG] = nir_op_ineg,
1897    [TGSI_OPCODE_ISGE] = nir_op_ige,
1898    [TGSI_OPCODE_ISHR] = nir_op_ishr,
1899    [TGSI_OPCODE_ISLT] = nir_op_ilt,
1900    [TGSI_OPCODE_F2U] = nir_op_f2u32,
1901    [TGSI_OPCODE_U2F] = nir_op_u2f32,
1902    [TGSI_OPCODE_UADD] = nir_op_iadd,
1903    [TGSI_OPCODE_UDIV] = nir_op_udiv,
1904    [TGSI_OPCODE_UMAD] = 0,
1905    [TGSI_OPCODE_UMAX] = nir_op_umax,
1906    [TGSI_OPCODE_UMIN] = nir_op_umin,
1907    [TGSI_OPCODE_UMOD] = nir_op_umod,
1908    [TGSI_OPCODE_UMUL] = nir_op_imul,
1909    [TGSI_OPCODE_USEQ] = nir_op_ieq,
1910    [TGSI_OPCODE_USGE] = nir_op_uge,
1911    [TGSI_OPCODE_USHR] = nir_op_ushr,
1912    [TGSI_OPCODE_USLT] = nir_op_ult,
1913    [TGSI_OPCODE_USNE] = nir_op_ine,
1914 
1915    [TGSI_OPCODE_SWITCH] = 0, /* not emitted by glsl_to_tgsi.cpp */
1916    [TGSI_OPCODE_CASE] = 0, /* not emitted by glsl_to_tgsi.cpp */
1917    [TGSI_OPCODE_DEFAULT] = 0, /* not emitted by glsl_to_tgsi.cpp */
1918    [TGSI_OPCODE_ENDSWITCH] = 0, /* not emitted by glsl_to_tgsi.cpp */
1919 
1920    /* XXX: SAMPLE opcodes */
1921 
1922    [TGSI_OPCODE_UARL] = nir_op_mov,
1923    [TGSI_OPCODE_UCMP] = 0,
1924    [TGSI_OPCODE_IABS] = nir_op_iabs,
1925    [TGSI_OPCODE_ISSG] = nir_op_isign,
1926 
1927    [TGSI_OPCODE_LOAD] = 0,
1928    [TGSI_OPCODE_STORE] = 0,
1929 
1930    /* XXX: atomics */
1931 
1932    [TGSI_OPCODE_TEX2] = 0,
1933    [TGSI_OPCODE_TXB2] = 0,
1934    [TGSI_OPCODE_TXL2] = 0,
1935 
1936    [TGSI_OPCODE_IMUL_HI] = nir_op_imul_high,
1937    [TGSI_OPCODE_UMUL_HI] = nir_op_umul_high,
1938 
1939    [TGSI_OPCODE_TG4] = 0,
1940    [TGSI_OPCODE_LODQ] = 0,
1941 
1942    [TGSI_OPCODE_IBFE] = nir_op_ibitfield_extract,
1943    [TGSI_OPCODE_UBFE] = nir_op_ubitfield_extract,
1944    [TGSI_OPCODE_BFI] = nir_op_bitfield_insert,
1945    [TGSI_OPCODE_BREV] = nir_op_bitfield_reverse,
1946    [TGSI_OPCODE_POPC] = nir_op_bit_count,
1947    [TGSI_OPCODE_LSB] = nir_op_find_lsb,
1948    [TGSI_OPCODE_IMSB] = nir_op_ifind_msb,
1949    [TGSI_OPCODE_UMSB] = nir_op_ufind_msb,
1950 
1951    [TGSI_OPCODE_INTERP_CENTROID] = 0, /* XXX */
1952    [TGSI_OPCODE_INTERP_SAMPLE] = 0, /* XXX */
1953    [TGSI_OPCODE_INTERP_OFFSET] = 0, /* XXX */
1954 
1955    [TGSI_OPCODE_F2D] = nir_op_f2f64,
1956    [TGSI_OPCODE_D2F] = nir_op_f2f32,
1957    [TGSI_OPCODE_DMUL] = nir_op_fmul,
1958    [TGSI_OPCODE_D2U] = nir_op_f2u32,
1959    [TGSI_OPCODE_U2D] = nir_op_u2f64,
1960 
1961    [TGSI_OPCODE_U64ADD] = nir_op_iadd,
1962    [TGSI_OPCODE_U64MUL] = nir_op_imul,
1963    [TGSI_OPCODE_U64DIV] = nir_op_udiv,
1964    [TGSI_OPCODE_U64SNE] = nir_op_ine,
1965 };
1966 
1967 static void
1968 ttn_emit_instruction(struct ttn_compile *c)
1969 {
1970    nir_builder *b = &c->build;
1971    struct tgsi_full_instruction *tgsi_inst = &c->token->FullInstruction;
1972    unsigned i;
1973    unsigned tgsi_op = tgsi_inst->Instruction.Opcode;
1974    struct tgsi_full_dst_register *tgsi_dst = &tgsi_inst->Dst[0];
1975 
1976    if (tgsi_op == TGSI_OPCODE_END)
1977       return;
1978 
1979    nir_ssa_def *src[TGSI_FULL_MAX_SRC_REGISTERS];
1980    for (i = 0; i < tgsi_inst->Instruction.NumSrcRegs; i++) {
1981       src[i] = ttn_get_src(c, &tgsi_inst->Src[i], i);
1982    }
1983    nir_alu_dest dest = ttn_get_dest(c, tgsi_dst);
1984 
1985    unsigned tgsi_dst_type = tgsi_opcode_infer_dst_type(tgsi_op, 0);
1986 
1987    /* The destination bitsize of the NIR opcode (not TGSI, where it's always
1988     * 32 bits). This needs to be passed into ttn_alu() because it can't be
1989     * inferred for comparison opcodes.
1990     */
1991    unsigned dst_bitsize = tgsi_type_is_64bit(tgsi_dst_type) ? 64 : 32;
1992 
1993    switch (tgsi_op) {
1994    case TGSI_OPCODE_RSQ:
1995       ttn_move_dest(b, dest, nir_frsq(b, ttn_channel(b, src[0], X)));
1996       break;
1997 
1998    case TGSI_OPCODE_SQRT:
1999       ttn_move_dest(b, dest, nir_fsqrt(b, ttn_channel(b, src[0], X)));
2000       break;
2001 
2002    case TGSI_OPCODE_RCP:
2003       ttn_move_dest(b, dest, nir_frcp(b, ttn_channel(b, src[0], X)));
2004       break;
2005 
2006    case TGSI_OPCODE_EX2:
2007       ttn_move_dest(b, dest, nir_fexp2(b, ttn_channel(b, src[0], X)));
2008       break;
2009 
2010    case TGSI_OPCODE_LG2:
2011       ttn_move_dest(b, dest, nir_flog2(b, ttn_channel(b, src[0], X)));
2012       break;
2013 
2014    case TGSI_OPCODE_POW:
2015       ttn_move_dest(b, dest, nir_fpow(b,
2016                                       ttn_channel(b, src[0], X),
2017                                       ttn_channel(b, src[1], X)));
2018       break;
2019 
2020    case TGSI_OPCODE_COS:
2021       ttn_move_dest(b, dest, nir_fcos(b, ttn_channel(b, src[0], X)));
2022       break;
2023 
2024    case TGSI_OPCODE_SIN:
2025       ttn_move_dest(b, dest, nir_fsin(b, ttn_channel(b, src[0], X)));
2026       break;
2027 
2028    case TGSI_OPCODE_ARL:
2029       ttn_arl(b, op_trans[tgsi_op], dest, src);
2030       break;
2031 
2032    case TGSI_OPCODE_EXP:
2033       ttn_exp(b, op_trans[tgsi_op], dest, src);
2034       break;
2035 
2036    case TGSI_OPCODE_LOG:
2037       ttn_log(b, op_trans[tgsi_op], dest, src);
2038       break;
2039 
2040    case TGSI_OPCODE_DST:
2041       ttn_dst(b, op_trans[tgsi_op], dest, src);
2042       break;
2043 
2044    case TGSI_OPCODE_LIT:
2045       ttn_lit(b, op_trans[tgsi_op], dest, src);
2046       break;
2047 
2048    case TGSI_OPCODE_DP2:
2049       ttn_dp2(b, op_trans[tgsi_op], dest, src);
2050       break;
2051 
2052    case TGSI_OPCODE_DP3:
2053       ttn_dp3(b, op_trans[tgsi_op], dest, src);
2054       break;
2055 
2056    case TGSI_OPCODE_DP4:
2057       ttn_dp4(b, op_trans[tgsi_op], dest, src);
2058       break;
2059 
2060    case TGSI_OPCODE_UMAD:
2061       ttn_umad(b, op_trans[tgsi_op], dest, src);
2062       break;
2063 
2064    case TGSI_OPCODE_LRP:
2065       ttn_move_dest(b, dest, nir_flrp(b, src[2], src[1], src[0]));
2066       break;
2067 
2068    case TGSI_OPCODE_KILL:
2069       ttn_kill(b, op_trans[tgsi_op], dest, src);
2070       break;
2071 
2072    case TGSI_OPCODE_ARR:
2073       ttn_arr(b, op_trans[tgsi_op], dest, src);
2074       break;
2075 
2076    case TGSI_OPCODE_CMP:
2077       ttn_cmp(b, op_trans[tgsi_op], dest, src);
2078       break;
2079 
2080    case TGSI_OPCODE_UCMP:
2081       ttn_ucmp(b, op_trans[tgsi_op], dest, src);
2082       break;
2083 
2084    case TGSI_OPCODE_SGT:
2085       ttn_sgt(b, op_trans[tgsi_op], dest, src);
2086       break;
2087 
2088    case TGSI_OPCODE_SLE:
2089       ttn_sle(b, op_trans[tgsi_op], dest, src);
2090       break;
2091 
2092    case TGSI_OPCODE_KILL_IF:
2093       ttn_kill_if(b, op_trans[tgsi_op], dest, src);
2094       break;
2095 
2096    case TGSI_OPCODE_TEX:
2097    case TGSI_OPCODE_TEX_LZ:
2098    case TGSI_OPCODE_TXP:
2099    case TGSI_OPCODE_TXL:
2100    case TGSI_OPCODE_TXB:
2101    case TGSI_OPCODE_TXD:
2102    case TGSI_OPCODE_TEX2:
2103    case TGSI_OPCODE_TXL2:
2104    case TGSI_OPCODE_TXB2:
2105    case TGSI_OPCODE_TXF:
2106    case TGSI_OPCODE_TXF_LZ:
2107    case TGSI_OPCODE_TG4:
2108    case TGSI_OPCODE_LODQ:
2109       ttn_tex(c, dest, src);
2110       break;
2111 
2112    case TGSI_OPCODE_TXQ:
2113       ttn_txq(c, dest, src);
2114       break;
2115 
2116    case TGSI_OPCODE_LOAD:
2117    case TGSI_OPCODE_STORE:
2118       ttn_mem(c, dest, src);
2119       break;
2120 
2121    case TGSI_OPCODE_NOP:
2122       break;
2123 
2124    case TGSI_OPCODE_IF:
2125       nir_push_if(b, nir_fneu(b, nir_channel(b, src[0], 0), nir_imm_float(b, 0.0)));
2126       break;
2127 
2128    case TGSI_OPCODE_UIF:
2129       nir_push_if(b, nir_ine(b, nir_channel(b, src[0], 0), nir_imm_int(b, 0)));
2130       break;
2131 
2132    case TGSI_OPCODE_ELSE:
2133       nir_push_else(&c->build, NULL);
2134       break;
2135 
2136    case TGSI_OPCODE_ENDIF:
2137       nir_pop_if(&c->build, NULL);
2138       break;
2139 
2140    case TGSI_OPCODE_BGNLOOP:
2141       nir_push_loop(&c->build);
2142       break;
2143 
2144    case TGSI_OPCODE_BRK:
2145       nir_jump(b, nir_jump_break);
2146       break;
2147 
2148    case TGSI_OPCODE_CONT:
2149       nir_jump(b, nir_jump_continue);
2150       break;
2151 
2152    case TGSI_OPCODE_ENDLOOP:
2153       nir_pop_loop(&c->build, NULL);
2154       break;
2155 
2156    case TGSI_OPCODE_BARRIER:
2157       ttn_barrier(b);
2158       break;
2159 
2160    default:
2161       if (op_trans[tgsi_op] != 0 || tgsi_op == TGSI_OPCODE_MOV) {
2162          ttn_alu(b, op_trans[tgsi_op], dest, dst_bitsize, src);
2163       } else {
2164          fprintf(stderr, "unknown TGSI opcode: %s\n",
2165                  tgsi_get_opcode_name(tgsi_op));
2166          abort();
2167       }
2168       break;
2169    }
2170 
2171    if (tgsi_inst->Instruction.Saturate) {
2172       assert(!dest.dest.is_ssa);
2173       ttn_move_dest(b, dest, nir_fsat(b, ttn_src_for_dest(b, &dest)));
2174    }
2175 
2176    /* if the dst has a matching var, append store_var to move
2177     * output from reg to var
2178     */
2179    nir_variable *var = ttn_get_var(c, tgsi_dst);
2180    if (var) {
2181       unsigned index = tgsi_dst->Register.Index;
2182       unsigned offset = c->temp_regs[index].offset;
2183       struct tgsi_ind_register *indirect = tgsi_dst->Register.Indirect ?
2184                                            &tgsi_dst->Indirect : NULL;
2185       nir_src val = nir_src_for_reg(dest.dest.reg.reg);
2186       nir_store_deref(b, ttn_array_deref(c, var, offset, indirect),
2187                       nir_ssa_for_src(b, val, 4), dest.write_mask);
2188    }
2189 }
2190 
2191 /**
2192  * Emits a NIR store intrinsic for each TGSI_FILE_OUTPUT value to its output
2193  * variable at the end of the shader.
2194  *
2195  * We don't generate these incrementally as the TGSI_FILE_OUTPUT values are
2196  * written, because there's no output load intrinsic, which means we couldn't
2197  * handle writemasks.
2198  */
2199 static void
2200 ttn_add_output_stores(struct ttn_compile *c)
2201 {
2202    nir_builder *b = &c->build;
2203 
2204    for (int i = 0; i < c->build.shader->num_outputs; i++) {
2205       nir_variable *var = c->outputs[i];
2206       if (!var)
2207          continue;
2208 
2209       nir_src src = nir_src_for_reg(c->output_regs[i].reg);
2210       src.reg.base_offset = c->output_regs[i].offset;
2211 
2212       nir_ssa_def *store_value = nir_ssa_for_src(b, src, 4);
2213       if (c->build.shader->info.stage == MESA_SHADER_FRAGMENT) {
2214          /* TGSI uses TGSI_SEMANTIC_POSITION.z for the depth output
2215           * and TGSI_SEMANTIC_STENCIL.y for the stencil output,
2216           * while NIR uses a single-component output.
2217           */
2218          if (var->data.location == FRAG_RESULT_DEPTH)
2219             store_value = nir_channel(b, store_value, 2);
2220          else if (var->data.location == FRAG_RESULT_STENCIL)
2221             store_value = nir_channel(b, store_value, 1);
2222       } else {
2223          /* FOGC and PSIZ are scalar values */
2224          if (var->data.location == VARYING_SLOT_FOGC ||
2225              var->data.location == VARYING_SLOT_PSIZ) {
2226             store_value = nir_channel(b, store_value, 0);
2227          }
2228       }
2229 
2230       nir_store_deref(b, nir_build_deref_var(b, var), store_value,
2231                       (1 << store_value->num_components) - 1);
2232    }
2233 }
2234 
2235 /**
2236  * Parses the given TGSI tokens.
2237  */
2238 static void
2239 ttn_parse_tgsi(struct ttn_compile *c, const void *tgsi_tokens)
2240 {
2241    struct tgsi_parse_context parser;
2242    ASSERTED int ret;
2243 
2244    ret = tgsi_parse_init(&parser, tgsi_tokens);
2245    assert(ret == TGSI_PARSE_OK);
2246 
2247    while (!tgsi_parse_end_of_tokens(&parser)) {
2248       tgsi_parse_token(&parser);
2249       c->token = &parser.FullToken;
2250 
2251       switch (parser.FullToken.Token.Type) {
2252       case TGSI_TOKEN_TYPE_DECLARATION:
2253          ttn_emit_declaration(c);
2254          break;
2255 
2256       case TGSI_TOKEN_TYPE_INSTRUCTION:
2257          ttn_emit_instruction(c);
2258          break;
2259 
2260       case TGSI_TOKEN_TYPE_IMMEDIATE:
2261          ttn_emit_immediate(c);
2262          break;
2263       }
2264    }
2265 
2266    tgsi_parse_free(&parser);
2267 }
2268 
2269 static void
2270 ttn_read_pipe_caps(struct ttn_compile *c,
2271                    struct pipe_screen *screen)
2272 {
2273    c->cap_samplers_as_deref = screen->get_param(screen, PIPE_CAP_NIR_SAMPLERS_AS_DEREF);
2274    c->cap_face_is_sysval = screen->get_param(screen, PIPE_CAP_TGSI_FS_FACE_IS_INTEGER_SYSVAL);
2275    c->cap_position_is_sysval = screen->get_param(screen, PIPE_CAP_TGSI_FS_POSITION_IS_SYSVAL);
2276    c->cap_point_is_sysval = screen->get_param(screen, PIPE_CAP_TGSI_FS_POINT_IS_SYSVAL);
2277 }
2278 
2279 /**
2280  * Initializes a TGSI-to-NIR compiler.
2281  */
2282 static struct ttn_compile *
2283 ttn_compile_init(const void *tgsi_tokens,
2284                  const nir_shader_compiler_options *options,
2285                  struct pipe_screen *screen)
2286 {
2287    struct ttn_compile *c;
2288    struct nir_shader *s;
2289    struct tgsi_shader_info scan;
2290 
2291    assert(options || screen);
2292    c = rzalloc(NULL, struct ttn_compile);
2293 
2294    tgsi_scan_shader(tgsi_tokens, &scan);
2295    c->scan = &scan;
2296 
2297    if (!options) {
2298       options =
2299          screen->get_compiler_options(screen, PIPE_SHADER_IR_NIR, scan.processor);
2300    }
2301 
2302    nir_builder_init_simple_shader(&c->build, NULL,
2303                                   tgsi_processor_to_shader_stage(scan.processor),
2304                                   options);
2305 
2306    s = c->build.shader;
2307 
2308    if (screen) {
2309       ttn_read_pipe_caps(c, screen);
2310    } else {
2311       /* TTN used to be hard-coded to always make FACE a sysval,
2312        * so preserve that behavior so existing users don't break. */
2313       c->cap_face_is_sysval = true;
2314    }
2315 
2316    if (s->info.stage == MESA_SHADER_FRAGMENT)
2317       s->info.fs.untyped_color_outputs = true;
2318 
2319    s->num_inputs = scan.file_max[TGSI_FILE_INPUT] + 1;
2320    s->num_uniforms = scan.const_file_max[0] + 1;
2321    s->num_outputs = scan.file_max[TGSI_FILE_OUTPUT] + 1;
2322    s->info.num_ssbos = util_last_bit(scan.shader_buffers_declared);
2323    s->info.num_ubos = util_last_bit(scan.const_buffers_declared >> 1);
2324    s->info.num_images = util_last_bit(scan.images_declared);
2325    s->info.num_textures = util_last_bit(scan.samplers_declared);
2326 
2327    for (unsigned i = 0; i < TGSI_PROPERTY_COUNT; i++) {
2328       unsigned value = scan.properties[i];
2329 
2330       switch (i) {
2331       case TGSI_PROPERTY_FS_COLOR0_WRITES_ALL_CBUFS:
2332          break; /* handled in ttn_emit_declaration */
2333       case TGSI_PROPERTY_FS_COORD_ORIGIN:
2334          if (s->info.stage == MESA_SHADER_FRAGMENT)
2335             s->info.fs.origin_upper_left = value == TGSI_FS_COORD_ORIGIN_UPPER_LEFT;
2336          break;
2337       case TGSI_PROPERTY_FS_COORD_PIXEL_CENTER:
2338          if (s->info.stage == MESA_SHADER_FRAGMENT)
2339             s->info.fs.pixel_center_integer = value == TGSI_FS_COORD_PIXEL_CENTER_INTEGER;
2340          break;
2341       case TGSI_PROPERTY_FS_DEPTH_LAYOUT:
2342          if (s->info.stage == MESA_SHADER_FRAGMENT)
2343             s->info.fs.depth_layout = ttn_get_depth_layout(value);
2344          break;
2345       case TGSI_PROPERTY_VS_WINDOW_SPACE_POSITION:
2346          if (s->info.stage == MESA_SHADER_VERTEX)
2347             s->info.vs.window_space_position = value;
2348          break;
2349       case TGSI_PROPERTY_NEXT_SHADER:
2350          s->info.next_stage = tgsi_processor_to_shader_stage(value);
2351          break;
2352       case TGSI_PROPERTY_VS_BLIT_SGPRS_AMD:
2353          if (s->info.stage == MESA_SHADER_VERTEX)
2354             s->info.vs.blit_sgprs_amd = value;
2355          break;
2356       case TGSI_PROPERTY_CS_FIXED_BLOCK_WIDTH:
2357          if (s->info.stage == MESA_SHADER_COMPUTE)
2358             s->info.cs.local_size[0] = value;
2359          break;
2360       case TGSI_PROPERTY_CS_FIXED_BLOCK_HEIGHT:
2361          if (s->info.stage == MESA_SHADER_COMPUTE)
2362             s->info.cs.local_size[1] = value;
2363          break;
2364       case TGSI_PROPERTY_CS_FIXED_BLOCK_DEPTH:
2365          if (s->info.stage == MESA_SHADER_COMPUTE)
2366             s->info.cs.local_size[2] = value;
2367          break;
2368       case TGSI_PROPERTY_CS_USER_DATA_COMPONENTS_AMD:
2369          if (s->info.stage == MESA_SHADER_COMPUTE)
2370             s->info.cs.user_data_components_amd = value;
2371          break;
2372       default:
2373          if (value) {
2374             fprintf(stderr, "tgsi_to_nir: unhandled TGSI property %u = %u\n",
2375                     i, value);
2376             unreachable("unhandled TGSI property");
2377          }
2378       }
2379    }
2380 
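   /* If TGSI didn't declare a complete fixed block size, the workgroup size
    * is only known at dispatch time, so mark it as variable.
    */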
2381    if (s->info.stage == MESA_SHADER_COMPUTE &&
2382        (!s->info.cs.local_size[0] ||
2383         !s->info.cs.local_size[1] ||
2384         !s->info.cs.local_size[2]))
2385       s->info.cs.local_size_variable = true;
2386 
2387    c->inputs = rzalloc_array(c, struct nir_variable *, s->num_inputs);
2388    c->outputs = rzalloc_array(c, struct nir_variable *, s->num_outputs);
2389 
2390    c->output_regs = rzalloc_array(c, struct ttn_reg_info,
2391                                   scan.file_max[TGSI_FILE_OUTPUT] + 1);
2392    c->temp_regs = rzalloc_array(c, struct ttn_reg_info,
2393                                 scan.file_max[TGSI_FILE_TEMPORARY] + 1);
2394    c->imm_defs = rzalloc_array(c, nir_ssa_def *,
2395                                scan.file_max[TGSI_FILE_IMMEDIATE] + 1);
2396 
2397    c->num_samp_types = scan.file_max[TGSI_FILE_SAMPLER_VIEW] + 1;
2398    c->samp_types = rzalloc_array(c, nir_alu_type, c->num_samp_types);
2399 
2400    ttn_parse_tgsi(c, tgsi_tokens);
2401    ttn_add_output_stores(c);
2402 
2403    nir_validate_shader(c->build.shader, "TTN: after parsing TGSI and creating the NIR shader");
2404 
2405    return c;
2406 }
2407 
2408 static void
2409 ttn_optimize_nir(nir_shader *nir)
2410 {
2411    bool progress;
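   /* Run the usual NIR cleanup and optimization passes until they stop
    * making progress.
    */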
2412    do {
2413       progress = false;
2414 
2415       NIR_PASS_V(nir, nir_lower_vars_to_ssa);
2416 
2417       if (nir->options->lower_to_scalar) {
2418          NIR_PASS_V(nir, nir_lower_alu_to_scalar, NULL, NULL);
2419          NIR_PASS_V(nir, nir_lower_phis_to_scalar);
2420       }
2421 
2422       NIR_PASS_V(nir, nir_lower_alu);
2423       NIR_PASS_V(nir, nir_lower_pack);
2424       NIR_PASS(progress, nir, nir_copy_prop);
2425       NIR_PASS(progress, nir, nir_opt_remove_phis);
2426       NIR_PASS(progress, nir, nir_opt_dce);
2427 
2428       if (nir_opt_trivial_continues(nir)) {
2429          progress = true;
2430          NIR_PASS(progress, nir, nir_copy_prop);
2431          NIR_PASS(progress, nir, nir_opt_dce);
2432       }
2433 
2434       NIR_PASS(progress, nir, nir_opt_if, false);
2435       NIR_PASS(progress, nir, nir_opt_dead_cf);
2436       NIR_PASS(progress, nir, nir_opt_cse);
2437       NIR_PASS(progress, nir, nir_opt_peephole_select, 8, true, true);
2438 
2439       NIR_PASS(progress, nir, nir_opt_algebraic);
2440       NIR_PASS(progress, nir, nir_opt_constant_folding);
2441 
2442       NIR_PASS(progress, nir, nir_opt_undef);
2443       NIR_PASS(progress, nir, nir_opt_conditional_discard);
2444 
2445       if (nir->options->max_unroll_iterations) {
2446          NIR_PASS(progress, nir, nir_opt_loop_unroll, (nir_variable_mode)0);
2447       }
2448 
2449    } while (progress);
2450 
2451 }
2452 
2453 /**
2454  * Finalizes the NIR in a similar way to st_glsl_to_nir.
2455  *
2456  * Drivers expect these passes to have already been run,
2457  * so we have to run them here too.
2458  */
2459 static void
2460 ttn_finalize_nir(struct ttn_compile *c, struct pipe_screen *screen)
2461 {
2462    struct nir_shader *nir = c->build.shader;
2463 
2464    NIR_PASS_V(nir, nir_lower_vars_to_ssa);
2465    NIR_PASS_V(nir, nir_lower_regs_to_ssa);
2466 
2467    NIR_PASS_V(nir, nir_lower_global_vars_to_local);
2468    NIR_PASS_V(nir, nir_split_var_copies);
2469    NIR_PASS_V(nir, nir_lower_var_copies);
2470    NIR_PASS_V(nir, nir_lower_system_values);
2471    NIR_PASS_V(nir, nir_lower_compute_system_values, NULL);
2472 
2473    if (nir->options->lower_uniforms_to_ubo)
2474       NIR_PASS_V(nir, nir_lower_uniforms_to_ubo, 16);
2475 
2476    if (!c->cap_samplers_as_deref)
2477       NIR_PASS_V(nir, nir_lower_samplers);
2478 
2479    if (screen->finalize_nir) {
2480       screen->finalize_nir(screen, nir, true);
2481    } else {
2482       ttn_optimize_nir(nir);
2483       nir_shader_gather_info(nir, c->build.impl);
2484    }
2485 
2486    nir->info.num_images = c->num_images;
2487    nir->info.num_textures = c->num_samplers;
2488 
2489    nir_validate_shader(nir, "TTN: after all optimizations");
2490 }
2491 
2492 static void save_nir_to_disk_cache(struct disk_cache *cache,
2493                                    uint8_t key[CACHE_KEY_SIZE],
2494                                    const nir_shader *s)
2495 {
2496    struct blob blob = {0};
2497 
2498    blob_init(&blob);
2499    /* Because we cannot fully trust disk_cache_put
2500     * (EGL_ANDROID_blob_cache) we add the shader size,
2501     * which we'll check after disk_cache_get().
2502     */
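   /* The resulting blob layout is therefore:
    *
    *    [ uint32 total blob size | nir_serialize() payload ]
    */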
2503    if (blob_reserve_uint32(&blob) != 0) {
2504       blob_finish(&blob);
2505       return;
2506    }
2507 
2508    nir_serialize(&blob, s, true);
2509    *(uint32_t *)blob.data = blob.size;
2510 
2511    disk_cache_put(cache, key, blob.data, blob.size, NULL);
2512    blob_finish(&blob);
2513 }
2514 
2515 static nir_shader *
2516 load_nir_from_disk_cache(struct disk_cache *cache,
2517                          struct pipe_screen *screen,
2518                          uint8_t key[CACHE_KEY_SIZE],
2519                          unsigned processor)
2520 {
2521    const nir_shader_compiler_options *options =
2522       screen->get_compiler_options(screen, PIPE_SHADER_IR_NIR, processor);
2523    struct blob_reader blob_reader;
2524    size_t size;
2525    nir_shader *s;
2526 
2527    uint32_t *buffer = (uint32_t *)disk_cache_get(cache, key, &size);
2528    if (!buffer)
2529       return NULL;
2530 
2531    /* Match found. No need to check crc32 or other things.
2532     * disk_cache_get is supposed to do that for us.
2533     * However, we still check whether the first element is indeed the size,
2534     * as we cannot fully trust disk_cache_get (EGL_ANDROID_blob_cache) */
2535    if (buffer[0] != size) {
2536       return NULL;
2537    }
2538 
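   /* The leading uint32 is the size header written by save_nir_to_disk_cache();
    * the serialized NIR starts at buffer[1] and is size - 4 bytes long.
    */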
2539    size -= 4;
2540    blob_reader_init(&blob_reader, buffer + 1, size);
2541    s = nir_deserialize(NULL, options, &blob_reader);
2542    free(buffer); /* buffer was malloc-ed */
2543    return s;
2544 }
2545 
2546 struct nir_shader *
2547 tgsi_to_nir(const void *tgsi_tokens,
2548             struct pipe_screen *screen,
2549             bool allow_disk_cache)
2550 {
2551    struct disk_cache *cache = NULL;
2552    struct ttn_compile *c;
2553    struct nir_shader *s = NULL;
2554    uint8_t key[CACHE_KEY_SIZE];
2555    unsigned processor;
2556 
2557    if (allow_disk_cache)
2558       cache = screen->get_disk_shader_cache(screen);
2559 
2560    /* Look first in the cache */
2561    if (cache) {
2562       disk_cache_compute_key(cache,
2563                              tgsi_tokens,
2564                              tgsi_num_tokens(tgsi_tokens) * sizeof(struct tgsi_token),
2565                              key);
2566       processor = tgsi_get_processor_type(tgsi_tokens);
2567       s = load_nir_from_disk_cache(cache, screen, key, processor);
2568    }
2569 
2570    if (s)
2571       return s;
2572 
2573    /* Not in the cache */
2574 
2575    c = ttn_compile_init(tgsi_tokens, NULL, screen);
2576    s = c->build.shader;
2577    ttn_finalize_nir(c, screen);
2578    ralloc_free(c);
2579 
2580    if (cache)
2581       save_nir_to_disk_cache(cache, key, s);
2582 
2583    return s;
2584 }
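
/* Typical driver-side usage is a single call (sketch only; "tokens" and what
 * the driver does with the result are illustrative, not part of this file):
 *
 *    nir_shader *s = tgsi_to_nir(tokens, pscreen, true);
 *    // hand "s" to the driver's NIR backend
 */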
2585 
2586 struct nir_shader *
2587 tgsi_to_nir_noscreen(const void *tgsi_tokens,
2588                      const nir_shader_compiler_options *options)
2589 {
2590    struct ttn_compile *c;
2591    struct nir_shader *s;
2592 
2593    c = ttn_compile_init(tgsi_tokens, options, NULL);
2594    s = c->build.shader;
2595    ralloc_free(c);
2596 
2597    return s;
2598 }
2599 
2600