/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Jason Ekstrand (jason@jlekstrand.net)
 *
 */

#include "nir.h"
#include "nir_builder.h"
#include "nir_constant_expressions.h"
#include "nir_deref.h"
#include <math.h>

/*
 * Implements SSA-based constant folding: ALU instructions whose sources are
 * all constants are evaluated at compile time and replaced with immediates,
 * and a handful of intrinsics with constant operands are simplified away.
 */

struct constant_fold_state {
   /* True if the shader contains any load_constant intrinsics. */
   bool has_load_constant;
   /* True if any load_constant intrinsic has a non-constant (indirect)
    * offset, in which case the constant data cannot be freed.
    */
   bool has_indirect_load_const;
};

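/* Try to fold a single ALU instruction whose sources are all load_const.
 * On success, the instruction is evaluated with nir_eval_const_opcode(),
 * replaced with an immediate, and removed.
 */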
static bool
try_fold_alu(nir_builder *b, nir_alu_instr *alu)
{
   nir_const_value src[NIR_MAX_VEC_COMPONENTS][NIR_MAX_VEC_COMPONENTS];

   if (!alu->dest.dest.is_ssa)
      return false;

   /* If any inputs or outputs have an unsized type, we need to guess the
    * bit-size.  The validator ensures that all unsized bit-sizes match, so
    * we can simply take the bit-size from the first unsized input or output.
    * If all of the inputs and outputs are sized, no guess is needed: the
    * code generated for constant opcodes in that case already knows the
    * sizes of the types involved and doesn't use the provided bit-size for
    * anything (although it still has to be passed a valid one).
    */
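   /* For example, iadd's input and output types are both unsized integers,
    * so the bit-size comes from the destination; for a comparison like ieq,
    * the output is a sized 1-bit boolean, so the bit-size is taken from the
    * (unsized) integer sources instead.
    */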
   unsigned bit_size = 0;
   if (!nir_alu_type_get_type_size(nir_op_infos[alu->op].output_type))
      bit_size = alu->dest.dest.ssa.bit_size;

   for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; i++) {
      if (!alu->src[i].src.is_ssa)
         return false;

      if (bit_size == 0 &&
          !nir_alu_type_get_type_size(nir_op_infos[alu->op].input_types[i]))
         bit_size = alu->src[i].src.ssa->bit_size;

      nir_instr *src_instr = alu->src[i].src.ssa->parent_instr;

      if (src_instr->type != nir_instr_type_load_const)
         return false;
      nir_load_const_instr *load_const = nir_instr_as_load_const(src_instr);

      for (unsigned j = 0; j < nir_ssa_alu_instr_src_components(alu, i);
           j++) {
         src[i][j] = load_const->value[alu->src[i].swizzle[j]];
      }

      /* We shouldn't have any source modifiers in the optimization loop. */
      assert(!alu->src[i].abs && !alu->src[i].negate);
   }

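   /* If every input and output was sized, any valid bit-size will do. */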
   if (bit_size == 0)
      bit_size = 32;

   /* We shouldn't have any saturate modifiers in the optimization loop. */
   assert(!alu->dest.saturate);

   nir_const_value dest[NIR_MAX_VEC_COMPONENTS];
   nir_const_value *srcs[NIR_MAX_VEC_COMPONENTS];
   memset(dest, 0, sizeof(dest));
   for (unsigned i = 0; i < nir_op_infos[alu->op].num_inputs; ++i)
      srcs[i] = src[i];
   nir_eval_const_opcode(alu->op, dest, alu->dest.dest.ssa.num_components,
                         bit_size, srcs,
                         b->shader->info.float_controls_execution_mode);

   b->cursor = nir_before_instr(&alu->instr);
   nir_ssa_def *imm = nir_build_imm(b, alu->dest.dest.ssa.num_components,
                                       alu->dest.dest.ssa.bit_size,
                                       dest);
   nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(imm));
   nir_instr_remove(&alu->instr);

   ralloc_free(alu);

   return true;
}

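/* Walk a deref chain and, if it points into a nir_var_mem_constant variable
 * with a constant initializer at a constant offset, return the
 * nir_const_value vector it refers to.  Returns NULL if the value cannot be
 * determined at compile time.
 */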
static nir_const_value *
const_value_for_deref(nir_deref_instr *deref)
{
   if (!nir_deref_mode_is(deref, nir_var_mem_constant))
      return NULL;

   nir_deref_path path;
   nir_deref_path_init(&path, deref, NULL);
   if (path.path[0]->deref_type != nir_deref_type_var)
      goto fail;

   nir_variable *var = path.path[0]->var;
   assert(var->data.mode == nir_var_mem_constant);
   if (var->constant_initializer == NULL)
      goto fail;

   nir_constant *c = var->constant_initializer;
   nir_const_value *v = NULL; /* Vector value for array-deref-of-vec */

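   /* Walk from the variable to the end of the deref chain, descending into
    * the nested nir_constant tree as we go.
    */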
   for (unsigned i = 1; path.path[i] != NULL; i++) {
      nir_deref_instr *p = path.path[i];
      switch (p->deref_type) {
      case nir_deref_type_var:
         unreachable("Deref paths can only start with a var deref");

      case nir_deref_type_array: {
         assert(v == NULL);
         if (!nir_src_is_const(p->arr.index))
            goto fail;

         uint64_t idx = nir_src_as_uint(p->arr.index);
         if (c->num_elements > 0) {
            assert(glsl_type_is_array(path.path[i-1]->type));
            if (idx >= c->num_elements)
               goto fail;
            c = c->elements[idx];
         } else {
            assert(glsl_type_is_vector(path.path[i-1]->type));
            assert(glsl_type_is_scalar(p->type));
            if (idx >= NIR_MAX_VEC_COMPONENTS)
               goto fail;
            v = &c->values[idx];
         }
         break;
      }

      case nir_deref_type_struct:
         assert(glsl_type_is_struct(path.path[i-1]->type));
         assert(v == NULL && c->num_elements > 0);
         if (p->strct.index >= c->num_elements)
            goto fail;
         c = c->elements[p->strct.index];
         break;

      default:
         goto fail;
      }
   }

   /* We have to have ended at a vector */
   assert(c->num_elements == 0);
   /* Free the path (it may be heap-allocated) before returning. */
   nir_deref_path_finish(&path);
   return v ? v : c->values;

fail:
   nir_deref_path_finish(&path);
   return NULL;
}

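/* Try to fold an intrinsic instruction.  Handles conditional discard
 * variants with known conditions, loads from constant memory, and subgroup
 * operations whose data operand is constant (and therefore uniform).
 */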
static bool
try_fold_intrinsic(nir_builder *b, nir_intrinsic_instr *intrin,
                   struct constant_fold_state *state)
{
   switch (intrin->intrinsic) {
   case nir_intrinsic_demote_if:
   case nir_intrinsic_discard_if:
   case nir_intrinsic_terminate_if:
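      /* A constant-true condition becomes an unconditional demote, discard,
       * or terminate; a constant-false condition makes the instruction a
       * no-op, so it can simply be removed.
       */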
      if (nir_src_is_const(intrin->src[0])) {
         if (nir_src_as_bool(intrin->src[0])) {
            b->cursor = nir_before_instr(&intrin->instr);
            nir_intrinsic_op op;
            switch (intrin->intrinsic) {
            case nir_intrinsic_discard_if:
               op = nir_intrinsic_discard;
               break;
            case nir_intrinsic_demote_if:
               op = nir_intrinsic_demote;
               break;
            case nir_intrinsic_terminate_if:
               op = nir_intrinsic_terminate;
               break;
            default:
               unreachable("invalid intrinsic");
            }
            nir_intrinsic_instr *new_instr =
               nir_intrinsic_instr_create(b->shader, op);
            nir_builder_instr_insert(b, &new_instr->instr);
         }
         nir_instr_remove(&intrin->instr);
         return true;
      }
      return false;

   case nir_intrinsic_load_deref: {
      nir_deref_instr *deref = nir_src_as_deref(intrin->src[0]);
      nir_const_value *v = const_value_for_deref(deref);
      if (v) {
         b->cursor = nir_before_instr(&intrin->instr);
         nir_ssa_def *val = nir_build_imm(b, intrin->dest.ssa.num_components,
                                             intrin->dest.ssa.bit_size, v);
         nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(val));
         nir_instr_remove(&intrin->instr);
         return true;
      }
      return false;
   }

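   /* load_constant reads from the shader's constant_data blob.  If the
    * offset is known, the load can be replaced with an immediate built
    * directly from the data.
    */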
   case nir_intrinsic_load_constant: {
      state->has_load_constant = true;

      if (!nir_src_is_const(intrin->src[0])) {
         state->has_indirect_load_const = true;
         return false;
      }

      unsigned offset = nir_src_as_uint(intrin->src[0]);
      unsigned base = nir_intrinsic_base(intrin);
      unsigned range = nir_intrinsic_range(intrin);
      assert(base + range <= b->shader->constant_data_size);

      b->cursor = nir_before_instr(&intrin->instr);
      nir_ssa_def *val;
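      /* Loads that start past the end of the declared range have undefined
       * results, so fold them to an undef.
       */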
      if (offset >= range) {
         val = nir_ssa_undef(b, intrin->dest.ssa.num_components,
                                intrin->dest.ssa.bit_size);
      } else {
         nir_const_value imm[NIR_MAX_VEC_COMPONENTS];
         memset(imm, 0, sizeof(imm));
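         /* Thanks to the memset above, any bytes the (possibly truncated)
          * copies below don't cover read back as zero.
          */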
         uint8_t *data = (uint8_t *)b->shader->constant_data + base;
         for (unsigned i = 0; i < intrin->num_components; i++) {
            unsigned bytes = intrin->dest.ssa.bit_size / 8;
            bytes = MIN2(bytes, range - offset);

            memcpy(&imm[i].u64, data + offset, bytes);
            offset += bytes;
         }
         val = nir_build_imm(b, intrin->dest.ssa.num_components,
                                intrin->dest.ssa.bit_size, imm);
      }
      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(val));
      nir_instr_remove(&intrin->instr);
      return true;
   }

   case nir_intrinsic_vote_any:
   case nir_intrinsic_vote_all:
   case nir_intrinsic_read_invocation:
   case nir_intrinsic_read_first_invocation:
   case nir_intrinsic_shuffle:
   case nir_intrinsic_shuffle_xor:
   case nir_intrinsic_shuffle_up:
   case nir_intrinsic_shuffle_down:
   case nir_intrinsic_quad_broadcast:
   case nir_intrinsic_quad_swap_horizontal:
   case nir_intrinsic_quad_swap_vertical:
   case nir_intrinsic_quad_swap_diagonal:
   case nir_intrinsic_quad_swizzle_amd:
   case nir_intrinsic_masked_swizzle_amd:
      /* All of these have the data payload in the first source.  They may
       * have a second source with a shuffle index, but that doesn't matter
       * if the data is constant: a constant value is uniform across the
       * subgroup, so every invocation reads back the same thing.
       */
      if (nir_src_is_const(intrin->src[0])) {
         nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                                  nir_src_for_ssa(intrin->src[0].ssa));
         nir_instr_remove(&intrin->instr);
         return true;
      }
      return false;

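      /* With a constant source, every invocation holds the same value, so
       * the equality vote passes.
       */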
      if (nir_src_is_const(intrin->src[0])) {
         b->cursor = nir_before_instr(&intrin->instr);
         nir_ssa_def_rewrite_uses(&intrin->dest.ssa,
                                  nir_src_for_ssa(nir_imm_true(b)));
         nir_instr_remove(&intrin->instr);
         return true;
      }
      return false;

   default:
      return false;
   }
}

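/* Callback for nir_shader_instructions_pass(): dispatch on the instruction
 * type.  Only ALU instructions and intrinsics are candidates for folding.
 */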
static bool
try_fold_instr(nir_builder *b, nir_instr *instr, void *_state)
{
   switch (instr->type) {
   case nir_instr_type_alu:
      return try_fold_alu(b, nir_instr_as_alu(instr));
   case nir_instr_type_intrinsic:
      return try_fold_intrinsic(b, nir_instr_as_intrinsic(instr), _state);
   default:
      /* Don't know how to constant fold */
      return false;
   }
}

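/* Fold constants across the whole shader.  As a side effect, if every
 * load_constant was folded away (direct offsets only), the backing
 * constant data is freed since nothing can reference it anymore.
 */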
bool
nir_opt_constant_folding(nir_shader *shader)
{
   struct constant_fold_state state;
   state.has_load_constant = false;
   state.has_indirect_load_const = false;

   bool progress = nir_shader_instructions_pass(shader, try_fold_instr,
                                                nir_metadata_block_index |
                                                nir_metadata_dominance,
                                                &state);

   /* Don't free the constant data if there were no constant loads at all:
    * the data might still be used, with the loads already lowered to
    * load_ubo.  Indirect loads also keep the data alive, since they can't
    * be folded away.
    */
   if (state.has_load_constant && !state.has_indirect_load_const &&
       shader->constant_data_size) {
      ralloc_free(shader->constant_data);
      shader->constant_data = NULL;
      shader->constant_data_size = 0;
   }

   return progress;
}