/*
 * Copyright © 2018 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "nir_builder.h"

/**
 * Some ALU operations may not be supported by the hardware at specific
 * bit-sizes.  This pass allows implementations to selectively lower such
 * operations to a bit-size that is supported natively and then converts the
 * result back to the original bit-size.
 */
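/*
 * For example, a backend with no native 8- or 16-bit integer ALU support
 * might pass a callback along these lines (a minimal sketch; the callback
 * name and the bit-size checks are illustrative, not part of this file):
 *
 *    static unsigned
 *    lower_bit_size_cb(const nir_instr *instr, void *data)
 *    {
 *       if (instr->type != nir_instr_type_alu)
 *          return 0;
 *
 *       const nir_alu_instr *alu = nir_instr_as_alu((nir_instr *)instr);
 *       unsigned bit_size = alu->dest.dest.ssa.bit_size;
 *       if (bit_size == 8 || bit_size == 16)
 *          return 32;
 *
 *       return 0;   // returning 0 leaves the instruction untouched
 *    }
 *
 *    nir_lower_bit_size(shader, lower_bit_size_cb, NULL);
 */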

static nir_ssa_def *convert_to_bit_size(nir_builder *bld, nir_ssa_def *src,
                                        nir_alu_type type, unsigned bit_size)
{
   /* create b2i32(a) instead of i2i32(b2i8(a))/i2i32(b2i16(a)) */
   nir_alu_instr *alu = nir_src_as_alu_instr(nir_src_for_ssa(src));
   if ((type & (nir_type_uint | nir_type_int)) && bit_size == 32 &&
       alu && (alu->op == nir_op_b2i8 || alu->op == nir_op_b2i16)) {
      nir_alu_instr *instr = nir_alu_instr_create(bld->shader, nir_op_b2i32);
      nir_alu_src_copy(&instr->src[0], &alu->src[0], instr);
      return nir_builder_alu_instr_finish_and_insert(bld, instr);
   }

   return nir_convert_to_bit_size(bld, src, type, bit_size);
}

static void
lower_alu_instr(nir_builder *bld, nir_alu_instr *alu, unsigned bit_size)
{
   const nir_op op = alu->op;
   unsigned dst_bit_size = alu->dest.dest.ssa.bit_size;

   bld->cursor = nir_before_instr(&alu->instr);

   /* Convert each source to the requested bit-size */
   nir_ssa_def *srcs[NIR_MAX_VEC_COMPONENTS] = { NULL };
   for (unsigned i = 0; i < nir_op_infos[op].num_inputs; i++) {
      nir_ssa_def *src = nir_ssa_for_alu_src(bld, alu, i);

      nir_alu_type type = nir_op_infos[op].input_types[i];
      if (nir_alu_type_get_type_size(type) == 0)
         src = convert_to_bit_size(bld, src, type, bit_size);

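      /* NIR shifts only use the low bits of the shift count (the count is
       * taken modulo the bit-size), so mask the widened count to the
       * original destination bit-size to preserve that behaviour.
       */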
      if (i == 1 && (op == nir_op_ishl || op == nir_op_ishr || op == nir_op_ushr)) {
         assert(util_is_power_of_two_nonzero(dst_bit_size));
         src = nir_iand(bld, src, nir_imm_int(bld, dst_bit_size - 1));
      }

      srcs[i] = src;
   }

   /* Emit the lowered ALU instruction */
   nir_ssa_def *lowered_dst = NULL;
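   /* There is no mul_high at the lowered bit-size.  Instead, do a full
    * multiply (the product of two dst_bit_size values fits in bit_size bits)
    * and shift the high half of the product down into place.
    */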
   if (op == nir_op_imul_high || op == nir_op_umul_high) {
      assert(dst_bit_size * 2 <= bit_size);
      lowered_dst = nir_imul(bld, srcs[0], srcs[1]);
      if (nir_op_infos[op].output_type & nir_type_uint)
         lowered_dst = nir_ushr_imm(bld, lowered_dst, dst_bit_size);
      else
         lowered_dst = nir_ishr_imm(bld, lowered_dst, dst_bit_size);
   } else {
      lowered_dst = nir_build_alu_src_arr(bld, op, srcs);
   }

   /* Convert result back to the original bit-size */
   if (nir_alu_type_get_type_size(nir_op_infos[op].output_type) == 0 &&
       dst_bit_size != bit_size) {
      nir_alu_type type = nir_op_infos[op].output_type;
      nir_ssa_def *dst = nir_convert_to_bit_size(bld, lowered_dst, type, dst_bit_size);
      nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(dst));
   } else {
      nir_ssa_def_rewrite_uses(&alu->dest.dest.ssa, nir_src_for_ssa(lowered_dst));
   }
}

static void
lower_intrinsic_instr(nir_builder *b, nir_intrinsic_instr *intrin,
                      unsigned bit_size)
{
   switch (intrin->intrinsic) {
   case nir_intrinsic_read_invocation:
   case nir_intrinsic_read_first_invocation:
   case nir_intrinsic_vote_feq:
   case nir_intrinsic_vote_ieq:
   case nir_intrinsic_shuffle:
   case nir_intrinsic_shuffle_xor:
   case nir_intrinsic_shuffle_up:
   case nir_intrinsic_shuffle_down:
   case nir_intrinsic_quad_broadcast:
   case nir_intrinsic_quad_swap_horizontal:
   case nir_intrinsic_quad_swap_vertical:
   case nir_intrinsic_quad_swap_diagonal:
   case nir_intrinsic_reduce:
   case nir_intrinsic_inclusive_scan:
   case nir_intrinsic_exclusive_scan: {
      assert(intrin->src[0].is_ssa && intrin->dest.is_ssa);
      const unsigned old_bit_size = intrin->dest.ssa.bit_size;
      assert(old_bit_size < bit_size);

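      /* Pick the conversion type so values keep their meaning at the wider
       * bit-size: use the reduction op's input type when there is one, treat
       * vote_feq as float, and zero-extend everything else (shuffles and the
       * like just move data around).
       */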
      nir_alu_type type = nir_type_uint;
      if (nir_intrinsic_has_reduction_op(intrin))
         type = nir_op_infos[nir_intrinsic_reduction_op(intrin)].input_types[0];
      else if (intrin->intrinsic == nir_intrinsic_vote_feq)
         type = nir_type_float;

      b->cursor = nir_before_instr(&intrin->instr);
      nir_intrinsic_instr *new_intrin =
         nir_instr_as_intrinsic(nir_instr_clone(b->shader, &intrin->instr));

      nir_ssa_def *new_src = nir_convert_to_bit_size(b, intrin->src[0].ssa,
                                                     type, bit_size);
      new_intrin->src[0] = nir_src_for_ssa(new_src);

      if (intrin->intrinsic == nir_intrinsic_vote_feq ||
          intrin->intrinsic == nir_intrinsic_vote_ieq) {
         /* These return a Boolean; it's always 1-bit */
         assert(new_intrin->dest.ssa.bit_size == 1);
      } else {
         /* These return the same bit size as the source; we need to adjust
          * the size and then we'll have to emit a down-cast.
          */
         assert(intrin->src[0].ssa->bit_size == intrin->dest.ssa.bit_size);
         new_intrin->dest.ssa.bit_size = bit_size;
      }

      nir_builder_instr_insert(b, &new_intrin->instr);

      nir_ssa_def *res = &new_intrin->dest.ssa;
      if (intrin->intrinsic == nir_intrinsic_exclusive_scan) {
         /* For exclusive scan, we have to be careful because the identity
          * value for the higher bit size may get added into the mix by
          * disabled channels.  For some cases (imin/imax in particular),
          * this value won't convert to the right identity value when we
          * down-cast so we have to clamp it.
          */
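         /* e.g. an 8-bit imin scan lowered to 32 bits can see INT32_MAX from
          * inactive channels; truncating that back to 8 bits would wrap to
          * -1, so clamp it to the 8-bit INT_MAX (127) first.
          */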
         switch (nir_intrinsic_reduction_op(intrin)) {
         case nir_op_imin: {
            int64_t int_max = (1ull << (old_bit_size - 1)) - 1;
            res = nir_imin(b, res, nir_imm_intN_t(b, int_max, bit_size));
            break;
         }
         case nir_op_imax: {
            int64_t int_min = -(int64_t)(1ull << (old_bit_size - 1));
            res = nir_imax(b, res, nir_imm_intN_t(b, int_min, bit_size));
            break;
         }
         default:
            break;
         }
      }

      if (intrin->intrinsic != nir_intrinsic_vote_feq &&
          intrin->intrinsic != nir_intrinsic_vote_ieq)
         res = nir_u2u(b, res, old_bit_size);

      nir_ssa_def_rewrite_uses(&intrin->dest.ssa, nir_src_for_ssa(res));
      break;
   }

   default:
      unreachable("Unsupported instruction");
   }
}

static bool
lower_impl(nir_function_impl *impl,
           nir_lower_bit_size_callback callback,
           void *callback_data)
{
   nir_builder b;
   nir_builder_init(&b, impl);
   bool progress = false;

   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         unsigned lower_bit_size = callback(instr, callback_data);
         if (lower_bit_size == 0)
            continue;

         switch (instr->type) {
         case nir_instr_type_alu:
            lower_alu_instr(&b, nir_instr_as_alu(instr), lower_bit_size);
            break;

         case nir_instr_type_intrinsic:
            lower_intrinsic_instr(&b, nir_instr_as_intrinsic(instr),
                                  lower_bit_size);
            break;

         default:
            unreachable("Unsupported instruction type");
         }
         progress = true;
      }
   }

   if (progress) {
      nir_metadata_preserve(impl, nir_metadata_block_index |
                                  nir_metadata_dominance);
   } else {
      nir_metadata_preserve(impl, nir_metadata_all);
   }

   return progress;
}
bool
nir_lower_bit_size(nir_shader *shader,
                   nir_lower_bit_size_callback callback,
                   void *callback_data)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl)
         progress |= lower_impl(function->impl, callback, callback_data);
   }

   return progress;
}

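/* Split a 64-bit phi into two 32-bit phis: each source is unpacked into its
 * low and high 32-bit halves, the two new phis take those halves, and the
 * halves are packed back together after the phis so existing users of the
 * 64-bit value are rewritten to the packed result.
 */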
static void
split_phi(nir_builder *b, nir_phi_instr *phi)
{
   nir_phi_instr *lowered[2] = {
      nir_phi_instr_create(b->shader),
      nir_phi_instr_create(b->shader)
   };
   int num_components = phi->dest.ssa.num_components;
   assert(phi->dest.ssa.bit_size == 64);

   nir_foreach_phi_src(src, phi) {
      assert(num_components == src->src.ssa->num_components);

      b->cursor = nir_before_src(&src->src, false);

      nir_ssa_def *x = nir_unpack_64_2x32_split_x(b, src->src.ssa);
      nir_ssa_def *y = nir_unpack_64_2x32_split_y(b, src->src.ssa);

      nir_phi_src *xsrc = rzalloc(lowered[0], nir_phi_src);
      xsrc->pred = src->pred;
      xsrc->src = nir_src_for_ssa(x);
      exec_list_push_tail(&lowered[0]->srcs, &xsrc->node);

      nir_phi_src *ysrc = rzalloc(lowered[1], nir_phi_src);
      ysrc->pred = src->pred;
      ysrc->src = nir_src_for_ssa(y);
      exec_list_push_tail(&lowered[1]->srcs, &ysrc->node);
   }

   nir_ssa_dest_init(&lowered[0]->instr, &lowered[0]->dest,
                     num_components, 32, NULL);
   nir_ssa_dest_init(&lowered[1]->instr, &lowered[1]->dest,
                     num_components, 32, NULL);

   b->cursor = nir_before_instr(&phi->instr);
   nir_builder_instr_insert(b, &lowered[0]->instr);
   nir_builder_instr_insert(b, &lowered[1]->instr);

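   /* Phis must stay grouped at the top of their block, so emit the re-packed
    * 64-bit value after the last phi rather than directly at the old phi.
    */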
   b->cursor = nir_after_phis(nir_cursor_current_block(b->cursor));
   nir_ssa_def *merged = nir_pack_64_2x32_split(b, &lowered[0]->dest.ssa, &lowered[1]->dest.ssa);
   nir_ssa_def_rewrite_uses(&phi->dest.ssa, nir_src_for_ssa(merged));
   nir_instr_remove(&phi->instr);
}

static bool
lower_64bit_phi_impl(nir_function_impl *impl)
{
   nir_builder b;
   nir_builder_init(&b, impl);
   bool progress = false;

   nir_foreach_block(block, impl) {
      nir_foreach_instr_safe(instr, block) {
         if (instr->type != nir_instr_type_phi)
            break;

         nir_phi_instr *phi = nir_instr_as_phi(instr);
         assert(phi->dest.is_ssa);

         if (phi->dest.ssa.bit_size <= 32)
            continue;

         split_phi(&b, phi);
         progress = true;
      }
   }

   if (progress) {
      nir_metadata_preserve(impl, nir_metadata_block_index |
                                  nir_metadata_dominance);
   } else {
      nir_metadata_preserve(impl, nir_metadata_all);
   }

   return progress;
}

bool
nir_lower_64bit_phis(nir_shader *shader)
{
   bool progress = false;

   nir_foreach_function(function, shader) {
      if (function->impl)
         progress |= lower_64bit_phi_impl(function->impl);
   }

   return progress;
}