/*
 * Copyright (C) 2017-2018 Rob Clark <robclark@freedesktop.org>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Rob Clark <robclark@freedesktop.org>
 */

#define GPU 600

#include "ir3_context.h"
#include "ir3_image.h"

/*
 * Handlers for instructions changed/added in a6xx:
 *
 * Starting with a6xx, isam and stib are used for SSBOs as well; stib and the
 * atomic instructions (used for both SSBO and image) use a new instruction
 * encoding compared to a4xx/a5xx.
 */
39 
40 /* src[] = { buffer_index, offset }. No const_index */
41 static void
emit_intrinsic_load_ssbo(struct ir3_context * ctx,nir_intrinsic_instr * intr,struct ir3_instruction ** dst)42 emit_intrinsic_load_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr,
43 		struct ir3_instruction **dst)
44 {
45 	struct ir3_block *b = ctx->block;
46 	struct ir3_instruction *offset;
47 	struct ir3_instruction *ldib;
48 
49 	offset = ir3_get_src(ctx, &intr->src[2])[0];
50 
51 	ldib = ir3_LDIB(b, ir3_ssbo_to_ibo(ctx, intr->src[0]), 0, offset, 0);
52 	ldib->regs[0]->wrmask = MASK(intr->num_components);
53 	ldib->cat6.iim_val = intr->num_components;
54 	ldib->cat6.d = 1;
55 	ldib->cat6.type = intr->dest.ssa.bit_size == 16 ? TYPE_U16 : TYPE_U32;
56 	ldib->barrier_class = IR3_BARRIER_BUFFER_R;
57 	ldib->barrier_conflict = IR3_BARRIER_BUFFER_W;
58 	ir3_handle_bindless_cat6(ldib, intr->src[0]);
59 
60 	ir3_split_dest(b, dst, ldib, 0, intr->num_components);
61 }

/* src[] = { value, block_index, offset }. const_index[] = { write_mask } */
static void
emit_intrinsic_store_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr)
{
	struct ir3_block *b = ctx->block;
	struct ir3_instruction *stib, *val, *offset;
	unsigned wrmask = nir_intrinsic_write_mask(intr);
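	/* with a write-mask that is contiguous from bit 0 (asserted below),
	 * ffs(~wrmask) - 1 is simply the number of components written:
	 */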
	unsigned ncomp = ffs(~wrmask) - 1;

	assert(wrmask == BITFIELD_MASK(intr->num_components));

	/* src0 is offset, src1 is value:
	 */
	val = ir3_create_collect(ctx, ir3_get_src(ctx, &intr->src[0]), ncomp);
	offset = ir3_get_src(ctx, &intr->src[3])[0];

	stib = ir3_STIB(b, ir3_ssbo_to_ibo(ctx, intr->src[1]), 0, offset, 0, val, 0);
	stib->cat6.iim_val = ncomp;
	stib->cat6.d = 1;
	stib->cat6.type = intr->src[0].ssa->bit_size == 16 ? TYPE_U16 : TYPE_U32;
	stib->barrier_class = IR3_BARRIER_BUFFER_W;
	stib->barrier_conflict = IR3_BARRIER_BUFFER_R | IR3_BARRIER_BUFFER_W;
	ir3_handle_bindless_cat6(stib, intr->src[1]);

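	/* the store has no SSA destination, so add it to the keeps list to
	 * prevent it from being DCE'd:
	 */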
	array_insert(b, b->keeps, stib);
}

/*
 * SSBO atomic intrinsics
 *
 * All of the SSBO atomic memory operations read a value from memory,
 * compute a new value using one of the operations below, write the new
 * value to memory, and return the original value read.
 *
 * All operations take 3 sources except CompSwap, which takes 4. These
 * sources represent:
 *
 * 0: The SSBO buffer index.
 * 1: The offset into the SSBO buffer of the variable that the atomic
 *    operation will operate on.
 * 2: The data parameter to the atomic function (i.e. the value to add
 *    in ssbo_atomic_add, etc).
 * 3: For CompSwap only: the second data parameter.
 */
static struct ir3_instruction *
emit_intrinsic_atomic_ssbo(struct ir3_context *ctx, nir_intrinsic_instr *intr)
{
	struct ir3_block *b = ctx->block;
	struct ir3_instruction *atomic, *ibo, *src0, *src1, *data, *dummy;
	type_t type = TYPE_U32;

	ibo = ir3_ssbo_to_ibo(ctx, intr->src[0]);

	data   = ir3_get_src(ctx, &intr->src[2])[0];

	/* So this gets a bit creative:
	 *
	 *    src0    - vecN offset/coords
	 *    src1.x  - is actually the destination register
	 *    src1.y  - is 'data' except for cmpxchg, where src1.y is 'compare'
	 *    src1.z  - is 'data' for cmpxchg
	 *
	 * Combining src and dest like this doesn't work out so well with how
	 * scheduling and RA work.  So for now we create a dummy src1.x, and
	 * then in a later fixup pass insert an extra MOV out of src1.x.
	 * See ir3_a6xx_fixup_atomic_dests().
	 *
	 * Note that NIR has already multiplied the offset by four.
	 */
	dummy = create_immed(b, 0);

	if (intr->intrinsic == nir_intrinsic_ssbo_atomic_comp_swap_ir3) {
		src0 = ir3_get_src(ctx, &intr->src[4])[0];
		struct ir3_instruction *compare = ir3_get_src(ctx, &intr->src[3])[0];
		src1 = ir3_create_collect(ctx, (struct ir3_instruction*[]){
			dummy, compare, data
		}, 3);
	} else {
		src0 = ir3_get_src(ctx, &intr->src[3])[0];
		src1 = ir3_create_collect(ctx, (struct ir3_instruction*[]){
			dummy, data
		}, 2);
	}

	switch (intr->intrinsic) {
	case nir_intrinsic_ssbo_atomic_add_ir3:
		atomic = ir3_ATOMIC_ADD_G(b, ibo, 0, src0, 0, src1, 0);
		break;
	case nir_intrinsic_ssbo_atomic_imin_ir3:
		atomic = ir3_ATOMIC_MIN_G(b, ibo, 0, src0, 0, src1, 0);
		type = TYPE_S32;
		break;
	case nir_intrinsic_ssbo_atomic_umin_ir3:
		atomic = ir3_ATOMIC_MIN_G(b, ibo, 0, src0, 0, src1, 0);
		break;
	case nir_intrinsic_ssbo_atomic_imax_ir3:
		atomic = ir3_ATOMIC_MAX_G(b, ibo, 0, src0, 0, src1, 0);
		type = TYPE_S32;
		break;
	case nir_intrinsic_ssbo_atomic_umax_ir3:
		atomic = ir3_ATOMIC_MAX_G(b, ibo, 0, src0, 0, src1, 0);
		break;
	case nir_intrinsic_ssbo_atomic_and_ir3:
		atomic = ir3_ATOMIC_AND_G(b, ibo, 0, src0, 0, src1, 0);
		break;
	case nir_intrinsic_ssbo_atomic_or_ir3:
		atomic = ir3_ATOMIC_OR_G(b, ibo, 0, src0, 0, src1, 0);
		break;
	case nir_intrinsic_ssbo_atomic_xor_ir3:
		atomic = ir3_ATOMIC_XOR_G(b, ibo, 0, src0, 0, src1, 0);
		break;
	case nir_intrinsic_ssbo_atomic_exchange_ir3:
		atomic = ir3_ATOMIC_XCHG_G(b, ibo, 0, src0, 0, src1, 0);
		break;
	case nir_intrinsic_ssbo_atomic_comp_swap_ir3:
		atomic = ir3_ATOMIC_CMPXCHG_G(b, ibo, 0, src0, 0, src1, 0);
		break;
	default:
		unreachable("boo");
	}

	atomic->cat6.iim_val = 1;
	atomic->cat6.d = 1;
	atomic->cat6.type = type;
	atomic->barrier_class = IR3_BARRIER_BUFFER_W;
	atomic->barrier_conflict = IR3_BARRIER_BUFFER_R | IR3_BARRIER_BUFFER_W;
	ir3_handle_bindless_cat6(atomic, intr->src[0]);

	/* even if nothing consumes the result, we can't DCE the instruction: */
	array_insert(b, b->keeps, atomic);

	return atomic;
}

/* src[] = { deref, coord, sample_index }. const_index[] = {} */
static void
emit_intrinsic_load_image(struct ir3_context *ctx, nir_intrinsic_instr *intr,
		struct ir3_instruction **dst)
{
	struct ir3_block *b = ctx->block;
	struct ir3_instruction *ldib;
	struct ir3_instruction * const *coords = ir3_get_src(ctx, &intr->src[1]);
	unsigned ncoords = ir3_get_image_coords(intr, NULL);

	ldib = ir3_LDIB(b, ir3_image_to_ibo(ctx, intr->src[0]), 0,
					ir3_create_collect(ctx, coords, ncoords), 0);
	ldib->regs[0]->wrmask = MASK(intr->num_components);
	ldib->cat6.iim_val = intr->num_components;
	ldib->cat6.d = ncoords;
	ldib->cat6.type = ir3_get_type_for_image_intrinsic(intr);
	ldib->cat6.typed = true;
	ldib->barrier_class = IR3_BARRIER_IMAGE_R;
	ldib->barrier_conflict = IR3_BARRIER_IMAGE_W;
	ir3_handle_bindless_cat6(ldib, intr->src[0]);

	ir3_split_dest(b, dst, ldib, 0, intr->num_components);
}

/* src[] = { deref, coord, sample_index, value }. const_index[] = {} */
static void
emit_intrinsic_store_image(struct ir3_context *ctx, nir_intrinsic_instr *intr)
{
	struct ir3_block *b = ctx->block;
	struct ir3_instruction *stib;
	struct ir3_instruction * const *value = ir3_get_src(ctx, &intr->src[3]);
	struct ir3_instruction * const *coords = ir3_get_src(ctx, &intr->src[1]);
	unsigned ncoords = ir3_get_image_coords(intr, NULL);
	enum pipe_format format = nir_intrinsic_format(intr);
	unsigned ncomp = ir3_get_num_components_for_image_format(format);

	/* src0 is coords, src1 is value:
	 */
	stib = ir3_STIB(b, ir3_image_to_ibo(ctx, intr->src[0]), 0,
			ir3_create_collect(ctx, coords, ncoords), 0,
			ir3_create_collect(ctx, value, ncomp), 0);
	stib->cat6.iim_val = ncomp;
	stib->cat6.d = ncoords;
	stib->cat6.type = ir3_get_type_for_image_intrinsic(intr);
	stib->cat6.typed = true;
	stib->barrier_class = IR3_BARRIER_IMAGE_W;
	stib->barrier_conflict = IR3_BARRIER_IMAGE_R | IR3_BARRIER_IMAGE_W;
	ir3_handle_bindless_cat6(stib, intr->src[0]);

	array_insert(b, b->keeps, stib);
}

/* src[] = { deref, coord, sample_index, value, compare }. const_index[] = {} */
static struct ir3_instruction *
emit_intrinsic_atomic_image(struct ir3_context *ctx, nir_intrinsic_instr *intr)
{
	struct ir3_block *b = ctx->block;
	struct ir3_instruction *atomic, *ibo, *src0, *src1, *dummy;
	struct ir3_instruction * const *coords = ir3_get_src(ctx, &intr->src[1]);
	struct ir3_instruction *value = ir3_get_src(ctx, &intr->src[3])[0];
	unsigned ncoords = ir3_get_image_coords(intr, NULL);

	ibo = ir3_image_to_ibo(ctx, intr->src[0]);

	/* So this gets a bit creative:
	 *
	 *    src0    - vecN offset/coords
	 *    src1.x  - is actually the destination register
	 *    src1.y  - is 'value' except for cmpxchg, where src1.y is 'compare'
	 *    src1.z  - is 'value' for cmpxchg
	 *
	 * Combining src and dest like this doesn't work out so well with how
	 * scheduling and RA work.  So for now we create a dummy src1.x, and
	 * then in a later fixup pass insert an extra MOV out of src1.x.
	 * See ir3_a6xx_fixup_atomic_dests().
	 */
	dummy = create_immed(b, 0);
	src0 = ir3_create_collect(ctx, coords, ncoords);

	if (intr->intrinsic == nir_intrinsic_image_atomic_comp_swap ||
		intr->intrinsic == nir_intrinsic_bindless_image_atomic_comp_swap) {
		struct ir3_instruction *compare = ir3_get_src(ctx, &intr->src[4])[0];
		src1 = ir3_create_collect(ctx, (struct ir3_instruction*[]){
			dummy, compare, value
		}, 3);
	} else {
		src1 = ir3_create_collect(ctx, (struct ir3_instruction*[]){
			dummy, value
		}, 2);
	}

	switch (intr->intrinsic) {
	case nir_intrinsic_image_atomic_add:
	case nir_intrinsic_bindless_image_atomic_add:
		atomic = ir3_ATOMIC_ADD_G(b, ibo, 0, src0, 0, src1, 0);
		break;
	case nir_intrinsic_image_atomic_imin:
	case nir_intrinsic_image_atomic_umin:
	case nir_intrinsic_bindless_image_atomic_imin:
	case nir_intrinsic_bindless_image_atomic_umin:
		atomic = ir3_ATOMIC_MIN_G(b, ibo, 0, src0, 0, src1, 0);
		break;
	case nir_intrinsic_image_atomic_imax:
	case nir_intrinsic_image_atomic_umax:
	case nir_intrinsic_bindless_image_atomic_imax:
	case nir_intrinsic_bindless_image_atomic_umax:
		atomic = ir3_ATOMIC_MAX_G(b, ibo, 0, src0, 0, src1, 0);
		break;
	case nir_intrinsic_image_atomic_and:
	case nir_intrinsic_bindless_image_atomic_and:
		atomic = ir3_ATOMIC_AND_G(b, ibo, 0, src0, 0, src1, 0);
		break;
	case nir_intrinsic_image_atomic_or:
	case nir_intrinsic_bindless_image_atomic_or:
		atomic = ir3_ATOMIC_OR_G(b, ibo, 0, src0, 0, src1, 0);
		break;
	case nir_intrinsic_image_atomic_xor:
	case nir_intrinsic_bindless_image_atomic_xor:
		atomic = ir3_ATOMIC_XOR_G(b, ibo, 0, src0, 0, src1, 0);
		break;
	case nir_intrinsic_image_atomic_exchange:
	case nir_intrinsic_bindless_image_atomic_exchange:
		atomic = ir3_ATOMIC_XCHG_G(b, ibo, 0, src0, 0, src1, 0);
		break;
	case nir_intrinsic_image_atomic_comp_swap:
	case nir_intrinsic_bindless_image_atomic_comp_swap:
		atomic = ir3_ATOMIC_CMPXCHG_G(b, ibo, 0, src0, 0, src1, 0);
		break;
	default:
		unreachable("boo");
	}

	atomic->cat6.iim_val = 1;
	atomic->cat6.d = ncoords;
	atomic->cat6.type = ir3_get_type_for_image_intrinsic(intr);
	atomic->cat6.typed = true;
	atomic->barrier_class = IR3_BARRIER_IMAGE_W;
	atomic->barrier_conflict = IR3_BARRIER_IMAGE_R | IR3_BARRIER_IMAGE_W;
	ir3_handle_bindless_cat6(atomic, intr->src[0]);

	/* even if nothing consumes the result, we can't DCE the instruction: */
	array_insert(b, b->keeps, atomic);

	return atomic;
}

static void
emit_intrinsic_image_size(struct ir3_context *ctx, nir_intrinsic_instr *intr,
		struct ir3_instruction **dst)
{
	struct ir3_block *b = ctx->block;
	struct ir3_instruction *ibo = ir3_image_to_ibo(ctx, intr->src[0]);
	struct ir3_instruction *resinfo = ir3_RESINFO(b, ibo, 0);
	resinfo->cat6.iim_val = 1;
	resinfo->cat6.d = intr->num_components;
	resinfo->cat6.type = TYPE_U32;
	resinfo->cat6.typed = false;
	/* resinfo has no writemask and always writes out 3 components: */
	compile_assert(ctx, intr->num_components <= 3);
	resinfo->regs[0]->wrmask = MASK(3);
	ir3_handle_bindless_cat6(resinfo, intr->src[0]);

	ir3_split_dest(b, dst, resinfo, 0, intr->num_components);
}

const struct ir3_context_funcs ir3_a6xx_funcs = {
		.emit_intrinsic_load_ssbo = emit_intrinsic_load_ssbo,
		.emit_intrinsic_store_ssbo = emit_intrinsic_store_ssbo,
		.emit_intrinsic_atomic_ssbo = emit_intrinsic_atomic_ssbo,
		.emit_intrinsic_load_image = emit_intrinsic_load_image,
		.emit_intrinsic_store_image = emit_intrinsic_store_image,
		.emit_intrinsic_atomic_image = emit_intrinsic_atomic_image,
		.emit_intrinsic_image_size = emit_intrinsic_image_size,
};

/*
 * Special pass to run after instruction scheduling to insert an
 * extra mov from src1.x to dst.  This way the other compiler passes
 * can ignore this quirk of the new instruction encoding.
 *
 * This should run after RA.
 */
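
/*
 * For example (illustrative only, not exact ir3 syntax): an atomic whose
 * nominal dst is rD, but whose result is actually delivered by the hardware
 * into the src1.x slot (rS after RA):
 *
 *    atomic.add.g rD, ibo, src0, {rS, data}
 *
 * gets a trailing
 *
 *    (sy)mov rD, rS
 *
 * and every consumer of the atomic's result is re-pointed at that mov.
 */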

static struct ir3_instruction *
get_atomic_dest_mov(struct ir3_instruction *atomic)
{
	struct ir3_instruction *mov;

	/* if we've already created the mov-out, then re-use it: */
	if (atomic->data)
		return atomic->data;

	/* We are already out of SSA here, so we can't use the nice builders: */
	mov = ir3_instr_create(atomic->block, OPC_MOV);
	ir3_reg_create(mov, 0, 0);    /* dst */
	ir3_reg_create(mov, 0, 0);    /* src */

	mov->cat1.src_type = TYPE_U32;
	mov->cat1.dst_type = TYPE_U32;

	/* extract back out the 'dummy' which serves as stand-in for dest: */
	struct ir3_instruction *src = atomic->regs[3]->instr;
	debug_assert(src->opc == OPC_META_COLLECT);

	*mov->regs[0] = *atomic->regs[0];
	*mov->regs[1] = *src->regs[1]->instr->regs[0];

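	/* the atomic's result arrives asynchronously (it is a cat6 global op),
	 * so the mov that reads it needs the (sy) bit to wait for it:
	 */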
	mov->flags |= IR3_INSTR_SY;

	/* it will have already been appended to the end of the block, which
	 * isn't where we want it, so fix-up the location:
	 */
	ir3_instr_move_after(mov, atomic);

	return atomic->data = mov;
}

bool
ir3_a6xx_fixup_atomic_dests(struct ir3 *ir, struct ir3_shader_variant *so)
{
	bool progress = false;

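	/* no IBOs means no SSBO/image atomics, so there is nothing to fix up: */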
	if (ir3_shader_nibo(so) == 0)
		return false;

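	/* clear instr->data, which get_atomic_dest_mov() uses to re-use the
	 * mov it has already created for a given atomic:
	 */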
	foreach_block (block, &ir->block_list) {
		foreach_instr (instr, &block->instr_list) {
			instr->data = NULL;
		}
	}

	foreach_block (block, &ir->block_list) {
		foreach_instr_safe (instr, &block->instr_list) {
			foreach_src (reg, instr) {
				struct ir3_instruction *src = reg->instr;

				if (!src)
					continue;

				if (is_atomic(src->opc) && (src->flags & IR3_INSTR_G)) {
					reg->instr = get_atomic_dest_mov(src);
					progress = true;
				}
			}
		}
	}

	/* we also need to fix up shader outputs: */
	foreach_output_n (out, n, ir) {
		if (is_atomic(out->opc) && (out->flags & IR3_INSTR_G)) {
			ir->outputs[n] = get_atomic_dest_mov(out);
			progress = true;
		}
	}

	return progress;
}