/*
 Copyright (C) Intel Corp. 2006. All Rights Reserved.
 Intel funded Tungsten Graphics (http://www.tungstengraphics.com) to
 develop this 3D driver.

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice (including the
 next paragraph) shall be included in all copies or substantial
 portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

 **********************************************************************/
/*
 * Authors:
 *   Keith Whitwell <keith@tungstengraphics.com>
 */


#include "main/macros.h"
#include "program/program.h"
#include "program/prog_parameter.h"
#include "program/prog_print.h"
#include "brw_context.h"
#include "brw_vs.h"

/* Return whether the given SrcReg argument of an instruction can be an
 * immediate float operand instead of a PROGRAM_CONSTANT value loaded
 * through push/pull constants.
 */
static bool
brw_vs_arg_can_be_immediate(enum prog_opcode opcode, int arg)
{
   int opcode_array[] = {
      [OPCODE_MOV] = 1,
      [OPCODE_ADD] = 2,
      [OPCODE_CMP] = 3,
      [OPCODE_DP2] = 2,
      [OPCODE_DP3] = 2,
      [OPCODE_DP4] = 2,
      [OPCODE_DPH] = 2,
      [OPCODE_MAX] = 2,
      [OPCODE_MIN] = 2,
      [OPCODE_MUL] = 2,
      [OPCODE_SEQ] = 2,
      [OPCODE_SGE] = 2,
      [OPCODE_SGT] = 2,
      [OPCODE_SLE] = 2,
      [OPCODE_SLT] = 2,
      [OPCODE_SNE] = 2,
      [OPCODE_XPD] = 2,
   };

   /* These opcodes get broken down in a way that allows two
    * args to be immediates.
    */
   if (opcode == OPCODE_MAD || opcode == OPCODE_LRP) {
      if (arg == 1 || arg == 2)
         return true;
   }

   if (opcode >= ARRAY_SIZE(opcode_array))
      return false;

   return arg == opcode_array[opcode] - 1;
}
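
/* Worked example (informative): for OPCODE_ADD the table above holds 2, so
 * only arg 1 (the second source) may be folded to an immediate.  "ADD dst,
 * const, reg" still loads the constant through push/pull, while "ADD dst,
 * reg, const" can encode the constant inline if its swizzle allows it.
 */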

static struct brw_reg get_tmp( struct brw_vs_compile *c )
{
   struct brw_reg tmp = brw_vec8_grf(c->last_tmp, 0);

   if (++c->last_tmp > c->prog_data.total_grf)
      c->prog_data.total_grf = c->last_tmp;

   return tmp;
}

static void release_tmp( struct brw_vs_compile *c, struct brw_reg tmp )
{
   if (tmp.nr == c->last_tmp-1)
      c->last_tmp--;
}

static void release_tmps( struct brw_vs_compile *c )
{
   c->last_tmp = c->first_tmp;
}

static int
get_first_reladdr_output(struct gl_vertex_program *vp)
{
   int i;
   int first_reladdr_output = VERT_RESULT_MAX;

   for (i = 0; i < vp->Base.NumInstructions; i++) {
      struct prog_instruction *inst = vp->Base.Instructions + i;

      if (inst->DstReg.File == PROGRAM_OUTPUT &&
          inst->DstReg.RelAddr &&
          inst->DstReg.Index < first_reladdr_output)
         first_reladdr_output = inst->DstReg.Index;
   }

   return first_reladdr_output;
}

/* Clears the record of which vp_const_buffer elements have been
 * loaded into our constant buffer registers, for the starts of new
 * blocks after control flow.
 */
static void
clear_current_const(struct brw_vs_compile *c)
{
   unsigned int i;

   if (c->vp->use_const_buffer) {
      for (i = 0; i < 3; i++) {
         c->current_const[i].index = -1;
      }
   }
}

/* The message length for all SEND messages is restricted to [1,15].  This
 * includes 1 for the header, so anything in slots 14 and above needs to be
 * placed in a general-purpose register and emitted using a second URB write.
 */
#define MAX_SLOTS_IN_FIRST_URB_WRITE 14
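
/* Example (informative): with the one-register header, slots 0..13 fit in
 * the first URB write (mlen = 1 + 14 = 15, the SEND maximum); slot 14 and
 * up are staged in GRFs and sent with a second write.
 */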

/**
 * Determine whether the given vertex output can be written directly to a MRF
 * or whether it has to be stored in a general-purpose register.
 */
static inline bool can_use_direct_mrf(int vert_result,
                                      int first_reladdr_output, int slot)
{
   if (vert_result == VERT_RESULT_HPOS || vert_result == VERT_RESULT_PSIZ) {
      /* These never go straight into MRFs.  They are placed in the MRF by
       * epilog code.
       */
      return false;
   }
   if (first_reladdr_output <= vert_result && vert_result < VERT_RESULT_MAX) {
      /* Relative addressing might be used to access this vert_result, so it
       * needs to go into a general-purpose register.
       */
      return false;
   }
   if (slot >= MAX_SLOTS_IN_FIRST_URB_WRITE) {
      /* This output won't go out until the second URB write so it must be
       * stored in a general-purpose register until then.
       */
      return false;
   }
   return true;
}

/**
 * Preallocate GRF registers before code emit.
 * Do things as simply as possible.  Allocate and populate all regs
 * ahead of time.
 */
static void brw_vs_alloc_regs( struct brw_vs_compile *c )
{
   struct intel_context *intel = &c->func.brw->intel;
   GLuint i, reg = 0, slot;
   int attributes_in_vue;
   int first_reladdr_output;
   int max_constant;
   int constant = 0;
   struct brw_vertex_program *vp = c->vp;
   const struct gl_program_parameter_list *params = vp->program.Base.Parameters;

   /* Determine whether to use a real constant buffer or use a block
    * of GRF registers for constants.  The latter is faster but only
    * works if everything fits in the GRF.
    * XXX this heuristic/check may need some fine tuning...
    */
   if (c->vp->program.Base.Parameters->NumParameters +
       c->vp->program.Base.NumTemporaries + 20 > BRW_MAX_GRF)
      c->vp->use_const_buffer = true;
   else
      c->vp->use_const_buffer = false;

   /*printf("use_const_buffer = %d\n", c->vp->use_const_buffer);*/

   /* r0 -- reserved as usual
    */
   c->r0 = brw_vec8_grf(reg, 0);
   reg++;

   /* User clip planes from curbe:
    */
   if (c->key.userclip_active) {
      if (intel->gen >= 6) {
         for (i = 0; i <= c->key.nr_userclip_plane_consts; i++) {
            c->userplane[i] = stride(brw_vec4_grf(reg + i / 2,
                                                  (i % 2) * 4), 0, 4, 1);
         }
         reg += ALIGN(c->key.nr_userclip_plane_consts, 2) / 2;
      } else {
         for (i = 0; i < c->key.nr_userclip_plane_consts; i++) {
            c->userplane[i] = stride(brw_vec4_grf(reg + (6 + i) / 2,
                                                  (i % 2) * 4), 0, 4, 1);
         }
         reg += (ALIGN(6 + c->key.nr_userclip_plane_consts, 4) / 4) * 2;
      }
   }

   /* Assign some (probably all) of the vertex program constants to
    * the push constant buffer/CURBE.
    *
    * There's an obvious limit to the number of push constants equal to
    * the number of registers available, and that number is smaller
    * than the minimum maximum number of vertex program parameters, so
    * support for pull constants is required if we overflow.
    * Additionally, on gen6 the number of push constants is even
    * lower.
    *
    * When there's relative addressing, we don't know what range of
    * Mesa IR registers can be accessed.  And generally, when relative
    * addressing is used we also have too many constants to load them
    * all as push constants.  So, we'll just support relative
    * addressing out of the pull constant buffers, and try to load as
    * many statically-accessed constants into the push constant buffer
    * as we can.
    */
   if (intel->gen >= 6) {
      /* We can only load 32 regs of push constants. */
      max_constant = 32 * 2 - c->key.nr_userclip_plane_consts;
   } else {
      max_constant = BRW_MAX_GRF - 20 - c->vp->program.Base.NumTemporaries;
   }
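
   /* Example (informative): on gen6 with 6 user clip planes, max_constant
    * is 32 * 2 - 6 = 58 vec4 slots; pre-gen6 with 16 temporaries it is
    * 128 - 20 - 16 = 92.
    */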

   /* constant_map maps from ParameterValues[] index to index in the
    * push constant buffer, or -1 if it's only in the pull constant
    * buffer.
    */
   memset(c->constant_map, -1, c->vp->program.Base.Parameters->NumParameters);
   for (i = 0;
        i < c->vp->program.Base.NumInstructions && constant < max_constant;
        i++) {
      struct prog_instruction *inst = &c->vp->program.Base.Instructions[i];
      int arg;

      for (arg = 0; arg < 3 && constant < max_constant; arg++) {
         if (inst->SrcReg[arg].File != PROGRAM_STATE_VAR &&
             inst->SrcReg[arg].File != PROGRAM_CONSTANT &&
             inst->SrcReg[arg].File != PROGRAM_UNIFORM &&
             inst->SrcReg[arg].File != PROGRAM_ENV_PARAM &&
             inst->SrcReg[arg].File != PROGRAM_LOCAL_PARAM) {
            continue;
         }

         if (inst->SrcReg[arg].RelAddr) {
            c->vp->use_const_buffer = true;
            continue;
         }

         if (c->constant_map[inst->SrcReg[arg].Index] == -1) {
            c->constant_map[inst->SrcReg[arg].Index] = constant++;
         }
      }
   }

   /* If we ran out of push constant space, then we'll also upload all
    * constants through the pull constant buffer so that they can be
    * accessed no matter what.  For relative addressing (the common
    * case) we need them all in place anyway.
    */
   if (constant == max_constant)
      c->vp->use_const_buffer = true;

   /* Set up the references to the pull parameters if present.  This backend
    * uses a 1:1 mapping from Mesa IR's index to location in the pull constant
    * buffer, while the new VS backend allocates values to the pull buffer on
    * demand.
    */
   if (c->vp->use_const_buffer) {
      for (i = 0; i < params->NumParameters * 4; i++) {
         c->prog_data.pull_param[i] = &params->ParameterValues[i / 4][i % 4].f;
      }
      c->prog_data.nr_pull_params = i;
   }

   for (i = 0; i < constant; i++) {
      c->regs[PROGRAM_STATE_VAR][i] = stride(brw_vec4_grf(reg + i / 2,
                                                          (i % 2) * 4),
                                             0, 4, 1);
   }
   reg += (constant + 1) / 2;
   c->prog_data.curb_read_length = reg - 1;
   c->prog_data.nr_params = constant * 4;
   /* XXX 0 causes a bug elsewhere... */
   if (intel->gen < 6 && c->prog_data.nr_params == 0)
      c->prog_data.nr_params = 4;

   /* Allocate input regs:
    */
   c->nr_inputs = 0;
   for (i = 0; i < VERT_ATTRIB_MAX; i++) {
      if (c->prog_data.inputs_read & BITFIELD64_BIT(i)) {
         c->nr_inputs++;
         c->regs[PROGRAM_INPUT][i] = brw_vec8_grf(reg, 0);
         reg++;
      }
   }
   /* If there are no inputs, we'll still be reading one attribute's worth
    * because it's required -- see urb_read_length setting.
    */
   if (c->nr_inputs == 0)
      reg++;

   /* Allocate outputs.  The non-position outputs go straight into message regs.
    */
   c->first_output = reg;

   first_reladdr_output = get_first_reladdr_output(&c->vp->program);

   for (slot = 0; slot < c->prog_data.vue_map.num_slots; slot++) {
      int vert_result = c->prog_data.vue_map.slot_to_vert_result[slot];
      assert(vert_result < Elements(c->regs[PROGRAM_OUTPUT]));
      if (can_use_direct_mrf(vert_result, first_reladdr_output, slot)) {
         c->regs[PROGRAM_OUTPUT][vert_result] = brw_message_reg(slot + 1);
      } else {
         c->regs[PROGRAM_OUTPUT][vert_result] = brw_vec8_grf(reg, 0);
         reg++;
      }
   }

   /* Allocate program temporaries:
    */
   for (i = 0; i < c->vp->program.Base.NumTemporaries; i++) {
      c->regs[PROGRAM_TEMPORARY][i] = brw_vec8_grf(reg, 0);
      reg++;
   }

   /* Address reg(s).  Don't try to use the internal address reg until
    * deref time.
    */
   for (i = 0; i < c->vp->program.Base.NumAddressRegs; i++) {
      c->regs[PROGRAM_ADDRESS][i] = brw_reg(BRW_GENERAL_REGISTER_FILE,
                                            reg,
                                            0,
                                            BRW_REGISTER_TYPE_D,
                                            BRW_VERTICAL_STRIDE_8,
                                            BRW_WIDTH_8,
                                            BRW_HORIZONTAL_STRIDE_1,
                                            BRW_SWIZZLE_XXXX,
                                            WRITEMASK_X);
      reg++;
   }

   if (c->vp->use_const_buffer) {
      for (i = 0; i < 3; i++) {
         c->current_const[i].reg = brw_vec8_grf(reg, 0);
         reg++;
      }
      clear_current_const(c);
   }

   for (i = 0; i < 128; i++) {
      if (c->output_regs[i].used_in_src) {
         c->output_regs[i].reg = brw_vec8_grf(reg, 0);
         reg++;
      }
   }

   /* Some opcodes need an internal temporary:
    */
   c->first_tmp = reg;
   c->last_tmp = reg; /* for allocation purposes */

   /* Each input reg holds data from two vertices.  The
    * urb_read_length is the number of registers read from *each*
    * vertex urb, so is half the amount:
    */
   c->prog_data.urb_read_length = (c->nr_inputs + 1) / 2;
   /* Setting this field to 0 leads to undefined behavior according to
    * the VS_STATE docs.  Our VUEs will always have at least one attribute
    * sitting in them, even if it's padding.
    */
   if (c->prog_data.urb_read_length == 0)
      c->prog_data.urb_read_length = 1;

   /* The VS VUEs are shared by VF (outputting our inputs) and VS, so size
    * them to fit the biggest thing they need to.
    */
   attributes_in_vue = MAX2(c->prog_data.vue_map.num_slots, c->nr_inputs);

   if (intel->gen == 6) {
      /* Each attribute is 32 bytes (2 vec4s), so dividing by 8 gives us the
       * number of 128-byte (1024-bit) units.
       */
      c->prog_data.urb_entry_size = ALIGN(attributes_in_vue, 8) / 8;
   } else {
      /* Each attribute is 16 bytes (1 vec4), so dividing by 4 gives us the
       * number of 64-byte (512-bit) units.
       */
      c->prog_data.urb_entry_size = ALIGN(attributes_in_vue, 4) / 4;
   }

   c->prog_data.total_grf = reg;

   if (unlikely(INTEL_DEBUG & DEBUG_VS)) {
      printf("%s NumAddrRegs %d\n", __FUNCTION__, c->vp->program.Base.NumAddressRegs);
      printf("%s NumTemps %d\n", __FUNCTION__, c->vp->program.Base.NumTemporaries);
      printf("%s reg = %d\n", __FUNCTION__, reg);
   }
}


/**
 * If an instruction uses a temp reg both as a src and the dest, we
 * sometimes need to allocate an intermediate temporary.
 */
static void unalias1( struct brw_vs_compile *c,
                      struct brw_reg dst,
                      struct brw_reg arg0,
                      void (*func)( struct brw_vs_compile *,
                                    struct brw_reg,
                                    struct brw_reg ))
{
   if (dst.file == arg0.file && dst.nr == arg0.nr) {
      struct brw_compile *p = &c->func;
      struct brw_reg tmp = brw_writemask(get_tmp(c), dst.dw1.bits.writemask);
      func(c, tmp, arg0);
      brw_MOV(p, dst, tmp);
      release_tmp(c, tmp);
   }
   else {
      func(c, dst, arg0);
   }
}
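
/* Example (informative): "EXP r2, r2" via emit_exp_noalias would write
 * r2.x before re-reading r2.x to compute the .y and .z channels; routing
 * through a temporary and copying back keeps the source intact until the
 * helper is done, and the writemask on the tmp limits what the MOV touches.
 */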

/**
 * \sa unalias1
 * Checks if a 2-operand instruction needs an intermediate temporary.
 */
static void unalias2( struct brw_vs_compile *c,
                      struct brw_reg dst,
                      struct brw_reg arg0,
                      struct brw_reg arg1,
                      void (*func)( struct brw_vs_compile *,
                                    struct brw_reg,
                                    struct brw_reg,
                                    struct brw_reg ))
{
   if ((dst.file == arg0.file && dst.nr == arg0.nr) ||
       (dst.file == arg1.file && dst.nr == arg1.nr)) {
      struct brw_compile *p = &c->func;
      struct brw_reg tmp = brw_writemask(get_tmp(c), dst.dw1.bits.writemask);
      func(c, tmp, arg0, arg1);
      brw_MOV(p, dst, tmp);
      release_tmp(c, tmp);
   }
   else {
      func(c, dst, arg0, arg1);
   }
}

/**
 * \sa unalias2
 * Checks if a 3-operand instruction needs an intermediate temporary.
 */
static void unalias3( struct brw_vs_compile *c,
                      struct brw_reg dst,
                      struct brw_reg arg0,
                      struct brw_reg arg1,
                      struct brw_reg arg2,
                      void (*func)( struct brw_vs_compile *,
                                    struct brw_reg,
                                    struct brw_reg,
                                    struct brw_reg,
                                    struct brw_reg ))
{
   if ((dst.file == arg0.file && dst.nr == arg0.nr) ||
       (dst.file == arg1.file && dst.nr == arg1.nr) ||
       (dst.file == arg2.file && dst.nr == arg2.nr)) {
      struct brw_compile *p = &c->func;
      struct brw_reg tmp = brw_writemask(get_tmp(c), dst.dw1.bits.writemask);
      func(c, tmp, arg0, arg1, arg2);
      brw_MOV(p, dst, tmp);
      release_tmp(c, tmp);
   }
   else {
      func(c, dst, arg0, arg1, arg2);
   }
}

static void emit_sop( struct brw_vs_compile *c,
                      struct brw_reg dst,
                      struct brw_reg arg0,
                      struct brw_reg arg1,
                      GLuint cond)
{
   struct brw_compile *p = &c->func;

   brw_MOV(p, dst, brw_imm_f(0.0f));
   brw_CMP(p, brw_null_reg(), cond, arg0, arg1);
   brw_MOV(p, dst, brw_imm_f(1.0f));
   brw_set_predicate_control_flag_value(p, 0xff);
}
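
/* Informative: this computes dst = (arg0 cond arg1) ? 1.0 : 0.0 per
 * channel.  The CMP to the null register sets the flag and enables
 * predication for the following MOV, and the final call restores
 * unpredicated execution.
 */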

static void emit_seq( struct brw_vs_compile *c,
                      struct brw_reg dst,
                      struct brw_reg arg0,
                      struct brw_reg arg1 )
{
   emit_sop(c, dst, arg0, arg1, BRW_CONDITIONAL_EQ);
}

static void emit_sne( struct brw_vs_compile *c,
                      struct brw_reg dst,
                      struct brw_reg arg0,
                      struct brw_reg arg1 )
{
   emit_sop(c, dst, arg0, arg1, BRW_CONDITIONAL_NEQ);
}

static void emit_slt( struct brw_vs_compile *c,
                      struct brw_reg dst,
                      struct brw_reg arg0,
                      struct brw_reg arg1 )
{
   emit_sop(c, dst, arg0, arg1, BRW_CONDITIONAL_L);
}

static void emit_sle( struct brw_vs_compile *c,
                      struct brw_reg dst,
                      struct brw_reg arg0,
                      struct brw_reg arg1 )
{
   emit_sop(c, dst, arg0, arg1, BRW_CONDITIONAL_LE);
}

static void emit_sgt( struct brw_vs_compile *c,
                      struct brw_reg dst,
                      struct brw_reg arg0,
                      struct brw_reg arg1 )
{
   emit_sop(c, dst, arg0, arg1, BRW_CONDITIONAL_G);
}

static void emit_sge( struct brw_vs_compile *c,
                      struct brw_reg dst,
                      struct brw_reg arg0,
                      struct brw_reg arg1 )
{
   emit_sop(c, dst, arg0, arg1, BRW_CONDITIONAL_GE);
}

static void emit_cmp( struct brw_compile *p,
                      struct brw_reg dst,
                      struct brw_reg arg0,
                      struct brw_reg arg1,
                      struct brw_reg arg2 )
{
   brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_L, arg0, brw_imm_f(0));
   brw_SEL(p, dst, arg1, arg2);
   brw_set_predicate_control(p, BRW_PREDICATE_NONE);
}
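
/* Informative: the SEL above is predicated by the preceding CMP, so this
 * implements Mesa's CMP opcode, dst = (arg0 < 0.0) ? arg1 : arg2, per
 * channel.
 */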

static void emit_sign(struct brw_vs_compile *c,
                      struct brw_reg dst,
                      struct brw_reg arg0)
{
   struct brw_compile *p = &c->func;

   brw_MOV(p, dst, brw_imm_f(0));

   brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_L, arg0, brw_imm_f(0));
   brw_MOV(p, dst, brw_imm_f(-1.0));
   brw_set_predicate_control(p, BRW_PREDICATE_NONE);

   brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_G, arg0, brw_imm_f(0));
   brw_MOV(p, dst, brw_imm_f(1.0));
   brw_set_predicate_control(p, BRW_PREDICATE_NONE);
}

static void emit_max( struct brw_compile *p,
                      struct brw_reg dst,
                      struct brw_reg arg0,
                      struct brw_reg arg1 )
{
   struct intel_context *intel = &p->brw->intel;

   if (intel->gen >= 6) {
      brw_set_conditionalmod(p, BRW_CONDITIONAL_GE);
      brw_SEL(p, dst, arg0, arg1);
      brw_set_conditionalmod(p, BRW_CONDITIONAL_NONE);
      brw_set_predicate_control(p, BRW_PREDICATE_NONE);
   } else {
      brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_GE, arg0, arg1);
      brw_SEL(p, dst, arg0, arg1);
      brw_set_predicate_control(p, BRW_PREDICATE_NONE);
   }
}

static void emit_min( struct brw_compile *p,
                      struct brw_reg dst,
                      struct brw_reg arg0,
                      struct brw_reg arg1 )
{
   struct intel_context *intel = &p->brw->intel;

   if (intel->gen >= 6) {
      brw_set_conditionalmod(p, BRW_CONDITIONAL_L);
      brw_SEL(p, dst, arg0, arg1);
      brw_set_conditionalmod(p, BRW_CONDITIONAL_NONE);
      brw_set_predicate_control(p, BRW_PREDICATE_NONE);
   } else {
      brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_L, arg0, arg1);
      brw_SEL(p, dst, arg0, arg1);
      brw_set_predicate_control(p, BRW_PREDICATE_NONE);
   }
}

static void emit_arl(struct brw_compile *p,
                     struct brw_reg dst,
                     struct brw_reg src)
{
   struct intel_context *intel = &p->brw->intel;

   if (intel->gen >= 6) {
      struct brw_reg dst_f = retype(dst, BRW_REGISTER_TYPE_F);

      brw_RNDD(p, dst_f, src);
      brw_MOV(p, dst, dst_f);
   } else {
      brw_RNDD(p, dst, src);
   }
}
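
/* Informative: ARL computes dst = (int)floor(src); RNDD rounds toward
 * minus infinity.  On gen6 the rounding is done into a float-typed view
 * of the destination and a separate MOV performs the float-to-integer
 * conversion into the D-typed address register.
 */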

static void emit_math1_gen4(struct brw_vs_compile *c,
                            GLuint function,
                            struct brw_reg dst,
                            struct brw_reg arg0,
                            GLuint precision)
{
   /* There are various odd behaviours with SEND on the simulator.  In
    * addition there are documented issues with the fact that the GEN4
    * processor doesn't do dependency control properly on SEND
    * results.  So, on balance, this kludge to get around failures
    * with writemasked math results looks like it might be necessary
    * whether that turns out to be a simulator bug or not:
    */
   struct brw_compile *p = &c->func;
   struct brw_reg tmp = dst;
   bool need_tmp = false;

   if (dst.file != BRW_GENERAL_REGISTER_FILE ||
       dst.dw1.bits.writemask != 0xf)
      need_tmp = true;

   if (need_tmp)
      tmp = get_tmp(c);

   brw_math(p,
            tmp,
            function,
            2,
            arg0,
            BRW_MATH_DATA_SCALAR,
            precision);

   if (need_tmp) {
      brw_MOV(p, dst, tmp);
      release_tmp(c, tmp);
   }
}

static void
emit_math1_gen6(struct brw_vs_compile *c,
                GLuint function,
                struct brw_reg dst,
                struct brw_reg arg0,
                GLuint precision)
{
   struct brw_compile *p = &c->func;
   struct brw_reg tmp_src, tmp_dst;

   /* Something is strange on gen6 math in 16-wide mode, though the
    * docs say it's supposed to work.  Punt to using align1 mode,
    * which doesn't do writemasking and swizzles.
    */
   tmp_src = get_tmp(c);
   tmp_dst = get_tmp(c);

   brw_MOV(p, tmp_src, arg0);

   brw_set_access_mode(p, BRW_ALIGN_1);
   brw_math(p,
            tmp_dst,
            function,
            2,
            tmp_src,
            BRW_MATH_DATA_SCALAR,
            precision);
   brw_set_access_mode(p, BRW_ALIGN_16);

   brw_MOV(p, dst, tmp_dst);

   release_tmp(c, tmp_src);
   release_tmp(c, tmp_dst);
}

static void
emit_math1(struct brw_vs_compile *c,
           GLuint function,
           struct brw_reg dst,
           struct brw_reg arg0,
           GLuint precision)
{
   struct brw_compile *p = &c->func;
   struct intel_context *intel = &p->brw->intel;

   if (intel->gen >= 6)
      emit_math1_gen6(c, function, dst, arg0, precision);
   else
      emit_math1_gen4(c, function, dst, arg0, precision);
}

static void emit_math2_gen4( struct brw_vs_compile *c,
                             GLuint function,
                             struct brw_reg dst,
                             struct brw_reg arg0,
                             struct brw_reg arg1,
                             GLuint precision)
{
   struct brw_compile *p = &c->func;
   struct brw_reg tmp = dst;
   bool need_tmp = false;

   if (dst.file != BRW_GENERAL_REGISTER_FILE ||
       dst.dw1.bits.writemask != 0xf)
      need_tmp = true;

   if (need_tmp)
      tmp = get_tmp(c);

   brw_MOV(p, brw_message_reg(3), arg1);

   brw_math(p,
            tmp,
            function,
            2,
            arg0,
            BRW_MATH_DATA_SCALAR,
            precision);

   if (need_tmp) {
      brw_MOV(p, dst, tmp);
      release_tmp(c, tmp);
   }
}

static void emit_math2_gen6( struct brw_vs_compile *c,
                             GLuint function,
                             struct brw_reg dst,
                             struct brw_reg arg0,
                             struct brw_reg arg1,
                             GLuint precision)
{
   struct brw_compile *p = &c->func;
   struct brw_reg tmp_src0, tmp_src1, tmp_dst;

   tmp_src0 = get_tmp(c);
   tmp_src1 = get_tmp(c);
   tmp_dst = get_tmp(c);

   brw_MOV(p, tmp_src0, arg0);
   brw_MOV(p, tmp_src1, arg1);

   brw_set_access_mode(p, BRW_ALIGN_1);
   brw_math2(p,
             tmp_dst,
             function,
             tmp_src0,
             tmp_src1);
   brw_set_access_mode(p, BRW_ALIGN_16);

   brw_MOV(p, dst, tmp_dst);

   release_tmp(c, tmp_src0);
   release_tmp(c, tmp_src1);
   release_tmp(c, tmp_dst);
}

static void emit_math2( struct brw_vs_compile *c,
                        GLuint function,
                        struct brw_reg dst,
                        struct brw_reg arg0,
                        struct brw_reg arg1,
                        GLuint precision)
{
   struct brw_compile *p = &c->func;
   struct intel_context *intel = &p->brw->intel;

   if (intel->gen >= 6)
      emit_math2_gen6(c, function, dst, arg0, arg1, precision);
   else
      emit_math2_gen4(c, function, dst, arg0, arg1, precision);
}

static void emit_exp_noalias( struct brw_vs_compile *c,
                              struct brw_reg dst,
                              struct brw_reg arg0 )
{
   struct brw_compile *p = &c->func;

   if (dst.dw1.bits.writemask & WRITEMASK_X) {
      struct brw_reg tmp = get_tmp(c);
      struct brw_reg tmp_d = retype(tmp, BRW_REGISTER_TYPE_D);

      /* tmp_d = floor(arg0.x) */
      brw_RNDD(p, tmp_d, brw_swizzle1(arg0, 0));

      /* result[0] = 2.0 ^ tmp */

      /* Adjust exponent for floating point:
       * exp += 127
       */
      brw_ADD(p, brw_writemask(tmp_d, WRITEMASK_X), tmp_d, brw_imm_d(127));

      /* Install exponent and sign.
       * Excess drops off the edge:
       */
      brw_SHL(p, brw_writemask(retype(dst, BRW_REGISTER_TYPE_D), WRITEMASK_X),
              tmp_d, brw_imm_d(23));

      release_tmp(c, tmp);
   }

   if (dst.dw1.bits.writemask & WRITEMASK_Y) {
      /* result[1] = arg0.x - floor(arg0.x) */
      brw_FRC(p, brw_writemask(dst, WRITEMASK_Y), brw_swizzle1(arg0, 0));
   }

   if (dst.dw1.bits.writemask & WRITEMASK_Z) {
      /* As with the LOG instruction, we might be better off just
       * doing a Taylor expansion here, seeing as we have to do all
       * the prep work.
       *
       * If mathbox partial precision is too low, consider also:
       * result[3] = result[0] * EXP(result[1])
       */
      emit_math1(c,
                 BRW_MATH_FUNCTION_EXP,
                 brw_writemask(dst, WRITEMASK_Z),
                 brw_swizzle1(arg0, 0),
                 BRW_MATH_PRECISION_FULL);
   }

   if (dst.dw1.bits.writemask & WRITEMASK_W) {
      /* result[3] = 1.0; */
      brw_MOV(p, brw_writemask(dst, WRITEMASK_W), brw_imm_f(1));
   }
}
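
/* Reference sketch (informative, hypothetical helper, not driver code):
 * the X-channel bit manipulation above, written as scalar C.  Assumes
 * IEEE-754 single precision and an in-range exponent.
 */
#if 0
static float ref_exp2_floor(float x)
{
   union { int32_t i; float f; } u;
   u.i = ((int32_t)floorf(x) + 127) << 23;   /* install biased exponent */
   return u.f;   /* e.g. x = 3.5: (3 + 127) << 23 = 0x41000000 = 8.0f */
}
#endif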


static void emit_log_noalias( struct brw_vs_compile *c,
                              struct brw_reg dst,
                              struct brw_reg arg0 )
{
   struct brw_compile *p = &c->func;
   struct brw_reg tmp = dst;
   struct brw_reg tmp_ud = retype(tmp, BRW_REGISTER_TYPE_UD);
   struct brw_reg arg0_ud = retype(arg0, BRW_REGISTER_TYPE_UD);
   bool need_tmp = (dst.dw1.bits.writemask != 0xf ||
                    dst.file != BRW_GENERAL_REGISTER_FILE);

   if (need_tmp) {
      tmp = get_tmp(c);
      tmp_ud = retype(tmp, BRW_REGISTER_TYPE_UD);
   }

   /* Perform mant = frexpf(fabsf(x), &exp), adjust exp and mant
    * according to spec:
    *
    * These almost look like they could be joined up, but not really
    * practical:
    *
    * result[0].f = ((x.i & ((1<<31)-1)) >> 23) - 127
    * result[1].i = (x.i & ((1<<23)-1)) | (127<<23)
    */
   if (dst.dw1.bits.writemask & WRITEMASK_XZ) {
      brw_AND(p,
              brw_writemask(tmp_ud, WRITEMASK_X),
              brw_swizzle1(arg0_ud, 0),
              brw_imm_ud((1U<<31)-1));

      brw_SHR(p,
              brw_writemask(tmp_ud, WRITEMASK_X),
              tmp_ud,
              brw_imm_ud(23));

      brw_ADD(p,
              brw_writemask(tmp, WRITEMASK_X),
              retype(tmp_ud, BRW_REGISTER_TYPE_D), /* does it matter? */
              brw_imm_d(-127));
   }

   if (dst.dw1.bits.writemask & WRITEMASK_YZ) {
      brw_AND(p,
              brw_writemask(tmp_ud, WRITEMASK_Y),
              brw_swizzle1(arg0_ud, 0),
              brw_imm_ud((1<<23)-1));

      brw_OR(p,
             brw_writemask(tmp_ud, WRITEMASK_Y),
             tmp_ud,
             brw_imm_ud(127<<23));
   }

   if (dst.dw1.bits.writemask & WRITEMASK_Z) {
      /* result[2] = result[0] + LOG2(result[1]); */

      /* Why bother?  The above is just a hint how to do this with a
       * Taylor series.  Maybe we *should* use a Taylor series as by
       * the time all the above has been done it's almost certainly
       * quicker than calling the mathbox, even with low precision.
       *
       * Options are:
       * - result[0] + mathbox.LOG2(result[1])
       * - mathbox.LOG2(arg0.x)
       * - result[0] + inline_taylor_approx(result[1])
       */
      emit_math1(c,
                 BRW_MATH_FUNCTION_LOG,
                 brw_writemask(tmp, WRITEMASK_Z),
                 brw_swizzle1(tmp, 1),
                 BRW_MATH_PRECISION_FULL);

      brw_ADD(p,
              brw_writemask(tmp, WRITEMASK_Z),
              brw_swizzle1(tmp, 2),
              brw_swizzle1(tmp, 0));
   }

   if (dst.dw1.bits.writemask & WRITEMASK_W) {
      /* result[3] = 1.0; */
      brw_MOV(p, brw_writemask(tmp, WRITEMASK_W), brw_imm_f(1));
   }

   if (need_tmp) {
      brw_MOV(p, dst, tmp);
      release_tmp(c, tmp);
   }
}
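
/* Reference sketch (informative, hypothetical helper, not driver code):
 * the frexp-style split computed by the X and Y channels above, as
 * scalar C.  Assumes IEEE-754 single precision.
 */
#if 0
static void ref_log_split(float x, float *exp_part, float *mant)
{
   union { uint32_t i; float f; } u = { .f = x };
   *exp_part = (float)((int)((u.i & 0x7fffffffu) >> 23) - 127);
   u.i = (u.i & 0x007fffffu) | (127u << 23);
   *mant = u.f;   /* in [1, 2); e.g. x = 12.0f gives exp_part 3, mant 1.5 */
}
#endif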


/* Need to unalias - consider swizzles:   r0 = DST r0.xxxx r1
 */
static void emit_dst_noalias( struct brw_vs_compile *c,
                              struct brw_reg dst,
                              struct brw_reg arg0,
                              struct brw_reg arg1)
{
   struct brw_compile *p = &c->func;

   /* There must be a better way to do this:
    */
   if (dst.dw1.bits.writemask & WRITEMASK_X)
      brw_MOV(p, brw_writemask(dst, WRITEMASK_X), brw_imm_f(1.0));
   if (dst.dw1.bits.writemask & WRITEMASK_Y)
      brw_MUL(p, brw_writemask(dst, WRITEMASK_Y), arg0, arg1);
   if (dst.dw1.bits.writemask & WRITEMASK_Z)
      brw_MOV(p, brw_writemask(dst, WRITEMASK_Z), arg0);
   if (dst.dw1.bits.writemask & WRITEMASK_W)
      brw_MOV(p, brw_writemask(dst, WRITEMASK_W), arg1);
}


static void emit_xpd( struct brw_compile *p,
                      struct brw_reg dst,
                      struct brw_reg t,
                      struct brw_reg u)
{
   brw_MUL(p, brw_null_reg(), brw_swizzle(t, 1,2,0,3), brw_swizzle(u,2,0,1,3));
   brw_MAC(p, dst, negate(brw_swizzle(t, 2,0,1,3)), brw_swizzle(u,1,2,0,3));
}
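
/* Informative: XPD is the classic two-instruction cross product,
 * dst.xyz = t.yzx * u.zxy - t.zxy * u.yzx, with the MUL result carried
 * to the MAC through the implicit accumulator; the w channel comes out
 * as t.w * u.w - t.w * u.w = 0.
 */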


static void emit_lit_noalias( struct brw_vs_compile *c,
                              struct brw_reg dst,
                              struct brw_reg arg0 )
{
   struct brw_compile *p = &c->func;
   struct brw_reg tmp = dst;
   bool need_tmp = (dst.file != BRW_GENERAL_REGISTER_FILE);

   if (need_tmp)
      tmp = get_tmp(c);

   brw_MOV(p, brw_writemask(dst, WRITEMASK_YZ), brw_imm_f(0));
   brw_MOV(p, brw_writemask(dst, WRITEMASK_XW), brw_imm_f(1));

   /* Need to use BRW_EXECUTE_8 and also do an 8-wide compare in order
    * to get all channels active inside the IF.  In the clipping code
    * we run with NoMask, so it's not an option and we can use
    * BRW_EXECUTE_1 for all comparisons.
    */
   brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_G, brw_swizzle1(arg0,0), brw_imm_f(0));
   brw_IF(p, BRW_EXECUTE_8);
   {
      brw_MOV(p, brw_writemask(dst, WRITEMASK_Y), brw_swizzle1(arg0,0));

      brw_CMP(p, brw_null_reg(), BRW_CONDITIONAL_G, brw_swizzle1(arg0,1), brw_imm_f(0));
      brw_MOV(p, brw_writemask(tmp, WRITEMASK_Z), brw_swizzle1(arg0,1));
      brw_set_predicate_control(p, BRW_PREDICATE_NONE);

      emit_math2(c,
                 BRW_MATH_FUNCTION_POW,
                 brw_writemask(dst, WRITEMASK_Z),
                 brw_swizzle1(tmp, 2),
                 brw_swizzle1(arg0, 3),
                 BRW_MATH_PRECISION_PARTIAL);
   }
   brw_ENDIF(p);

   release_tmp(c, tmp);
}

static void emit_lrp_noalias(struct brw_vs_compile *c,
                             struct brw_reg dst,
                             struct brw_reg arg0,
                             struct brw_reg arg1,
                             struct brw_reg arg2)
{
   struct brw_compile *p = &c->func;

   brw_ADD(p, dst, negate(arg0), brw_imm_f(1.0));
   brw_MUL(p, brw_null_reg(), dst, arg2);
   brw_MAC(p, dst, arg0, arg1);
}
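
/* Informative: LRP is dst = arg0 * arg1 + (1 - arg0) * arg2.  The ADD
 * leaves (1 - arg0) in dst, the MUL parks (1 - arg0) * arg2 in the
 * accumulator, and the MAC folds in arg0 * arg1.
 */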

static struct brw_reg
get_constant(struct brw_vs_compile *c,
             const struct prog_instruction *inst,
             GLuint argIndex)
{
   const struct prog_src_register *src = &inst->SrcReg[argIndex];
   struct brw_compile *p = &c->func;
   struct brw_reg const_reg = c->current_const[argIndex].reg;

   assert(argIndex < 3);

   if (c->current_const[argIndex].index != src->Index) {
      /* Keep track of the last constant loaded in this slot, for reuse. */
      c->current_const[argIndex].index = src->Index;

#if 0
      printf(" fetch const[%d] for arg %d into reg %d\n",
             src->Index, argIndex, c->current_const[argIndex].reg.nr);
#endif
      /* need to fetch the constant now */
      brw_dp_READ_4_vs(p,
                       const_reg,                   /* writeback dest */
                       16 * src->Index,             /* byte offset */
                       SURF_INDEX_VERT_CONST_BUFFER /* binding table index */
                       );
   }

   /* replicate lower four floats into upper half (to get XYZWXYZW) */
   const_reg = stride(const_reg, 0, 4, 1);
   const_reg.subnr = 0;

   return const_reg;
}

static struct brw_reg
get_reladdr_constant(struct brw_vs_compile *c,
                     const struct prog_instruction *inst,
                     GLuint argIndex)
{
   const struct prog_src_register *src = &inst->SrcReg[argIndex];
   struct brw_compile *p = &c->func;
   struct brw_context *brw = p->brw;
   struct intel_context *intel = &brw->intel;
   struct brw_reg const_reg = c->current_const[argIndex].reg;
   struct brw_reg addr_reg = c->regs[PROGRAM_ADDRESS][0];
   uint32_t offset;

   assert(argIndex < 3);

   /* Can't reuse a reladdr constant load. */
   c->current_const[argIndex].index = -1;

#if 0
   printf(" fetch const[a0.x+%d] for arg %d into reg %d\n",
          src->Index, argIndex, c->current_const[argIndex].reg.nr);
#endif

   if (intel->gen >= 6) {
      offset = src->Index;
   } else {
      struct brw_reg byte_addr_reg = retype(get_tmp(c), BRW_REGISTER_TYPE_D);
      brw_MUL(p, byte_addr_reg, addr_reg, brw_imm_d(16));
      addr_reg = byte_addr_reg;
      offset = 16 * src->Index;
   }

   /* fetch the first vec4 */
   brw_dp_READ_4_vs_relative(p,
                             const_reg,
                             addr_reg,
                             offset,
                             SURF_INDEX_VERT_CONST_BUFFER);

   return const_reg;
}


/* TODO: relative addressing!
 */
static struct brw_reg get_reg( struct brw_vs_compile *c,
                               gl_register_file file,
                               GLuint index )
{
   switch (file) {
   case PROGRAM_TEMPORARY:
   case PROGRAM_INPUT:
   case PROGRAM_OUTPUT:
      assert(c->regs[file][index].nr != 0);
      return c->regs[file][index];
   case PROGRAM_STATE_VAR:
   case PROGRAM_CONSTANT:
   case PROGRAM_UNIFORM:
      assert(c->regs[PROGRAM_STATE_VAR][index].nr != 0);
      return c->regs[PROGRAM_STATE_VAR][index];
   case PROGRAM_ADDRESS:
      assert(index == 0);
      return c->regs[file][index];

   case PROGRAM_UNDEFINED: /* undef values */
      return brw_null_reg();

   case PROGRAM_LOCAL_PARAM:
   case PROGRAM_ENV_PARAM:
   case PROGRAM_WRITE_ONLY:
   default:
      assert(0);
      return brw_null_reg();
   }
}


/**
 * Indirect addressing: get reg[[arg] + offset].
 */
static struct brw_reg deref( struct brw_vs_compile *c,
                             struct brw_reg arg,
                             GLint offset,
                             GLuint reg_size )
{
   struct brw_compile *p = &c->func;
   struct brw_reg tmp = get_tmp(c);
   struct brw_reg addr_reg = c->regs[PROGRAM_ADDRESS][0];
   struct brw_reg vp_address = retype(vec1(addr_reg), BRW_REGISTER_TYPE_D);
   GLuint byte_offset = arg.nr * 32 + arg.subnr + offset * reg_size;
   struct brw_reg indirect = brw_vec4_indirect(0,0);
   struct brw_reg acc = retype(vec1(get_tmp(c)), BRW_REGISTER_TYPE_UW);

   /* Set the vertical stride on the register access so that the first
    * 4 components come from a0.0 and the second 4 from a0.1.
    */
   indirect.vstride = BRW_VERTICAL_STRIDE_ONE_DIMENSIONAL;

   {
      brw_push_insn_state(p);
      brw_set_access_mode(p, BRW_ALIGN_1);

      brw_MUL(p, acc, vp_address, brw_imm_uw(reg_size));
      brw_ADD(p, brw_address_reg(0), acc, brw_imm_uw(byte_offset));

      brw_MUL(p, acc, suboffset(vp_address, 4), brw_imm_uw(reg_size));
      brw_ADD(p, brw_address_reg(1), acc, brw_imm_uw(byte_offset));

      brw_MOV(p, tmp, indirect);

      brw_pop_insn_state(p);
   }

   /* NOTE: tmp not released */
   return tmp;
}
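
/* Informative: for the two interleaved vertices, a0.0 and a0.1 each get
 * index * reg_size + byte_offset computed from that vertex's component of
 * the address register; the VxH indirect MOV then gathers one vec4 per
 * vertex in a single instruction.
 */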

static void
move_to_reladdr_dst(struct brw_vs_compile *c,
                    const struct prog_instruction *inst,
                    struct brw_reg val)
{
   struct brw_compile *p = &c->func;
   int reg_size = 32;
   struct brw_reg addr_reg = c->regs[PROGRAM_ADDRESS][0];
   struct brw_reg vp_address = retype(vec1(addr_reg), BRW_REGISTER_TYPE_D);
   struct brw_reg base = c->regs[inst->DstReg.File][inst->DstReg.Index];
   GLuint byte_offset = base.nr * 32 + base.subnr;
   struct brw_reg indirect = brw_vec4_indirect(0,0);
   struct brw_reg acc = retype(vec1(get_tmp(c)), BRW_REGISTER_TYPE_UW);

   /* Because destination register indirect addressing can only use
    * one index, we'll write each vertex's vec4 value separately.
    */
   val.width = BRW_WIDTH_4;
   val.vstride = BRW_VERTICAL_STRIDE_4;

   brw_push_insn_state(p);
   brw_set_access_mode(p, BRW_ALIGN_1);

   brw_MUL(p, acc, vp_address, brw_imm_uw(reg_size));
   brw_ADD(p, brw_address_reg(0), acc, brw_imm_uw(byte_offset));
   brw_MOV(p, indirect, val);

   brw_MUL(p, acc, suboffset(vp_address, 4), brw_imm_uw(reg_size));
   brw_ADD(p, brw_address_reg(0), acc,
           brw_imm_uw(byte_offset + reg_size / 2));
   brw_MOV(p, indirect, suboffset(val, 4));

   brw_pop_insn_state(p);
}

/**
 * Get brw reg corresponding to the instruction's [argIndex] src reg.
 * TODO: relative addressing!
 */
static struct brw_reg
get_src_reg( struct brw_vs_compile *c,
             const struct prog_instruction *inst,
             GLuint argIndex )
{
   const GLuint file = inst->SrcReg[argIndex].File;
   const GLint index = inst->SrcReg[argIndex].Index;
   const bool relAddr = inst->SrcReg[argIndex].RelAddr;

   if (brw_vs_arg_can_be_immediate(inst->Opcode, argIndex)) {
      const struct prog_src_register *src = &inst->SrcReg[argIndex];

      if (src->Swizzle == MAKE_SWIZZLE4(SWIZZLE_ZERO,
                                        SWIZZLE_ZERO,
                                        SWIZZLE_ZERO,
                                        SWIZZLE_ZERO)) {
         return brw_imm_f(0.0f);
      } else if (src->Swizzle == MAKE_SWIZZLE4(SWIZZLE_ONE,
                                               SWIZZLE_ONE,
                                               SWIZZLE_ONE,
                                               SWIZZLE_ONE)) {
         if (src->Negate)
            return brw_imm_f(-1.0F);
         else
            return brw_imm_f(1.0F);
      } else if (src->File == PROGRAM_CONSTANT) {
         const struct gl_program_parameter_list *params;
         float f;
         int component = -1;

         switch (src->Swizzle) {
         case SWIZZLE_XXXX:
            component = 0;
            break;
         case SWIZZLE_YYYY:
            component = 1;
            break;
         case SWIZZLE_ZZZZ:
            component = 2;
            break;
         case SWIZZLE_WWWW:
            component = 3;
            break;
         }

         if (component >= 0) {
            params = c->vp->program.Base.Parameters;
            f = params->ParameterValues[src->Index][component].f;

            if (src->Abs)
               f = fabs(f);
            if (src->Negate)
               f = -f;
            return brw_imm_f(f);
         }
      }
   }

   switch (file) {
   case PROGRAM_TEMPORARY:
   case PROGRAM_INPUT:
   case PROGRAM_OUTPUT:
      if (relAddr) {
         return deref(c, c->regs[file][0], index, 32);
      }
      else {
         assert(c->regs[file][index].nr != 0);
         return c->regs[file][index];
      }

   case PROGRAM_STATE_VAR:
   case PROGRAM_CONSTANT:
   case PROGRAM_UNIFORM:
   case PROGRAM_ENV_PARAM:
   case PROGRAM_LOCAL_PARAM:
      if (!relAddr && c->constant_map[index] != -1) {
         /* Take from the push constant buffer if possible. */
         assert(c->regs[PROGRAM_STATE_VAR][c->constant_map[index]].nr != 0);
         return c->regs[PROGRAM_STATE_VAR][c->constant_map[index]];
      } else {
         /* Must be in the pull constant buffer then. */
         assert(c->vp->use_const_buffer);
         if (relAddr)
            return get_reladdr_constant(c, inst, argIndex);
         else
            return get_constant(c, inst, argIndex);
      }
   case PROGRAM_ADDRESS:
      assert(index == 0);
      return c->regs[file][index];

   case PROGRAM_UNDEFINED:
      /* this is a normal case since we loop over all three src args */
      return brw_null_reg();

   case PROGRAM_WRITE_ONLY:
   default:
      assert(0);
      return brw_null_reg();
   }
}

/**
 * Return the brw reg for the given instruction's src argument.
 * Will return mangled results for SWZ op.  The emit_swz() function
 * ignores this result and recalculates taking extended swizzles into
 * account.
 */
static struct brw_reg get_arg( struct brw_vs_compile *c,
                               const struct prog_instruction *inst,
                               GLuint argIndex )
{
   const struct prog_src_register *src = &inst->SrcReg[argIndex];
   struct brw_reg reg;

   if (src->File == PROGRAM_UNDEFINED)
      return brw_null_reg();

   reg = get_src_reg(c, inst, argIndex);

   /* Convert 3-bit swizzle to 2-bit.
    */
   if (reg.file != BRW_IMMEDIATE_VALUE) {
      reg.dw1.bits.swizzle = BRW_SWIZZLE4(GET_SWZ(src->Swizzle, 0),
                                          GET_SWZ(src->Swizzle, 1),
                                          GET_SWZ(src->Swizzle, 2),
                                          GET_SWZ(src->Swizzle, 3));

      /* Note this is ok for non-swizzle ARB_vp instructions */
      reg.negate = src->Negate ? 1 : 0;
   }

   return reg;
}


/**
 * Get brw register for the given program dest register.
 */
static struct brw_reg get_dst( struct brw_vs_compile *c,
                               struct prog_dst_register dst )
{
   struct brw_reg reg;

   switch (dst.File) {
   case PROGRAM_TEMPORARY:
   case PROGRAM_OUTPUT:
      /* register-indirect addressing is only 1x1, not VxH, for
       * destination regs.  So, for RelAddr we'll return a temporary
       * for the dest and do a move of the result to the RelAddr
       * register after the instruction emit.
       */
      if (dst.RelAddr) {
         reg = get_tmp(c);
      } else {
         assert(c->regs[dst.File][dst.Index].nr != 0);
         reg = c->regs[dst.File][dst.Index];
      }
      break;
   case PROGRAM_ADDRESS:
      assert(dst.Index == 0);
      reg = c->regs[dst.File][dst.Index];
      break;
   case PROGRAM_UNDEFINED:
      /* we may hit this for OPCODE_END, OPCODE_KIL, etc */
      reg = brw_null_reg();
      break;
   default:
      assert(0);
      reg = brw_null_reg();
   }

   assert(reg.type != BRW_IMMEDIATE_VALUE);
   reg.dw1.bits.writemask = dst.WriteMask;

   return reg;
}


static void emit_swz( struct brw_vs_compile *c,
                      struct brw_reg dst,
                      const struct prog_instruction *inst)
{
   const GLuint argIndex = 0;
   const struct prog_src_register src = inst->SrcReg[argIndex];
   struct brw_compile *p = &c->func;
   GLuint zeros_mask = 0;
   GLuint ones_mask = 0;
   GLuint src_mask = 0;
   GLubyte src_swz[4];
   bool need_tmp = (src.Negate &&
                    dst.file != BRW_GENERAL_REGISTER_FILE);
   struct brw_reg tmp = dst;
   GLuint i;

   if (need_tmp)
      tmp = get_tmp(c);

   for (i = 0; i < 4; i++) {
      if (dst.dw1.bits.writemask & (1<<i)) {
         GLubyte s = GET_SWZ(src.Swizzle, i);
         switch (s) {
         case SWIZZLE_X:
         case SWIZZLE_Y:
         case SWIZZLE_Z:
         case SWIZZLE_W:
            src_mask |= 1<<i;
            src_swz[i] = s;
            break;
         case SWIZZLE_ZERO:
            zeros_mask |= 1<<i;
            break;
         case SWIZZLE_ONE:
            ones_mask |= 1<<i;
            break;
         }
      }
   }

   /* Do src first, in case dst aliases src:
    */
   if (src_mask) {
      struct brw_reg arg0;

      arg0 = get_src_reg(c, inst, argIndex);

      arg0 = brw_swizzle(arg0,
                         src_swz[0], src_swz[1],
                         src_swz[2], src_swz[3]);

      brw_MOV(p, brw_writemask(tmp, src_mask), arg0);
   }

   if (zeros_mask)
      brw_MOV(p, brw_writemask(tmp, zeros_mask), brw_imm_f(0));

   if (ones_mask)
      brw_MOV(p, brw_writemask(tmp, ones_mask), brw_imm_f(1));

   if (src.Negate)
      brw_MOV(p, brw_writemask(tmp, src.Negate), negate(tmp));

   if (need_tmp) {
      brw_MOV(p, dst, tmp);
      release_tmp(c, tmp);
   }
}

static int
align_interleaved_urb_mlen(struct brw_context *brw, int mlen)
{
   struct intel_context *intel = &brw->intel;

   if (intel->gen >= 6) {
      /* URB data written (does not include the message header reg) must
       * be a multiple of 256 bits, or 2 VS registers.  See vol5c.5,
       * section 5.4.3.2.2: URB_INTERLEAVED.
       *
       * URB entries are allocated on a multiple of 1024 bits, so an
       * extra 128 bits written here to make the end align to 256 is
       * no problem.
       */
      if ((mlen % 2) != 1)
         mlen++;
   }

   return mlen;
}
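
/* Example (informative): mlen 6 (1 header reg + 5 interleaved rows) writes
 * 5 vec4s = 640 bits per vertex, so it is bumped to mlen 7, making the
 * per-vertex payload 6 vec4s = 768 bits, a multiple of 256.
 */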

/**
 * Post-vertex-program processing.  Send the results to the URB.
 */
static void emit_vertex_write( struct brw_vs_compile *c)
{
   struct brw_compile *p = &c->func;
   struct brw_context *brw = p->brw;
   struct intel_context *intel = &brw->intel;
   struct brw_reg pos = c->regs[PROGRAM_OUTPUT][VERT_RESULT_HPOS];
   struct brw_reg ndc;
   int eot;
   GLuint len_vertex_header = 2;
   int i;
   int msg_len;
   int slot;

   if (c->key.copy_edgeflag) {
      brw_MOV(p,
              get_reg(c, PROGRAM_OUTPUT, VERT_RESULT_EDGE),
              get_reg(c, PROGRAM_INPUT, VERT_ATTRIB_EDGEFLAG));
   }

   if (intel->gen < 6) {
      /* Build ndc coords */
      ndc = get_tmp(c);
      /* ndc = 1.0 / pos.w */
      emit_math1(c, BRW_MATH_FUNCTION_INV, ndc, brw_swizzle1(pos, 3), BRW_MATH_PRECISION_FULL);
      /* ndc.xyz = pos * ndc */
      brw_MUL(p, brw_writemask(ndc, WRITEMASK_XYZ), pos, ndc);
   }

   /* Update the header for point size, user clipping flags, and -ve rhw
    * workaround.
    */
   if (intel->gen >= 6) {
      struct brw_reg m1 = brw_message_reg(1);

      /* On gen6, m1 has each value in a separate dword, so we never
       * need to mess with a temporary for computing the m1 value.
       */
      brw_MOV(p, retype(m1, BRW_REGISTER_TYPE_UD), brw_imm_ud(0));
      if (c->prog_data.outputs_written & BITFIELD64_BIT(VERT_RESULT_PSIZ)) {
         brw_MOV(p, brw_writemask(m1, WRITEMASK_W),
                 brw_swizzle1(c->regs[PROGRAM_OUTPUT][VERT_RESULT_PSIZ], 0));
      }

      /* Set the user clip distances in dword 8-15. (m3-4) */
      if (c->key.userclip_active) {
         for (i = 0; i < c->key.nr_userclip_plane_consts; i++) {
            struct brw_reg m;
            if (i < 4)
               m = brw_message_reg(3);
            else
               m = brw_message_reg(4);

            brw_DP4(p, brw_writemask(m, (1 << (i & 3))), pos, c->userplane[i]);
         }
      }
   } else if ((c->prog_data.outputs_written &
               BITFIELD64_BIT(VERT_RESULT_PSIZ)) ||
              c->key.userclip_active || brw->has_negative_rhw_bug) {
      struct brw_reg header1 = retype(get_tmp(c), BRW_REGISTER_TYPE_UD);
      GLuint i;

      brw_MOV(p, header1, brw_imm_ud(0));

      brw_set_access_mode(p, BRW_ALIGN_16);

      if (c->prog_data.outputs_written & BITFIELD64_BIT(VERT_RESULT_PSIZ)) {
         struct brw_reg psiz = c->regs[PROGRAM_OUTPUT][VERT_RESULT_PSIZ];
         brw_MUL(p, brw_writemask(header1, WRITEMASK_W),
                 brw_swizzle1(psiz, 0), brw_imm_f(1<<11));
         brw_AND(p, brw_writemask(header1, WRITEMASK_W),
                 header1, brw_imm_ud(0x7ff<<8));
      }

      for (i = 0; i < c->key.nr_userclip_plane_consts; i++) {
         brw_set_conditionalmod(p, BRW_CONDITIONAL_L);
         brw_DP4(p, brw_null_reg(), pos, c->userplane[i]);
         brw_OR(p, brw_writemask(header1, WRITEMASK_W), header1, brw_imm_ud(1<<i));
         brw_set_predicate_control(p, BRW_PREDICATE_NONE);
      }

      /* i965 clipping workaround:
       * 1) Test for -ve rhw
       * 2) If set,
       *      set ndc = (0,0,0,0)
       *      set ucp[6] = 1
       *
       * Later, clipping will detect ucp[6] and ensure the primitive is
       * clipped against all fixed planes.
       */
      if (brw->has_negative_rhw_bug) {
         brw_CMP(p,
                 vec8(brw_null_reg()),
                 BRW_CONDITIONAL_L,
                 brw_swizzle1(ndc, 3),
                 brw_imm_f(0));

         brw_OR(p, brw_writemask(header1, WRITEMASK_W), header1, brw_imm_ud(1<<6));
         brw_MOV(p, ndc, brw_imm_f(0));
         brw_set_predicate_control(p, BRW_PREDICATE_NONE);
      }

      brw_set_access_mode(p, BRW_ALIGN_1); /* why? */
      brw_MOV(p, retype(brw_message_reg(1), BRW_REGISTER_TYPE_UD), header1);
      brw_set_access_mode(p, BRW_ALIGN_16);

      release_tmp(c, header1);
   }
   else {
      brw_MOV(p, retype(brw_message_reg(1), BRW_REGISTER_TYPE_UD), brw_imm_ud(0));
   }

   /* Emit the (interleaved) headers for the two vertices - an 8-reg
    * of zeros followed by two sets of NDC coordinates:
    */
   brw_set_access_mode(p, BRW_ALIGN_1);
   brw_set_acc_write_control(p, 0);

   /* The VUE layout is documented in Volume 2a. */
   if (intel->gen >= 6) {
      /* There are 8 or 16 DWs (D0-D15) in VUE header on Sandybridge:
       * dword 0-3 (m1) of the header is indices, point width, clip flags.
       * dword 4-7 (m2) is the 4D space position
       * dword 8-15 (m3,m4) of the vertex header is the user clip distance if
       * enabled.
       * m3 or 5 is the first vertex element data we fill, which is
       * the vertex position.
       */
      brw_MOV(p, brw_message_reg(2), pos);
      len_vertex_header = 1;
      if (c->key.userclip_active)
         len_vertex_header += 2;
   } else if (intel->gen == 5) {
      /* There are 20 DWs (D0-D19) in VUE header on Ironlake:
       * dword 0-3 (m1) of the header is indices, point width, clip flags.
       * dword 4-7 (m2) is the ndc position (set above)
       * dword 8-11 (m3) of the vertex header is the 4D space position
       * dword 12-19 (m4,m5) of the vertex header is the user clip distance.
       * m6 is a pad so that the vertex element data is aligned
       * m7 is the first vertex data we fill, which is the vertex position.
       */
      brw_MOV(p, brw_message_reg(2), ndc);
      brw_MOV(p, brw_message_reg(3), pos);
      brw_MOV(p, brw_message_reg(7), pos);
      len_vertex_header = 6;
   } else {
      /* There are 8 dwords in VUE header pre-Ironlake:
       * dword 0-3 (m1) is indices, point width, clip flags.
       * dword 4-7 (m2) is ndc position (set above)
       *
       * dword 8-11 (m3) is the first vertex data, which is always the
       * vertex position.
       */
      brw_MOV(p, brw_message_reg(2), ndc);
      brw_MOV(p, brw_message_reg(3), pos);
      len_vertex_header = 2;
   }

   /* Move variable-addressed, non-overflow outputs to their MRFs. */
   for (slot = len_vertex_header; slot < c->prog_data.vue_map.num_slots; ++slot) {
      if (slot >= MAX_SLOTS_IN_FIRST_URB_WRITE)
         break;

      int mrf = slot + 1;
      int vert_result = c->prog_data.vue_map.slot_to_vert_result[slot];
      if (c->regs[PROGRAM_OUTPUT][vert_result].file ==
          BRW_GENERAL_REGISTER_FILE) {
         brw_MOV(p, brw_message_reg(mrf),
                 c->regs[PROGRAM_OUTPUT][vert_result]);
      }
   }

   eot = (slot >= c->prog_data.vue_map.num_slots);

   /* Message header, plus the (first part of the) VUE. */
   msg_len = 1 + slot;
   msg_len = align_interleaved_urb_mlen(brw, msg_len);
   /* Any outputs beyond BRW_MAX_MRF should be in the second URB write */
   assert(msg_len <= BRW_MAX_MRF - 1);

   brw_urb_WRITE(p,
                 brw_null_reg(), /* dest */
                 0,              /* starting mrf reg nr */
                 c->r0,          /* src */
                 0,              /* allocate */
                 1,              /* used */
                 msg_len,
                 0,              /* response len */
                 eot,            /* eot */
                 eot,            /* writes complete */
                 0,              /* urb destination offset */
                 BRW_URB_SWIZZLE_INTERLEAVE);

   if (slot < c->prog_data.vue_map.num_slots) {
      /* Not all of the vertex outputs/results fit into the MRF.
       * Move the overflowed attributes from the GRF to the MRF and
       * issue another brw_urb_WRITE().
       */
      GLuint mrf = 1;
      for (; slot < c->prog_data.vue_map.num_slots; ++slot) {
         int vert_result = c->prog_data.vue_map.slot_to_vert_result[slot];
         /* move from GRF to MRF */
         brw_MOV(p, brw_message_reg(mrf),
                 c->regs[PROGRAM_OUTPUT][vert_result]);
         mrf++;
      }

      brw_urb_WRITE(p,
                    brw_null_reg(), /* dest */
                    0,              /* starting mrf reg nr */
                    c->r0,          /* src */
                    0,              /* allocate */
                    1,              /* used */
                    align_interleaved_urb_mlen(brw, mrf),
                    0,              /* response len */
                    1,              /* eot */
                    1,              /* writes complete */
                    MAX_SLOTS_IN_FIRST_URB_WRITE / 2, /* urb destination offset */
                    BRW_URB_SWIZZLE_INTERLEAVE);
   }
}

static bool
accumulator_contains(struct brw_vs_compile *c, struct brw_reg val)
{
   struct brw_compile *p = &c->func;
   struct brw_instruction *prev_insn = &p->store[p->nr_insn - 1];

   if (p->nr_insn == 0)
      return false;

   if (val.address_mode != BRW_ADDRESS_DIRECT)
      return false;

   if (val.negate || val.abs || val.dw1.bits.swizzle != BRW_SWIZZLE_XYZW)
      return false;

   switch (prev_insn->header.opcode) {
   case BRW_OPCODE_MOV:
   case BRW_OPCODE_MAC:
   case BRW_OPCODE_MUL:
      if (prev_insn->header.access_mode == BRW_ALIGN_16 &&
          prev_insn->header.execution_size == val.width &&
          prev_insn->bits1.da1.dest_reg_file == val.file &&
          prev_insn->bits1.da1.dest_reg_type == val.type &&
          prev_insn->bits1.da1.dest_address_mode == val.address_mode &&
          prev_insn->bits1.da1.dest_reg_nr == val.nr &&
          prev_insn->bits1.da16.dest_subreg_nr == val.subnr / 16 &&
          prev_insn->bits1.da16.dest_writemask == 0xf)
         return true;
      else
         return false;
   default:
      return false;
   }
}
1768
1769 static uint32_t
get_predicate(const struct prog_instruction *inst)
{
   if (inst->DstReg.CondMask == COND_TR)
      return BRW_PREDICATE_NONE;

   /* All of GLSL only produces predicates for COND_NE and one channel per
    * vector.  Fail badly if someone starts doing something else, as it might
    * mean infinite looping or something.
    *
    * We'd like to support all the condition codes, but our hardware doesn't
    * quite match the Mesa IR, which is modeled after the NV extensions.  For
    * those, the instruction may update the condition codes or not, then any
    * later instruction may use one of those condition codes.  For gen4, the
    * instruction may update the flags register based on one of the condition
    * codes output by the instruction, and then further instructions may
    * predicate on that.  We can probably support this, but it won't
    * necessarily be easy.
    */
   assert(inst->DstReg.CondMask == COND_NE);

   switch (inst->DstReg.CondSwizzle) {
   case SWIZZLE_XXXX:
      return BRW_PREDICATE_ALIGN16_REPLICATE_X;
   case SWIZZLE_YYYY:
      return BRW_PREDICATE_ALIGN16_REPLICATE_Y;
   case SWIZZLE_ZZZZ:
      return BRW_PREDICATE_ALIGN16_REPLICATE_Z;
   case SWIZZLE_WWWW:
      return BRW_PREDICATE_ALIGN16_REPLICATE_W;
   default:
      _mesa_problem(NULL, "Unexpected predicate swizzle: 0x%08x\n",
                    inst->DstReg.CondSwizzle);
      return BRW_PREDICATE_NORMAL;
   }
}

static void
brw_vs_rescale_gl_fixed(struct brw_vs_compile *c)
{
   struct brw_compile *p = &c->func;
   int i;

   for (i = 0; i < VERT_ATTRIB_MAX; i++) {
      if (!(c->prog_data.inputs_read & BITFIELD64_BIT(i)))
         continue;

      if (c->key.gl_fixed_input_size[i] != 0) {
         struct brw_reg reg = c->regs[PROGRAM_INPUT][i];
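
         /* GL_FIXED attributes arrive as 16.16 fixed point; multiply the
          * first gl_fixed_input_size[i] components by 1/65536 to recover
          * the float values.  (1 << size) - 1 builds a writemask covering
          * exactly those components.
          */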
         brw_MUL(p,
                 brw_writemask(reg, (1 << c->key.gl_fixed_input_size[i]) - 1),
                 reg, brw_imm_f(1.0 / 65536.0));
      }
   }
}

/* Emit the vertex program instructions here.
 */
void brw_old_vs_emit(struct brw_vs_compile *c)
{
#define MAX_IF_DEPTH 32
#define MAX_LOOP_DEPTH 32
   struct brw_compile *p = &c->func;
   struct brw_context *brw = p->brw;
   struct intel_context *intel = &brw->intel;
   const GLuint nr_insns = c->vp->program.Base.NumInstructions;
   GLuint insn;
   GLuint index;
   GLuint file;

   if (unlikely(INTEL_DEBUG & DEBUG_VS)) {
      printf("vs-mesa:\n");
      _mesa_fprint_program_opt(stdout, &c->vp->program.Base, PROG_PRINT_DEBUG,
                               true);
      printf("\n");
   }

   brw_set_compression_control(p, BRW_COMPRESSION_NONE);
   brw_set_access_mode(p, BRW_ALIGN_16);
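
   /* Have ALU results also update the accumulator, which the MOV+MAC
    * lowering of MAD below (see accumulator_contains()) relies on.
    */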
   brw_set_acc_write_control(p, 1);

   for (insn = 0; insn < nr_insns; insn++) {
      GLuint i;
      struct prog_instruction *inst = &c->vp->program.Base.Instructions[insn];

      /* Message registers can't be read, so copy the output into a GRF
       * register if it is used in source registers.
       */
      for (i = 0; i < 3; i++) {
         struct prog_src_register *src = &inst->SrcReg[i];
         GLuint index = src->Index;
         GLuint file = src->File;
         if (file == PROGRAM_OUTPUT && index != VERT_RESULT_HPOS)
            c->output_regs[index].used_in_src = true;
      }
   }

   /* Static register allocation
    */
   brw_vs_alloc_regs(c);

   brw_vs_rescale_gl_fixed(c);

   for (insn = 0; insn < nr_insns; insn++) {

      const struct prog_instruction *inst = &c->vp->program.Base.Instructions[insn];
      struct brw_reg args[3], dst;
      GLuint i;

#if 0
      printf("%d: ", insn);
      _mesa_print_instruction(inst);
#endif

      /* Get argument regs.  SWZ is special and does this itself.
       */
      if (inst->Opcode != OPCODE_SWZ)
         for (i = 0; i < 3; i++) {
            const struct prog_src_register *src = &inst->SrcReg[i];
            index = src->Index;
            file = src->File;
            if (file == PROGRAM_OUTPUT && c->output_regs[index].used_in_src) {
               /* Can't just make get_arg "do the right thing" here because
                * other callers of get_arg and get_src_reg don't expect any
                * special behavior for the c->output_regs[index].used_in_src
                * case.
                */
               args[i] = c->output_regs[index].reg;
               args[i].dw1.bits.swizzle =
                  BRW_SWIZZLE4(GET_SWZ(src->Swizzle, 0),
                               GET_SWZ(src->Swizzle, 1),
                               GET_SWZ(src->Swizzle, 2),
                               GET_SWZ(src->Swizzle, 3));

               /* Note this is ok for non-swizzle ARB_vp instructions */
               args[i].negate = src->Negate ? 1 : 0;
            } else
               args[i] = get_arg(c, inst, i);
         }

      /* Get dest regs.  Note that it is possible for a reg to be both
       * dst and arg, given the static allocation of registers.  So
       * care needs to be taken emitting multi-operation instructions.
       */
      index = inst->DstReg.Index;
      file = inst->DstReg.File;
      if (file == PROGRAM_OUTPUT && c->output_regs[index].used_in_src)
         /* Can't just make get_dst "do the right thing" here because other
          * callers of get_dst don't expect any special behavior for the
          * c->output_regs[index].used_in_src case.
          */
         dst = brw_writemask(c->output_regs[index].reg, inst->DstReg.WriteMask);
      else
         dst = get_dst(c, inst->DstReg);

      if (inst->SaturateMode != SATURATE_OFF) {
         _mesa_problem(NULL, "Unsupported saturate %d in vertex shader",
                       inst->SaturateMode);
      }

      switch (inst->Opcode) {
      case OPCODE_ABS:
         args[0].negate = false;
         brw_MOV(p, dst, brw_abs(args[0]));
         break;
      case OPCODE_ADD:
         brw_ADD(p, dst, args[0], args[1]);
         break;
      case OPCODE_COS:
         emit_math1(c, BRW_MATH_FUNCTION_COS, dst, args[0], BRW_MATH_PRECISION_FULL);
         break;
      case OPCODE_DP2:
         brw_DP2(p, dst, args[0], args[1]);
         break;
      case OPCODE_DP3:
         brw_DP3(p, dst, args[0], args[1]);
         break;
      case OPCODE_DP4:
         brw_DP4(p, dst, args[0], args[1]);
         break;
      case OPCODE_DPH:
         brw_DPH(p, dst, args[0], args[1]);
         break;
      case OPCODE_DST:
         unalias2(c, dst, args[0], args[1], emit_dst_noalias);
         break;
      case OPCODE_EXP:
         unalias1(c, dst, args[0], emit_exp_noalias);
         break;
      case OPCODE_EX2:
         emit_math1(c, BRW_MATH_FUNCTION_EXP, dst, args[0], BRW_MATH_PRECISION_FULL);
         break;
      case OPCODE_ARL:
         emit_arl(p, dst, args[0]);
         break;
      case OPCODE_FLR:
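         /* FLR rounds toward -infinity, which RNDD implements directly. */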
         brw_RNDD(p, dst, args[0]);
         break;
      case OPCODE_FRC:
         brw_FRC(p, dst, args[0]);
         break;
      case OPCODE_LOG:
         unalias1(c, dst, args[0], emit_log_noalias);
         break;
      case OPCODE_LG2:
         emit_math1(c, BRW_MATH_FUNCTION_LOG, dst, args[0], BRW_MATH_PRECISION_FULL);
         break;
      case OPCODE_LIT:
         unalias1(c, dst, args[0], emit_lit_noalias);
         break;
      case OPCODE_LRP:
         unalias3(c, dst, args[0], args[1], args[2], emit_lrp_noalias);
         break;
      case OPCODE_MAD:
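         /* MAD is lowered to a MOV into the accumulator followed by MAC;
          * the MOV can be skipped when the previous instruction already
          * left args[2] in the accumulator.
          */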
         if (!accumulator_contains(c, args[2]))
            brw_MOV(p, brw_acc_reg(), args[2]);
         brw_MAC(p, dst, args[0], args[1]);
         break;
      case OPCODE_CMP:
         emit_cmp(p, dst, args[0], args[1], args[2]);
         break;
      case OPCODE_MAX:
         emit_max(p, dst, args[0], args[1]);
         break;
      case OPCODE_MIN:
         emit_min(p, dst, args[0], args[1]);
         break;
      case OPCODE_MOV:
         brw_MOV(p, dst, args[0]);
         break;
      case OPCODE_MUL:
         brw_MUL(p, dst, args[0], args[1]);
         break;
      case OPCODE_POW:
         emit_math2(c, BRW_MATH_FUNCTION_POW, dst, args[0], args[1], BRW_MATH_PRECISION_FULL);
         break;
      case OPCODE_RCP:
         emit_math1(c, BRW_MATH_FUNCTION_INV, dst, args[0], BRW_MATH_PRECISION_FULL);
         break;
      case OPCODE_RSQ:
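         /* Mesa IR RSQ is defined on the absolute value of its argument,
          * hence the brw_abs() here.
          */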
         emit_math1(c, BRW_MATH_FUNCTION_RSQ, dst, brw_abs(args[0]), BRW_MATH_PRECISION_FULL);
         break;

      case OPCODE_SEQ:
         unalias2(c, dst, args[0], args[1], emit_seq);
         break;
      case OPCODE_SIN:
         emit_math1(c, BRW_MATH_FUNCTION_SIN, dst, args[0], BRW_MATH_PRECISION_FULL);
         break;
      case OPCODE_SNE:
         unalias2(c, dst, args[0], args[1], emit_sne);
         break;
      case OPCODE_SGE:
         unalias2(c, dst, args[0], args[1], emit_sge);
         break;
      case OPCODE_SGT:
         unalias2(c, dst, args[0], args[1], emit_sgt);
         break;
      case OPCODE_SLT:
         unalias2(c, dst, args[0], args[1], emit_slt);
         break;
      case OPCODE_SLE:
         unalias2(c, dst, args[0], args[1], emit_sle);
         break;
      case OPCODE_SSG:
         unalias1(c, dst, args[0], emit_sign);
         break;
      case OPCODE_SUB:
         brw_ADD(p, dst, args[0], negate(args[1]));
         break;
      case OPCODE_SWZ:
         /* The args[0] value can't be used here as it won't have
          * correctly encoded the full swizzle:
          */
         emit_swz(c, dst, inst);
         break;
      case OPCODE_TRUNC:
         /* round toward zero */
         brw_RNDZ(p, dst, args[0]);
         break;
      case OPCODE_XPD:
         emit_xpd(p, dst, args[0], args[1]);
         break;
      case OPCODE_IF: {
         struct brw_instruction *if_inst = brw_IF(p, BRW_EXECUTE_8);
         /* Note that brw_IF smashes the predicate_control field. */
         if_inst->header.predicate_control = get_predicate(inst);
         break;
      }
      case OPCODE_ELSE:
         clear_current_const(c);
         brw_ELSE(p);
         break;
      case OPCODE_ENDIF:
         clear_current_const(c);
         brw_ENDIF(p);
         break;
      case OPCODE_BGNLOOP:
         clear_current_const(c);
         brw_DO(p, BRW_EXECUTE_8);
         break;
      case OPCODE_BRK:
         brw_set_predicate_control(p, get_predicate(inst));
         brw_BREAK(p);
         brw_set_predicate_control(p, BRW_PREDICATE_NONE);
         break;
      case OPCODE_CONT:
         brw_set_predicate_control(p, get_predicate(inst));
         if (intel->gen >= 6) {
            gen6_CONT(p);
         } else {
            brw_CONT(p);
         }
         brw_set_predicate_control(p, BRW_PREDICATE_NONE);
         break;

      case OPCODE_ENDLOOP:
         clear_current_const(c);
         brw_WHILE(p);
         break;

      case OPCODE_BRA:
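         /* Branch by conditionally adding one instruction's length
          * (16 bytes) to the IP register.
          */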
         brw_set_predicate_control(p, get_predicate(inst));
         brw_ADD(p, brw_ip_reg(), brw_ip_reg(), brw_imm_d(1*16));
         brw_set_predicate_control(p, BRW_PREDICATE_NONE);
         break;
      case OPCODE_END:
         emit_vertex_write(c);
         break;
      case OPCODE_PRINT:
         /* no-op */
         break;
      default:
         _mesa_problem(NULL, "Unsupported opcode %i (%s) in vertex shader",
                       inst->Opcode, inst->Opcode < MAX_OPCODE ?
                       _mesa_opcode_string(inst->Opcode) :
                       "unknown");
      }

      /* Set the predication update on the last instruction of the native
       * instruction sequence.
       *
       * This would be problematic if it were set on a math instruction,
       * but that shouldn't be the case with the current GLSL compiler.
       */
      if (inst->CondUpdate) {
         struct brw_instruction *hw_insn = &p->store[p->nr_insn - 1];

         assert(hw_insn->header.destreg__conditionalmod == 0);
         hw_insn->header.destreg__conditionalmod = BRW_CONDITIONAL_NZ;
      }

      if ((inst->DstReg.File == PROGRAM_OUTPUT)
          && (inst->DstReg.Index != VERT_RESULT_HPOS)
          && c->output_regs[inst->DstReg.Index].used_in_src) {
         brw_MOV(p, get_dst(c, inst->DstReg), dst);
      }

      /* Result color clamping.
       *
       * When the destination register is an output register and it is a
       * primary/secondary front/back color, we have to clamp the result
       * to [0,1].  This is done by enabling the saturation bit for the
       * last instruction.
       *
       * We don't use brw_set_saturate() as it modifies
       * p->current->header.saturate, which affects all the subsequent
       * instructions.  Instead, we directly modify the header
       * of the last (already stored) instruction.
       */
      if (inst->DstReg.File == PROGRAM_OUTPUT &&
          c->key.clamp_vertex_color) {
         if ((inst->DstReg.Index == VERT_RESULT_COL0)
             || (inst->DstReg.Index == VERT_RESULT_COL1)
             || (inst->DstReg.Index == VERT_RESULT_BFC0)
             || (inst->DstReg.Index == VERT_RESULT_BFC1)) {
            p->store[p->nr_insn-1].header.saturate = 1;
         }
      }

      if (inst->DstReg.RelAddr) {
         assert(inst->DstReg.File == PROGRAM_TEMPORARY ||
                inst->DstReg.File == PROGRAM_OUTPUT);
         move_to_reladdr_dst(c, inst, dst);
      }

      release_tmps(c);
   }
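
   /* Now that all instructions are in place, fill in the jump targets
    * (UIP/JIP) of the flow-control instructions.
    */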
   brw_set_uip_jip(p);

   brw_optimize(p);

   if (unlikely(INTEL_DEBUG & DEBUG_VS)) {
      int i;

      printf("vs-native:\n");
      for (i = 0; i < p->nr_insn; i++)
         brw_disasm(stdout, &p->store[i], intel->gen);
      printf("\n");
   }
}