/*
 * Copyright © 2016-2018 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "v3d_compiler.h"

/* We don't do any address packing. */
#define __gen_user_data void
#define __gen_address_type uint32_t
#define __gen_address_offset(reloc) (*reloc)
#define __gen_emit_reloc(cl, reloc)
#include "cle/v3d_packet_v41_pack.h"

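/* Emits a MOV to one of the TMU's magic write addresses, which is how
 * texture parameters (S, T, R, array index, LOD bias, etc.) are handed to
 * the TMU.
 */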
static void
vir_TMU_WRITE(struct v3d_compile *c, enum v3d_qpu_waddr waddr, struct qreg val)
{
        vir_MOV_dest(c, vir_reg(QFILE_MAGIC, waddr), val);
}

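/* Emits a NOP with the WRTMUC signal set, writing one 32-bit TMU
 * configuration word.  The value goes in as an implicit uniform so the
 * driver can fill in state-dependent fields at uniform upload time.
 */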
static void
vir_WRTMUC(struct v3d_compile *c, enum quniform_contents contents, uint32_t data)
{
        struct qinst *inst = vir_NOP(c);
        inst->qpu.sig.wrtmuc = true;
        inst->has_implicit_uniform = true;
        inst->src[0] = vir_uniform(c, contents, data);
}

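/* Emits the VIR for a NIR texture operation on V3D 4.1+: the TMU
 * configuration words are written with WRTMUC, each texture source is
 * written to its TMU register, and the results are read back with LDTMU
 * after a thread switch.
 */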
void
v3d40_vir_emit_tex(struct v3d_compile *c, nir_tex_instr *instr)
{
        unsigned unit = instr->texture_index;

        struct V3D41_TMU_CONFIG_PARAMETER_0 p0_unpacked = {
        };

        struct V3D41_TMU_CONFIG_PARAMETER_1 p1_unpacked = {
                .output_type_32_bit = (c->key->tex[unit].return_size == 32 &&
                                       !instr->is_shadow),

                .unnormalized_coordinates = (instr->sampler_dim ==
                                             GLSL_SAMPLER_DIM_RECT),
        };

        struct V3D41_TMU_CONFIG_PARAMETER_2 p2_unpacked = {
                .op = V3D_TMU_OP_REGULAR,

                .gather_mode = instr->op == nir_texop_tg4,
                .gather_component = instr->component,

                .coefficient_mode = instr->op == nir_texop_txd,
        };

        int non_array_components = instr->coord_components - instr->is_array;
        struct qreg s;

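        /* Route each texture source to its TMU register.  The S coordinate
         * is held back, since writing it is what kicks off the lookup.
         */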
        for (unsigned i = 0; i < instr->num_srcs; i++) {
                switch (instr->src[i].src_type) {
                case nir_tex_src_coord:
                        /* S triggers the lookup, so save it for the end. */
                        s = ntq_get_src(c, instr->src[i].src, 0);

                        if (non_array_components > 1) {
                                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUT,
                                              ntq_get_src(c, instr->src[i].src,
                                                          1));
                        }
                        if (non_array_components > 2) {
                                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUR,
                                              ntq_get_src(c, instr->src[i].src,
                                                          2));
                        }

                        if (instr->is_array) {
                                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUI,
                                              ntq_get_src(c, instr->src[i].src,
                                                          instr->coord_components - 1));
                        }
                        break;

                case nir_tex_src_bias:
                        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUB,
                                      ntq_get_src(c, instr->src[i].src, 0));
                        break;

                case nir_tex_src_lod:
                        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUB,
                                      ntq_get_src(c, instr->src[i].src, 0));

                        if (instr->op != nir_texop_txf &&
                            instr->op != nir_texop_tg4) {
                                p2_unpacked.disable_autolod = true;
                        }
                        break;

                case nir_tex_src_comparator:
                        vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUDREF,
                                      ntq_get_src(c, instr->src[i].src, 0));
                        break;

                case nir_tex_src_offset: {
                        nir_const_value *offset =
                                nir_src_as_const_value(instr->src[i].src);

                        p2_unpacked.offset_s = offset->i32[0];
                        if (instr->coord_components >= 2)
                                p2_unpacked.offset_t = offset->i32[1];
                        if (instr->coord_components >= 3)
                                p2_unpacked.offset_r = offset->i32[2];
                        break;
                }

                default:
                        unreachable("unknown texture source");
                }
        }

        /* Limit the number of channels returned to both how many the NIR
         * instruction writes and how many the instruction could produce.
         */
        uint32_t instr_return_channels = nir_tex_instr_dest_size(instr);
        if (!p1_unpacked.output_type_32_bit)
                instr_return_channels = (instr_return_channels + 1) / 2;

        p0_unpacked.return_words_of_texture_data =
                (1 << MIN2(instr_return_channels,
                           c->key->tex[unit].return_channels)) - 1;

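        /* Pack the three config parameter structs into their 32-bit hardware
         * encodings using the generated pack functions.
         */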
        uint32_t p0_packed;
        V3D41_TMU_CONFIG_PARAMETER_0_pack(NULL,
                                          (uint8_t *)&p0_packed,
                                          &p0_unpacked);

        uint32_t p1_packed;
        V3D41_TMU_CONFIG_PARAMETER_1_pack(NULL,
                                          (uint8_t *)&p1_packed,
                                          &p1_unpacked);

        uint32_t p2_packed;
        V3D41_TMU_CONFIG_PARAMETER_2_pack(NULL,
                                          (uint8_t *)&p2_packed,
                                          &p2_unpacked);

        /* Load unit number into the high bits of the texture or sampler
         * address field, which will be used by the driver to decide which
         * texture to put in the actual address field.
         */
        p0_packed |= unit << 24;
        p1_packed |= unit << 24;

        vir_WRTMUC(c, QUNIFORM_TMU_CONFIG_P0, p0_packed);
        vir_WRTMUC(c, QUNIFORM_TMU_CONFIG_P1, p1_packed);
        vir_WRTMUC(c, QUNIFORM_CONSTANT, p2_packed);

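        /* Writing the S coordinate fires off the lookup: texel fetches (txf)
         * go to the fetch address (TMUSF), cube maps to the cube map lookup
         * address (TMUSCM), and everything else to the regular lookup
         * address (TMUS).
         */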
        if (instr->op == nir_texop_txf) {
                assert(instr->sampler_dim != GLSL_SAMPLER_DIM_CUBE);
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUSF, s);
        } else if (instr->sampler_dim == GLSL_SAMPLER_DIM_CUBE) {
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUSCM, s);
        } else {
                vir_TMU_WRITE(c, V3D_QPU_WADDR_TMUS, s);
        }

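        /* Thread switch: the QPU can run its other thread while the TMU
         * lookup is outstanding, hiding the texture fetch latency.
         */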
        vir_emit_thrsw(c);

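        /* Read back one LDTMU result per word the TMU was configured to
         * return; words that weren't requested stay undefined.
         */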
        struct qreg return_values[4];
        for (int i = 0; i < 4; i++) {
                /* Swizzling .zw of an RG texture should give undefined
                 * results, not crash the compiler.
                 */
                if (p0_unpacked.return_words_of_texture_data & (1 << i))
                        return_values[i] = vir_LDTMU(c);
                else
                        return_values[i] = c->undef;
        }

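        /* For 16-bit returns the TMU packs two channels per 32-bit word, so
         * unpack them here: half-float unpack for float results, shifts with
         * sign or zero extension for integer results.  32-bit returns map
         * one channel per word.
         */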
        for (int i = 0; i < nir_tex_instr_dest_size(instr); i++) {
                struct qreg chan;

                if (!p1_unpacked.output_type_32_bit) {
                        STATIC_ASSERT(PIPE_SWIZZLE_X == 0);
                        chan = return_values[i / 2];

                        if (nir_alu_type_get_base_type(instr->dest_type) ==
                            nir_type_float) {
                                enum v3d_qpu_input_unpack unpack;
                                if (i & 1)
                                        unpack = V3D_QPU_UNPACK_H;
                                else
                                        unpack = V3D_QPU_UNPACK_L;

                                chan = vir_FMOV(c, chan);
                                vir_set_unpack(c->defs[chan.index], 0, unpack);
                        } else {
                                /* If we're unpacking the low field, shift it
                                 * up to the top first.
                                 */
                                if ((i & 1) == 0) {
                                        chan = vir_SHL(c, chan,
                                                       vir_uniform_ui(c, 16));
                                }

                                /* Do proper sign extension to a 32-bit int. */
                                if (nir_alu_type_get_base_type(instr->dest_type) ==
                                    nir_type_int) {
                                        chan = vir_ASR(c, chan,
                                                       vir_uniform_ui(c, 16));
                                } else {
                                        chan = vir_SHR(c, chan,
                                                       vir_uniform_ui(c, 16));
                                }
                        }
                } else {
                        chan = vir_MOV(c, return_values[i]);
                }
                ntq_store_dest(c, &instr->dest, i, chan);
        }
}