/*
 * Copyright © 2015 Broadcom
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "util/format/u_format.h"
#include "util/u_surface.h"
#include "util/u_blitter.h"
#include "compiler/nir/nir_builder.h"
#include "vc4_context.h"

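/* Wraps a single miplevel of a resource in a pipe_surface so it can be
 * bound as the tile buffer's color read/store target for the RCL blit path
 * below.
 */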
static struct pipe_surface *
vc4_get_blit_surface(struct pipe_context *pctx,
                     struct pipe_resource *prsc, unsigned level)
{
        struct pipe_surface tmpl;

        memset(&tmpl, 0, sizeof(tmpl));
        tmpl.format = prsc->format;
        tmpl.u.tex.level = level;
        tmpl.u.tex.first_layer = 0;
        tmpl.u.tex.last_layer = 0;

        return pctx->create_surface(pctx, prsc, &tmpl);
}

static bool
is_tile_unaligned(unsigned size, unsigned tile_size)
{
        return size & (tile_size - 1);
}

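/* Tries to perform the blit entirely in the RCL by loading the source
 * miplevel into the tile buffer and storing it out to the destination,
 * without running any shaders.  Only unscaled, unscissored, tile-aligned
 * color blits with matching formats and strides qualify; anything else
 * falls back to the render-based paths.
 */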
static bool
vc4_tile_blit(struct pipe_context *pctx, const struct pipe_blit_info *info)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        bool msaa = (info->src.resource->nr_samples > 1 ||
                     info->dst.resource->nr_samples > 1);
        int tile_width = msaa ? 32 : 64;
        int tile_height = msaa ? 32 : 64;

        if (util_format_is_depth_or_stencil(info->dst.resource->format))
                return false;

        if (info->scissor_enable)
                return false;

        if ((info->mask & PIPE_MASK_RGBA) == 0)
                return false;

        if (info->dst.box.x != info->src.box.x ||
            info->dst.box.y != info->src.box.y ||
            info->dst.box.width != info->src.box.width ||
            info->dst.box.height != info->src.box.height) {
                return false;
        }

        int dst_surface_width = u_minify(info->dst.resource->width0,
                                         info->dst.level);
        int dst_surface_height = u_minify(info->dst.resource->height0,
                                         info->dst.level);
        if (is_tile_unaligned(info->dst.box.x, tile_width) ||
            is_tile_unaligned(info->dst.box.y, tile_height) ||
            (is_tile_unaligned(info->dst.box.width, tile_width) &&
             info->dst.box.x + info->dst.box.width != dst_surface_width) ||
            (is_tile_unaligned(info->dst.box.height, tile_height) &&
             info->dst.box.y + info->dst.box.height != dst_surface_height)) {
                return false;
        }

        /* VC4_PACKET_LOAD_TILE_BUFFER_GENERAL uses the
         * VC4_PACKET_TILE_RENDERING_MODE_CONFIG's width (determined by our
         * destination surface) to determine the stride.  This may be wrong
         * when reading from texture miplevels > 0, which are stored in
         * POT-sized areas.  For MSAA, the tile addresses are computed
         * explicitly by the RCL, but still use the destination width to
         * determine the stride (which could be fixed by explicitly supplying
         * it in the ABI).
         */
        struct vc4_resource *rsc = vc4_resource(info->src.resource);

        uint32_t stride;

        if (info->src.resource->nr_samples > 1)
                stride = align(dst_surface_width, 32) * 4 * rsc->cpp;
        else if (rsc->slices[info->src.level].tiling == VC4_TILING_FORMAT_T)
                stride = align(dst_surface_width * rsc->cpp, 128);
        else
                stride = align(dst_surface_width * rsc->cpp, 16);

        if (stride != rsc->slices[info->src.level].stride)
                return false;

        if (info->dst.resource->format != info->src.resource->format)
                return false;

        if (false) {
                fprintf(stderr, "RCL blit from %d,%d to %d,%d (%d,%d)\n",
                        info->src.box.x,
                        info->src.box.y,
                        info->dst.box.x,
                        info->dst.box.y,
                        info->dst.box.width,
                        info->dst.box.height);
        }

        struct pipe_surface *dst_surf =
                vc4_get_blit_surface(pctx, info->dst.resource, info->dst.level);
        struct pipe_surface *src_surf =
                vc4_get_blit_surface(pctx, info->src.resource, info->src.level);

        vc4_flush_jobs_reading_resource(vc4, info->src.resource);

        struct vc4_job *job = vc4_get_job(vc4, dst_surf, NULL);
        pipe_surface_reference(&job->color_read, src_surf);

        /* If we're resolving from MSAA to single sample, we still need to run
         * the engine in MSAA mode for the load.
         */
        if (!job->msaa && info->src.resource->nr_samples > 1) {
                job->msaa = true;
                job->tile_width = 32;
                job->tile_height = 32;
        }

        job->draw_min_x = info->dst.box.x;
        job->draw_min_y = info->dst.box.y;
        job->draw_max_x = info->dst.box.x + info->dst.box.width;
        job->draw_max_y = info->dst.box.y + info->dst.box.height;
        job->draw_width = dst_surf->width;
        job->draw_height = dst_surf->height;

        job->tile_width = tile_width;
        job->tile_height = tile_height;
        job->msaa = msaa;
        job->needs_flush = true;
        job->resolve |= PIPE_CLEAR_COLOR;

        vc4_job_submit(vc4, job);

        pipe_surface_reference(&dst_surf, NULL);
        pipe_surface_reference(&src_surf, NULL);

        return true;
}

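/* Saves all of the context state that util_blitter is about to clobber, so
 * that it can be restored once the blit draw completes.
 */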
void
vc4_blitter_save(struct vc4_context *vc4)
{
        util_blitter_save_fragment_constant_buffer_slot(vc4->blitter,
                                                        vc4->constbuf[PIPE_SHADER_FRAGMENT].cb);
        util_blitter_save_vertex_buffer_slot(vc4->blitter, vc4->vertexbuf.vb);
        util_blitter_save_vertex_elements(vc4->blitter, vc4->vtx);
        util_blitter_save_vertex_shader(vc4->blitter, vc4->prog.bind_vs);
        util_blitter_save_rasterizer(vc4->blitter, vc4->rasterizer);
        util_blitter_save_viewport(vc4->blitter, &vc4->viewport);
        util_blitter_save_scissor(vc4->blitter, &vc4->scissor);
        util_blitter_save_fragment_shader(vc4->blitter, vc4->prog.bind_fs);
        util_blitter_save_blend(vc4->blitter, vc4->blend);
        util_blitter_save_depth_stencil_alpha(vc4->blitter, vc4->zsa);
        util_blitter_save_stencil_ref(vc4->blitter, &vc4->stencil_ref);
        util_blitter_save_sample_mask(vc4->blitter, vc4->sample_mask);
        util_blitter_save_framebuffer(vc4->blitter, &vc4->framebuffer);
        util_blitter_save_fragment_sampler_states(vc4->blitter,
                        vc4->fragtex.num_samplers,
                        (void **)vc4->fragtex.samplers);
        util_blitter_save_fragment_sampler_views(vc4->blitter,
                        vc4->fragtex.num_textures, vc4->fragtex.textures);
}

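/* Returns (building and caching it on first use) the passthrough vertex
 * shader used by the YUV linear-to-tiled blit.
 */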
static void *vc4_get_yuv_vs(struct pipe_context *pctx)
{
   struct vc4_context *vc4 = vc4_context(pctx);
   struct pipe_screen *pscreen = pctx->screen;

   if (vc4->yuv_linear_blit_vs)
           return vc4->yuv_linear_blit_vs;

   const struct nir_shader_compiler_options *options =
           pscreen->get_compiler_options(pscreen,
                                         PIPE_SHADER_IR_NIR,
                                         PIPE_SHADER_VERTEX);

   nir_builder b;
   nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_VERTEX, options);
   b.shader->info.name = ralloc_strdup(b.shader, "linear_blit_vs");

   const struct glsl_type *vec4 = glsl_vec4_type();
   nir_variable *pos_in = nir_variable_create(b.shader, nir_var_shader_in,
                                              vec4, "pos");

   nir_variable *pos_out = nir_variable_create(b.shader, nir_var_shader_out,
                                               vec4, "gl_Position");
   pos_out->data.location = VARYING_SLOT_POS;

   nir_store_var(&b, pos_out, nir_load_var(&b, pos_in), 0xf);

   struct pipe_shader_state shader_tmpl = {
           .type = PIPE_SHADER_IR_NIR,
           .ir.nir = b.shader,
   };

   vc4->yuv_linear_blit_vs = pctx->create_vs_state(pctx, &shader_tmpl);

   return vc4->yuv_linear_blit_vs;
}

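/* Returns (building and caching it on first use) the fragment shader for
 * the YUV linear-to-tiled blit.  The shader reads raw bytes of the linear
 * source plane from a UBO at an offset computed from gl_FragCoord and the
 * source stride, and writes them out as an RGBA8888 color, so that storing
 * the render target produces the tiled layout.
 */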
static void *vc4_get_yuv_fs(struct pipe_context *pctx, int cpp)
{
   struct vc4_context *vc4 = vc4_context(pctx);
   struct pipe_screen *pscreen = pctx->screen;
   struct pipe_shader_state **cached_shader;
   const char *name;

   if (cpp == 1) {
           cached_shader = &vc4->yuv_linear_blit_fs_8bit;
           name = "linear_blit_8bit_fs";
   } else {
           cached_shader = &vc4->yuv_linear_blit_fs_16bit;
           name = "linear_blit_16bit_fs";
   }

   if (*cached_shader)
           return *cached_shader;

   const struct nir_shader_compiler_options *options =
           pscreen->get_compiler_options(pscreen,
                                         PIPE_SHADER_IR_NIR,
                                         PIPE_SHADER_FRAGMENT);

   nir_builder b;
   nir_builder_init_simple_shader(&b, NULL, MESA_SHADER_FRAGMENT, options);
   b.shader->info.name = ralloc_strdup(b.shader, name);

   const struct glsl_type *vec4 = glsl_vec4_type();
   const struct glsl_type *glsl_int = glsl_int_type();

   nir_variable *color_out = nir_variable_create(b.shader, nir_var_shader_out,
                                                 vec4, "f_color");
   color_out->data.location = FRAG_RESULT_COLOR;

   nir_variable *pos_in = nir_variable_create(b.shader, nir_var_shader_in,
                                              vec4, "pos");
   pos_in->data.location = VARYING_SLOT_POS;
   nir_ssa_def *pos = nir_load_var(&b, pos_in);

   nir_ssa_def *one = nir_imm_int(&b, 1);
   nir_ssa_def *two = nir_imm_int(&b, 2);

   nir_ssa_def *x = nir_f2i32(&b, nir_channel(&b, pos, 0));
   nir_ssa_def *y = nir_f2i32(&b, nir_channel(&b, pos, 1));

   nir_variable *stride_in = nir_variable_create(b.shader, nir_var_uniform,
                                                 glsl_int, "stride");
   nir_ssa_def *stride = nir_load_var(&b, stride_in);

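   /* Compute the byte offset of this fragment's data in the linear source
    * plane.  For the 16-bit (UV) plane, the fragment at (x, y) just reads
    * the 4 bytes at x * 4 in row y.  For the 8-bit (Y) plane the 8bpp utile
    * layout (8x8 bytes) differs from the 32bpp one (4x4 pixels), so the
    * offsets interleave bits of x and y: the bytes come from source row
    * 2 * y + ((x & 2) >> 1), at an x offset of ((x & 1) << 2) +
    * ((x & ~3) << 1) (e.g. x = 5 gives 4 + 8 = 12).
    */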
   nir_ssa_def *x_offset;
   nir_ssa_def *y_offset;
   if (cpp == 1) {
           nir_ssa_def *intra_utile_x_offset =
                   nir_ishl(&b, nir_iand(&b, x, one), two);
           nir_ssa_def *inter_utile_x_offset =
                   nir_ishl(&b, nir_iand(&b, x, nir_imm_int(&b, ~3)), one);

           x_offset = nir_iadd(&b,
                               intra_utile_x_offset,
                               inter_utile_x_offset);
           y_offset = nir_imul(&b,
                               nir_iadd(&b,
                                        nir_ishl(&b, y, one),
                                        nir_ushr(&b, nir_iand(&b, x, two), one)),
                               stride);
   } else {
           x_offset = nir_ishl(&b, x, two);
           y_offset = nir_imul(&b, y, stride);
   }

   nir_intrinsic_instr *load =
           nir_intrinsic_instr_create(b.shader, nir_intrinsic_load_ubo);
   load->num_components = 1;
   nir_ssa_dest_init(&load->instr, &load->dest, load->num_components, 32, NULL);
   load->src[0] = nir_src_for_ssa(one);
   load->src[1] = nir_src_for_ssa(nir_iadd(&b, x_offset, y_offset));
   nir_intrinsic_set_align(load, 4, 0);
   nir_intrinsic_set_range_base(load, 0);
   nir_intrinsic_set_range(load, ~0);
   nir_builder_instr_insert(&b, &load->instr);

   nir_store_var(&b, color_out,
                 nir_unpack_unorm_4x8(&b, &load->dest.ssa),
                 0xf);

   struct pipe_shader_state shader_tmpl = {
           .type = PIPE_SHADER_IR_NIR,
           .ir.nir = b.shader,
   };

   *cached_shader = pctx->create_fs_state(pctx, &shader_tmpl);

   return *cached_shader;
}

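/* Handles the blit that turns a raster-order R8/RG88 YUV plane into its
 * T-tiled shadow resource.  The source is bound as a UBO rather than a
 * texture so the fragment shader can address raw bytes, and the tiled
 * destination is rendered as RGBA8888 at a reduced size so that each pixel
 * written carries 4 bytes of the plane.
 */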
static bool
vc4_yuv_blit(struct pipe_context *pctx, const struct pipe_blit_info *info)
{
        struct vc4_context *vc4 = vc4_context(pctx);
        struct vc4_resource *src = vc4_resource(info->src.resource);
        struct vc4_resource *dst = vc4_resource(info->dst.resource);
        bool ok;

        if (src->tiled)
                return false;
        if (src->base.format != PIPE_FORMAT_R8_UNORM &&
            src->base.format != PIPE_FORMAT_R8G8_UNORM)
                return false;

        /* YUV blits always turn raster-order to tiled */
        assert(dst->base.format == src->base.format);
        assert(dst->tiled);

        /* Always 1:1 and at the origin */
        assert(info->src.box.x == 0 && info->dst.box.x == 0);
        assert(info->src.box.y == 0 && info->dst.box.y == 0);
        assert(info->src.box.width == info->dst.box.width);
        assert(info->src.box.height == info->dst.box.height);

        if ((src->slices[info->src.level].offset & 3) ||
            (src->slices[info->src.level].stride & 3)) {
                perf_debug("YUV-blit src texture offset/stride misaligned: 0x%08x/%d\n",
                           src->slices[info->src.level].offset,
                           src->slices[info->src.level].stride);
                goto fallback;
        }

        vc4_blitter_save(vc4);

        /* Create a renderable surface mapping the T-tiled shadow buffer.
         */
        struct pipe_surface dst_tmpl;
        util_blitter_default_dst_texture(&dst_tmpl, info->dst.resource,
                                         info->dst.level, info->dst.box.z);
        dst_tmpl.format = PIPE_FORMAT_RGBA8888_UNORM;
        struct pipe_surface *dst_surf =
                pctx->create_surface(pctx, info->dst.resource, &dst_tmpl);
        if (!dst_surf) {
                fprintf(stderr, "Failed to create YUV dst surface\n");
                util_blitter_unset_running_flag(vc4->blitter);
                return false;
        }
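        /* Each RGBA8888 pixel written covers 4 bytes of the source plane:
         * four Y samples for the 8-bit plane (so both dimensions are
         * halved), or two UV pairs for the 16-bit plane (only the width is
         * halved).
         */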
        dst_surf->width = align(dst_surf->width, 8) / 2;
        if (dst->cpp == 1)
                dst_surf->height /= 2;

        /* Set the constant buffer. */
        uint32_t stride = src->slices[info->src.level].stride;
        struct pipe_constant_buffer cb_uniforms = {
                .user_buffer = &stride,
                .buffer_size = sizeof(stride),
        };
        pctx->set_constant_buffer(pctx, PIPE_SHADER_FRAGMENT, 0, &cb_uniforms);
        struct pipe_constant_buffer cb_src = {
                .buffer = info->src.resource,
                .buffer_offset = src->slices[info->src.level].offset,
                .buffer_size = (src->bo->size -
                                src->slices[info->src.level].offset),
        };
        pctx->set_constant_buffer(pctx, PIPE_SHADER_FRAGMENT, 1, &cb_src);

        /* Unbind the textures, to make sure we don't try to recurse into the
         * shadow blit.
         */
        pctx->set_sampler_views(pctx, PIPE_SHADER_FRAGMENT, 0, 0, NULL);
        pctx->bind_sampler_states(pctx, PIPE_SHADER_FRAGMENT, 0, 0, NULL);

        util_blitter_custom_shader(vc4->blitter, dst_surf,
                                   vc4_get_yuv_vs(pctx),
                                   vc4_get_yuv_fs(pctx, src->cpp));

        util_blitter_restore_textures(vc4->blitter);
        util_blitter_restore_constant_buffer_state(vc4->blitter);
        /* Restore cb1 (util_blitter doesn't handle this one). */
        struct pipe_constant_buffer cb_disabled = { 0 };
        pctx->set_constant_buffer(pctx, PIPE_SHADER_FRAGMENT, 1, &cb_disabled);

        pipe_surface_reference(&dst_surf, NULL);

        return true;

fallback:
        /* Do an immediate SW fallback, since the render blit path
         * would just recurse.
         */
        ok = util_try_blit_via_copy_region(pctx, info);
        assert(ok); (void)ok;

        return true;
}

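/* Falls back to a regular textured draw through util_blitter for anything
 * the RCL and YUV paths can't handle.
 */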
static bool
vc4_render_blit(struct pipe_context *ctx, struct pipe_blit_info *info)
{
        struct vc4_context *vc4 = vc4_context(ctx);

        if (!util_blitter_is_blit_supported(vc4->blitter, info)) {
                fprintf(stderr, "blit unsupported %s -> %s\n",
                    util_format_short_name(info->src.resource->format),
                    util_format_short_name(info->dst.resource->format));
                return false;
        }

        /* Enable the scissor, so we get a minimal set of tiles rendered. */
        if (!info->scissor_enable) {
                info->scissor_enable = true;
                info->scissor.minx = info->dst.box.x;
                info->scissor.miny = info->dst.box.y;
                info->scissor.maxx = info->dst.box.x + info->dst.box.width;
                info->scissor.maxy = info->dst.box.y + info->dst.box.height;
        }

        vc4_blitter_save(vc4);
        util_blitter_blit(vc4->blitter, info);

        return true;
}

/* Optimal hardware path for blitting pixels.
 * Scaling, format conversion, up- and downsampling (resolve) are allowed.
 */
void
vc4_blit(struct pipe_context *pctx, const struct pipe_blit_info *blit_info)
{
        struct pipe_blit_info info = *blit_info;

        if (vc4_yuv_blit(pctx, blit_info))
                return;

        if (vc4_tile_blit(pctx, blit_info))
                return;

        if (info.mask & PIPE_MASK_S) {
                if (util_try_blit_via_copy_region(pctx, &info))
                        return;

                info.mask &= ~PIPE_MASK_S;
                fprintf(stderr, "cannot blit stencil, skipping\n");
        }

        if (vc4_render_blit(pctx, &info))
                return;

        fprintf(stderr, "Unsupported blit\n");
}