1 /*
2  * Copyright © 2015 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
18  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21  * IN THE SOFTWARE.
22  */
23 
24 #include <assert.h>
25 #include <stdbool.h>
26 
27 #include "anv_private.h"
28 #include "vk_format_info.h"
29 #include "vk_util.h"
30 #include "util/fast_idiv_by_const.h"
31 
32 #include "common/gen_aux_map.h"
33 #include "common/gen_l3_config.h"
34 #include "genxml/gen_macros.h"
35 #include "genxml/genX_pack.h"
36 
37 /* We reserve:
38  *    - GPR 14 for secondary command buffer returns
39  *    - GPR 15 for conditional rendering
40  */
41 #define GEN_MI_BUILDER_NUM_ALLOC_GPRS 14
42 #define __gen_get_batch_dwords anv_batch_emit_dwords
43 #define __gen_address_offset anv_address_add
44 #include "common/gen_mi_builder.h"
45 
46 static void genX(flush_pipeline_select)(struct anv_cmd_buffer *cmd_buffer,
47                                         uint32_t pipeline);
48 
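/* Small helper: emit an MI_LOAD_REGISTER_IMM to write an immediate value
 * into a command streamer MMIO register.
 */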
49 static void
50 emit_lri(struct anv_batch *batch, uint32_t reg, uint32_t imm)
51 {
52    anv_batch_emit(batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
53       lri.RegisterOffset   = reg;
54       lri.DataDWord        = imm;
55    }
56 }
57 
58 void
59 genX(cmd_buffer_emit_state_base_address)(struct anv_cmd_buffer *cmd_buffer)
60 {
61    struct anv_device *device = cmd_buffer->device;
62    UNUSED const struct gen_device_info *devinfo = &device->info;
63    uint32_t mocs = isl_mocs(&device->isl_dev, 0);
64 
65    /* If we are emitting a new state base address we probably need to re-emit
66     * binding tables.
67     */
68    cmd_buffer->state.descriptors_dirty |= ~0;
69 
70    /* Emit a render target cache flush.
71     *
72     * This isn't documented anywhere in the PRM.  However, it seems to be
73  * necessary prior to changing the surface state base address.  Without
74     * this, we get GPU hangs when using multi-level command buffers which
75     * clear depth, reset state base address, and then go render stuff.
76     */
77    anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
78       pc.DCFlushEnable = true;
79       pc.RenderTargetCacheFlushEnable = true;
80       pc.CommandStreamerStallEnable = true;
81 #if GEN_GEN >= 12
82       pc.TileCacheFlushEnable = true;
83 #endif
84 #if GEN_GEN == 12
85       /* GEN:BUG:1606662791:
86        *
87        *   Software must program PIPE_CONTROL command with "HDC Pipeline
88        *   Flush" prior to programming of the below two non-pipeline state :
89        *      * STATE_BASE_ADDRESS
90        *      * 3DSTATE_BINDING_TABLE_POOL_ALLOC
91        */
92       if (devinfo->revision == 0 /* A0 */)
93          pc.HDCPipelineFlushEnable = true;
94 #endif
95    }
96 
97 #if GEN_GEN == 12
98    /* GEN:BUG:1607854226:
99     *
100     *  Work around the non-pipelined state not applying in MEDIA/GPGPU pipeline
101     *  mode by putting the pipeline temporarily in 3D mode.
102     */
103    uint32_t gen12_wa_pipeline = cmd_buffer->state.current_pipeline;
104    genX(flush_pipeline_select_3d)(cmd_buffer);
105 #endif
106 
107    anv_batch_emit(&cmd_buffer->batch, GENX(STATE_BASE_ADDRESS), sba) {
108       sba.GeneralStateBaseAddress = (struct anv_address) { NULL, 0 };
109       sba.GeneralStateMOCS = mocs;
110       sba.GeneralStateBaseAddressModifyEnable = true;
111 
112       sba.StatelessDataPortAccessMOCS = mocs;
113 
114       sba.SurfaceStateBaseAddress =
115          anv_cmd_buffer_surface_base_address(cmd_buffer);
116       sba.SurfaceStateMOCS = mocs;
117       sba.SurfaceStateBaseAddressModifyEnable = true;
118 
119       sba.DynamicStateBaseAddress =
120          (struct anv_address) { device->dynamic_state_pool.block_pool.bo, 0 };
121       sba.DynamicStateMOCS = mocs;
122       sba.DynamicStateBaseAddressModifyEnable = true;
123 
124       sba.IndirectObjectBaseAddress = (struct anv_address) { NULL, 0 };
125       sba.IndirectObjectMOCS = mocs;
126       sba.IndirectObjectBaseAddressModifyEnable = true;
127 
128       sba.InstructionBaseAddress =
129          (struct anv_address) { device->instruction_state_pool.block_pool.bo, 0 };
130       sba.InstructionMOCS = mocs;
131       sba.InstructionBaseAddressModifyEnable = true;
132 
133 #  if (GEN_GEN >= 8)
134       /* Broadwell requires that we specify a buffer size for a bunch of
135        * these fields.  However, since we will be growing the BOs live, we
136        * just set them all to the maximum.
137        */
138       sba.GeneralStateBufferSize       = 0xfffff;
139       sba.IndirectObjectBufferSize     = 0xfffff;
140       if (device->physical->use_softpin) {
141          /* With softpin, we use fixed addresses so we actually know how big
142           * our base addresses are.
143           */
144          sba.DynamicStateBufferSize    = DYNAMIC_STATE_POOL_SIZE / 4096;
145          sba.InstructionBufferSize     = INSTRUCTION_STATE_POOL_SIZE / 4096;
146       } else {
147          sba.DynamicStateBufferSize    = 0xfffff;
148          sba.InstructionBufferSize     = 0xfffff;
149       }
150       sba.GeneralStateBufferSizeModifyEnable    = true;
151       sba.IndirectObjectBufferSizeModifyEnable  = true;
152       sba.DynamicStateBufferSizeModifyEnable    = true;
153       sba.InstructionBuffersizeModifyEnable     = true;
154 #  else
155       /* On gen7, we have upper bounds instead.  According to the docs,
156        * setting an upper bound of zero means that no bounds checking is
157        * performed so, in theory, we should be able to leave them zero.
158        * However, border color is broken and the GPU bounds-checks anyway.
159        * To avoid this and other potential problems, we may as well set it
160        * for everything.
161        */
162       sba.GeneralStateAccessUpperBound =
163          (struct anv_address) { .bo = NULL, .offset = 0xfffff000 };
164       sba.GeneralStateAccessUpperBoundModifyEnable = true;
165       sba.DynamicStateAccessUpperBound =
166          (struct anv_address) { .bo = NULL, .offset = 0xfffff000 };
167       sba.DynamicStateAccessUpperBoundModifyEnable = true;
168       sba.InstructionAccessUpperBound =
169          (struct anv_address) { .bo = NULL, .offset = 0xfffff000 };
170       sba.InstructionAccessUpperBoundModifyEnable = true;
171 #  endif
172 #  if (GEN_GEN >= 9)
173       if (cmd_buffer->device->physical->use_softpin) {
174          sba.BindlessSurfaceStateBaseAddress = (struct anv_address) {
175             .bo = device->surface_state_pool.block_pool.bo,
176             .offset = 0,
177          };
178          sba.BindlessSurfaceStateSize = (1 << 20) - 1;
179       } else {
180          sba.BindlessSurfaceStateBaseAddress = ANV_NULL_ADDRESS;
181          sba.BindlessSurfaceStateSize = 0;
182       }
183       sba.BindlessSurfaceStateMOCS = mocs;
184       sba.BindlessSurfaceStateBaseAddressModifyEnable = true;
185 #  endif
186 #  if (GEN_GEN >= 10)
187       sba.BindlessSamplerStateBaseAddress = (struct anv_address) { NULL, 0 };
188       sba.BindlessSamplerStateMOCS = mocs;
189       sba.BindlessSamplerStateBaseAddressModifyEnable = true;
190       sba.BindlessSamplerStateBufferSize = 0;
191 #  endif
192    }
193 
194 #if GEN_GEN == 12
195    /* GEN:BUG:1607854226:
196     *
197     *  Put the pipeline back into its current mode.
198     */
199    if (gen12_wa_pipeline != UINT32_MAX)
200       genX(flush_pipeline_select)(cmd_buffer, gen12_wa_pipeline);
201 #endif
202 
203    /* After re-setting the surface state base address, we have to do some
204     * cache flushing so that the sampler engine will pick up the new
205     * SURFACE_STATE objects and binding tables. From the Broadwell PRM,
206     * Shared Function > 3D Sampler > State > State Caching (page 96):
207     *
208     *    Coherency with system memory in the state cache, like the texture
209     *    cache is handled partially by software. It is expected that the
210     *    command stream or shader will issue Cache Flush operation or
211     *    Cache_Flush sampler message to ensure that the L1 cache remains
212     *    coherent with system memory.
213     *
214     *    [...]
215     *
216     *    Whenever the value of the Dynamic_State_Base_Addr,
217     *    Surface_State_Base_Addr are altered, the L1 state cache must be
218     *    invalidated to ensure the new surface or sampler state is fetched
219     *    from system memory.
220     *
221     * The PIPE_CONTROL command has a "State Cache Invalidation Enable" bit
222     * which, according to the PIPE_CONTROL instruction documentation in the
223     * Broadwell PRM:
224     *
225     *    Setting this bit is independent of any other bit in this packet.
226     *    This bit controls the invalidation of the L1 and L2 state caches
227     *    at the top of the pipe i.e. at the parsing time.
228     *
229     * Unfortunately, experimentation seems to indicate that state cache
230     * invalidation through a PIPE_CONTROL does nothing whatsoever with
231     * regard to surface state and binding tables.  Instead, it seems that
232     * invalidating the texture cache is what is actually needed.
233     *
234     * XXX:  As far as we have been able to determine through
235     * experimentation, flushing the texture cache appears to be
236     * sufficient.  The theory here is that all of the sampling/rendering
237     * units cache the binding table in the texture cache.  However, we have
238     * yet to be able to actually confirm this.
239     */
240    anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
241       pc.TextureCacheInvalidationEnable = true;
242       pc.ConstantCacheInvalidationEnable = true;
243       pc.StateCacheInvalidationEnable = true;
244    }
245 }
246 
247 static void
248 add_surface_reloc(struct anv_cmd_buffer *cmd_buffer,
249                   struct anv_state state, struct anv_address addr)
250 {
251    const struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;
252 
253    VkResult result =
254       anv_reloc_list_add(&cmd_buffer->surface_relocs, &cmd_buffer->pool->alloc,
255                          state.offset + isl_dev->ss.addr_offset,
256                          addr.bo, addr.offset, NULL);
257    if (result != VK_SUCCESS)
258       anv_batch_set_error(&cmd_buffer->batch, result);
259 }
260 
261 static void
262 add_surface_state_relocs(struct anv_cmd_buffer *cmd_buffer,
263                          struct anv_surface_state state)
264 {
265    const struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;
266 
267    assert(!anv_address_is_null(state.address));
268    add_surface_reloc(cmd_buffer, state.state, state.address);
269 
270    if (!anv_address_is_null(state.aux_address)) {
271       VkResult result =
272          anv_reloc_list_add(&cmd_buffer->surface_relocs,
273                             &cmd_buffer->pool->alloc,
274                             state.state.offset + isl_dev->ss.aux_addr_offset,
275                             state.aux_address.bo,
276                             state.aux_address.offset,
277                             NULL);
278       if (result != VK_SUCCESS)
279          anv_batch_set_error(&cmd_buffer->batch, result);
280    }
281 
282    if (!anv_address_is_null(state.clear_address)) {
283       VkResult result =
284          anv_reloc_list_add(&cmd_buffer->surface_relocs,
285                             &cmd_buffer->pool->alloc,
286                             state.state.offset +
287                             isl_dev->ss.clear_color_state_offset,
288                             state.clear_address.bo,
289                             state.clear_address.offset,
290                             NULL);
291       if (result != VK_SUCCESS)
292          anv_batch_set_error(&cmd_buffer->batch, result);
293    }
294 }
295 
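/* Returns true when packing the clear color through the surface format and
 * through the (swizzled) view format produces different bits, i.e. a resolve
 * of this clear would require a real format conversion.
 */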
296 static bool
297 isl_color_value_requires_conversion(union isl_color_value color,
298                                     const struct isl_surf *surf,
299                                     const struct isl_view *view)
300 {
301    if (surf->format == view->format && isl_swizzle_is_identity(view->swizzle))
302       return false;
303 
304    uint32_t surf_pack[4] = { 0, 0, 0, 0 };
305    isl_color_value_pack(&color, surf->format, surf_pack);
306 
307    uint32_t view_pack[4] = { 0, 0, 0, 0 };
308    union isl_color_value swiz_color =
309       isl_color_value_swizzle_inv(color, view->swizzle);
310    isl_color_value_pack(&swiz_color, view->format, view_pack);
311 
312    return memcmp(surf_pack, view_pack, sizeof(surf_pack)) != 0;
313 }
314 
315 static bool
316 anv_can_fast_clear_color_view(struct anv_device * device,
317                               struct anv_image_view *iview,
318                               VkImageLayout layout,
319                               union isl_color_value clear_color,
320                               uint32_t num_layers,
321                               VkRect2D render_area)
322 {
323    if (iview->planes[0].isl.base_array_layer >=
324        anv_image_aux_layers(iview->image, VK_IMAGE_ASPECT_COLOR_BIT,
325                             iview->planes[0].isl.base_level))
326       return false;
327 
328    /* Start by getting the fast clear type.  We use the first subpass
329     * layout here because we don't want to fast-clear if the first subpass
330     * to use the attachment can't handle fast-clears.
331     */
332    enum anv_fast_clear_type fast_clear_type =
333       anv_layout_to_fast_clear_type(&device->info, iview->image,
334                                     VK_IMAGE_ASPECT_COLOR_BIT,
335                                     layout);
336    switch (fast_clear_type) {
337    case ANV_FAST_CLEAR_NONE:
338       return false;
339    case ANV_FAST_CLEAR_DEFAULT_VALUE:
340       if (!isl_color_value_is_zero(clear_color, iview->planes[0].isl.format))
341          return false;
342       break;
343    case ANV_FAST_CLEAR_ANY:
344       break;
345    }
346 
347    /* Potentially, we could do partial fast-clears but doing so has crazy
348     * alignment restrictions.  It's easier to just restrict to full size
349     * fast clears for now.
350     */
351    if (render_area.offset.x != 0 ||
352        render_area.offset.y != 0 ||
353        render_area.extent.width != iview->extent.width ||
354        render_area.extent.height != iview->extent.height)
355       return false;
356 
357    /* On Broadwell and earlier, we can only handle 0/1 clear colors */
358    if (GEN_GEN <= 8 &&
359        !isl_color_value_is_zero_one(clear_color, iview->planes[0].isl.format))
360       return false;
361 
362    /* If the clear color is one that would require non-trivial format
363     * conversion on resolve, we don't bother with the fast clear.  This
364     * shouldn't be common as most clear colors are 0/1 and the most common
365     * format re-interpretation is for sRGB.
366     */
367    if (isl_color_value_requires_conversion(clear_color,
368                                            &iview->image->planes[0].surface.isl,
369                                            &iview->planes[0].isl)) {
370       anv_perf_warn(device, iview,
371                     "Cannot fast-clear to colors which would require "
372                     "format conversion on resolve");
373       return false;
374    }
375 
376    /* We only allow fast clears to the first slice of an image (level 0,
377     * layer 0) and only for the entire slice.  This guarantees us that, at
378     * any given time, there is only one clear color on any given image.
379     * At the time of our testing (Jan 17, 2018), there
380     * were no known applications which would benefit from fast-clearing
381     * more than just the first slice.
382     */
383    if (iview->planes[0].isl.base_level > 0 ||
384        iview->planes[0].isl.base_array_layer > 0) {
385       anv_perf_warn(device, iview->image,
386                     "Rendering with multi-lod or multi-layer framebuffer "
387                     "with LOAD_OP_LOAD and baseMipLevel > 0 or "
388                     "baseArrayLayer > 0.  Not fast clearing.");
389       return false;
390    }
391 
392    if (num_layers > 1) {
393       anv_perf_warn(device, iview->image,
394                     "Rendering to a multi-layer framebuffer with "
395                     "LOAD_OP_CLEAR.  Only fast-clearing the first slice");
396    }
397 
398    return true;
399 }
400 
401 static bool
402 anv_can_hiz_clear_ds_view(struct anv_device *device,
403                           struct anv_image_view *iview,
404                           VkImageLayout layout,
405                           VkImageAspectFlags clear_aspects,
406                           float depth_clear_value,
407                           VkRect2D render_area)
408 {
409    /* We don't do any HiZ or depth fast-clears on gen7 yet */
410    if (GEN_GEN == 7)
411       return false;
412 
413    /* If we're just clearing stencil, we can always HiZ clear */
414    if (!(clear_aspects & VK_IMAGE_ASPECT_DEPTH_BIT))
415       return true;
416 
417    /* We must have depth in order to have HiZ */
418    if (!(iview->image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT))
419       return false;
420 
421    const enum isl_aux_usage clear_aux_usage =
422       anv_layout_to_aux_usage(&device->info, iview->image,
423                               VK_IMAGE_ASPECT_DEPTH_BIT,
424                               VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT,
425                               layout);
426    if (!blorp_can_hiz_clear_depth(&device->info,
427                                   &iview->image->planes[0].surface.isl,
428                                   clear_aux_usage,
429                                   iview->planes[0].isl.base_level,
430                                   iview->planes[0].isl.base_array_layer,
431                                   render_area.offset.x,
432                                   render_area.offset.y,
433                                   render_area.offset.x +
434                                   render_area.extent.width,
435                                   render_area.offset.y +
436                                   render_area.extent.height))
437       return false;
438 
439    if (depth_clear_value != ANV_HZ_FC_VAL)
440       return false;
441 
442    /* Only gen9+ supports returning ANV_HZ_FC_VAL when sampling a fast-cleared
443     * portion of a HiZ buffer. Testing has revealed that Gen8 only supports
444     * returning 0.0f. Gens prior to gen8 do not support this feature at all.
445     */
446    if (GEN_GEN == 8 && anv_can_sample_with_hiz(&device->info, iview->image))
447       return false;
448 
449    /* If we got here, then we can fast clear */
450    return true;
451 }
452 
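/* Force a single, volatile read of x so the compiler cannot cache or re-read
 * the value; used below to snapshot the live AUX-TT entry exactly once.
 */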
453 #define READ_ONCE(x) (*(volatile __typeof__(x) *)&(x))
454 
455 #if GEN_GEN == 12
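/* Point the gen12 AUX translation table at the given miplevel/layer range:
 * every 64K block of the main surface gets its format bits (and, for CCS,
 * the valid bit) written via MI stores from the command streamer.
 */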
456 static void
457 anv_image_init_aux_tt(struct anv_cmd_buffer *cmd_buffer,
458                       const struct anv_image *image,
459                       VkImageAspectFlagBits aspect,
460                       uint32_t base_level, uint32_t level_count,
461                       uint32_t base_layer, uint32_t layer_count)
462 {
463    uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
464 
465    const struct anv_surface *surface = &image->planes[plane].surface;
466    uint64_t base_address =
467       anv_address_physical(anv_address_add(image->planes[plane].address,
468                                            surface->offset));
469 
470    const struct isl_surf *isl_surf = &image->planes[plane].surface.isl;
471    uint64_t format_bits = gen_aux_map_format_bits_for_isl_surf(isl_surf);
472 
473    /* We're about to live-update the AUX-TT.  We really don't want anyone else
474     * trying to read it while we're doing this.  We could probably get away
475     * with not having this stall in some cases if we were really careful but
476     * it's better to play it safe.  Full stall the GPU.
477     */
478    cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_END_OF_PIPE_SYNC_BIT;
479    genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
480 
481    struct gen_mi_builder b;
482    gen_mi_builder_init(&b, &cmd_buffer->batch);
483 
484    for (uint32_t a = 0; a < layer_count; a++) {
485       const uint32_t layer = base_layer + a;
486 
487       uint64_t start_offset_B = UINT64_MAX, end_offset_B = 0;
488       for (uint32_t l = 0; l < level_count; l++) {
489          const uint32_t level = base_level + l;
490 
491          uint32_t logical_array_layer, logical_z_offset_px;
492          if (image->type == VK_IMAGE_TYPE_3D) {
493             logical_array_layer = 0;
494 
495             /* If the given miplevel does not have this layer, then any higher
496              * miplevels won't either because miplevels only get smaller the
497              * higher the LOD.
498              */
499             assert(layer < image->extent.depth);
500             if (layer >= anv_minify(image->extent.depth, level))
501                break;
502             logical_z_offset_px = layer;
503          } else {
504             assert(layer < image->array_size);
505             logical_array_layer = layer;
506             logical_z_offset_px = 0;
507          }
508 
509          uint32_t slice_start_offset_B, slice_end_offset_B;
510          isl_surf_get_image_range_B_tile(isl_surf, level,
511                                          logical_array_layer,
512                                          logical_z_offset_px,
513                                          &slice_start_offset_B,
514                                          &slice_end_offset_B);
515 
516          start_offset_B = MIN2(start_offset_B, slice_start_offset_B);
517          end_offset_B = MAX2(end_offset_B, slice_end_offset_B);
518       }
519 
520       /* Aux operates 64K at a time */
521       start_offset_B = align_down_u64(start_offset_B, 64 * 1024);
522       end_offset_B = align_u64(end_offset_B, 64 * 1024);
523 
524       for (uint64_t offset = start_offset_B;
525            offset < end_offset_B; offset += 64 * 1024) {
526          uint64_t address = base_address + offset;
527 
528          uint64_t aux_entry_addr64, *aux_entry_map;
529          aux_entry_map = gen_aux_map_get_entry(cmd_buffer->device->aux_map_ctx,
530                                                address, &aux_entry_addr64);
531 
532          assert(cmd_buffer->device->physical->use_softpin);
533          struct anv_address aux_entry_address = {
534             .bo = NULL,
535             .offset = aux_entry_addr64,
536          };
537 
538          const uint64_t old_aux_entry = READ_ONCE(*aux_entry_map);
539          uint64_t new_aux_entry =
540             (old_aux_entry & GEN_AUX_MAP_ADDRESS_MASK) | format_bits;
541 
542          if (isl_aux_usage_has_ccs(image->planes[plane].aux_usage))
543             new_aux_entry |= GEN_AUX_MAP_ENTRY_VALID_BIT;
544 
545          gen_mi_store(&b, gen_mi_mem64(aux_entry_address),
546                           gen_mi_imm(new_aux_entry));
547       }
548    }
549 
550    cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_AUX_TABLE_INVALIDATE_BIT;
551 }
552 #endif /* GEN_GEN == 12 */
553 
554 /* Transitions a HiZ-enabled depth buffer from one layout to another. Unless
555  * the initial layout is undefined, the HiZ buffer and depth buffer will
556  * represent the same data at the end of this operation.
557  */
558 static void
559 transition_depth_buffer(struct anv_cmd_buffer *cmd_buffer,
560                         const struct anv_image *image,
561                         uint32_t base_layer, uint32_t layer_count,
562                         VkImageLayout initial_layout,
563                         VkImageLayout final_layout,
564                         bool will_full_fast_clear)
565 {
566    uint32_t depth_plane =
567       anv_image_aspect_to_plane(image->aspects, VK_IMAGE_ASPECT_DEPTH_BIT);
568    if (image->planes[depth_plane].aux_usage == ISL_AUX_USAGE_NONE)
569       return;
570 
571 #if GEN_GEN == 12
572    if ((initial_layout == VK_IMAGE_LAYOUT_UNDEFINED ||
573         initial_layout == VK_IMAGE_LAYOUT_PREINITIALIZED) &&
574        cmd_buffer->device->physical->has_implicit_ccs &&
575        cmd_buffer->device->info.has_aux_map) {
576       anv_image_init_aux_tt(cmd_buffer, image, VK_IMAGE_ASPECT_DEPTH_BIT,
577                             0, 1, base_layer, layer_count);
578    }
579 #endif
580 
581    /* If will_full_fast_clear is set, the caller promises to fast-clear the
582     * largest portion of the specified range that it can.  For depth images,
583     * that means the entire image because we don't support multi-LOD HiZ.
584     */
585    assert(image->planes[0].surface.isl.levels == 1);
586    if (will_full_fast_clear)
587       return;
588 
589    const enum isl_aux_state initial_state =
590       anv_layout_to_aux_state(&cmd_buffer->device->info, image,
591                               VK_IMAGE_ASPECT_DEPTH_BIT,
592                               initial_layout);
593    const enum isl_aux_state final_state =
594       anv_layout_to_aux_state(&cmd_buffer->device->info, image,
595                               VK_IMAGE_ASPECT_DEPTH_BIT,
596                               final_layout);
597 
598    const bool initial_depth_valid =
599       isl_aux_state_has_valid_primary(initial_state);
600    const bool initial_hiz_valid =
601       isl_aux_state_has_valid_aux(initial_state);
602    const bool final_needs_depth =
603       isl_aux_state_has_valid_primary(final_state);
604    const bool final_needs_hiz =
605       isl_aux_state_has_valid_aux(final_state);
606 
607    /* Getting into the pass-through state for Depth is tricky and involves
608     * both a resolve and an ambiguate.  We don't handle that state right now
609     * as anv_layout_to_aux_state never returns it.
610     */
611    assert(final_state != ISL_AUX_STATE_PASS_THROUGH);
612 
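   /* A full resolve makes the main depth surface valid from HiZ; an ambiguate
    * re-initializes HiZ when only the depth surface carries valid data.
    */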
613    if (final_needs_depth && !initial_depth_valid) {
614       assert(initial_hiz_valid);
615       anv_image_hiz_op(cmd_buffer, image, VK_IMAGE_ASPECT_DEPTH_BIT,
616                        0, base_layer, layer_count, ISL_AUX_OP_FULL_RESOLVE);
617    } else if (final_needs_hiz && !initial_hiz_valid) {
618       assert(initial_depth_valid);
619       anv_image_hiz_op(cmd_buffer, image, VK_IMAGE_ASPECT_DEPTH_BIT,
620                        0, base_layer, layer_count, ISL_AUX_OP_AMBIGUATE);
621    }
622 }
623 
624 static inline bool
625 vk_image_layout_stencil_write_optimal(VkImageLayout layout)
626 {
627    return layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL ||
628           layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL ||
629           layout == VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL_KHR;
630 }
631 
632 /* Transitions a stencil buffer from one layout to another.  On gen7, this
633  * may require copying to a texture-compatible shadow surface; on gen12, it
634  * may require initializing the stencil aux data and performing a stencil clear.
635  */
636 static void
637 transition_stencil_buffer(struct anv_cmd_buffer *cmd_buffer,
638                           const struct anv_image *image,
639                           uint32_t base_level, uint32_t level_count,
640                           uint32_t base_layer, uint32_t layer_count,
641                           VkImageLayout initial_layout,
642                           VkImageLayout final_layout,
643                           bool will_full_fast_clear)
644 {
645 #if GEN_GEN == 7
646    uint32_t plane = anv_image_aspect_to_plane(image->aspects,
647                                               VK_IMAGE_ASPECT_STENCIL_BIT);
648 
649    /* On gen7, we have to store a texturable version of the stencil buffer in
650     * a shadow whenever VK_IMAGE_USAGE_SAMPLED_BIT is set and copy back and
651     * forth at strategic points. Stencil writes are only allowed in the
652     * following layouts:
653     *
654     *  - VK_IMAGE_LAYOUT_GENERAL
655     *  - VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL
656     *  - VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL
657     *  - VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL
658     *  - VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL_KHR
659     *
660     * For general, we have no nice opportunity to transition so we do the copy
661     * to the shadow unconditionally at the end of the subpass. For transfer
662     * destinations, we can update it as part of the transfer op. For the other
663     * layouts, we delay the copy until a transition into some other layout.
664     */
665    if (image->planes[plane].shadow_surface.isl.size_B > 0 &&
666        vk_image_layout_stencil_write_optimal(initial_layout) &&
667        !vk_image_layout_stencil_write_optimal(final_layout)) {
668       anv_image_copy_to_shadow(cmd_buffer, image,
669                                VK_IMAGE_ASPECT_STENCIL_BIT,
670                                base_level, level_count,
671                                base_layer, layer_count);
672    }
673 #elif GEN_GEN == 12
674    uint32_t plane = anv_image_aspect_to_plane(image->aspects,
675                                               VK_IMAGE_ASPECT_STENCIL_BIT);
676    if (image->planes[plane].aux_usage == ISL_AUX_USAGE_NONE)
677       return;
678 
679    if ((initial_layout == VK_IMAGE_LAYOUT_UNDEFINED ||
680         initial_layout == VK_IMAGE_LAYOUT_PREINITIALIZED) &&
681        cmd_buffer->device->physical->has_implicit_ccs &&
682        cmd_buffer->device->info.has_aux_map) {
683       anv_image_init_aux_tt(cmd_buffer, image, VK_IMAGE_ASPECT_STENCIL_BIT,
684                             base_level, level_count, base_layer, layer_count);
685 
686       /* If will_full_fast_clear is set, the caller promises to fast-clear the
687        * largest portion of the specified range that it can.
688        */
689       if (will_full_fast_clear)
690          return;
691 
692       for (uint32_t l = 0; l < level_count; l++) {
693          const uint32_t level = base_level + l;
694          const VkRect2D clear_rect = {
695             .offset.x = 0,
696             .offset.y = 0,
697             .extent.width = anv_minify(image->extent.width, level),
698             .extent.height = anv_minify(image->extent.height, level),
699          };
700 
701          uint32_t aux_layers =
702             anv_image_aux_layers(image, VK_IMAGE_ASPECT_STENCIL_BIT, level);
703          uint32_t level_layer_count =
704             MIN2(layer_count, aux_layers - base_layer);
705 
706          /* From Bspec's 3DSTATE_STENCIL_BUFFER_BODY > Stencil Compression
707           * Enable:
708           *
709           *    "When enabled, Stencil Buffer needs to be initialized via
710           *    stencil clear (HZ_OP) before any renderpass."
711           */
712          anv_image_hiz_clear(cmd_buffer, image, VK_IMAGE_ASPECT_STENCIL_BIT,
713                              level, base_layer, level_layer_count,
714                              clear_rect, 0 /* Stencil clear value */);
715       }
716    }
717 #endif
718 }
719 
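/* Command streamer MMIO registers used to construct MI_PREDICATE conditions
 * for the predicated resolves below.
 */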
720 #define MI_PREDICATE_SRC0    0x2400
721 #define MI_PREDICATE_SRC1    0x2408
722 #define MI_PREDICATE_RESULT  0x2418
723 
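/* Write the per-(level, layer) compression-tracking dword for a CCS_E image:
 * UINT32_MAX marks the slice as compressed, 0 as resolved.
 */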
724 static void
725 set_image_compressed_bit(struct anv_cmd_buffer *cmd_buffer,
726                          const struct anv_image *image,
727                          VkImageAspectFlagBits aspect,
728                          uint32_t level,
729                          uint32_t base_layer, uint32_t layer_count,
730                          bool compressed)
731 {
732    uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
733 
734    /* We only have compression tracking for CCS_E */
735    if (image->planes[plane].aux_usage != ISL_AUX_USAGE_CCS_E)
736       return;
737 
738    for (uint32_t a = 0; a < layer_count; a++) {
739       uint32_t layer = base_layer + a;
740       anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_DATA_IMM), sdi) {
741          sdi.Address = anv_image_get_compression_state_addr(cmd_buffer->device,
742                                                             image, aspect,
743                                                             level, layer);
744          sdi.ImmediateData = compressed ? UINT32_MAX : 0;
745       }
746    }
747 }
748 
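/* Record the image's current fast-clear type in its driver-managed state
 * buffer so later resolves can be predicated on it.
 */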
749 static void
750 set_image_fast_clear_state(struct anv_cmd_buffer *cmd_buffer,
751                            const struct anv_image *image,
752                            VkImageAspectFlagBits aspect,
753                            enum anv_fast_clear_type fast_clear)
754 {
755    anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_DATA_IMM), sdi) {
756       sdi.Address = anv_image_get_fast_clear_type_addr(cmd_buffer->device,
757                                                        image, aspect);
758       sdi.ImmediateData = fast_clear;
759    }
760 
761    /* Whenever we have fast-clear, we consider that slice to be compressed.
762     * This makes building predicates much easier.
763     */
764    if (fast_clear != ANV_FAST_CLEAR_NONE)
765       set_image_compressed_bit(cmd_buffer, image, aspect, 0, 0, 1, true);
766 }
767 
768 /* This is only really practical on haswell and above because it requires
769  * MI math in order to get it correct.
770  */
771 #if GEN_GEN >= 8 || GEN_IS_HASWELL
772 static void
773 anv_cmd_compute_resolve_predicate(struct anv_cmd_buffer *cmd_buffer,
774                                   const struct anv_image *image,
775                                   VkImageAspectFlagBits aspect,
776                                   uint32_t level, uint32_t array_layer,
777                                   enum isl_aux_op resolve_op,
778                                   enum anv_fast_clear_type fast_clear_supported)
779 {
780    struct gen_mi_builder b;
781    gen_mi_builder_init(&b, &cmd_buffer->batch);
782 
783    const struct gen_mi_value fast_clear_type =
784       gen_mi_mem32(anv_image_get_fast_clear_type_addr(cmd_buffer->device,
785                                                       image, aspect));
786 
787    if (resolve_op == ISL_AUX_OP_FULL_RESOLVE) {
788       /* In this case, we're doing a full resolve which means we want the
789        * resolve to happen if any compression (including fast-clears) is
790        * present.
791        *
792        * In order to simplify the logic a bit, we make the assumption that,
793        * if the first slice has been fast-cleared, it is also marked as
794        * compressed.  See also set_image_fast_clear_state.
795        */
796       const struct gen_mi_value compression_state =
797          gen_mi_mem32(anv_image_get_compression_state_addr(cmd_buffer->device,
798                                                            image, aspect,
799                                                            level, array_layer));
800       gen_mi_store(&b, gen_mi_reg64(MI_PREDICATE_SRC0),
801                        compression_state);
802       gen_mi_store(&b, compression_state, gen_mi_imm(0));
803 
804       if (level == 0 && array_layer == 0) {
805          /* If the predicate is true, we want to write 0 to the fast clear type
806           * and, if it's false, leave it alone.  We can do this by writing
807           *
808           * clear_type = clear_type & ~predicate;
809           */
810          struct gen_mi_value new_fast_clear_type =
811             gen_mi_iand(&b, fast_clear_type,
812                             gen_mi_inot(&b, gen_mi_reg64(MI_PREDICATE_SRC0)));
813          gen_mi_store(&b, fast_clear_type, new_fast_clear_type);
814       }
815    } else if (level == 0 && array_layer == 0) {
816       /* In this case, we are doing a partial resolve to get rid of fast-clear
817        * colors.  We don't care about the compression state but we do care
818        * about how much fast clear is allowed by the final layout.
819        */
820       assert(resolve_op == ISL_AUX_OP_PARTIAL_RESOLVE);
821       assert(fast_clear_supported < ANV_FAST_CLEAR_ANY);
822 
823       /* We need to compute (fast_clear_supported < image->fast_clear) */
824       struct gen_mi_value pred =
825          gen_mi_ult(&b, gen_mi_imm(fast_clear_supported), fast_clear_type);
826       gen_mi_store(&b, gen_mi_reg64(MI_PREDICATE_SRC0),
827                        gen_mi_value_ref(&b, pred));
828 
829       /* If the predicate is true, we want to write 0 to the fast clear type
830        * and, if it's false, leave it alone.  We can do this by writing
831        *
832        * clear_type = clear_type & ~predicate;
833        */
834       struct gen_mi_value new_fast_clear_type =
835          gen_mi_iand(&b, fast_clear_type, gen_mi_inot(&b, pred));
836       gen_mi_store(&b, fast_clear_type, new_fast_clear_type);
837    } else {
838       /* In this case, we're trying to do a partial resolve on a slice that
839        * doesn't have clear color.  There's nothing to do.
840        */
841       assert(resolve_op == ISL_AUX_OP_PARTIAL_RESOLVE);
842       return;
843    }
844 
845    /* Set src1 to 0 and use a != condition */
846    gen_mi_store(&b, gen_mi_reg64(MI_PREDICATE_SRC1), gen_mi_imm(0));
847 
848    anv_batch_emit(&cmd_buffer->batch, GENX(MI_PREDICATE), mip) {
849       mip.LoadOperation    = LOAD_LOADINV;
850       mip.CombineOperation = COMBINE_SET;
851       mip.CompareOperation = COMPARE_SRCS_EQUAL;
852    }
853 }
854 #endif /* GEN_GEN >= 8 || GEN_IS_HASWELL */
855 
856 #if GEN_GEN <= 8
857 static void
858 anv_cmd_simple_resolve_predicate(struct anv_cmd_buffer *cmd_buffer,
859                                  const struct anv_image *image,
860                                  VkImageAspectFlagBits aspect,
861                                  uint32_t level, uint32_t array_layer,
862                                  enum isl_aux_op resolve_op,
863                                  enum anv_fast_clear_type fast_clear_supported)
864 {
865    struct gen_mi_builder b;
866    gen_mi_builder_init(&b, &cmd_buffer->batch);
867 
868    struct gen_mi_value fast_clear_type_mem =
869       gen_mi_mem32(anv_image_get_fast_clear_type_addr(cmd_buffer->device,
870                                                       image, aspect));
871 
872    /* This only works for partial resolves and only when the clear color is
873     * all or nothing.  On the upside, this emits less command streamer code
874     * and works on Ivybridge and Bay Trail.
875     */
876    assert(resolve_op == ISL_AUX_OP_PARTIAL_RESOLVE);
877    assert(fast_clear_supported != ANV_FAST_CLEAR_ANY);
878 
879    /* We don't support fast clears on anything other than the first slice. */
880    if (level > 0 || array_layer > 0)
881       return;
882 
883    /* On gen8, we don't have a concept of default clear colors because we
884     * can't sample from CCS surfaces.  It's enough to just load the fast clear
885     * state into the predicate register.
886     */
887    gen_mi_store(&b, gen_mi_reg64(MI_PREDICATE_SRC0), fast_clear_type_mem);
888    gen_mi_store(&b, gen_mi_reg64(MI_PREDICATE_SRC1), gen_mi_imm(0));
889    gen_mi_store(&b, fast_clear_type_mem, gen_mi_imm(0));
890 
891    anv_batch_emit(&cmd_buffer->batch, GENX(MI_PREDICATE), mip) {
892       mip.LoadOperation    = LOAD_LOADINV;
893       mip.CombineOperation = COMBINE_SET;
894       mip.CompareOperation = COMPARE_SRCS_EQUAL;
895    }
896 }
897 #endif /* GEN_GEN <= 8 */
898 
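/* Emit a predicate based on the tracked fast-clear/compression state, then
 * perform a CCS resolve that only takes effect when the predicate is set.
 */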
899 static void
900 anv_cmd_predicated_ccs_resolve(struct anv_cmd_buffer *cmd_buffer,
901                                const struct anv_image *image,
902                                enum isl_format format,
903                                struct isl_swizzle swizzle,
904                                VkImageAspectFlagBits aspect,
905                                uint32_t level, uint32_t array_layer,
906                                enum isl_aux_op resolve_op,
907                                enum anv_fast_clear_type fast_clear_supported)
908 {
909    const uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
910 
911 #if GEN_GEN >= 9
912    anv_cmd_compute_resolve_predicate(cmd_buffer, image,
913                                      aspect, level, array_layer,
914                                      resolve_op, fast_clear_supported);
915 #else /* GEN_GEN <= 8 */
916    anv_cmd_simple_resolve_predicate(cmd_buffer, image,
917                                     aspect, level, array_layer,
918                                     resolve_op, fast_clear_supported);
919 #endif
920 
921    /* CCS_D only supports full resolves and BLORP will assert on us if we try
922     * to do a partial resolve on a CCS_D surface.
923     */
924    if (resolve_op == ISL_AUX_OP_PARTIAL_RESOLVE &&
925        image->planes[plane].aux_usage == ISL_AUX_USAGE_CCS_D)
926       resolve_op = ISL_AUX_OP_FULL_RESOLVE;
927 
928    anv_image_ccs_op(cmd_buffer, image, format, swizzle, aspect,
929                     level, array_layer, 1, resolve_op, NULL, true);
930 }
931 
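/* Same idea as the CCS path but for MCS surfaces: only predicated partial
 * resolves of color attachments are supported, and only on Haswell and later.
 */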
932 static void
933 anv_cmd_predicated_mcs_resolve(struct anv_cmd_buffer *cmd_buffer,
934                                const struct anv_image *image,
935                                enum isl_format format,
936                                struct isl_swizzle swizzle,
937                                VkImageAspectFlagBits aspect,
938                                uint32_t array_layer,
939                                enum isl_aux_op resolve_op,
940                                enum anv_fast_clear_type fast_clear_supported)
941 {
942    assert(aspect == VK_IMAGE_ASPECT_COLOR_BIT);
943    assert(resolve_op == ISL_AUX_OP_PARTIAL_RESOLVE);
944 
945 #if GEN_GEN >= 8 || GEN_IS_HASWELL
946    anv_cmd_compute_resolve_predicate(cmd_buffer, image,
947                                      aspect, 0, array_layer,
948                                      resolve_op, fast_clear_supported);
949 
950    anv_image_mcs_op(cmd_buffer, image, format, swizzle, aspect,
951                     array_layer, 1, resolve_op, NULL, true);
952 #else
953    unreachable("MCS resolves are unsupported on Ivybridge and Bay Trail");
954 #endif
955 }
956 
957 void
958 genX(cmd_buffer_mark_image_written)(struct anv_cmd_buffer *cmd_buffer,
959                                     const struct anv_image *image,
960                                     VkImageAspectFlagBits aspect,
961                                     enum isl_aux_usage aux_usage,
962                                     uint32_t level,
963                                     uint32_t base_layer,
964                                     uint32_t layer_count)
965 {
966    /* The aspect must be exactly one of the image aspects. */
967    assert(util_bitcount(aspect) == 1 && (aspect & image->aspects));
968 
969    /* The only compression types with more than just fast-clears are MCS,
970     * CCS_E, and HiZ.  With HiZ we just trust the layout and don't actually
971     * track the current fast-clear and compression state.  This leaves us
972     * with just MCS and CCS_E.
973     */
974    if (aux_usage != ISL_AUX_USAGE_CCS_E &&
975        aux_usage != ISL_AUX_USAGE_MCS)
976       return;
977 
978    set_image_compressed_bit(cmd_buffer, image, aspect,
979                             level, base_layer, layer_count, true);
980 }
981 
982 static void
983 init_fast_clear_color(struct anv_cmd_buffer *cmd_buffer,
984                       const struct anv_image *image,
985                       VkImageAspectFlagBits aspect)
986 {
987    assert(cmd_buffer && image);
988    assert(image->aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV);
989 
990    set_image_fast_clear_state(cmd_buffer, image, aspect,
991                               ANV_FAST_CLEAR_NONE);
992 
993    /* Initialize the struct fields that are accessed for fast-clears so that
994     * the HW restrictions on the field values are satisfied.
995     */
996    struct anv_address addr =
997       anv_image_get_clear_color_addr(cmd_buffer->device, image, aspect);
998 
999    if (GEN_GEN >= 9) {
1000       const struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;
1001       const unsigned num_dwords = GEN_GEN >= 10 ?
1002                                   isl_dev->ss.clear_color_state_size / 4 :
1003                                   isl_dev->ss.clear_value_size / 4;
1004       for (unsigned i = 0; i < num_dwords; i++) {
1005          anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_DATA_IMM), sdi) {
1006             sdi.Address = addr;
1007             sdi.Address.offset += i * 4;
1008             sdi.ImmediateData = 0;
1009          }
1010       }
1011    } else {
1012       anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_DATA_IMM), sdi) {
1013          sdi.Address = addr;
1014          if (GEN_GEN >= 8 || GEN_IS_HASWELL) {
1015             /* Pre-SKL, the dword containing the clear values also contains
1016              * other fields, so we need to initialize those fields to match the
1017              * values that would be in a color attachment.
1018              */
1019             sdi.ImmediateData = ISL_CHANNEL_SELECT_RED   << 25 |
1020                                 ISL_CHANNEL_SELECT_GREEN << 22 |
1021                                 ISL_CHANNEL_SELECT_BLUE  << 19 |
1022                                 ISL_CHANNEL_SELECT_ALPHA << 16;
1023          } else if (GEN_GEN == 7) {
1024             /* On IVB, the dword containing the clear values also contains
1025              * other fields that must be zero or can be zero.
1026              */
1027             sdi.ImmediateData = 0;
1028          }
1029       }
1030    }
1031 }
1032 
1033 /* Copy the fast-clear value dword(s) between a surface state object and an
1034  * image's fast clear state buffer.
1035  */
1036 static void
1037 genX(copy_fast_clear_dwords)(struct anv_cmd_buffer *cmd_buffer,
1038                              struct anv_state surface_state,
1039                              const struct anv_image *image,
1040                              VkImageAspectFlagBits aspect,
1041                              bool copy_from_surface_state)
1042 {
1043    assert(cmd_buffer && image);
1044    assert(image->aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV);
1045 
1046    struct anv_address ss_clear_addr = {
1047       .bo = cmd_buffer->device->surface_state_pool.block_pool.bo,
1048       .offset = surface_state.offset +
1049                 cmd_buffer->device->isl_dev.ss.clear_value_offset,
1050    };
1051    const struct anv_address entry_addr =
1052       anv_image_get_clear_color_addr(cmd_buffer->device, image, aspect);
1053    unsigned copy_size = cmd_buffer->device->isl_dev.ss.clear_value_size;
1054 
1055 #if GEN_GEN == 7
1056    /* On gen7, the combination of commands used here (MI_LOAD_REGISTER_MEM
1057     * and MI_STORE_REGISTER_MEM) can cause GPU hangs if any rendering is
1058     * in-flight when they are issued even if the memory touched is not
1059     * currently active for rendering.  The weird bit is that it is not the
1060     * MI_LOAD/STORE_REGISTER_MEM commands which hang but rather the in-flight
1061     * rendering hangs such that the next stalling command after the
1062     * MI_LOAD/STORE_REGISTER_MEM commands will catch the hang.
1063     *
1064     * It is unclear exactly why this hang occurs.  Both MI commands come with
1065     * warnings about the 3D pipeline but that doesn't seem to fully explain
1066     * it.  My (Jason's) best theory is that it has something to do with the
1067     * fact that we're using a GPU state register as our temporary and that
1068     * something with reading/writing it is causing problems.
1069     *
1070     * In order to work around this issue, we emit a PIPE_CONTROL with the
1071     * command streamer stall bit set.
1072     */
1073    cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_CS_STALL_BIT;
1074    genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
1075 #endif
1076 
1077    struct gen_mi_builder b;
1078    gen_mi_builder_init(&b, &cmd_buffer->batch);
1079 
1080    if (copy_from_surface_state) {
1081       gen_mi_memcpy(&b, entry_addr, ss_clear_addr, copy_size);
1082    } else {
1083       gen_mi_memcpy(&b, ss_clear_addr, entry_addr, copy_size);
1084 
1085       /* Updating a surface state object may require that the state cache be
1086        * invalidated. From the SKL PRM, Shared Functions -> State -> State
1087        * Caching:
1088        *
1089        *    Whenever the RENDER_SURFACE_STATE object in memory pointed to by
1090        *    the Binding Table Pointer (BTP) and Binding Table Index (BTI) is
1091        *    modified [...], the L1 state cache must be invalidated to ensure
1092        *    the new surface or sampler state is fetched from system memory.
1093        *
1094        * In testing, SKL doesn't actually seem to need this, but HSW does.
1095        */
1096       cmd_buffer->state.pending_pipe_bits |=
1097          ANV_PIPE_STATE_CACHE_INVALIDATE_BIT;
1098    }
1099 }
1100 
1101 /**
1102  * @brief Transitions a color buffer from one layout to another.
1103  *
1104  * See section 6.1.1. Image Layout Transitions of the Vulkan 1.0.50 spec for
1105  * more information.
1106  *
1107  * @param level_count VK_REMAINING_MIP_LEVELS isn't supported.
1108  * @param layer_count VK_REMAINING_ARRAY_LAYERS isn't supported. For 3D images,
1109  *                    this represents the maximum layers to transition at each
1110  *                    specified miplevel.
1111  */
1112 static void
1113 transition_color_buffer(struct anv_cmd_buffer *cmd_buffer,
1114                         const struct anv_image *image,
1115                         VkImageAspectFlagBits aspect,
1116                         const uint32_t base_level, uint32_t level_count,
1117                         uint32_t base_layer, uint32_t layer_count,
1118                         VkImageLayout initial_layout,
1119                         VkImageLayout final_layout,
1120                         bool will_full_fast_clear)
1121 {
1122    struct anv_device *device = cmd_buffer->device;
1123    const struct gen_device_info *devinfo = &device->info;
1124    /* Validate the inputs. */
1125    assert(cmd_buffer);
1126    assert(image && image->aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV);
1127    /* These values aren't supported for simplicity's sake. */
1128    assert(level_count != VK_REMAINING_MIP_LEVELS &&
1129           layer_count != VK_REMAINING_ARRAY_LAYERS);
1130    /* Ensure the subresource range is valid. */
1131    UNUSED uint64_t last_level_num = base_level + level_count;
1132    const uint32_t max_depth = anv_minify(image->extent.depth, base_level);
1133    UNUSED const uint32_t image_layers = MAX2(image->array_size, max_depth);
1134    assert((uint64_t)base_layer + layer_count  <= image_layers);
1135    assert(last_level_num <= image->levels);
1136    /* The spec disallows these final layouts. */
1137    assert(final_layout != VK_IMAGE_LAYOUT_UNDEFINED &&
1138           final_layout != VK_IMAGE_LAYOUT_PREINITIALIZED);
1139 
1140    /* No work is necessary if the layout stays the same or if this subresource
1141     * range lacks auxiliary data.
1142     */
1143    if (initial_layout == final_layout)
1144       return;
1145 
1146    uint32_t plane = anv_image_aspect_to_plane(image->aspects, aspect);
1147 
1148    if (image->planes[plane].shadow_surface.isl.size_B > 0 &&
1149        final_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) {
1150       /* This surface is a linear compressed image with a tiled shadow surface
1151        * for texturing.  The client is about to use it in READ_ONLY_OPTIMAL so
1152        * we need to ensure the shadow copy is up-to-date.
1153        */
1154       assert(image->aspects == VK_IMAGE_ASPECT_COLOR_BIT);
1155       assert(image->planes[plane].surface.isl.tiling == ISL_TILING_LINEAR);
1156       assert(image->planes[plane].shadow_surface.isl.tiling != ISL_TILING_LINEAR);
1157       assert(isl_format_is_compressed(image->planes[plane].surface.isl.format));
1158       assert(plane == 0);
1159       anv_image_copy_to_shadow(cmd_buffer, image,
1160                                VK_IMAGE_ASPECT_COLOR_BIT,
1161                                base_level, level_count,
1162                                base_layer, layer_count);
1163    }
1164 
1165    if (base_layer >= anv_image_aux_layers(image, aspect, base_level))
1166       return;
1167 
1168    assert(image->planes[plane].surface.isl.tiling != ISL_TILING_LINEAR);
1169 
1170    if (initial_layout == VK_IMAGE_LAYOUT_UNDEFINED ||
1171        initial_layout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
1172 #if GEN_GEN == 12
1173       if (device->physical->has_implicit_ccs && devinfo->has_aux_map) {
1174          anv_image_init_aux_tt(cmd_buffer, image, aspect,
1175                                base_level, level_count,
1176                                base_layer, layer_count);
1177       }
1178 #else
1179       assert(!(device->physical->has_implicit_ccs && devinfo->has_aux_map));
1180 #endif
1181 
1182       /* A subresource in the undefined layout may have been aliased and
1183        * populated with any arrangement of bits. Therefore, we must initialize
1184        * the related aux buffer and clear buffer entry with desirable values.
1185        * An initial layout of PREINITIALIZED is the same as UNDEFINED for
1186        * images with VK_IMAGE_TILING_OPTIMAL.
1187        *
1188        * Initialize the relevant clear buffer entries.
1189        */
1190       if (base_level == 0 && base_layer == 0)
1191          init_fast_clear_color(cmd_buffer, image, aspect);
1192 
1193       /* Initialize the aux buffers to enable correct rendering.  In order to
1194        * ensure that things such as storage images work correctly, aux buffers
1195        * need to be initialized to valid data.
1196        *
1197        * Having an aux buffer with invalid data is a problem for two reasons:
1198        *
1199        *  1) Having an invalid value in the buffer can confuse the hardware.
1200        *     For instance, with CCS_E on SKL, a two-bit CCS value of 2 is
1201        *     invalid and leads to the hardware doing strange things.  It
1202        *     doesn't hang as far as we can tell but rendering corruption can
1203        *     occur.
1204        *
1205        *  2) If this transition is into the GENERAL layout and we then use the
1206        *     image as a storage image, then we must have the aux buffer in the
1207        *     pass-through state so that, if we then go to texture from the
1208        *     image, we get the results of our storage image writes and not the
1209        *     fast clear color or other random data.
1210        *
1211        * For CCS both of the problems above are real demonstrable issues.  In
1212        * that case, the only thing we can do is to perform an ambiguate to
1213        * transition the aux surface into the pass-through state.
1214        *
1215        * For MCS, (2) is never an issue because we don't support multisampled
1216        * storage images.  In theory, issue (1) is a problem with MCS but we've
1217        * never seen it in the wild.  For 4x and 16x, all bit patterns could, in
1218        * theory, be interpreted as something but we don't know that all bit
1219        * patterns are actually valid.  For 2x and 8x, you could easily end up
1220        * with the MCS referring to an invalid plane because not all bits of
1221        * the MCS value are actually used.  Even though we've never seen issues
1222        * in the wild, it's best to play it safe and initialize the MCS.  We
1223        * can use a fast-clear for MCS because we only ever touch from render
1224        * and texture (no image load store).
1225        */
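      /* Editorial summary of the choice made below (not additional driver
       * logic, just a condensed sketch of it):
       *
       *    if (image->samples == 1)
       *       per level/layer: anv_image_ccs_op(..., ISL_AUX_OP_AMBIGUATE)
       *          -> puts the CCS in the pass-through state
       *    else
       *       anv_image_mcs_op(..., ISL_AUX_OP_FAST_CLEAR)
       *          -> defines the MCS for the single LOD
       */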
1226       if (image->samples == 1) {
1227          for (uint32_t l = 0; l < level_count; l++) {
1228             const uint32_t level = base_level + l;
1229 
1230             uint32_t aux_layers = anv_image_aux_layers(image, aspect, level);
1231             if (base_layer >= aux_layers)
1232                break; /* We will only get fewer layers as level increases */
1233             uint32_t level_layer_count =
1234                MIN2(layer_count, aux_layers - base_layer);
1235 
1236             /* If will_full_fast_clear is set, the caller promises to
1237              * fast-clear the largest portion of the specified range as it can.
1238              * For color images, that means only the first LOD and array slice.
1239              */
1240             if (level == 0 && base_layer == 0 && will_full_fast_clear) {
1241                base_layer++;
1242                level_layer_count--;
1243                if (level_layer_count == 0)
1244                   continue;
1245             }
1246 
1247             anv_image_ccs_op(cmd_buffer, image,
1248                              image->planes[plane].surface.isl.format,
1249                              ISL_SWIZZLE_IDENTITY,
1250                              aspect, level, base_layer, level_layer_count,
1251                              ISL_AUX_OP_AMBIGUATE, NULL, false);
1252 
1253             if (image->planes[plane].aux_usage == ISL_AUX_USAGE_CCS_E) {
1254                set_image_compressed_bit(cmd_buffer, image, aspect,
1255                                         level, base_layer, level_layer_count,
1256                                         false);
1257             }
1258          }
1259       } else {
1260          if (image->samples == 4 || image->samples == 16) {
1261             anv_perf_warn(cmd_buffer->device, image,
1262                           "Doing a potentially unnecessary fast-clear to "
1263                           "define an MCS buffer.");
1264          }
1265 
1266          /* If will_full_fast_clear is set, the caller promises to fast-clear
1267           * the largest portion of the specified range as it can.
1268           */
1269          if (will_full_fast_clear)
1270             return;
1271 
1272          assert(base_level == 0 && level_count == 1);
1273          anv_image_mcs_op(cmd_buffer, image,
1274                           image->planes[plane].surface.isl.format,
1275                           ISL_SWIZZLE_IDENTITY,
1276                           aspect, base_layer, layer_count,
1277                           ISL_AUX_OP_FAST_CLEAR, NULL, false);
1278       }
1279       return;
1280    }
1281 
1282    const enum isl_aux_usage initial_aux_usage =
1283       anv_layout_to_aux_usage(devinfo, image, aspect, 0, initial_layout);
1284    const enum isl_aux_usage final_aux_usage =
1285       anv_layout_to_aux_usage(devinfo, image, aspect, 0, final_layout);
1286 
1287    /* The current code assumes that there is no mixing of CCS_E and CCS_D.
1288     * We can handle transitions between CCS_D/E to and from NONE.  What we
1289     * don't yet handle is switching between CCS_E and CCS_D within a given
1290     * image.  Doing so in a performant way requires more detailed aux state
1291     * tracking such as what is done in i965.  For now, just assume that we
1292     * only have one type of compression.
1293     */
1294    assert(initial_aux_usage == ISL_AUX_USAGE_NONE ||
1295           final_aux_usage == ISL_AUX_USAGE_NONE ||
1296           initial_aux_usage == final_aux_usage);
1297 
1298    /* If initial aux usage is NONE, there is nothing to resolve */
1299    if (initial_aux_usage == ISL_AUX_USAGE_NONE)
1300       return;
1301 
1302    enum isl_aux_op resolve_op = ISL_AUX_OP_NONE;
1303 
1304    /* If the initial layout supports more fast clear than the final layout
1305     * then we need at least a partial resolve.
1306     */
1307    const enum anv_fast_clear_type initial_fast_clear =
1308       anv_layout_to_fast_clear_type(devinfo, image, aspect, initial_layout);
1309    const enum anv_fast_clear_type final_fast_clear =
1310       anv_layout_to_fast_clear_type(devinfo, image, aspect, final_layout);
1311    if (final_fast_clear < initial_fast_clear)
1312       resolve_op = ISL_AUX_OP_PARTIAL_RESOLVE;
1313 
1314    if (initial_aux_usage == ISL_AUX_USAGE_CCS_E &&
1315        final_aux_usage != ISL_AUX_USAGE_CCS_E)
1316       resolve_op = ISL_AUX_OP_FULL_RESOLVE;
1317 
1318    if (resolve_op == ISL_AUX_OP_NONE)
1319       return;
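   /* Editorial summary of the decision just made above:
    *
    *    final fast-clear support < initial fast-clear support -> PARTIAL_RESOLVE
    *    initial usage CCS_E but final usage is not CCS_E      -> FULL_RESOLVE
    *    otherwise                                             -> nothing to do
    */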
1320 
1321    /* Perform a resolve to synchronize data between the main and aux buffer.
1322     * Before we begin, we must satisfy the cache flushing requirement specified
1323     * in the Sky Lake PRM Vol. 7, "MCS Buffer for Render Target(s)":
1324     *
1325     *    Any transition from any value in {Clear, Render, Resolve} to a
1326     *    different value in {Clear, Render, Resolve} requires end of pipe
1327     *    synchronization.
1328     *
1329     * We perform a flush of the write cache before and after the clear and
1330     * resolve operations to meet this requirement.
1331     *
1332     * Unlike other drawing, fast clear operations are not properly
1333     * synchronized. The first PIPE_CONTROL here likely ensures that the
1334     * contents of the previous render or clear hit the render target before we
1335     * resolve and the second likely ensures that the resolve is complete before
1336     * we do any more rendering or clearing.
1337     */
1338    cmd_buffer->state.pending_pipe_bits |=
1339       ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_END_OF_PIPE_SYNC_BIT;
1340 
1341    for (uint32_t l = 0; l < level_count; l++) {
1342       uint32_t level = base_level + l;
1343 
1344       uint32_t aux_layers = anv_image_aux_layers(image, aspect, level);
1345       if (base_layer >= aux_layers)
1346          break; /* We will only get fewer layers as level increases */
1347       uint32_t level_layer_count =
1348          MIN2(layer_count, aux_layers - base_layer);
1349 
1350       for (uint32_t a = 0; a < level_layer_count; a++) {
1351          uint32_t array_layer = base_layer + a;
1352 
1353          /* If will_full_fast_clear is set, the caller promises to fast-clear
1354           * the largest portion of the specified range as it can.  For color
1355           * images, that means only the first LOD and array slice.
1356           */
1357          if (level == 0 && array_layer == 0 && will_full_fast_clear)
1358             continue;
1359 
1360          if (image->samples == 1) {
1361             anv_cmd_predicated_ccs_resolve(cmd_buffer, image,
1362                                            image->planes[plane].surface.isl.format,
1363                                            ISL_SWIZZLE_IDENTITY,
1364                                            aspect, level, array_layer, resolve_op,
1365                                            final_fast_clear);
1366          } else {
1367             /* We only support fast-clear on the first layer so partial
1368              * resolves should not be used on other layers as they will use
1369              * the clear color stored in memory that is only valid for layer0.
1370              */
1371             if (resolve_op == ISL_AUX_OP_PARTIAL_RESOLVE &&
1372                 array_layer != 0)
1373                continue;
1374 
1375             anv_cmd_predicated_mcs_resolve(cmd_buffer, image,
1376                                            image->planes[plane].surface.isl.format,
1377                                            ISL_SWIZZLE_IDENTITY,
1378                                            aspect, array_layer, resolve_op,
1379                                            final_fast_clear);
1380          }
1381       }
1382    }
1383 
1384    cmd_buffer->state.pending_pipe_bits |=
1385       ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT | ANV_PIPE_END_OF_PIPE_SYNC_BIT;
1386 }
1387 
1388 static VkResult
1389 genX(cmd_buffer_setup_attachments)(struct anv_cmd_buffer *cmd_buffer,
1390                                    const struct anv_render_pass *pass,
1391                                    const struct anv_framebuffer *framebuffer,
1392                                    const VkRenderPassBeginInfo *begin)
1393 {
1394    struct anv_cmd_state *state = &cmd_buffer->state;
1395 
1396    vk_free(&cmd_buffer->pool->alloc, state->attachments);
1397 
1398    if (pass->attachment_count > 0) {
1399       state->attachments = vk_zalloc(&cmd_buffer->pool->alloc,
1400                                      pass->attachment_count *
1401                                           sizeof(state->attachments[0]),
1402                                      8, VK_SYSTEM_ALLOCATION_SCOPE_OBJECT);
1403       if (state->attachments == NULL) {
1404          /* Propagate VK_ERROR_OUT_OF_HOST_MEMORY to vkEndCommandBuffer */
1405          return anv_batch_set_error(&cmd_buffer->batch,
1406                                     VK_ERROR_OUT_OF_HOST_MEMORY);
1407       }
1408    } else {
1409       state->attachments = NULL;
1410    }
1411 
1412    const VkRenderPassAttachmentBeginInfoKHR *attach_begin =
1413       vk_find_struct_const(begin, RENDER_PASS_ATTACHMENT_BEGIN_INFO_KHR);
1414    if (begin && !attach_begin)
1415       assert(pass->attachment_count == framebuffer->attachment_count);
1416 
1417    for (uint32_t i = 0; i < pass->attachment_count; ++i) {
1418       if (attach_begin && attach_begin->attachmentCount != 0) {
1419          assert(attach_begin->attachmentCount == pass->attachment_count);
1420          ANV_FROM_HANDLE(anv_image_view, iview, attach_begin->pAttachments[i]);
1421          state->attachments[i].image_view = iview;
1422       } else if (framebuffer && i < framebuffer->attachment_count) {
1423          state->attachments[i].image_view = framebuffer->attachments[i];
1424       } else {
1425          state->attachments[i].image_view = NULL;
1426       }
1427    }
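   /* For reference, attach_begin is non-NULL when the application uses
    * VK_KHR_imageless_framebuffer and provides the views at begin time.  An
    * application-side sketch (not driver code) of what produces that path:
    *
    *    VkRenderPassAttachmentBeginInfoKHR attach_info = {
    *       .sType = VK_STRUCTURE_TYPE_RENDER_PASS_ATTACHMENT_BEGIN_INFO_KHR,
    *       .attachmentCount = attachment_count,
    *       .pAttachments = image_views,
    *    };
    *    VkRenderPassBeginInfo begin_info = {
    *       .sType = VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
    *       .pNext = &attach_info,
    *       ...
    *    };
    */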
1428 
1429    if (begin) {
1430       for (uint32_t i = 0; i < pass->attachment_count; ++i) {
1431          const struct anv_render_pass_attachment *pass_att = &pass->attachments[i];
1432          struct anv_attachment_state *att_state = &state->attachments[i];
1433          VkImageAspectFlags att_aspects = vk_format_aspects(pass_att->format);
1434          VkImageAspectFlags clear_aspects = 0;
1435          VkImageAspectFlags load_aspects = 0;
1436 
1437          if (att_aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) {
1438             /* color attachment */
1439             if (pass_att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
1440                clear_aspects |= VK_IMAGE_ASPECT_COLOR_BIT;
1441             } else if (pass_att->load_op == VK_ATTACHMENT_LOAD_OP_LOAD) {
1442                load_aspects |= VK_IMAGE_ASPECT_COLOR_BIT;
1443             }
1444          } else {
1445             /* depthstencil attachment */
1446             if (att_aspects & VK_IMAGE_ASPECT_DEPTH_BIT) {
1447                if (pass_att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
1448                   clear_aspects |= VK_IMAGE_ASPECT_DEPTH_BIT;
1449                } else if (pass_att->load_op == VK_ATTACHMENT_LOAD_OP_LOAD) {
1450                   load_aspects |= VK_IMAGE_ASPECT_DEPTH_BIT;
1451                }
1452             }
1453             if (att_aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
1454                if (pass_att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_CLEAR) {
1455                   clear_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
1456                } else if (pass_att->stencil_load_op == VK_ATTACHMENT_LOAD_OP_LOAD) {
1457                   load_aspects |= VK_IMAGE_ASPECT_STENCIL_BIT;
1458                }
1459             }
1460          }
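         /* Worked example (editorial): a depth/stencil attachment with
          * load_op = CLEAR and stencil_load_op = LOAD ends up with
          * clear_aspects = DEPTH and load_aspects = STENCIL, so only the
          * depth aspect is recorded as a pending clear below while stencil
          * is recorded as a pending load.
          */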
1461 
1462          att_state->current_layout = pass_att->initial_layout;
1463          att_state->current_stencil_layout = pass_att->stencil_initial_layout;
1464          att_state->pending_clear_aspects = clear_aspects;
1465          att_state->pending_load_aspects = load_aspects;
1466          if (clear_aspects)
1467             att_state->clear_value = begin->pClearValues[i];
1468 
1469          struct anv_image_view *iview = state->attachments[i].image_view;
1470          anv_assert(iview->vk_format == pass_att->format);
1471 
1472          const uint32_t num_layers = iview->planes[0].isl.array_len;
1473          att_state->pending_clear_views = (1 << num_layers) - 1;
1474 
1475          /* This will be initialized after the first subpass transition. */
1476          att_state->aux_usage = ISL_AUX_USAGE_NONE;
1477 
1478          att_state->fast_clear = false;
1479          if (clear_aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) {
1480             assert(clear_aspects == VK_IMAGE_ASPECT_COLOR_BIT);
1481             att_state->fast_clear =
1482                anv_can_fast_clear_color_view(cmd_buffer->device, iview,
1483                                              pass_att->first_subpass_layout,
1484                                              vk_to_isl_color(att_state->clear_value.color),
1485                                              framebuffer->layers,
1486                                              begin->renderArea);
1487          } else if (clear_aspects & (VK_IMAGE_ASPECT_DEPTH_BIT |
1488                                      VK_IMAGE_ASPECT_STENCIL_BIT)) {
1489             att_state->fast_clear =
1490                anv_can_hiz_clear_ds_view(cmd_buffer->device, iview,
1491                                          pass_att->first_subpass_layout,
1492                                          clear_aspects,
1493                                          att_state->clear_value.depthStencil.depth,
1494                                          begin->renderArea);
1495          }
1496       }
1497    }
1498 
1499    return VK_SUCCESS;
1500 }
1501 
1502 /**
1503  * Allocate and assign the attachment surface states used by the given subpass.
1504  */
1505 static VkResult
1506 genX(cmd_buffer_alloc_att_surf_states)(struct anv_cmd_buffer *cmd_buffer,
1507                                        const struct anv_render_pass *pass,
1508                                        const struct anv_subpass *subpass)
1509 {
1510    const struct isl_device *isl_dev = &cmd_buffer->device->isl_dev;
1511    struct anv_cmd_state *state = &cmd_buffer->state;
1512 
1513    /* Reserve one for the NULL state. */
1514    unsigned num_states = 1;
1515    for (uint32_t i = 0; i < subpass->attachment_count; i++) {
1516       uint32_t att = subpass->attachments[i].attachment;
1517       if (att == VK_ATTACHMENT_UNUSED)
1518          continue;
1519 
1520       assert(att < pass->attachment_count);
1521       if (!vk_format_is_color(pass->attachments[att].format))
1522          continue;
1523 
1524       const VkImageUsageFlagBits att_usage = subpass->attachments[i].usage;
1525       assert(util_bitcount(att_usage) == 1);
1526 
1527       if (att_usage == VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT ||
1528           att_usage == VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT)
1529          num_states++;
1530    }
1531 
1532    const uint32_t ss_stride = align_u32(isl_dev->ss.size, isl_dev->ss.align);
1533    state->attachment_states =
1534       anv_state_stream_alloc(&cmd_buffer->surface_state_stream,
1535                              num_states * ss_stride, isl_dev->ss.align);
1536    if (state->attachment_states.map == NULL) {
1537       return anv_batch_set_error(&cmd_buffer->batch,
1538                                  VK_ERROR_OUT_OF_DEVICE_MEMORY);
1539    }
1540 
1541    struct anv_state next_state = state->attachment_states;
1542    next_state.alloc_size = isl_dev->ss.size;
1543 
1544    state->null_surface_state = next_state;
1545    next_state.offset += ss_stride;
1546    next_state.map += ss_stride;
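   /* Editorial example of the suballocation being set up here, with assumed
    * sizes (isl_dev->ss.size = 64, ss.align = 64, so ss_stride = 64): a
    * subpass with one color and one input attachment counted num_states = 3
    * above, and the single stream allocation is carved up as
    *
    *    [ null surface | color att | input att ]
    *      +0             +64         +128
    *
    * with next_state advancing by ss_stride each time a slot is handed out
    * in the loop below.
    */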
1547 
1548    for (uint32_t i = 0; i < subpass->attachment_count; i++) {
1549       uint32_t att = subpass->attachments[i].attachment;
1550       if (att == VK_ATTACHMENT_UNUSED)
1551          continue;
1552 
1553       assert(att < pass->attachment_count);
1554       if (!vk_format_is_color(pass->attachments[att].format))
1555          continue;
1556 
1557       const VkImageUsageFlagBits att_usage = subpass->attachments[i].usage;
1558       assert(util_bitcount(att_usage) == 1);
1559 
1560       if (att_usage == VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT)
1561          state->attachments[att].color.state = next_state;
1562       else if (att_usage == VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT)
1563          state->attachments[att].input.state = next_state;
1564       else
1565          continue;
1566 
1568       next_state.offset += ss_stride;
1569       next_state.map += ss_stride;
1570    }
1571 
1572    assert(next_state.offset == state->attachment_states.offset +
1573                                state->attachment_states.alloc_size);
1574 
1575    return VK_SUCCESS;
1576 }
1577 
1578 VkResult
1579 genX(BeginCommandBuffer)(
1580     VkCommandBuffer                             commandBuffer,
1581     const VkCommandBufferBeginInfo*             pBeginInfo)
1582 {
1583    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1584 
1585    /* If this is the first vkBeginCommandBuffer, we must *initialize* the
1586     * command buffer's state. Otherwise, we must *reset* its state. In both
1587     * cases we reset it.
1588     *
1589     * From the Vulkan 1.0 spec:
1590     *
1591     *    If a command buffer is in the executable state and the command buffer
1592     *    was allocated from a command pool with the
1593     *    VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT flag set, then
1594     *    vkBeginCommandBuffer implicitly resets the command buffer, behaving
1595     *    as if vkResetCommandBuffer had been called with
1596     *    VK_COMMAND_BUFFER_RESET_RELEASE_RESOURCES_BIT not set. It then puts
1597     *    the command buffer in the recording state.
1598     */
1599    anv_cmd_buffer_reset(cmd_buffer);
1600 
1601    cmd_buffer->usage_flags = pBeginInfo->flags;
1602 
1603    /* VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT must be ignored for
1604     * primary level command buffers.
1605     *
1606     * From the Vulkan 1.0 spec:
1607     *
1608     *    VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT specifies that a
1609     *    secondary command buffer is considered to be entirely inside a render
1610     *    pass. If this is a primary command buffer, then this bit is ignored.
1611     */
1612    if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY)
1613       cmd_buffer->usage_flags &= ~VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT;
1614 
1615    genX(cmd_buffer_emit_state_base_address)(cmd_buffer);
1616 
1617    /* We sometimes store vertex data in the dynamic state buffer for blorp
1618     * operations and our dynamic state stream may re-use data from previous
1619     * command buffers.  In order to prevent stale cache data, we flush the VF
1620     * cache.  We could do this on every blorp call but that's not really
1621     * needed as all of the data will get written by the CPU prior to the GPU
1622     * executing anything.  The chances are fairly high that they will use
1623     * blorp at least once per primary command buffer so it shouldn't be
1624     * wasted.
1625     *
1626     * There is also a workaround on gen8 which requires us to invalidate the
1627     * VF cache occasionally.  It's easier if we can assume we start with a
1628     * fresh cache (See also genX(cmd_buffer_set_binding_for_gen8_vb_flush).)
1629     */
1630    cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_VF_CACHE_INVALIDATE_BIT;
1631 
1632    /* Re-emit the aux table register in every command buffer.  This way we're
1633     * ensured that we have the table even if this command buffer doesn't
1634     * initialize any images.
1635     */
1636    if (cmd_buffer->device->info.has_aux_map)
1637       cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_AUX_TABLE_INVALIDATE_BIT;
1638 
1639    /* We send an "Indirect State Pointers Disable" packet at
1640     * EndCommandBuffer, so all push constant packets are ignored during a
1641     * context restore. Documentation says after that command, we need to
1642     * emit push constants again before any rendering operation. So we
1643     * flag them dirty here to make sure they get emitted.
1644     */
1645    cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_ALL_GRAPHICS;
1646 
1647    VkResult result = VK_SUCCESS;
1648    if (cmd_buffer->usage_flags &
1649        VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
1650       assert(pBeginInfo->pInheritanceInfo);
1651       ANV_FROM_HANDLE(anv_render_pass, pass,
1652                       pBeginInfo->pInheritanceInfo->renderPass);
1653       struct anv_subpass *subpass =
1654          &pass->subpasses[pBeginInfo->pInheritanceInfo->subpass];
1655       ANV_FROM_HANDLE(anv_framebuffer, framebuffer,
1656                       pBeginInfo->pInheritanceInfo->framebuffer);
1657 
1658       cmd_buffer->state.pass = pass;
1659       cmd_buffer->state.subpass = subpass;
1660 
1661       /* This is optional in the inheritance info. */
1662       cmd_buffer->state.framebuffer = framebuffer;
1663 
1664       result = genX(cmd_buffer_setup_attachments)(cmd_buffer, pass,
1665                                                   framebuffer, NULL);
1666       if (result != VK_SUCCESS)
1667          return result;
1668 
1669       result = genX(cmd_buffer_alloc_att_surf_states)(cmd_buffer, pass,
1670                                                       subpass);
1671       if (result != VK_SUCCESS)
1672          return result;
1673 
1674       /* Record that HiZ is enabled if we can. */
1675       if (cmd_buffer->state.framebuffer) {
1676          const struct anv_image_view * const iview =
1677             anv_cmd_buffer_get_depth_stencil_view(cmd_buffer);
1678 
1679          if (iview) {
1680             VkImageLayout layout =
1681                 cmd_buffer->state.subpass->depth_stencil_attachment->layout;
1682 
1683             enum isl_aux_usage aux_usage =
1684                anv_layout_to_aux_usage(&cmd_buffer->device->info, iview->image,
1685                                        VK_IMAGE_ASPECT_DEPTH_BIT,
1686                                        VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT,
1687                                        layout);
1688 
1689             cmd_buffer->state.hiz_enabled = isl_aux_usage_has_hiz(aux_usage);
1690          }
1691       }
1692 
1693       cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_RENDER_TARGETS;
1694    }
1695 
1696 #if GEN_GEN >= 8 || GEN_IS_HASWELL
1697    if (cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY) {
1698       const VkCommandBufferInheritanceConditionalRenderingInfoEXT *conditional_rendering_info =
1699          vk_find_struct_const(pBeginInfo->pInheritanceInfo->pNext, COMMAND_BUFFER_INHERITANCE_CONDITIONAL_RENDERING_INFO_EXT);
1700 
1701       /* If secondary buffer supports conditional rendering
1702        * we should emit commands as if conditional rendering is enabled.
1703        */
1704       cmd_buffer->state.conditional_render_enabled =
1705          conditional_rendering_info && conditional_rendering_info->conditionalRenderingEnable;
1706    }
1707 #endif
1708 
1709    return result;
1710 }
1711 
1712 /* From the PRM, Volume 2a:
1713  *
1714  *    "Indirect State Pointers Disable
1715  *
1716  *    At the completion of the post-sync operation associated with this pipe
1717  *    control packet, the indirect state pointers in the hardware are
1718  *    considered invalid; the indirect pointers are not saved in the context.
1719  *    If any new indirect state commands are executed in the command stream
1720  *    while the pipe control is pending, the new indirect state commands are
1721  *    preserved.
1722  *
1723  *    [DevIVB+]: Using Invalidate State Pointer (ISP) only inhibits context
1724  *    restoring of Push Constant (3DSTATE_CONSTANT_*) commands. Push Constant
1725  *    commands are only considered as Indirect State Pointers. Once ISP is
1726  *    issued in a context, SW must initialize by programming push constant
1727  *    commands for all the shaders (at least to zero length) before attempting
1728  *    any rendering operation for the same context."
1729  *
1730  * 3DSTATE_CONSTANT_* packets are restored during a context restore,
1731  * even though they point to a BO that has been already unreferenced at
1732  * the end of the previous batch buffer. This has been fine so far since
1733  * we are protected by these scratch page (every address not covered by
1734  * a BO should be pointing to the scratch page). But on CNL, it is
1735  * causing a GPU hang during context restore at the 3DSTATE_CONSTANT_*
1736  * instruction.
1737  *
1738  * The flag "Indirect State Pointers Disable" in PIPE_CONTROL tells the
1739  * hardware to ignore previous 3DSTATE_CONSTANT_* packets during a
1740  * context restore, so the mentioned hang doesn't happen. However,
1741  * software must program push constant commands for all stages prior to
1742  * rendering anything. So we flag them dirty in BeginCommandBuffer.
1743  *
1744  * Finally, we also make sure to stall at the pixel scoreboard so that the
1745  * constants have been loaded into the EUs prior to disabling the push
1746  * constants, which would otherwise hang a previous 3DPRIMITIVE.
1747  */
1748 static void
1749 emit_isp_disable(struct anv_cmd_buffer *cmd_buffer)
1750 {
1751    anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
1752       pc.StallAtPixelScoreboard = true;
1753       pc.CommandStreamerStallEnable = true;
1754    }
1755    anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
1756       pc.IndirectStatePointersDisable = true;
1757       pc.CommandStreamerStallEnable = true;
1758    }
1759 }
1760 
1761 VkResult
1762 genX(EndCommandBuffer)(
1763     VkCommandBuffer                             commandBuffer)
1764 {
1765    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
1766 
1767    if (anv_batch_has_error(&cmd_buffer->batch))
1768       return cmd_buffer->batch.status;
1769 
1770    /* We want every command buffer to start with the PMA fix in a known state,
1771     * so we disable it at the end of the command buffer.
1772     */
1773    genX(cmd_buffer_enable_pma_fix)(cmd_buffer, false);
1774 
1775    genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
1776 
1777    emit_isp_disable(cmd_buffer);
1778 
1779    anv_cmd_buffer_end_batch_buffer(cmd_buffer);
1780 
1781    return VK_SUCCESS;
1782 }
1783 
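/* For context, the usual application-side sequence that exercises the
 * RENDER_PASS_CONTINUE path handled in the function below (sketch, not
 * driver code):
 *
 *    vkBeginCommandBuffer(secondary, &(VkCommandBufferBeginInfo) {
 *       .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
 *       .flags = VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT,
 *       .pInheritanceInfo = &inheritance,   // names the render pass/subpass
 *    });
 *    ... record draws into the secondary ...
 *    vkEndCommandBuffer(secondary);
 *
 *    vkCmdBeginRenderPass(primary, &begin_info,
 *                         VK_SUBPASS_CONTENTS_SECONDARY_COMMAND_BUFFERS);
 *    vkCmdExecuteCommands(primary, 1, &secondary);
 *    vkCmdEndRenderPass(primary);
 */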
1784 void
1785 genX(CmdExecuteCommands)(
1786     VkCommandBuffer                             commandBuffer,
1787     uint32_t                                    commandBufferCount,
1788     const VkCommandBuffer*                      pCmdBuffers)
1789 {
1790    ANV_FROM_HANDLE(anv_cmd_buffer, primary, commandBuffer);
1791 
1792    assert(primary->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
1793 
1794    if (anv_batch_has_error(&primary->batch))
1795       return;
1796 
1797    /* The secondary command buffers will assume that the PMA fix is disabled
1798     * when they begin executing.  Make sure this is true.
1799     */
1800    genX(cmd_buffer_enable_pma_fix)(primary, false);
1801 
1802    /* The secondary command buffer doesn't know which textures etc. have been
1803     * flushed prior to their execution.  Apply those flushes now.
1804     */
1805    genX(cmd_buffer_apply_pipe_flushes)(primary);
1806 
1807    for (uint32_t i = 0; i < commandBufferCount; i++) {
1808       ANV_FROM_HANDLE(anv_cmd_buffer, secondary, pCmdBuffers[i]);
1809 
1810       assert(secondary->level == VK_COMMAND_BUFFER_LEVEL_SECONDARY);
1811       assert(!anv_batch_has_error(&secondary->batch));
1812 
1813 #if GEN_GEN >= 8 || GEN_IS_HASWELL
1814       if (secondary->state.conditional_render_enabled) {
1815          if (!primary->state.conditional_render_enabled) {
1816             /* Secondary buffer is constructed as if it will be executed
1817              * with conditional rendering, we should satisfy this dependency
1818              * regardless of conditional rendering being enabled in primary.
1819              */
1820             struct gen_mi_builder b;
1821             gen_mi_builder_init(&b, &primary->batch);
1822             gen_mi_store(&b, gen_mi_reg64(ANV_PREDICATE_RESULT_REG),
1823                              gen_mi_imm(UINT64_MAX));
1824          }
1825       }
1826 #endif
1827 
1828       if (secondary->usage_flags &
1829           VK_COMMAND_BUFFER_USAGE_RENDER_PASS_CONTINUE_BIT) {
1830          /* If we're continuing a render pass from the primary, we need to
1831           * copy the surface states for the current subpass into the storage
1832           * we allocated for them in BeginCommandBuffer.
1833           */
1834          struct anv_bo *ss_bo =
1835             primary->device->surface_state_pool.block_pool.bo;
1836          struct anv_state src_state = primary->state.attachment_states;
1837          struct anv_state dst_state = secondary->state.attachment_states;
1838          assert(src_state.alloc_size == dst_state.alloc_size);
1839 
1840          genX(cmd_buffer_so_memcpy)(primary,
1841                                     (struct anv_address) {
1842                                        .bo = ss_bo,
1843                                        .offset = dst_state.offset,
1844                                     },
1845                                     (struct anv_address) {
1846                                        .bo = ss_bo,
1847                                        .offset = src_state.offset,
1848                                     },
1849                                     src_state.alloc_size);
1850       }
1851 
1852       anv_cmd_buffer_add_secondary(primary, secondary);
1853 
1854       assert(secondary->perf_query_pool == NULL || primary->perf_query_pool == NULL ||
1855              secondary->perf_query_pool == primary->perf_query_pool);
1856       if (secondary->perf_query_pool)
1857          primary->perf_query_pool = secondary->perf_query_pool;
1858    }
1859 
1860    /* The secondary isn't counted in our VF cache tracking so we need to
1861     * invalidate the whole thing.
1862     */
1863    if (GEN_GEN >= 8 && GEN_GEN <= 9) {
1864       primary->state.pending_pipe_bits |=
1865          ANV_PIPE_CS_STALL_BIT | ANV_PIPE_VF_CACHE_INVALIDATE_BIT;
1866    }
1867 
1868    /* The secondary may have selected a different pipeline (3D or compute) and
1869     * may have changed the current L3$ configuration.  Reset our tracking
1870     * variables to invalid values to ensure that we re-emit these in the case
1871     * where we do any draws or compute dispatches from the primary after the
1872     * secondary has returned.
1873     */
1874    primary->state.current_pipeline = UINT32_MAX;
1875    primary->state.current_l3_config = NULL;
1876    primary->state.current_hash_scale = 0;
1877 
1878    /* Each of the secondary command buffers will use its own state base
1879     * address.  We need to re-emit state base address for the primary after
1880     * all of the secondaries are done.
1881     *
1882     * TODO: Maybe we want to make this a dirty bit to avoid extra state base
1883     * address calls?
1884     */
1885    genX(cmd_buffer_emit_state_base_address)(primary);
1886 }
1887 
1888 #define IVB_L3SQCREG1_SQGHPCI_DEFAULT     0x00730000
1889 #define VLV_L3SQCREG1_SQGHPCI_DEFAULT     0x00d30000
1890 #define HSW_L3SQCREG1_SQGHPCI_DEFAULT     0x00610000
1891 
1892 /**
1893  * Program the hardware to use the specified L3 configuration.
1894  */
1895 void
1896 genX(cmd_buffer_config_l3)(struct anv_cmd_buffer *cmd_buffer,
1897                            const struct gen_l3_config *cfg)
1898 {
1899    assert(cfg || GEN_GEN >= 12);
1900    if (cfg == cmd_buffer->state.current_l3_config)
1901       return;
1902 
1903    if (INTEL_DEBUG & DEBUG_L3) {
1904       mesa_logd("L3 config transition: ");
1905       gen_dump_l3_config(cfg, stderr);
1906    }
1907 
1908    UNUSED const bool has_slm = cfg->n[GEN_L3P_SLM];
1909 
1910    /* According to the hardware docs, the L3 partitioning can only be changed
1911     * while the pipeline is completely drained and the caches are flushed,
1912     * which involves a first PIPE_CONTROL flush which stalls the pipeline...
1913     */
1914    anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
1915       pc.DCFlushEnable = true;
1916       pc.PostSyncOperation = NoWrite;
1917       pc.CommandStreamerStallEnable = true;
1918    }
1919 
1920    /* ...followed by a second pipelined PIPE_CONTROL that initiates
1921     * invalidation of the relevant caches.  Note that because RO invalidation
1922     * happens at the top of the pipeline (i.e. right away as the PIPE_CONTROL
1923     * command is processed by the CS) we cannot combine it with the previous
1924     * stalling flush as the hardware documentation suggests, because that
1925     * would cause the CS to stall on previous rendering *after* RO
1926     * invalidation and wouldn't prevent the RO caches from being polluted by
1927     * concurrent rendering before the stall completes.  This intentionally
1928     * doesn't implement the SKL+ hardware workaround suggesting to enable CS
1929     * stall on PIPE_CONTROLs with the texture cache invalidation bit set for
1930     * GPGPU workloads because the previous and subsequent PIPE_CONTROLs
1931     * already guarantee that there is no concurrent GPGPU kernel execution
1932     * (see SKL HSD 2132585).
1933     */
1934    anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
1935       pc.TextureCacheInvalidationEnable = true;
1936       pc.ConstantCacheInvalidationEnable = true;
1937       pc.InstructionCacheInvalidateEnable = true;
1938       pc.StateCacheInvalidationEnable = true;
1939       pc.PostSyncOperation = NoWrite;
1940    }
1941 
1942    /* Now send a third stalling flush to make sure that invalidation is
1943     * complete when the L3 configuration registers are modified.
1944     */
1945    anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
1946       pc.DCFlushEnable = true;
1947       pc.PostSyncOperation = NoWrite;
1948       pc.CommandStreamerStallEnable = true;
1949    }
1950 
1951 #if GEN_GEN >= 8
1952 
1953    assert(!cfg->n[GEN_L3P_IS] && !cfg->n[GEN_L3P_C] && !cfg->n[GEN_L3P_T]);
1954 
1955 #if GEN_GEN >= 12
1956 #define L3_ALLOCATION_REG GENX(L3ALLOC)
1957 #define L3_ALLOCATION_REG_num GENX(L3ALLOC_num)
1958 #else
1959 #define L3_ALLOCATION_REG GENX(L3CNTLREG)
1960 #define L3_ALLOCATION_REG_num GENX(L3CNTLREG_num)
1961 #endif
1962 
1963    uint32_t l3cr;
1964    anv_pack_struct(&l3cr, L3_ALLOCATION_REG,
1965 #if GEN_GEN < 11
1966                    .SLMEnable = has_slm,
1967 #endif
1968 #if GEN_GEN == 11
1969    /* WA_1406697149: Bit 9 "Error Detection Behavior Control" must be set
1970     * in L3CNTLREG register. The default setting of the bit is not the
1971     * desirable behavior.
1972     */
1973                    .ErrorDetectionBehaviorControl = true,
1974                    .UseFullWays = true,
1975 #endif
1976                    .URBAllocation = cfg->n[GEN_L3P_URB],
1977                    .ROAllocation = cfg->n[GEN_L3P_RO],
1978                    .DCAllocation = cfg->n[GEN_L3P_DC],
1979                    .AllAllocation = cfg->n[GEN_L3P_ALL]);
1980 
1981    /* Set up the L3 partitioning. */
1982    emit_lri(&cmd_buffer->batch, L3_ALLOCATION_REG_num, l3cr);
1983 
1984 #else
1985 
1986    const bool has_dc = cfg->n[GEN_L3P_DC] || cfg->n[GEN_L3P_ALL];
1987    const bool has_is = cfg->n[GEN_L3P_IS] || cfg->n[GEN_L3P_RO] ||
1988                        cfg->n[GEN_L3P_ALL];
1989    const bool has_c = cfg->n[GEN_L3P_C] || cfg->n[GEN_L3P_RO] ||
1990                       cfg->n[GEN_L3P_ALL];
1991    const bool has_t = cfg->n[GEN_L3P_T] || cfg->n[GEN_L3P_RO] ||
1992                       cfg->n[GEN_L3P_ALL];
1993 
1994    assert(!cfg->n[GEN_L3P_ALL]);
1995 
1996    /* When enabled SLM only uses a portion of the L3 on half of the banks,
1997     * the matching space on the remaining banks has to be allocated to a
1998     * client (URB for all validated configurations) set to the
1999     * lower-bandwidth 2-bank address hashing mode.
2000     */
2001    const struct gen_device_info *devinfo = &cmd_buffer->device->info;
2002    const bool urb_low_bw = has_slm && !devinfo->is_baytrail;
2003    assert(!urb_low_bw || cfg->n[GEN_L3P_URB] == cfg->n[GEN_L3P_SLM]);
2004 
2005    /* Minimum number of ways that can be allocated to the URB. */
2006    const unsigned n0_urb = devinfo->is_baytrail ? 32 : 0;
2007    assert(cfg->n[GEN_L3P_URB] >= n0_urb);
2008 
2009    uint32_t l3sqcr1, l3cr2, l3cr3;
2010    anv_pack_struct(&l3sqcr1, GENX(L3SQCREG1),
2011                    .ConvertDC_UC = !has_dc,
2012                    .ConvertIS_UC = !has_is,
2013                    .ConvertC_UC = !has_c,
2014                    .ConvertT_UC = !has_t);
2015    l3sqcr1 |=
2016       GEN_IS_HASWELL ? HSW_L3SQCREG1_SQGHPCI_DEFAULT :
2017       devinfo->is_baytrail ? VLV_L3SQCREG1_SQGHPCI_DEFAULT :
2018       IVB_L3SQCREG1_SQGHPCI_DEFAULT;
2019 
2020    anv_pack_struct(&l3cr2, GENX(L3CNTLREG2),
2021                    .SLMEnable = has_slm,
2022                    .URBLowBandwidth = urb_low_bw,
2023                    .URBAllocation = cfg->n[GEN_L3P_URB] - n0_urb,
2024 #if !GEN_IS_HASWELL
2025                    .ALLAllocation = cfg->n[GEN_L3P_ALL],
2026 #endif
2027                    .ROAllocation = cfg->n[GEN_L3P_RO],
2028                    .DCAllocation = cfg->n[GEN_L3P_DC]);
2029 
2030    anv_pack_struct(&l3cr3, GENX(L3CNTLREG3),
2031                    .ISAllocation = cfg->n[GEN_L3P_IS],
2032                    .ISLowBandwidth = 0,
2033                    .CAllocation = cfg->n[GEN_L3P_C],
2034                    .CLowBandwidth = 0,
2035                    .TAllocation = cfg->n[GEN_L3P_T],
2036                    .TLowBandwidth = 0);
2037 
2038    /* Set up the L3 partitioning. */
2039    emit_lri(&cmd_buffer->batch, GENX(L3SQCREG1_num), l3sqcr1);
2040    emit_lri(&cmd_buffer->batch, GENX(L3CNTLREG2_num), l3cr2);
2041    emit_lri(&cmd_buffer->batch, GENX(L3CNTLREG3_num), l3cr3);
2042 
2043 #if GEN_IS_HASWELL
2044    if (cmd_buffer->device->physical->cmd_parser_version >= 4) {
2045       /* Enable L3 atomics on HSW if we have a DC partition, otherwise keep
2046        * them disabled to avoid crashing the system hard.
2047        */
2048       uint32_t scratch1, chicken3;
2049       anv_pack_struct(&scratch1, GENX(SCRATCH1),
2050                       .L3AtomicDisable = !has_dc);
2051       anv_pack_struct(&chicken3, GENX(CHICKEN3),
2052                       .L3AtomicDisableMask = true,
2053                       .L3AtomicDisable = !has_dc);
2054       emit_lri(&cmd_buffer->batch, GENX(SCRATCH1_num), scratch1);
2055       emit_lri(&cmd_buffer->batch, GENX(CHICKEN3_num), chicken3);
2056    }
2057 #endif
2058 
2059 #endif
2060 
2061    cmd_buffer->state.current_l3_config = cfg;
2062 }
2063 
2064 void
2065 genX(cmd_buffer_apply_pipe_flushes)(struct anv_cmd_buffer *cmd_buffer)
2066 {
2067    UNUSED const struct gen_device_info *devinfo = &cmd_buffer->device->info;
2068    enum anv_pipe_bits bits = cmd_buffer->state.pending_pipe_bits;
2069 
2070    if (cmd_buffer->device->physical->always_flush_cache)
2071       bits |= ANV_PIPE_FLUSH_BITS | ANV_PIPE_INVALIDATE_BITS;
2072 
2073    /*
2074     * From Sandybridge PRM, volume 2, "1.7.2 End-of-Pipe Synchronization":
2075     *
2076     *    Write synchronization is a special case of end-of-pipe
2077     *    synchronization that requires that the render cache and/or depth
2078     *    related caches are flushed to memory, where the data will become
2079     *    globally visible. This type of synchronization is required prior to
2080     *    SW (CPU) actually reading the result data from memory, or initiating
2081     *    an operation that will use as a read surface (such as a texture
2082     *    surface) a previous render target and/or depth/stencil buffer
2083     *
2084     *
2085     * From Haswell PRM, volume 2, part 1, "End-of-Pipe Synchronization":
2086     *
2087     *    Exercising the write cache flush bits (Render Target Cache Flush
2088     *    Enable, Depth Cache Flush Enable, DC Flush) in PIPE_CONTROL only
2089     *    ensures the write caches are flushed and doesn't guarantee the data
2090     *    is globally visible.
2091     *
2092     *    SW can track the completion of the end-of-pipe-synchronization by
2093     *    using "Notify Enable" and "PostSync Operation - Write Immediate
2094     *    Data" in the PIPE_CONTROL command.
2095     *
2096     * In other words, flushes are pipelined while invalidations are handled
2097     * immediately.  Therefore, if we're flushing anything then we need to
2098     * schedule an end-of-pipe sync before any invalidations can happen.
2099     */
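   /* Worked example (editorial): a barrier that both flushes the render
    * target cache and invalidates the texture cache therefore becomes two
    * PIPE_CONTROLs in the code below: a first one carrying the RT flush plus
    * a CS stall and a post-sync write (the end-of-pipe sync), followed by a
    * second one carrying the texture cache invalidation.  Folding the
    * invalidation into the first packet could let it complete before the
    * flushed data is globally visible.
    */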
2100    if (bits & ANV_PIPE_FLUSH_BITS)
2101       bits |= ANV_PIPE_NEEDS_END_OF_PIPE_SYNC_BIT;
2102 
2103 
2104    /* HSD 1209978178: docs say that before programming the aux table:
2105     *
2106     *    "Driver must ensure that the engine is IDLE but ensure it doesn't
2107     *    add extra flushes in the case it knows that the engine is already
2108     *    IDLE."
2109     */
2110    if (GEN_GEN == 12 && (bits & ANV_PIPE_AUX_TABLE_INVALIDATE_BIT))
2111       bits |= ANV_PIPE_NEEDS_END_OF_PIPE_SYNC_BIT;
2112 
2113    /* If we're going to do an invalidate and we have a pending end-of-pipe
2114     * sync that has yet to be resolved, we do the end-of-pipe sync now.
2115     */
2116    if ((bits & ANV_PIPE_INVALIDATE_BITS) &&
2117        (bits & ANV_PIPE_NEEDS_END_OF_PIPE_SYNC_BIT)) {
2118       bits |= ANV_PIPE_END_OF_PIPE_SYNC_BIT;
2119       bits &= ~ANV_PIPE_NEEDS_END_OF_PIPE_SYNC_BIT;
2120    }
2121 
2122    if (GEN_GEN >= 12 &&
2123        ((bits & ANV_PIPE_DEPTH_CACHE_FLUSH_BIT) ||
2124         (bits & ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT))) {
2125       /* From the PIPE_CONTROL instruction table, bit 28 (Tile Cache Flush
2126        * Enable):
2127        *
2128        *    Unified Cache (Tile Cache Disabled):
2129        *
2130        *    When the Color and Depth (Z) streams are enabled to be cached in
2131        *    the DC space of L2, Software must use "Render Target Cache Flush
2132        *    Enable" and "Depth Cache Flush Enable" along with "Tile Cache
2133        *    Flush" for getting the color and depth (Z) write data to be
2134        *    globally observable.  In this mode of operation it is not required
2135        *    to set "CS Stall" upon setting "Tile Cache Flush" bit.
2136        */
2137       bits |= ANV_PIPE_TILE_CACHE_FLUSH_BIT;
2138    }
2139 
2140    /* GEN:BUG:1409226450, Wait for EU to be idle before pipe control which
2141     * invalidates the instruction cache
2142     */
2143    if (GEN_GEN == 12 && (bits & ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT))
2144       bits |= ANV_PIPE_CS_STALL_BIT | ANV_PIPE_STALL_AT_SCOREBOARD_BIT;
2145 
2146    if ((GEN_GEN >= 8 && GEN_GEN <= 9) &&
2147        (bits & ANV_PIPE_CS_STALL_BIT) &&
2148        (bits & ANV_PIPE_VF_CACHE_INVALIDATE_BIT)) {
2149       /* If we are doing a VF cache invalidate AND a CS stall (it must be
2150        * both) then we can reset our vertex cache tracking.
2151        */
2152       memset(cmd_buffer->state.gfx.vb_dirty_ranges, 0,
2153              sizeof(cmd_buffer->state.gfx.vb_dirty_ranges));
2154       memset(&cmd_buffer->state.gfx.ib_dirty_range, 0,
2155              sizeof(cmd_buffer->state.gfx.ib_dirty_range));
2156    }
2157 
2158    /* Project: SKL / Argument: LRI Post Sync Operation [23]
2159     *
2160     * "PIPECONTROL command with “Command Streamer Stall Enable” must be
2161     *  programmed prior to programming a PIPECONTROL command with "LRI
2162     *  Post Sync Operation" in GPGPU mode of operation (i.e when
2163     *  PIPELINE_SELECT command is set to GPGPU mode of operation)."
2164     *
2165     * The same text exists a few rows below for Post Sync Op.
2166     *
2167     * On Gen12 this is GEN:BUG:1607156449.
2168     */
2169    if (bits & ANV_PIPE_POST_SYNC_BIT) {
2170       if ((GEN_GEN == 9 || (GEN_GEN == 12 && devinfo->revision == 0 /* A0 */)) &&
2171           cmd_buffer->state.current_pipeline == GPGPU)
2172          bits |= ANV_PIPE_CS_STALL_BIT;
2173       bits &= ~ANV_PIPE_POST_SYNC_BIT;
2174    }
2175 
2176    if (bits & (ANV_PIPE_FLUSH_BITS | ANV_PIPE_CS_STALL_BIT |
2177                ANV_PIPE_END_OF_PIPE_SYNC_BIT)) {
2178       anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pipe) {
2179 #if GEN_GEN >= 12
2180          pipe.TileCacheFlushEnable = bits & ANV_PIPE_TILE_CACHE_FLUSH_BIT;
2181 #endif
2182          pipe.DepthCacheFlushEnable = bits & ANV_PIPE_DEPTH_CACHE_FLUSH_BIT;
2183          pipe.DCFlushEnable = bits & ANV_PIPE_DATA_CACHE_FLUSH_BIT;
2184          pipe.RenderTargetCacheFlushEnable =
2185             bits & ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;
2186 
2187          /* GEN:BUG:1409600907: "PIPE_CONTROL with Depth Stall Enable bit must
2188           * be set with any PIPE_CONTROL with Depth Flush Enable bit set.
2189           */
2190 #if GEN_GEN >= 12
2191          pipe.DepthStallEnable =
2192             pipe.DepthCacheFlushEnable || (bits & ANV_PIPE_DEPTH_STALL_BIT);
2193 #else
2194          pipe.DepthStallEnable = bits & ANV_PIPE_DEPTH_STALL_BIT;
2195 #endif
2196 
2197          pipe.CommandStreamerStallEnable = bits & ANV_PIPE_CS_STALL_BIT;
2198          pipe.StallAtPixelScoreboard = bits & ANV_PIPE_STALL_AT_SCOREBOARD_BIT;
2199 
2200          /* From Sandybridge PRM, volume 2, "1.7.3.1 Writing a Value to Memory":
2201           *
2202           *    "The most common action to perform upon reaching a
2203           *    synchronization point is to write a value out to memory. An
2204           *    immediate value (included with the synchronization command) may
2205           *    be written."
2206           *
2207           *
2208           * From Broadwell PRM, volume 7, "End-of-Pipe Synchronization":
2209           *
2210           *    "In case the data flushed out by the render engine is to be
2211           *    read back in to the render engine in coherent manner, then the
2212           *    render engine has to wait for the fence completion before
2213           *    accessing the flushed data. This can be achieved by following
2214           *    means on various products: PIPE_CONTROL command with CS Stall
2215           *    and the required write caches flushed with Post-Sync-Operation
2216           *    as Write Immediate Data.
2217           *
2218           *    Example:
2219           *       - Workload-1 (3D/GPGPU/MEDIA)
2220           *       - PIPE_CONTROL (CS Stall, Post-Sync-Operation Write
2221           *         Immediate Data, Required Write Cache Flush bits set)
2222           *       - Workload-2 (Can use the data produce or output by
2223           *         Workload-1)
2224           */
2225          if (bits & ANV_PIPE_END_OF_PIPE_SYNC_BIT) {
2226             pipe.CommandStreamerStallEnable = true;
2227             pipe.PostSyncOperation = WriteImmediateData;
2228             pipe.Address = cmd_buffer->device->workaround_address;
2229          }
2230 
2231          /*
2232           * According to the Broadwell documentation, any PIPE_CONTROL with the
2233           * "Command Streamer Stall" bit set must also have another bit set,
2234           * with six different options:
2235           *
2236           *  - Render Target Cache Flush
2237           *  - Depth Cache Flush
2238           *  - Stall at Pixel Scoreboard
2239           *  - Post-Sync Operation
2240           *  - Depth Stall
2241           *  - DC Flush Enable
2242           *
2243           * I chose "Stall at Pixel Scoreboard" since that's what we use in
2244           * mesa and it seems to work fine. The choice is fairly arbitrary.
2245           */
2246          if (pipe.CommandStreamerStallEnable &&
2247              !pipe.RenderTargetCacheFlushEnable &&
2248              !pipe.DepthCacheFlushEnable &&
2249              !pipe.StallAtPixelScoreboard &&
2250              !pipe.PostSyncOperation &&
2251              !pipe.DepthStallEnable &&
2252              !pipe.DCFlushEnable)
2253             pipe.StallAtPixelScoreboard = true;
2254       }
2255 
2256       /* If a render target flush was emitted, then we can toggle off the bit
2257        * saying that render target writes are ongoing.
2258        */
2259       if (bits & ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT)
2260          bits &= ~(ANV_PIPE_RENDER_TARGET_BUFFER_WRITES);
2261 
2262       if (GEN_IS_HASWELL) {
2263          /* Haswell needs additional workarounds:
2264           *
2265           * From Haswell PRM, volume 2, part 1, "End-of-Pipe Synchronization":
2266           *
2267           *    Option 1:
2268           *    PIPE_CONTROL command with the CS Stall and the required write
2269           *    caches flushed with Post-SyncOperation as Write Immediate Data
2270           *    followed by eight dummy MI_STORE_DATA_IMM (write to scratch
2271           *    space) commands.
2272           *
2273           *    Example:
2274           *       - Workload-1
2275           *       - PIPE_CONTROL (CS Stall, Post-Sync-Operation Write
2276           *         Immediate Data, Required Write Cache Flush bits set)
2277           *       - MI_STORE_DATA_IMM (8 times) (Dummy data, Scratch Address)
2278           *       - Workload-2 (Can use the data produce or output by
2279           *         Workload-1)
2280           *
2281           * Unfortunately, both the PRMs and the internal docs are a bit
2282           * out-of-date in this regard.  What the windows driver does (and
2283           * this appears to actually work) is to emit a register read from the
2284           * memory address written by the pipe control above.
2285           *
2286           * What register we load into doesn't matter.  We choose an indirect
2287           * rendering register because we know it always exists and it's one
2288           * of the first registers the command parser allows us to write.  If
2289           * you don't have command parser support in your kernel (pre-4.2),
2290           * this will get turned into MI_NOOP and you won't get the
2291           * workaround.  Unfortunately, there's just not much we can do in
2292           * that case.  This register is perfectly safe to write since we
2293           * always re-load all of the indirect draw registers right before
2294           * 3DPRIMITIVE when needed anyway.
2295           */
2296          anv_batch_emit(&cmd_buffer->batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
2297             lrm.RegisterAddress  = 0x243C; /* GEN7_3DPRIM_START_INSTANCE */
2298             lrm.MemoryAddress = cmd_buffer->device->workaround_address;
2299          }
2300       }
2301 
2302       bits &= ~(ANV_PIPE_FLUSH_BITS | ANV_PIPE_CS_STALL_BIT |
2303                 ANV_PIPE_END_OF_PIPE_SYNC_BIT);
2304    }
2305 
2306    if (bits & ANV_PIPE_INVALIDATE_BITS) {
2307       /* From the SKL PRM, Vol. 2a, "PIPE_CONTROL",
2308        *
2309        *    "If the VF Cache Invalidation Enable is set to a 1 in a
2310        *    PIPE_CONTROL, a separate Null PIPE_CONTROL, all bitfields sets to
2311        *    0, with the VF Cache Invalidation Enable set to 0 needs to be sent
2312        *    prior to the PIPE_CONTROL with VF Cache Invalidation Enable set to
2313        *    a 1."
2314        *
2315        * This appears to hang Broadwell, so we restrict it to just gen9.
2316        */
2317       if (GEN_GEN == 9 && (bits & ANV_PIPE_VF_CACHE_INVALIDATE_BIT))
2318          anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pipe);
2319 
2320       anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pipe) {
2321          pipe.StateCacheInvalidationEnable =
2322             bits & ANV_PIPE_STATE_CACHE_INVALIDATE_BIT;
2323          pipe.ConstantCacheInvalidationEnable =
2324             bits & ANV_PIPE_CONSTANT_CACHE_INVALIDATE_BIT;
2325          pipe.VFCacheInvalidationEnable =
2326             bits & ANV_PIPE_VF_CACHE_INVALIDATE_BIT;
2327          pipe.TextureCacheInvalidationEnable =
2328             bits & ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT;
2329          pipe.InstructionCacheInvalidateEnable =
2330             bits & ANV_PIPE_INSTRUCTION_CACHE_INVALIDATE_BIT;
2331 
2332          /* From the SKL PRM, Vol. 2a, "PIPE_CONTROL",
2333           *
2334           *    "When VF Cache Invalidate is set “Post Sync Operation” must be
2335           *    enabled to “Write Immediate Data” or “Write PS Depth Count” or
2336           *    “Write Timestamp”.
2337           */
2338          if (GEN_GEN == 9 && pipe.VFCacheInvalidationEnable) {
2339             pipe.PostSyncOperation = WriteImmediateData;
2340             pipe.Address = cmd_buffer->device->workaround_address;
2341          }
2342       }
2343 
2344 #if GEN_GEN == 12
2345       if ((bits & ANV_PIPE_AUX_TABLE_INVALIDATE_BIT) &&
2346           cmd_buffer->device->info.has_aux_map) {
2347          anv_batch_emit(&cmd_buffer->batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
2348             lri.RegisterOffset = GENX(GFX_CCS_AUX_INV_num);
2349             lri.DataDWord = 1;
2350          }
2351       }
2352 #endif
2353 
2354       bits &= ~ANV_PIPE_INVALIDATE_BITS;
2355    }
2356 
2357    cmd_buffer->state.pending_pipe_bits = bits;
2358 }
2359 
2360 void genX(CmdPipelineBarrier)(
2361     VkCommandBuffer                             commandBuffer,
2362     VkPipelineStageFlags                        srcStageMask,
2363     VkPipelineStageFlags                        destStageMask,
2364     VkBool32                                    byRegion,
2365     uint32_t                                    memoryBarrierCount,
2366     const VkMemoryBarrier*                      pMemoryBarriers,
2367     uint32_t                                    bufferMemoryBarrierCount,
2368     const VkBufferMemoryBarrier*                pBufferMemoryBarriers,
2369     uint32_t                                    imageMemoryBarrierCount,
2370     const VkImageMemoryBarrier*                 pImageMemoryBarriers)
2371 {
2372    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
2373 
2374    /* XXX: Right now, we're really dumb and just flush whatever categories
2375     * the app asks for.  One of these days we may make this a bit better
2376     * but right now that's all the hardware allows for in most areas.
2377     */
2378    VkAccessFlags src_flags = 0;
2379    VkAccessFlags dst_flags = 0;
2380 
2381    for (uint32_t i = 0; i < memoryBarrierCount; i++) {
2382       src_flags |= pMemoryBarriers[i].srcAccessMask;
2383       dst_flags |= pMemoryBarriers[i].dstAccessMask;
2384    }
2385 
2386    for (uint32_t i = 0; i < bufferMemoryBarrierCount; i++) {
2387       src_flags |= pBufferMemoryBarriers[i].srcAccessMask;
2388       dst_flags |= pBufferMemoryBarriers[i].dstAccessMask;
2389    }
2390 
2391    for (uint32_t i = 0; i < imageMemoryBarrierCount; i++) {
2392       src_flags |= pImageMemoryBarriers[i].srcAccessMask;
2393       dst_flags |= pImageMemoryBarriers[i].dstAccessMask;
2394       ANV_FROM_HANDLE(anv_image, image, pImageMemoryBarriers[i].image);
2395       const VkImageSubresourceRange *range =
2396          &pImageMemoryBarriers[i].subresourceRange;
2397 
2398       uint32_t base_layer, layer_count;
2399       if (image->type == VK_IMAGE_TYPE_3D) {
2400          base_layer = 0;
2401          layer_count = anv_minify(image->extent.depth, range->baseMipLevel);
2402       } else {
2403          base_layer = range->baseArrayLayer;
2404          layer_count = anv_get_layerCount(image, range);
2405       }
2406 
2407       if (range->aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) {
2408          transition_depth_buffer(cmd_buffer, image,
2409                                  base_layer, layer_count,
2410                                  pImageMemoryBarriers[i].oldLayout,
2411                                  pImageMemoryBarriers[i].newLayout,
2412                                  false /* will_full_fast_clear */);
2413       }
2414 
2415       if (range->aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) {
2416          transition_stencil_buffer(cmd_buffer, image,
2417                                    range->baseMipLevel,
2418                                    anv_get_levelCount(image, range),
2419                                    base_layer, layer_count,
2420                                    pImageMemoryBarriers[i].oldLayout,
2421                                    pImageMemoryBarriers[i].newLayout,
2422                                    false /* will_full_fast_clear */);
2423       }
2424 
2425       if (range->aspectMask & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) {
2426          VkImageAspectFlags color_aspects =
2427             anv_image_expand_aspects(image, range->aspectMask);
2428          uint32_t aspect_bit;
2429          anv_foreach_image_aspect_bit(aspect_bit, image, color_aspects) {
2430             transition_color_buffer(cmd_buffer, image, 1UL << aspect_bit,
2431                                     range->baseMipLevel,
2432                                     anv_get_levelCount(image, range),
2433                                     base_layer, layer_count,
2434                                     pImageMemoryBarriers[i].oldLayout,
2435                                     pImageMemoryBarriers[i].newLayout,
2436                                     false /* will_full_fast_clear */);
2437          }
2438       }
2439    }
2440 
2441    cmd_buffer->state.pending_pipe_bits |=
2442       anv_pipe_flush_bits_for_access_flags(cmd_buffer->device, src_flags) |
2443       anv_pipe_invalidate_bits_for_access_flags(cmd_buffer->device, dst_flags);
2444 }
2445 
2446 static void
2447 cmd_buffer_alloc_push_constants(struct anv_cmd_buffer *cmd_buffer)
2448 {
2449    VkShaderStageFlags stages =
2450       cmd_buffer->state.gfx.pipeline->active_stages;
2451 
2452    /* In order to avoid thrashing, we assume that vertex and fragment stages
2453     * always exist.  In the rare case where one is missing *and* the other
2454     * uses push constants, this may be suboptimal.  However, avoiding stalls
2455     * seems more important.
2456     */
2457    stages |= VK_SHADER_STAGE_FRAGMENT_BIT | VK_SHADER_STAGE_VERTEX_BIT;
2458 
2459    if (stages == cmd_buffer->state.gfx.push_constant_stages)
2460       return;
2461 
2462 #if GEN_GEN >= 8
2463    const unsigned push_constant_kb = 32;
2464 #elif GEN_IS_HASWELL
2465    const unsigned push_constant_kb = cmd_buffer->device->info.gt == 3 ? 32 : 16;
2466 #else
2467    const unsigned push_constant_kb = 16;
2468 #endif
2469 
2470    const unsigned num_stages =
2471       util_bitcount(stages & VK_SHADER_STAGE_ALL_GRAPHICS);
2472    unsigned size_per_stage = push_constant_kb / num_stages;
2473 
2474    /* Broadwell+ and Haswell gt3 require that the push constant sizes be in
2475     * units of 2KB.  Incidentally, these are the same platforms that have
2476     * 32KB worth of push constant space.
2477     */
2478    if (push_constant_kb == 32)
2479       size_per_stage &= ~1u;
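   /* A worked example with illustrative numbers: on a 32KB part with all
    * five graphics stages active, 32 / 5 = 6KB per stage, already a multiple
    * of 2KB.  With only VS and FS forced on, each gets 32 / 2 = 16KB.  On a
    * 16KB part with VS and FS, each gets 16 / 2 = 8KB and no rounding is
    * needed because the 2KB restriction does not apply there.
    */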
2480 
2481    uint32_t kb_used = 0;
2482    for (int i = MESA_SHADER_VERTEX; i < MESA_SHADER_FRAGMENT; i++) {
2483       unsigned push_size = (stages & (1 << i)) ? size_per_stage : 0;
2484       anv_batch_emit(&cmd_buffer->batch,
2485                      GENX(3DSTATE_PUSH_CONSTANT_ALLOC_VS), alloc) {
2486          alloc._3DCommandSubOpcode  = 18 + i;
2487          alloc.ConstantBufferOffset = (push_size > 0) ? kb_used : 0;
2488          alloc.ConstantBufferSize   = push_size;
2489       }
2490       kb_used += push_size;
2491    }
2492 
2493    anv_batch_emit(&cmd_buffer->batch,
2494                   GENX(3DSTATE_PUSH_CONSTANT_ALLOC_PS), alloc) {
2495       alloc.ConstantBufferOffset = kb_used;
2496       alloc.ConstantBufferSize = push_constant_kb - kb_used;
2497    }
2498 
2499    cmd_buffer->state.gfx.push_constant_stages = stages;
2500 
2501    /* From the BDW PRM for 3DSTATE_PUSH_CONSTANT_ALLOC_VS:
2502     *
2503     *    "The 3DSTATE_CONSTANT_VS must be reprogrammed prior to
2504     *    the next 3DPRIMITIVE command after programming the
2505     *    3DSTATE_PUSH_CONSTANT_ALLOC_VS"
2506     *
2507     * Since 3DSTATE_PUSH_CONSTANT_ALLOC_VS is programmed as part of
2508     * pipeline setup, we need to dirty push constants.
2509     */
2510    cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_ALL_GRAPHICS;
2511 }
2512 
2513 static struct anv_address
2514 anv_descriptor_set_address(struct anv_cmd_buffer *cmd_buffer,
2515                            struct anv_descriptor_set *set)
2516 {
2517    if (set->pool) {
2518       /* This is a normal descriptor set */
2519       return (struct anv_address) {
2520          .bo = set->pool->bo,
2521          .offset = set->desc_mem.offset,
2522       };
2523    } else {
2524       /* This is a push descriptor set.  We have to flag it as used on the GPU
2525        * so that the next time we push descriptors, we grab new memory.
2526        */
2527       struct anv_push_descriptor_set *push_set =
2528          (struct anv_push_descriptor_set *)set;
2529       push_set->set_used_on_gpu = true;
2530 
2531       return (struct anv_address) {
2532          .bo = cmd_buffer->dynamic_state_stream.state_pool->block_pool.bo,
2533          .offset = set->desc_mem.offset,
2534       };
2535    }
2536 }
2537 
2538 static VkResult
2539 emit_binding_table(struct anv_cmd_buffer *cmd_buffer,
2540                    struct anv_cmd_pipeline_state *pipe_state,
2541                    struct anv_shader_bin *shader,
2542                    struct anv_state *bt_state)
2543 {
2544    struct anv_subpass *subpass = cmd_buffer->state.subpass;
2545    uint32_t state_offset;
2546 
2547    struct anv_pipeline_bind_map *map = &shader->bind_map;
2548    if (map->surface_count == 0) {
2549       *bt_state = (struct anv_state) { 0, };
2550       return VK_SUCCESS;
2551    }
2552 
2553    *bt_state = anv_cmd_buffer_alloc_binding_table(cmd_buffer,
2554                                                   map->surface_count,
2555                                                   &state_offset);
2556    uint32_t *bt_map = bt_state->map;
2557 
2558    if (bt_state->map == NULL)
2559       return VK_ERROR_OUT_OF_DEVICE_MEMORY;
2560 
2561    /* We only need to emit relocs if we're not using softpin.  If we are using
2562     * softpin then we always keep all user-allocated memory objects resident.
2563     */
2564    const bool need_client_mem_relocs =
2565       !cmd_buffer->device->physical->use_softpin;
2566    struct anv_push_constants *push = &pipe_state->push_constants;
2567 
2568    for (uint32_t s = 0; s < map->surface_count; s++) {
2569       struct anv_pipeline_binding *binding = &map->surface_to_descriptor[s];
2570 
2571       struct anv_state surface_state;
2572 
2573       switch (binding->set) {
2574       case ANV_DESCRIPTOR_SET_NULL:
2575          bt_map[s] = 0;
2576          break;
2577 
2578       case ANV_DESCRIPTOR_SET_COLOR_ATTACHMENTS:
2579          /* Color attachment binding */
2580          assert(shader->stage == MESA_SHADER_FRAGMENT);
2581          if (binding->index < subpass->color_count) {
2582             const unsigned att =
2583                subpass->color_attachments[binding->index].attachment;
2584 
2585             /* From the Vulkan 1.0.46 spec:
2586              *
2587              *    "If any color or depth/stencil attachments are
2588              *    VK_ATTACHMENT_UNUSED, then no writes occur for those
2589              *    attachments."
2590              */
2591             if (att == VK_ATTACHMENT_UNUSED) {
2592                surface_state = cmd_buffer->state.null_surface_state;
2593             } else {
2594                surface_state = cmd_buffer->state.attachments[att].color.state;
2595             }
2596          } else {
2597             surface_state = cmd_buffer->state.null_surface_state;
2598          }
2599 
2600          assert(surface_state.map);
2601          bt_map[s] = surface_state.offset + state_offset;
2602          break;
2603 
2604       case ANV_DESCRIPTOR_SET_SHADER_CONSTANTS: {
2605          struct anv_state surface_state =
2606             anv_cmd_buffer_alloc_surface_state(cmd_buffer);
2607 
2608          struct anv_address constant_data = {
2609             .bo = cmd_buffer->device->instruction_state_pool.block_pool.bo,
2610             .offset = shader->kernel.offset +
2611                       shader->prog_data->const_data_offset,
2612          };
2613          unsigned constant_data_size = shader->prog_data->const_data_size;
2614 
2615          const enum isl_format format =
2616             anv_isl_format_for_descriptor_type(cmd_buffer->device,
2617                                                VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER);
2618          anv_fill_buffer_surface_state(cmd_buffer->device,
2619                                        surface_state, format,
2620                                        ISL_SURF_USAGE_CONSTANT_BUFFER_BIT,
2621                                        constant_data, constant_data_size, 1);
2622 
2623          assert(surface_state.map);
2624          bt_map[s] = surface_state.offset + state_offset;
2625          add_surface_reloc(cmd_buffer, surface_state, constant_data);
2626          break;
2627       }
2628 
2629       case ANV_DESCRIPTOR_SET_NUM_WORK_GROUPS: {
2630          /* This is always the first binding for compute shaders */
2631          assert(shader->stage == MESA_SHADER_COMPUTE && s == 0);
2632 
2633          struct anv_state surface_state =
2634             anv_cmd_buffer_alloc_surface_state(cmd_buffer);
2635 
2636          const enum isl_format format =
2637             anv_isl_format_for_descriptor_type(cmd_buffer->device,
2638                                                VK_DESCRIPTOR_TYPE_STORAGE_BUFFER);
2639          anv_fill_buffer_surface_state(cmd_buffer->device, surface_state,
2640                                        format,
2641                                        ISL_SURF_USAGE_CONSTANT_BUFFER_BIT,
2642                                        cmd_buffer->state.compute.num_workgroups,
2643                                        12, 1);
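         /* The 12-byte range is assumed to be the three 32-bit group counts
          * (x, y, z) that make up the num_workgroups buffer.
          */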
2644 
2645          assert(surface_state.map);
2646          bt_map[s] = surface_state.offset + state_offset;
2647          if (need_client_mem_relocs) {
2648             add_surface_reloc(cmd_buffer, surface_state,
2649                               cmd_buffer->state.compute.num_workgroups);
2650          }
2651          break;
2652       }
2653 
2654       case ANV_DESCRIPTOR_SET_DESCRIPTORS: {
2655          /* This is a descriptor set buffer so the set index is actually
2656           * given by binding->binding.  (Yes, that's confusing.)
2657           */
2658          struct anv_descriptor_set *set =
2659             pipe_state->descriptors[binding->index];
2660          assert(set->desc_mem.alloc_size);
2661          assert(set->desc_surface_state.alloc_size);
2662          bt_map[s] = set->desc_surface_state.offset + state_offset;
2663          add_surface_reloc(cmd_buffer, set->desc_surface_state,
2664                            anv_descriptor_set_address(cmd_buffer, set));
2665          break;
2666       }
2667 
2668       default: {
2669          assert(binding->set < MAX_SETS);
2670          const struct anv_descriptor_set *set =
2671             pipe_state->descriptors[binding->set];
2672          if (binding->index >= set->descriptor_count) {
2673             /* From the Vulkan spec section entitled "DescriptorSet and
2674              * Binding Assignment":
2675              *
2676              *    "If the array is runtime-sized, then array elements greater
2677              *    than or equal to the size of that binding in the bound
2678              *    descriptor set must not be used."
2679              *
2680              * Unfortunately, the compiler isn't smart enough to figure out
2681              * when a dynamic binding isn't used so it may grab the whole
2682              * array and stick it in the binding table.  In this case, it's
2683              * safe to just skip those bindings that are OOB.
2684              */
2685             assert(binding->index < set->layout->descriptor_count);
2686             continue;
2687          }
2688          const struct anv_descriptor *desc = &set->descriptors[binding->index];
2689 
2690          switch (desc->type) {
2691          case VK_DESCRIPTOR_TYPE_SAMPLER:
2692             /* Nothing for us to do here */
2693             continue;
2694 
2695          case VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER:
2696          case VK_DESCRIPTOR_TYPE_SAMPLED_IMAGE: {
2697             if (desc->image_view) {
2698                struct anv_surface_state sstate =
2699                   (desc->layout == VK_IMAGE_LAYOUT_GENERAL) ?
2700                   desc->image_view->planes[binding->plane].general_sampler_surface_state :
2701                   desc->image_view->planes[binding->plane].optimal_sampler_surface_state;
2702                surface_state = sstate.state;
2703                assert(surface_state.alloc_size);
2704                if (need_client_mem_relocs)
2705                   add_surface_state_relocs(cmd_buffer, sstate);
2706             } else {
2707                surface_state = cmd_buffer->device->null_surface_state;
2708             }
2709             break;
2710          }
2711          case VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT:
2712             assert(shader->stage == MESA_SHADER_FRAGMENT);
2713             assert(desc->image_view != NULL);
2714             if ((desc->image_view->aspect_mask & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) == 0) {
2715                /* For depth and stencil input attachments, we treat it like any
2716                 * old texture that a user may have bound.
2717                 */
2718                assert(desc->image_view->n_planes == 1);
2719                struct anv_surface_state sstate =
2720                   (desc->layout == VK_IMAGE_LAYOUT_GENERAL) ?
2721                   desc->image_view->planes[0].general_sampler_surface_state :
2722                   desc->image_view->planes[0].optimal_sampler_surface_state;
2723                surface_state = sstate.state;
2724                assert(surface_state.alloc_size);
2725                if (need_client_mem_relocs)
2726                   add_surface_state_relocs(cmd_buffer, sstate);
2727             } else {
2728                /* For color input attachments, we create the surface state at
2729                 * vkBeginRenderPass time so that we can include aux and clear
2730                 * color information.
2731                 */
2732                assert(binding->input_attachment_index < subpass->input_count);
2733                const unsigned subpass_att = binding->input_attachment_index;
2734                const unsigned att = subpass->input_attachments[subpass_att].attachment;
2735                surface_state = cmd_buffer->state.attachments[att].input.state;
2736             }
2737             break;
2738 
2739          case VK_DESCRIPTOR_TYPE_STORAGE_IMAGE: {
2740             if (desc->image_view) {
2741                struct anv_surface_state sstate = (binding->write_only)
2742                   ? desc->image_view->planes[binding->plane].writeonly_storage_surface_state
2743                   : desc->image_view->planes[binding->plane].storage_surface_state;
2744                surface_state = sstate.state;
2745                assert(surface_state.alloc_size);
2746                if (need_client_mem_relocs)
2747                   add_surface_state_relocs(cmd_buffer, sstate);
2748             } else {
2749                surface_state = cmd_buffer->device->null_surface_state;
2750             }
2751             break;
2752          }
2753 
2754          case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER:
2755          case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER:
2756          case VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER:
2757             if (desc->buffer_view) {
2758                surface_state = desc->buffer_view->surface_state;
2759                assert(surface_state.alloc_size);
2760                if (need_client_mem_relocs) {
2761                   add_surface_reloc(cmd_buffer, surface_state,
2762                                     desc->buffer_view->address);
2763                }
2764             } else {
2765                surface_state = cmd_buffer->device->null_surface_state;
2766             }
2767             break;
2768 
2769          case VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC:
2770          case VK_DESCRIPTOR_TYPE_STORAGE_BUFFER_DYNAMIC: {
2771             if (desc->buffer) {
2772                /* Compute the offset within the buffer */
2773                uint32_t dynamic_offset =
2774                   push->dynamic_offsets[binding->dynamic_offset_index];
2775                uint64_t offset = desc->offset + dynamic_offset;
2776                /* Clamp to the buffer size */
2777                offset = MIN2(offset, desc->buffer->size);
2778                /* Clamp the range to the buffer size */
2779                uint32_t range = MIN2(desc->range, desc->buffer->size - offset);
2780 
2781                /* Align the range for consistency */
2782                if (desc->type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC)
2783                   range = align_u32(range, ANV_UBO_ALIGNMENT);
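               /* Worked example with made-up numbers: a 1000-byte buffer
                * with desc->offset = 256, dynamic_offset = 512 and
                * desc->range = 512 yields offset = 768 and
                * range = MIN2(512, 1000 - 768) = 232, which is then rounded
                * up to a multiple of ANV_UBO_ALIGNMENT for the UBO case.
                */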
2784 
2785                struct anv_address address =
2786                   anv_address_add(desc->buffer->address, offset);
2787 
2788                surface_state =
2789                   anv_state_stream_alloc(&cmd_buffer->surface_state_stream, 64, 64);
2790                enum isl_format format =
2791                   anv_isl_format_for_descriptor_type(cmd_buffer->device,
2792                                                      desc->type);
2793 
2794                isl_surf_usage_flags_t usage =
2795                   desc->type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC ?
2796                   ISL_SURF_USAGE_CONSTANT_BUFFER_BIT :
2797                   ISL_SURF_USAGE_STORAGE_BIT;
2798 
2799                anv_fill_buffer_surface_state(cmd_buffer->device, surface_state,
2800                                              format, usage, address, range, 1);
2801                if (need_client_mem_relocs)
2802                   add_surface_reloc(cmd_buffer, surface_state, address);
2803             } else {
2804                surface_state = cmd_buffer->device->null_surface_state;
2805             }
2806             break;
2807          }
2808 
2809          case VK_DESCRIPTOR_TYPE_STORAGE_TEXEL_BUFFER:
2810             if (desc->buffer_view) {
2811                surface_state = (binding->write_only)
2812                   ? desc->buffer_view->writeonly_storage_surface_state
2813                   : desc->buffer_view->storage_surface_state;
2814                assert(surface_state.alloc_size);
2815                if (need_client_mem_relocs) {
2816                   add_surface_reloc(cmd_buffer, surface_state,
2817                                     desc->buffer_view->address);
2818                }
2819             } else {
2820                surface_state = cmd_buffer->device->null_surface_state;
2821             }
2822             break;
2823 
2824          default:
2825             assert(!"Invalid descriptor type");
2826             continue;
2827          }
2828          assert(surface_state.map);
2829          bt_map[s] = surface_state.offset + state_offset;
2830          break;
2831       }
2832       }
2833    }
2834 
2835    return VK_SUCCESS;
2836 }
2837 
2838 static VkResult
2839 emit_samplers(struct anv_cmd_buffer *cmd_buffer,
2840               struct anv_cmd_pipeline_state *pipe_state,
2841               struct anv_shader_bin *shader,
2842               struct anv_state *state)
2843 {
2844    struct anv_pipeline_bind_map *map = &shader->bind_map;
2845    if (map->sampler_count == 0) {
2846       *state = (struct anv_state) { 0, };
2847       return VK_SUCCESS;
2848    }
2849 
2850    uint32_t size = map->sampler_count * 16;
2851    *state = anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, size, 32);
2852 
2853    if (state->map == NULL)
2854       return VK_ERROR_OUT_OF_DEVICE_MEMORY;
2855 
2856    for (uint32_t s = 0; s < map->sampler_count; s++) {
2857       struct anv_pipeline_binding *binding = &map->sampler_to_descriptor[s];
2858       const struct anv_descriptor *desc =
2859          &pipe_state->descriptors[binding->set]->descriptors[binding->index];
2860 
2861       if (desc->type != VK_DESCRIPTOR_TYPE_SAMPLER &&
2862           desc->type != VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER)
2863          continue;
2864 
2865       struct anv_sampler *sampler = desc->sampler;
2866 
2867       /* This can happen if we have an unfilled slot since TYPE_SAMPLER
2868        * happens to be zero.
2869        */
2870       if (sampler == NULL)
2871          continue;
2872 
2873       memcpy(state->map + (s * 16),
2874              sampler->state[binding->plane], sizeof(sampler->state[0]));
2875    }
2876 
2877    return VK_SUCCESS;
2878 }
2879 
2880 static uint32_t
2881 flush_descriptor_sets(struct anv_cmd_buffer *cmd_buffer,
2882                       struct anv_cmd_pipeline_state *pipe_state,
2883                       struct anv_shader_bin **shaders,
2884                       uint32_t num_shaders)
2885 {
2886    const VkShaderStageFlags dirty = cmd_buffer->state.descriptors_dirty;
2887    VkShaderStageFlags flushed = 0;
2888 
2889    VkResult result = VK_SUCCESS;
2890    for (uint32_t i = 0; i < num_shaders; i++) {
2891       if (!shaders[i])
2892          continue;
2893 
2894       gl_shader_stage stage = shaders[i]->stage;
2895       VkShaderStageFlags vk_stage = mesa_to_vk_shader_stage(stage);
2896       if ((vk_stage & dirty) == 0)
2897          continue;
2898 
2899       result = emit_samplers(cmd_buffer, pipe_state, shaders[i],
2900                              &cmd_buffer->state.samplers[stage]);
2901       if (result != VK_SUCCESS)
2902          break;
2903       result = emit_binding_table(cmd_buffer, pipe_state, shaders[i],
2904                                   &cmd_buffer->state.binding_tables[stage]);
2905       if (result != VK_SUCCESS)
2906          break;
2907 
2908       flushed |= vk_stage;
2909    }
2910 
2911    if (result != VK_SUCCESS) {
2912       assert(result == VK_ERROR_OUT_OF_DEVICE_MEMORY);
2913 
2914       result = anv_cmd_buffer_new_binding_table_block(cmd_buffer);
2915       if (result != VK_SUCCESS)
2916          return 0;
2917 
2918       /* Re-emit state base addresses so we get the new surface state base
2919        * address before we start emitting binding tables etc.
2920        */
2921       genX(cmd_buffer_emit_state_base_address)(cmd_buffer);
2922 
2923       /* Re-emit all active binding tables */
2924       flushed = 0;
2925 
2926       for (uint32_t i = 0; i < num_shaders; i++) {
2927          if (!shaders[i])
2928             continue;
2929 
2930          gl_shader_stage stage = shaders[i]->stage;
2931 
2932          result = emit_samplers(cmd_buffer, pipe_state, shaders[i],
2933                                 &cmd_buffer->state.samplers[stage]);
2934          if (result != VK_SUCCESS) {
2935             anv_batch_set_error(&cmd_buffer->batch, result);
2936             return 0;
2937          }
2938          result = emit_binding_table(cmd_buffer, pipe_state, shaders[i],
2939                                      &cmd_buffer->state.binding_tables[stage]);
2940          if (result != VK_SUCCESS) {
2941             anv_batch_set_error(&cmd_buffer->batch, result);
2942             return 0;
2943          }
2944 
2945          flushed |= mesa_to_vk_shader_stage(stage);
2946       }
2947    }
2948 
2949    cmd_buffer->state.descriptors_dirty &= ~flushed;
2950 
2951    return flushed;
2952 }
2953 
2954 static void
2955 cmd_buffer_emit_descriptor_pointers(struct anv_cmd_buffer *cmd_buffer,
2956                                     uint32_t stages)
2957 {
2958    static const uint32_t sampler_state_opcodes[] = {
2959       [MESA_SHADER_VERTEX]                      = 43,
2960       [MESA_SHADER_TESS_CTRL]                   = 44, /* HS */
2961       [MESA_SHADER_TESS_EVAL]                   = 45, /* DS */
2962       [MESA_SHADER_GEOMETRY]                    = 46,
2963       [MESA_SHADER_FRAGMENT]                    = 47,
2964       [MESA_SHADER_COMPUTE]                     = 0,
2965    };
2966 
2967    static const uint32_t binding_table_opcodes[] = {
2968       [MESA_SHADER_VERTEX]                      = 38,
2969       [MESA_SHADER_TESS_CTRL]                   = 39,
2970       [MESA_SHADER_TESS_EVAL]                   = 40,
2971       [MESA_SHADER_GEOMETRY]                    = 41,
2972       [MESA_SHADER_FRAGMENT]                    = 42,
2973       [MESA_SHADER_COMPUTE]                     = 0,
2974    };
2975 
2976    anv_foreach_stage(s, stages) {
2977       assert(s < ARRAY_SIZE(binding_table_opcodes));
2978       assert(binding_table_opcodes[s] > 0);
2979 
2980       if (cmd_buffer->state.samplers[s].alloc_size > 0) {
2981          anv_batch_emit(&cmd_buffer->batch,
2982                         GENX(3DSTATE_SAMPLER_STATE_POINTERS_VS), ssp) {
2983             ssp._3DCommandSubOpcode = sampler_state_opcodes[s];
2984             ssp.PointertoVSSamplerState = cmd_buffer->state.samplers[s].offset;
2985          }
2986       }
2987 
2988       /* Always emit binding table pointers if we're asked to, since on SKL
2989        * this is what flushes push constants. */
2990       anv_batch_emit(&cmd_buffer->batch,
2991                      GENX(3DSTATE_BINDING_TABLE_POINTERS_VS), btp) {
2992          btp._3DCommandSubOpcode = binding_table_opcodes[s];
2993          btp.PointertoVSBindingTable = cmd_buffer->state.binding_tables[s].offset;
2994       }
2995    }
2996 }
2997 
2998 static struct anv_address
2999 get_push_range_address(struct anv_cmd_buffer *cmd_buffer,
3000                        gl_shader_stage stage,
3001                        const struct anv_push_range *range)
3002 {
3003    struct anv_cmd_graphics_state *gfx_state = &cmd_buffer->state.gfx;
3004    switch (range->set) {
3005    case ANV_DESCRIPTOR_SET_DESCRIPTORS: {
3006       /* This is a descriptor set buffer so the set index is
3007        * actually given by binding->binding.  (Yes, that's
3008        * confusing.)
3009        */
3010       struct anv_descriptor_set *set =
3011          gfx_state->base.descriptors[range->index];
3012       return anv_descriptor_set_address(cmd_buffer, set);
3013    }
3014 
3015    case ANV_DESCRIPTOR_SET_PUSH_CONSTANTS: {
3016       if (gfx_state->base.push_constants_state.alloc_size == 0) {
3017          gfx_state->base.push_constants_state =
3018             anv_cmd_buffer_gfx_push_constants(cmd_buffer);
3019       }
3020       return (struct anv_address) {
3021          .bo = cmd_buffer->device->dynamic_state_pool.block_pool.bo,
3022          .offset = gfx_state->base.push_constants_state.offset,
3023       };
3024    }
3025 
3026    default: {
3027       assert(range->set < MAX_SETS);
3028       struct anv_descriptor_set *set =
3029          gfx_state->base.descriptors[range->set];
3030       const struct anv_descriptor *desc =
3031          &set->descriptors[range->index];
3032 
3033       if (desc->type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) {
3034          if (desc->buffer_view)
3035             return desc->buffer_view->address;
3036       } else {
3037          assert(desc->type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC);
3038          if (desc->buffer) {
3039             const struct anv_push_constants *push =
3040                &gfx_state->base.push_constants;
3041             uint32_t dynamic_offset =
3042                push->dynamic_offsets[range->dynamic_offset_index];
3043             return anv_address_add(desc->buffer->address,
3044                                    desc->offset + dynamic_offset);
3045          }
3046       }
3047 
3048       /* For NULL UBOs, we just return an address in the workaround BO.  We do
3049        * writes to it for workarounds but always at the bottom.  The higher
3050        * bytes should be all zeros.
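       *
       * Reading the assert below: a push range is at most 64 registers of
       * 32 bytes each, i.e. 2048 bytes, so starting the address at offset
       * 1024 presumably keeps those reads clear of the low bytes that the
       * workaround writes actually touch.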
3051        */
3052       assert(range->length * 32 <= 2048);
3053       return (struct anv_address) {
3054          .bo = cmd_buffer->device->workaround_bo,
3055          .offset = 1024,
3056       };
3057    }
3058    }
3059 }
3060 
3061 
3062 /** Returns the size in bytes of the bound buffer
3063  *
3064  * The returned size is relative to the start of the buffer, not the start
3065  * of the range, and may be smaller than
3066  *
3067  *    (range->start + range->length) * 32
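 *
 * For example (hypothetical numbers): a range with start = 2 and length = 4
 * covers bytes 64..191 of the buffer, but if only 100 bytes of that buffer
 * are actually bound, this returns 100 rather than 192.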
3068  */
3069 static uint32_t
3070 get_push_range_bound_size(struct anv_cmd_buffer *cmd_buffer,
3071                           gl_shader_stage stage,
3072                           const struct anv_push_range *range)
3073 {
3074    assert(stage != MESA_SHADER_COMPUTE);
3075    const struct anv_cmd_graphics_state *gfx_state = &cmd_buffer->state.gfx;
3076    switch (range->set) {
3077    case ANV_DESCRIPTOR_SET_DESCRIPTORS: {
3078       struct anv_descriptor_set *set =
3079          gfx_state->base.descriptors[range->index];
3080       assert(range->start * 32 < set->desc_mem.alloc_size);
3081       assert((range->start + range->length) * 32 <= set->desc_mem.alloc_size);
3082       return set->desc_mem.alloc_size;
3083    }
3084 
3085    case ANV_DESCRIPTOR_SET_PUSH_CONSTANTS:
3086       return (range->start + range->length) * 32;
3087 
3088    default: {
3089       assert(range->set < MAX_SETS);
3090       struct anv_descriptor_set *set =
3091          gfx_state->base.descriptors[range->set];
3092       const struct anv_descriptor *desc =
3093          &set->descriptors[range->index];
3094 
3095       if (desc->type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER) {
3096          if (!desc->buffer_view)
3097             return 0;
3098 
3099          if (range->start * 32 > desc->buffer_view->range)
3100             return 0;
3101 
3102          return desc->buffer_view->range;
3103       } else {
3104          if (!desc->buffer)
3105             return 0;
3106 
3107          assert(desc->type == VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER_DYNAMIC);
3108          /* Compute the offset within the buffer */
3109          const struct anv_push_constants *push =
3110             &gfx_state->base.push_constants;
3111          uint32_t dynamic_offset =
3112             push->dynamic_offsets[range->dynamic_offset_index];
3113          uint64_t offset = desc->offset + dynamic_offset;
3114          /* Clamp to the buffer size */
3115          offset = MIN2(offset, desc->buffer->size);
3116          /* Clamp the range to the buffer size */
3117          uint32_t bound_range = MIN2(desc->range, desc->buffer->size - offset);
3118 
3119          /* Align the range for consistency */
3120          bound_range = align_u32(bound_range, ANV_UBO_ALIGNMENT);
3121 
3122          return bound_range;
3123       }
3124    }
3125    }
3126 }
3127 
3128 static void
3129 cmd_buffer_emit_push_constant(struct anv_cmd_buffer *cmd_buffer,
3130                               gl_shader_stage stage,
3131                               struct anv_address *buffers,
3132                               unsigned buffer_count)
3133 {
3134    const struct anv_cmd_graphics_state *gfx_state = &cmd_buffer->state.gfx;
3135    const struct anv_graphics_pipeline *pipeline = gfx_state->pipeline;
3136 
3137    static const uint32_t push_constant_opcodes[] = {
3138       [MESA_SHADER_VERTEX]                      = 21,
3139       [MESA_SHADER_TESS_CTRL]                   = 25, /* HS */
3140       [MESA_SHADER_TESS_EVAL]                   = 26, /* DS */
3141       [MESA_SHADER_GEOMETRY]                    = 22,
3142       [MESA_SHADER_FRAGMENT]                    = 23,
3143       [MESA_SHADER_COMPUTE]                     = 0,
3144    };
3145 
3146    assert(stage < ARRAY_SIZE(push_constant_opcodes));
3147    assert(push_constant_opcodes[stage] > 0);
3148 
3149    anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_VS), c) {
3150       c._3DCommandSubOpcode = push_constant_opcodes[stage];
3151 
3152       if (anv_pipeline_has_stage(pipeline, stage)) {
3153          const struct anv_pipeline_bind_map *bind_map =
3154             &pipeline->shaders[stage]->bind_map;
3155 
3156 #if GEN_GEN >= 9
3157          /* This field exists since Gen8.  However, the Broadwell PRM says:
3158           *
3159           *    "Constant Buffer Object Control State must be always programmed
3160           *    to zero."
3161           *
3162           * This restriction does not exist on any newer platforms.
3163           *
3164           * We only have one MOCS field for the whole packet, not one per
3165           * buffer.  We could go out of our way here to walk over all of the
3166           * buffers and see if any of them are used externally and use the
3167           * external MOCS.  However, the notion that someone would use the
3168           * same bit of memory for both scanout and a UBO is nuts.  Let's not
3169           * bother and assume it's all internal.
3170           */
3171          c.MOCS = isl_mocs(&cmd_buffer->device->isl_dev, 0);
3172 #endif
3173 
3174 #if GEN_GEN >= 8 || GEN_IS_HASWELL
3175          /* The Skylake PRM contains the following restriction:
3176           *
3177           *    "The driver must ensure The following case does not occur
3178           *     without a flush to the 3D engine: 3DSTATE_CONSTANT_* with
3179           *     buffer 3 read length equal to zero committed followed by a
3180           *     3DSTATE_CONSTANT_* with buffer 0 read length not equal to
3181           *     zero committed."
3182           *
3183           * To avoid this, we program the buffers in the highest slots.
3184           * This way, slot 0 is only used if slot 3 is also used.
3185           */
3186          assert(buffer_count <= 4);
3187          const unsigned shift = 4 - buffer_count;
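         /* Illustration with assumed counts: if only two ranges are in use,
          * shift = 2 and the loop below programs them into constant buffer
          * slots 2 and 3.  Slot 0 is therefore never used without slot 3,
          * which is exactly the case the restriction above forbids.
          */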
3188          for (unsigned i = 0; i < buffer_count; i++) {
3189             const struct anv_push_range *range = &bind_map->push_ranges[i];
3190 
3191             /* At this point we only have non-empty ranges */
3192             assert(range->length > 0);
3193 
3194             /* For Ivy Bridge, make sure we only set the first range (actual
3195              * push constants)
3196              */
3197             assert((GEN_GEN >= 8 || GEN_IS_HASWELL) || i == 0);
3198 
3199             c.ConstantBody.ReadLength[i + shift] = range->length;
3200             c.ConstantBody.Buffer[i + shift] =
3201                anv_address_add(buffers[i], range->start * 32);
3202          }
3203 #else
3204          /* For Ivy Bridge, push constants are relative to dynamic state
3205           * base address and we only ever push actual push constants.
3206           */
3207          if (bind_map->push_ranges[0].length > 0) {
3208             assert(buffer_count == 1);
3209             assert(bind_map->push_ranges[0].set ==
3210                    ANV_DESCRIPTOR_SET_PUSH_CONSTANTS);
3211             assert(buffers[0].bo ==
3212                    cmd_buffer->device->dynamic_state_pool.block_pool.bo);
3213             c.ConstantBody.ReadLength[0] = bind_map->push_ranges[0].length;
3214             c.ConstantBody.Buffer[0].bo = NULL;
3215             c.ConstantBody.Buffer[0].offset = buffers[0].offset;
3216          }
3217          assert(bind_map->push_ranges[1].length == 0);
3218          assert(bind_map->push_ranges[2].length == 0);
3219          assert(bind_map->push_ranges[3].length == 0);
3220 #endif
3221       }
3222    }
3223 }
3224 
3225 #if GEN_GEN >= 12
3226 static void
3227 cmd_buffer_emit_push_constant_all(struct anv_cmd_buffer *cmd_buffer,
3228                                   uint32_t shader_mask,
3229                                   struct anv_address *buffers,
3230                                   uint32_t buffer_count)
3231 {
3232    if (buffer_count == 0) {
3233       anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CONSTANT_ALL), c) {
3234          c.ShaderUpdateEnable = shader_mask;
3235          c.MOCS = isl_mocs(&cmd_buffer->device->isl_dev, 0);
3236       }
3237       return;
3238    }
3239 
3240    const struct anv_cmd_graphics_state *gfx_state = &cmd_buffer->state.gfx;
3241    const struct anv_graphics_pipeline *pipeline = gfx_state->pipeline;
3242 
3243    static const uint32_t push_constant_opcodes[] = {
3244       [MESA_SHADER_VERTEX]                      = 21,
3245       [MESA_SHADER_TESS_CTRL]                   = 25, /* HS */
3246       [MESA_SHADER_TESS_EVAL]                   = 26, /* DS */
3247       [MESA_SHADER_GEOMETRY]                    = 22,
3248       [MESA_SHADER_FRAGMENT]                    = 23,
3249       [MESA_SHADER_COMPUTE]                     = 0,
3250    };
3251 
3252    gl_shader_stage stage = vk_to_mesa_shader_stage(shader_mask);
3253    assert(stage < ARRAY_SIZE(push_constant_opcodes));
3254    assert(push_constant_opcodes[stage] > 0);
3255 
3256    const struct anv_pipeline_bind_map *bind_map =
3257       &pipeline->shaders[stage]->bind_map;
3258 
3259    uint32_t *dw;
3260    const uint32_t buffer_mask = (1 << buffer_count) - 1;
3261    const uint32_t num_dwords = 2 + 2 * buffer_count;
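   /* A rough sketch of the packet layout as read from the emit below: two
    * header dwords plus a two-dword pointer/length pair per buffer, so
    * buffer_count = 2 gives num_dwords = 6 and PointerBufferMask = 0b0011.
    */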
3262 
3263    dw = anv_batch_emitn(&cmd_buffer->batch, num_dwords,
3264                         GENX(3DSTATE_CONSTANT_ALL),
3265                         .ShaderUpdateEnable = shader_mask,
3266                         .PointerBufferMask = buffer_mask,
3267                         .MOCS = isl_mocs(&cmd_buffer->device->isl_dev, 0));
3268 
3269    for (int i = 0; i < buffer_count; i++) {
3270       const struct anv_push_range *range = &bind_map->push_ranges[i];
3271       GENX(3DSTATE_CONSTANT_ALL_DATA_pack)(
3272          &cmd_buffer->batch, dw + 2 + i * 2,
3273          &(struct GENX(3DSTATE_CONSTANT_ALL_DATA)) {
3274             .PointerToConstantBuffer =
3275                anv_address_add(buffers[i], range->start * 32),
3276             .ConstantBufferReadLength = range->length,
3277          });
3278    }
3279 }
3280 #endif
3281 
3282 static void
3283 cmd_buffer_flush_push_constants(struct anv_cmd_buffer *cmd_buffer,
3284                                 VkShaderStageFlags dirty_stages)
3285 {
3286    VkShaderStageFlags flushed = 0;
3287    struct anv_cmd_graphics_state *gfx_state = &cmd_buffer->state.gfx;
3288    const struct anv_graphics_pipeline *pipeline = gfx_state->pipeline;
3289 
3290 #if GEN_GEN >= 12
3291    uint32_t nobuffer_stages = 0;
3292 #endif
3293 
3294    /* Compute robust pushed register access mask for each stage. */
3295    if (cmd_buffer->device->robust_buffer_access) {
3296       anv_foreach_stage(stage, dirty_stages) {
3297          if (!anv_pipeline_has_stage(pipeline, stage))
3298             continue;
3299 
3300          const struct anv_pipeline_bind_map *bind_map =
3301             &pipeline->shaders[stage]->bind_map;
3302          struct anv_push_constants *push = &gfx_state->base.push_constants;
3303 
3304          push->push_reg_mask[stage] = 0;
3305          /* Start of the current range in the shader, relative to the start of
3306           * push constants in the shader.
3307           */
3308          unsigned range_start_reg = 0;
3309          for (unsigned i = 0; i < 4; i++) {
3310             const struct anv_push_range *range = &bind_map->push_ranges[i];
3311             if (range->length == 0)
3312                continue;
3313 
3314             unsigned bound_size =
3315                get_push_range_bound_size(cmd_buffer, stage, range);
3316             if (bound_size >= range->start * 32) {
3317                unsigned bound_regs =
3318                   MIN2(DIV_ROUND_UP(bound_size, 32) - range->start,
3319                        range->length);
3320                assert(range_start_reg + bound_regs <= 64);
3321                push->push_reg_mask[stage] |= BITFIELD64_RANGE(range_start_reg,
3322                                                               bound_regs);
3323             }
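            /* Worked example with invented values: a range with start = 2
             * and length = 8 backed by 200 bound bytes gives
             * DIV_ROUND_UP(200, 32) = 7 registers of valid data, so
             * bound_regs = MIN2(7 - 2, 8) = 5 and five consecutive bits are
             * set starting at range_start_reg.
             */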
3324 
3325             cmd_buffer->state.push_constants_dirty |=
3326                mesa_to_vk_shader_stage(stage);
3327 
3328             range_start_reg += range->length;
3329          }
3330       }
3331    }
3332 
3333    /* Resets the push constant state so that we allocate a new one if
3334     * needed.
3335     */
3336    gfx_state->base.push_constants_state = ANV_STATE_NULL;
3337 
3338    anv_foreach_stage(stage, dirty_stages) {
3339       unsigned buffer_count = 0;
3340       flushed |= mesa_to_vk_shader_stage(stage);
3341       UNUSED uint32_t max_push_range = 0;
3342 
3343       struct anv_address buffers[4] = {};
3344       if (anv_pipeline_has_stage(pipeline, stage)) {
3345          const struct anv_pipeline_bind_map *bind_map =
3346             &pipeline->shaders[stage]->bind_map;
3347 
3348          /* We have to gather buffer addresses as a second step because the
3349           * loop above puts data into the push constant area and the call to
3350           * get_push_range_address is what locks our push constants and copies
3351           * them into the actual GPU buffer.  If we did the two loops at the
3352           * same time, we'd risk only having some of the sizes in the push
3353           * constant buffer when we did the copy.
3354           */
3355          for (unsigned i = 0; i < 4; i++) {
3356             const struct anv_push_range *range = &bind_map->push_ranges[i];
3357             if (range->length == 0)
3358                break;
3359 
3360             buffers[i] = get_push_range_address(cmd_buffer, stage, range);
3361             max_push_range = MAX2(max_push_range, range->length);
3362             buffer_count++;
3363          }
3364 
3365          /* We have at most 4 buffers but they should be tightly packed */
3366          for (unsigned i = buffer_count; i < 4; i++)
3367             assert(bind_map->push_ranges[i].length == 0);
3368       }
3369 
3370 #if GEN_GEN >= 12
3371       /* If this stage doesn't have any push constants, emit it later in a
3372        * single CONSTANT_ALL packet.
3373        */
3374       if (buffer_count == 0) {
3375          nobuffer_stages |= 1 << stage;
3376          continue;
3377       }
3378 
3379       /* The Constant Buffer Read Length field from 3DSTATE_CONSTANT_ALL
3380        * contains only 5 bits, so we can only use it for buffers smaller than
3381        * 32.
3382        */
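      /* In other words (assuming the field counts 32-byte registers), a
       * stage whose largest range is 32 registers or more falls back to the
       * per-stage 3DSTATE_CONSTANT_* path below.
       */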
3383       if (max_push_range < 32) {
3384          cmd_buffer_emit_push_constant_all(cmd_buffer, 1 << stage,
3385                                            buffers, buffer_count);
3386          continue;
3387       }
3388 #endif
3389 
3390       cmd_buffer_emit_push_constant(cmd_buffer, stage, buffers, buffer_count);
3391    }
3392 
3393 #if GEN_GEN >= 12
3394    if (nobuffer_stages)
3395       cmd_buffer_emit_push_constant_all(cmd_buffer, nobuffer_stages, NULL, 0);
3396 #endif
3397 
3398    cmd_buffer->state.push_constants_dirty &= ~flushed;
3399 }
3400 
3401 static void
3402 cmd_buffer_emit_clip(struct anv_cmd_buffer *cmd_buffer)
3403 {
3404    const uint32_t clip_states =
3405 #if GEN_GEN <= 7
3406       ANV_CMD_DIRTY_DYNAMIC_FRONT_FACE |
3407       ANV_CMD_DIRTY_DYNAMIC_CULL_MODE |
3408 #endif
3409       ANV_CMD_DIRTY_DYNAMIC_VIEWPORT |
3410       ANV_CMD_DIRTY_PIPELINE;
3411 
3412    if ((cmd_buffer->state.gfx.dirty & clip_states) == 0)
3413       return;
3414 
3415 #if GEN_GEN <= 7
3416    const struct anv_dynamic_state *d = &cmd_buffer->state.gfx.dynamic;
3417 #endif
3418    struct GENX(3DSTATE_CLIP) clip = {
3419       GENX(3DSTATE_CLIP_header),
3420 #if GEN_GEN <= 7
3421       .FrontWinding = genX(vk_to_gen_front_face)[d->front_face],
3422       .CullMode     = genX(vk_to_gen_cullmode)[d->cull_mode],
3423 #endif
3424    };
3425    uint32_t dwords[GENX(3DSTATE_CLIP_length)];
3426 
3427    struct anv_graphics_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
3428    const struct brw_vue_prog_data *last =
3429       anv_pipeline_get_last_vue_prog_data(pipeline);
3430    if (last->vue_map.slots_valid & VARYING_BIT_VIEWPORT) {
3431       clip.MaximumVPIndex =
3432          cmd_buffer->state.gfx.dynamic.viewport.count > 0 ?
3433          cmd_buffer->state.gfx.dynamic.viewport.count - 1 : 0;
3434    }
3435 
3436    GENX(3DSTATE_CLIP_pack)(NULL, dwords, &clip);
3437    anv_batch_emit_merge(&cmd_buffer->batch, dwords,
3438                         pipeline->gen7.clip);
3439 }
3440 
3441 void
3442 genX(cmd_buffer_flush_state)(struct anv_cmd_buffer *cmd_buffer)
3443 {
3444    struct anv_graphics_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
3445    uint32_t *p;
3446 
3447    assert((pipeline->active_stages & VK_SHADER_STAGE_COMPUTE_BIT) == 0);
3448 
3449    genX(cmd_buffer_config_l3)(cmd_buffer, pipeline->base.l3_config);
3450 
3451    genX(cmd_buffer_emit_hashing_mode)(cmd_buffer, UINT_MAX, UINT_MAX, 1);
3452 
3453    genX(flush_pipeline_select_3d)(cmd_buffer);
3454 
3455    /* Apply any pending pipeline flushes we may have.  We want to apply them
3456     * now because, if any of those flushes are for things like push constants,
3457     * the GPU will read the state at weird times.
3458     */
3459    genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
3460 
3461    uint32_t vb_emit = cmd_buffer->state.gfx.vb_dirty & pipeline->vb_used;
3462    if (cmd_buffer->state.gfx.dirty & ANV_CMD_DIRTY_PIPELINE)
3463       vb_emit |= pipeline->vb_used;
3464 
3465    if (vb_emit) {
3466       const uint32_t num_buffers = __builtin_popcount(vb_emit);
3467       const uint32_t num_dwords = 1 + num_buffers * 4;
3468 
3469       p = anv_batch_emitn(&cmd_buffer->batch, num_dwords,
3470                           GENX(3DSTATE_VERTEX_BUFFERS));
3471       uint32_t vb, i = 0;
3472       for_each_bit(vb, vb_emit) {
3473          struct anv_buffer *buffer = cmd_buffer->state.vertex_bindings[vb].buffer;
3474          uint32_t offset = cmd_buffer->state.vertex_bindings[vb].offset;
3475 
3476          /* If dynamic, use stride/size from vertex binding, otherwise use
3477           * stride/size that was setup in the pipeline object.
3478           */
3479          bool dynamic_stride = cmd_buffer->state.gfx.dynamic.dyn_vbo_stride;
3480          bool dynamic_size = cmd_buffer->state.gfx.dynamic.dyn_vbo_size;
3481 
3482          struct GENX(VERTEX_BUFFER_STATE) state;
3483          if (buffer) {
3484             uint32_t stride = dynamic_stride ?
3485                cmd_buffer->state.vertex_bindings[vb].stride : pipeline->vb[vb].stride;
3486             /* From the Vulkan spec (vkCmdBindVertexBuffers2EXT):
3487              *
3488              * "If pname:pSizes is not NULL then pname:pSizes[i] specifies
3489              * the bound size of the vertex buffer starting from the corresponding
3490              * elements of pname:pBuffers[i] plus pname:pOffsets[i]."
3491              */
3492             UNUSED uint32_t size = dynamic_size ?
3493                cmd_buffer->state.vertex_bindings[vb].size : buffer->size - offset;
3494 
3495             state = (struct GENX(VERTEX_BUFFER_STATE)) {
3496                .VertexBufferIndex = vb,
3497 
3498                .MOCS = anv_mocs(cmd_buffer->device, buffer->address.bo,
3499                                 ISL_SURF_USAGE_VERTEX_BUFFER_BIT),
3500 #if GEN_GEN <= 7
3501                .BufferAccessType = pipeline->vb[vb].instanced ? INSTANCEDATA : VERTEXDATA,
3502                .InstanceDataStepRate = pipeline->vb[vb].instance_divisor,
3503 #endif
3504                .AddressModifyEnable = true,
3505                .BufferPitch = stride,
3506                .BufferStartingAddress = anv_address_add(buffer->address, offset),
3507                .NullVertexBuffer = offset >= buffer->size,
3508 
3509 #if GEN_GEN >= 8
3510                .BufferSize = size,
3511 #else
3512                /* XXX: to handle dynamic offset for older gens we might want
3513                 * to modify EndAddress, but there are issues when doing so:
3514                 *
3515                 * https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/7439
3516                 */
3517                .EndAddress = anv_address_add(buffer->address, buffer->size - 1),
3518 #endif
3519             };
3520          } else {
3521             state = (struct GENX(VERTEX_BUFFER_STATE)) {
3522                .VertexBufferIndex = vb,
3523                .NullVertexBuffer = true,
3524             };
3525          }
3526 
3527 #if GEN_GEN >= 8 && GEN_GEN <= 9
3528          genX(cmd_buffer_set_binding_for_gen8_vb_flush)(cmd_buffer, vb,
3529                                                         state.BufferStartingAddress,
3530                                                         state.BufferSize);
3531 #endif
3532 
3533          GENX(VERTEX_BUFFER_STATE_pack)(&cmd_buffer->batch, &p[1 + i * 4], &state);
3534          i++;
3535       }
3536    }
3537 
3538    cmd_buffer->state.gfx.vb_dirty &= ~vb_emit;
3539 
3540    if ((cmd_buffer->state.gfx.dirty & ANV_CMD_DIRTY_XFB_ENABLE) ||
3541        (GEN_GEN == 7 && (cmd_buffer->state.gfx.dirty &
3542                          ANV_CMD_DIRTY_PIPELINE))) {
3543       /* We don't need any per-buffer dirty tracking because you're not
3544        * allowed to bind different XFB buffers while XFB is enabled.
3545        */
3546       for (unsigned idx = 0; idx < MAX_XFB_BUFFERS; idx++) {
3547          struct anv_xfb_binding *xfb = &cmd_buffer->state.xfb_bindings[idx];
3548          anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_SO_BUFFER), sob) {
3549 #if GEN_GEN < 12
3550             sob.SOBufferIndex = idx;
3551 #else
3552             sob._3DCommandOpcode = 0;
3553             sob._3DCommandSubOpcode = SO_BUFFER_INDEX_0_CMD + idx;
3554 #endif
3555 
3556             if (cmd_buffer->state.xfb_enabled && xfb->buffer && xfb->size != 0) {
3557                sob.MOCS = isl_mocs(&cmd_buffer->device->isl_dev, 0);
3558                sob.SurfaceBaseAddress = anv_address_add(xfb->buffer->address,
3559                                                         xfb->offset);
3560 #if GEN_GEN >= 8
3561                sob.SOBufferEnable = true;
3562                sob.StreamOffsetWriteEnable = false;
3563                /* Size is in DWords - 1 */
3564                sob.SurfaceSize = DIV_ROUND_UP(xfb->size, 4) - 1;
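               /* e.g. (made-up size) an 8000-byte binding gives
                * DIV_ROUND_UP(8000, 4) - 1 = 1999.
                */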
3565 #else
3566                /* We don't have SOBufferEnable in 3DSTATE_SO_BUFFER on Gen7 so
3567                 * we trust in SurfaceEndAddress = SurfaceBaseAddress = 0 (the
3568                 * default for an empty SO_BUFFER packet) to disable them.
3569                 */
3570                sob.SurfacePitch = pipeline->gen7.xfb_bo_pitch[idx];
3571                sob.SurfaceEndAddress = anv_address_add(xfb->buffer->address,
3572                                                        xfb->offset + xfb->size);
3573 #endif
3574             }
3575          }
3576       }
3577 
3578       /* CNL and later require a CS stall after 3DSTATE_SO_BUFFER */
3579       if (GEN_GEN >= 10)
3580          cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_CS_STALL_BIT;
3581    }
3582 
3583    if (cmd_buffer->state.gfx.dirty & ANV_CMD_DIRTY_PIPELINE) {
3584       anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->base.batch);
3585 
3586       /* If the pipeline changed, we may need to re-allocate push constant
3587        * space in the URB.
3588        */
3589       cmd_buffer_alloc_push_constants(cmd_buffer);
3590    }
3591 
3592    if (cmd_buffer->state.gfx.dirty & ANV_CMD_DIRTY_PIPELINE)
3593       cmd_buffer->state.gfx.primitive_topology = pipeline->topology;
3594 
3595 #if GEN_GEN <= 7
3596    if (cmd_buffer->state.descriptors_dirty & VK_SHADER_STAGE_VERTEX_BIT ||
3597        cmd_buffer->state.push_constants_dirty & VK_SHADER_STAGE_VERTEX_BIT) {
3598       /* From the IVB PRM Vol. 2, Part 1, Section 3.2.1:
3599        *
3600        *    "A PIPE_CONTROL with Post-Sync Operation set to 1h and a depth
3601        *    stall needs to be sent just prior to any 3DSTATE_VS,
3602        *    3DSTATE_URB_VS, 3DSTATE_CONSTANT_VS,
3603        *    3DSTATE_BINDING_TABLE_POINTER_VS,
3604        *    3DSTATE_SAMPLER_STATE_POINTER_VS command.  Only one
3605        *    PIPE_CONTROL needs to be sent before any combination of VS
3606        *    associated 3DSTATE."
3607        */
3608       anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
3609          pc.DepthStallEnable  = true;
3610          pc.PostSyncOperation = WriteImmediateData;
3611          pc.Address           = cmd_buffer->device->workaround_address;
3612       }
3613    }
3614 #endif
3615 
3616    /* Render targets live in the same binding table as fragment descriptors */
3617    if (cmd_buffer->state.gfx.dirty & ANV_CMD_DIRTY_RENDER_TARGETS)
3618       cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_FRAGMENT_BIT;
3619 
3620    /* We emit the binding tables and sampler tables first, then emit push
3621     * constants and then finally emit binding table and sampler table
3622     * pointers.  It has to happen in this order, since emitting the binding
3623     * tables may change the push constants (in case of storage images). After
3624     * emitting push constants, on SKL+ we have to emit the corresponding
3625     * 3DSTATE_BINDING_TABLE_POINTER_* for the push constants to take effect.
3626     */
3627    uint32_t dirty = 0;
3628    if (cmd_buffer->state.descriptors_dirty) {
3629       dirty = flush_descriptor_sets(cmd_buffer,
3630                                     &cmd_buffer->state.gfx.base,
3631                                     pipeline->shaders,
3632                                     ARRAY_SIZE(pipeline->shaders));
3633    }
3634 
3635    if (dirty || cmd_buffer->state.push_constants_dirty) {
3636       /* Because we're pushing UBOs, we have to push whenever either
3637        * descriptors or push constants is dirty.
3638        */
3639       dirty |= cmd_buffer->state.push_constants_dirty;
3640       dirty &= ANV_STAGE_MASK & VK_SHADER_STAGE_ALL_GRAPHICS;
3641       cmd_buffer_flush_push_constants(cmd_buffer, dirty);
3642    }
3643 
3644    if (dirty)
3645       cmd_buffer_emit_descriptor_pointers(cmd_buffer, dirty);
3646 
3647    cmd_buffer_emit_clip(cmd_buffer);
3648 
3649    if (cmd_buffer->state.gfx.dirty & ANV_CMD_DIRTY_DYNAMIC_VIEWPORT)
3650       gen8_cmd_buffer_emit_viewport(cmd_buffer);
3651 
3652    if (cmd_buffer->state.gfx.dirty & (ANV_CMD_DIRTY_DYNAMIC_VIEWPORT |
3653                                       ANV_CMD_DIRTY_PIPELINE)) {
3654       gen8_cmd_buffer_emit_depth_viewport(cmd_buffer,
3655                                           pipeline->depth_clamp_enable);
3656    }
3657 
3658    if (cmd_buffer->state.gfx.dirty & (ANV_CMD_DIRTY_DYNAMIC_SCISSOR |
3659                                       ANV_CMD_DIRTY_RENDER_TARGETS))
3660       gen7_cmd_buffer_emit_scissor(cmd_buffer);
3661 
3662    genX(cmd_buffer_flush_dynamic_state)(cmd_buffer);
3663 }
3664 
3665 static void
3666 emit_vertex_bo(struct anv_cmd_buffer *cmd_buffer,
3667                struct anv_address addr,
3668                uint32_t size, uint32_t index)
3669 {
3670    uint32_t *p = anv_batch_emitn(&cmd_buffer->batch, 5,
3671                                  GENX(3DSTATE_VERTEX_BUFFERS));
3672 
3673    GENX(VERTEX_BUFFER_STATE_pack)(&cmd_buffer->batch, p + 1,
3674       &(struct GENX(VERTEX_BUFFER_STATE)) {
3675          .VertexBufferIndex = index,
3676          .AddressModifyEnable = true,
3677          .BufferPitch = 0,
3678          .MOCS = addr.bo ? anv_mocs(cmd_buffer->device, addr.bo,
3679                                     ISL_SURF_USAGE_VERTEX_BUFFER_BIT) : 0,
3680          .NullVertexBuffer = size == 0,
3681 #if (GEN_GEN >= 8)
3682          .BufferStartingAddress = addr,
3683          .BufferSize = size
3684 #else
3685          .BufferStartingAddress = addr,
3686          .EndAddress = anv_address_add(addr, size),
3687 #endif
3688       });
3689 
3690    genX(cmd_buffer_set_binding_for_gen8_vb_flush)(cmd_buffer,
3691                                                   index, addr, size);
3692 }
3693 
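/* The helpers below upload firstVertex/firstInstance as a tiny 8-byte vertex
 * buffer bound at ANV_SVGS_VB_INDEX so the vertex shader can read them when
 * vs_prog_data->uses_firstvertex/uses_baseinstance are set.  Sketch of the
 * buffer contents:
 *
 *    dword 0: base vertex   (firstVertex, or vertexOffset for indexed draws)
 *    dword 1: base instance (firstInstance)
 */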
3694 static void
3695 emit_base_vertex_instance_bo(struct anv_cmd_buffer *cmd_buffer,
3696                              struct anv_address addr)
3697 {
3698    emit_vertex_bo(cmd_buffer, addr, addr.bo ? 8 : 0, ANV_SVGS_VB_INDEX);
3699 }
3700 
3701 static void
3702 emit_base_vertex_instance(struct anv_cmd_buffer *cmd_buffer,
3703                           uint32_t base_vertex, uint32_t base_instance)
3704 {
3705    if (base_vertex == 0 && base_instance == 0) {
3706       emit_base_vertex_instance_bo(cmd_buffer, ANV_NULL_ADDRESS);
3707    } else {
3708       struct anv_state id_state =
3709          anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, 8, 4);
3710 
3711       ((uint32_t *)id_state.map)[0] = base_vertex;
3712       ((uint32_t *)id_state.map)[1] = base_instance;
3713 
3714       struct anv_address addr = {
3715          .bo = cmd_buffer->device->dynamic_state_pool.block_pool.bo,
3716          .offset = id_state.offset,
3717       };
3718 
3719       emit_base_vertex_instance_bo(cmd_buffer, addr);
3720    }
3721 }
3722 
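/* Similarly, the draw index is uploaded as a single dword bound at
 * ANV_DRAWID_VB_INDEX whenever vs_prog_data->uses_drawid is set, e.g. to back
 * a gl_DrawID-style value.  Direct draws always pass 0; the multi-draw
 * indirect paths pass the loop index.
 */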
3723 static void
3724 emit_draw_index(struct anv_cmd_buffer *cmd_buffer, uint32_t draw_index)
3725 {
3726    struct anv_state state =
3727       anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, 4, 4);
3728 
3729    ((uint32_t *)state.map)[0] = draw_index;
3730 
3731    struct anv_address addr = {
3732       .bo = cmd_buffer->device->dynamic_state_pool.block_pool.bo,
3733       .offset = state.offset,
3734    };
3735 
3736    emit_vertex_bo(cmd_buffer, addr, 4, ANV_DRAWID_VB_INDEX);
3737 }
3738 
3739 static void
3740 update_dirty_vbs_for_gen8_vb_flush(struct anv_cmd_buffer *cmd_buffer,
3741                                    uint32_t access_type)
3742 {
3743    struct anv_graphics_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
3744    const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
3745 
3746    uint64_t vb_used = pipeline->vb_used;
3747    if (vs_prog_data->uses_firstvertex ||
3748        vs_prog_data->uses_baseinstance)
3749       vb_used |= 1ull << ANV_SVGS_VB_INDEX;
3750    if (vs_prog_data->uses_drawid)
3751       vb_used |= 1ull << ANV_DRAWID_VB_INDEX;
3752 
3753    genX(cmd_buffer_update_dirty_vbs_for_gen8_vb_flush)(cmd_buffer,
3754                                                        access_type == RANDOM,
3755                                                        vb_used);
3756 }
3757 
3758 void genX(CmdDraw)(
3759     VkCommandBuffer                             commandBuffer,
3760     uint32_t                                    vertexCount,
3761     uint32_t                                    instanceCount,
3762     uint32_t                                    firstVertex,
3763     uint32_t                                    firstInstance)
3764 {
3765    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
3766    struct anv_graphics_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
3767    const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
3768 
3769    if (anv_batch_has_error(&cmd_buffer->batch))
3770       return;
3771 
3772    genX(cmd_buffer_flush_state)(cmd_buffer);
3773 
3774    if (cmd_buffer->state.conditional_render_enabled)
3775       genX(cmd_emit_conditional_render_predicate)(cmd_buffer);
3776 
3777    if (vs_prog_data->uses_firstvertex ||
3778        vs_prog_data->uses_baseinstance)
3779       emit_base_vertex_instance(cmd_buffer, firstVertex, firstInstance);
3780    if (vs_prog_data->uses_drawid)
3781       emit_draw_index(cmd_buffer, 0);
3782 
3783    /* Emitting draw index or vertex index BOs may result in needing
3784     * additional VF cache flushes.
3785     */
3786    genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
3787 
3788    /* Our implementation of VK_KHR_multiview uses instancing to draw the
3789     * different views.  We need to multiply instanceCount by the view count.
3790     */
3791    if (!pipeline->use_primitive_replication)
3792       instanceCount *= anv_subpass_view_count(cmd_buffer->state.subpass);
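   /* For example, with a subpass view count of 2 and an API instanceCount of
    * 3, the 3DPRIMITIVE below is emitted with InstanceCount = 6.
    */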
3793 
3794    anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
3795       prim.PredicateEnable          = cmd_buffer->state.conditional_render_enabled;
3796       prim.VertexAccessType         = SEQUENTIAL;
3797       prim.PrimitiveTopologyType    = cmd_buffer->state.gfx.primitive_topology;
3798       prim.VertexCountPerInstance   = vertexCount;
3799       prim.StartVertexLocation      = firstVertex;
3800       prim.InstanceCount            = instanceCount;
3801       prim.StartInstanceLocation    = firstInstance;
3802       prim.BaseVertexLocation       = 0;
3803    }
3804 
3805    update_dirty_vbs_for_gen8_vb_flush(cmd_buffer, SEQUENTIAL);
3806 }
3807 
3808 void genX(CmdDrawIndexed)(
3809     VkCommandBuffer                             commandBuffer,
3810     uint32_t                                    indexCount,
3811     uint32_t                                    instanceCount,
3812     uint32_t                                    firstIndex,
3813     int32_t                                     vertexOffset,
3814     uint32_t                                    firstInstance)
3815 {
3816    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
3817    struct anv_graphics_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
3818    const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
3819 
3820    if (anv_batch_has_error(&cmd_buffer->batch))
3821       return;
3822 
3823    genX(cmd_buffer_flush_state)(cmd_buffer);
3824 
3825    if (cmd_buffer->state.conditional_render_enabled)
3826       genX(cmd_emit_conditional_render_predicate)(cmd_buffer);
3827 
3828    if (vs_prog_data->uses_firstvertex ||
3829        vs_prog_data->uses_baseinstance)
3830       emit_base_vertex_instance(cmd_buffer, vertexOffset, firstInstance);
3831    if (vs_prog_data->uses_drawid)
3832       emit_draw_index(cmd_buffer, 0);
3833 
3834    /* Emitting draw index or vertex index BOs may result in needing
3835     * additional VF cache flushes.
3836     */
3837    genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
3838 
3839    /* Our implementation of VK_KHR_multiview uses instancing to draw the
3840     * different views.  We need to multiply instanceCount by the view count.
3841     */
3842    if (!pipeline->use_primitive_replication)
3843       instanceCount *= anv_subpass_view_count(cmd_buffer->state.subpass);
3844 
3845    anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
3846       prim.PredicateEnable          = cmd_buffer->state.conditional_render_enabled;
3847       prim.VertexAccessType         = RANDOM;
3848       prim.PrimitiveTopologyType    = cmd_buffer->state.gfx.primitive_topology;
3849       prim.VertexCountPerInstance   = indexCount;
3850       prim.StartVertexLocation      = firstIndex;
3851       prim.InstanceCount            = instanceCount;
3852       prim.StartInstanceLocation    = firstInstance;
3853       prim.BaseVertexLocation       = vertexOffset;
3854    }
3855 
3856    update_dirty_vbs_for_gen8_vb_flush(cmd_buffer, RANDOM);
3857 }
3858 
3859 /* Auto-Draw / Indirect Registers */
3860 #define GEN7_3DPRIM_END_OFFSET          0x2420
3861 #define GEN7_3DPRIM_START_VERTEX        0x2430
3862 #define GEN7_3DPRIM_VERTEX_COUNT        0x2434
3863 #define GEN7_3DPRIM_INSTANCE_COUNT      0x2438
3864 #define GEN7_3DPRIM_START_INSTANCE      0x243C
3865 #define GEN7_3DPRIM_BASE_VERTEX         0x2440
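/* These registers feed the corresponding fields of 3DPRIMITIVE whenever the
 * command is emitted with IndirectParameterEnable set.  Rough mapping:
 *
 *    0x2430 StartVertexLocation        0x2434 VertexCountPerInstance
 *    0x2438 InstanceCount              0x243C StartInstanceLocation
 *    0x2440 BaseVertexLocation
 */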
3866 
3867 void genX(CmdDrawIndirectByteCountEXT)(
3868     VkCommandBuffer                             commandBuffer,
3869     uint32_t                                    instanceCount,
3870     uint32_t                                    firstInstance,
3871     VkBuffer                                    counterBuffer,
3872     VkDeviceSize                                counterBufferOffset,
3873     uint32_t                                    counterOffset,
3874     uint32_t                                    vertexStride)
3875 {
3876 #if GEN_IS_HASWELL || GEN_GEN >= 8
3877    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
3878    ANV_FROM_HANDLE(anv_buffer, counter_buffer, counterBuffer);
3879    struct anv_graphics_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
3880    const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
3881 
3882    /* firstVertex is always zero for this draw function */
3883    const uint32_t firstVertex = 0;
3884 
3885    if (anv_batch_has_error(&cmd_buffer->batch))
3886       return;
3887 
3888    genX(cmd_buffer_flush_state)(cmd_buffer);
3889 
3890    if (vs_prog_data->uses_firstvertex ||
3891        vs_prog_data->uses_baseinstance)
3892       emit_base_vertex_instance(cmd_buffer, firstVertex, firstInstance);
3893    if (vs_prog_data->uses_drawid)
3894       emit_draw_index(cmd_buffer, 0);
3895 
3896    /* Emitting draw index or vertex index BOs may result in needing
3897     * additional VF cache flushes.
3898     */
3899    genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
3900 
3901    /* Our implementation of VK_KHR_multiview uses instancing to draw the
3902     * different views.  We need to multiply instanceCount by the view count.
3903     */
3904    if (!pipeline->use_primitive_replication)
3905       instanceCount *= anv_subpass_view_count(cmd_buffer->state.subpass);
3906 
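   /* The vertex count is derived from the transform feedback counter, roughly:
    *
    *    vertexCount = (*(counterBuffer + counterBufferOffset) - counterOffset)
    *                  / vertexStride
    *
    * computed on the GPU by the MI builder sequence below and written directly
    * into the 3DPRIM vertex count register.
    */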
3907    struct gen_mi_builder b;
3908    gen_mi_builder_init(&b, &cmd_buffer->batch);
3909    struct gen_mi_value count =
3910       gen_mi_mem32(anv_address_add(counter_buffer->address,
3911                                    counterBufferOffset));
3912    if (counterOffset)
3913       count = gen_mi_isub(&b, count, gen_mi_imm(counterOffset));
3914    count = gen_mi_udiv32_imm(&b, count, vertexStride);
3915    gen_mi_store(&b, gen_mi_reg32(GEN7_3DPRIM_VERTEX_COUNT), count);
3916 
3917    gen_mi_store(&b, gen_mi_reg32(GEN7_3DPRIM_START_VERTEX),
3918                     gen_mi_imm(firstVertex));
3919    gen_mi_store(&b, gen_mi_reg32(GEN7_3DPRIM_INSTANCE_COUNT),
3920                     gen_mi_imm(instanceCount));
3921    gen_mi_store(&b, gen_mi_reg32(GEN7_3DPRIM_START_INSTANCE),
3922                     gen_mi_imm(firstInstance));
3923    gen_mi_store(&b, gen_mi_reg32(GEN7_3DPRIM_BASE_VERTEX), gen_mi_imm(0));
3924 
3925    anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
3926       prim.IndirectParameterEnable  = true;
3927       prim.VertexAccessType         = SEQUENTIAL;
3928       prim.PrimitiveTopologyType    = cmd_buffer->state.gfx.primitive_topology;
3929    }
3930 
3931    update_dirty_vbs_for_gen8_vb_flush(cmd_buffer, SEQUENTIAL);
3932 #endif /* GEN_IS_HASWELL || GEN_GEN >= 8 */
3933 }
3934 
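/* Loads the 3DPRIM registers from an indirect draw parameter structure.  For
 * reference, the dword layouts defined by Vulkan are:
 *
 *    VkDrawIndirectCommand:          0 vertexCount,  4 instanceCount,
 *                                    8 firstVertex, 12 firstInstance
 *    VkDrawIndexedIndirectCommand:   0 indexCount,   4 instanceCount,
 *                                    8 firstIndex,  12 vertexOffset,
 *                                   16 firstInstance
 *
 * which is why the indexed path reads BaseVertex at +12 and StartInstance at
 * +16 while the non-indexed path reads StartInstance at +12.
 */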
3935 static void
3936 load_indirect_parameters(struct anv_cmd_buffer *cmd_buffer,
3937                          struct anv_address addr,
3938                          bool indexed)
3939 {
3940    struct gen_mi_builder b;
3941    gen_mi_builder_init(&b, &cmd_buffer->batch);
3942 
3943    gen_mi_store(&b, gen_mi_reg32(GEN7_3DPRIM_VERTEX_COUNT),
3944                     gen_mi_mem32(anv_address_add(addr, 0)));
3945 
3946    struct gen_mi_value instance_count = gen_mi_mem32(anv_address_add(addr, 4));
3947    unsigned view_count = anv_subpass_view_count(cmd_buffer->state.subpass);
3948    if (view_count > 1) {
3949 #if GEN_IS_HASWELL || GEN_GEN >= 8
3950       instance_count = gen_mi_imul_imm(&b, instance_count, view_count);
3951 #else
3952       anv_finishme("Multiview + indirect draw requires MI_MATH; "
3953                    "MI_MATH is not supported on Ivy Bridge");
3954 #endif
3955    }
3956    gen_mi_store(&b, gen_mi_reg32(GEN7_3DPRIM_INSTANCE_COUNT), instance_count);
3957 
3958    gen_mi_store(&b, gen_mi_reg32(GEN7_3DPRIM_START_VERTEX),
3959                     gen_mi_mem32(anv_address_add(addr, 8)));
3960 
3961    if (indexed) {
3962       gen_mi_store(&b, gen_mi_reg32(GEN7_3DPRIM_BASE_VERTEX),
3963                        gen_mi_mem32(anv_address_add(addr, 12)));
3964       gen_mi_store(&b, gen_mi_reg32(GEN7_3DPRIM_START_INSTANCE),
3965                        gen_mi_mem32(anv_address_add(addr, 16)));
3966    } else {
3967       gen_mi_store(&b, gen_mi_reg32(GEN7_3DPRIM_START_INSTANCE),
3968                        gen_mi_mem32(anv_address_add(addr, 12)));
3969       gen_mi_store(&b, gen_mi_reg32(GEN7_3DPRIM_BASE_VERTEX), gen_mi_imm(0));
3970    }
3971 }
3972 
3973 void genX(CmdDrawIndirect)(
3974     VkCommandBuffer                             commandBuffer,
3975     VkBuffer                                    _buffer,
3976     VkDeviceSize                                offset,
3977     uint32_t                                    drawCount,
3978     uint32_t                                    stride)
3979 {
3980    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
3981    ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
3982    struct anv_graphics_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
3983    const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
3984 
3985    if (anv_batch_has_error(&cmd_buffer->batch))
3986       return;
3987 
3988    genX(cmd_buffer_flush_state)(cmd_buffer);
3989 
3990    if (cmd_buffer->state.conditional_render_enabled)
3991       genX(cmd_emit_conditional_render_predicate)(cmd_buffer);
3992 
3993    for (uint32_t i = 0; i < drawCount; i++) {
3994       struct anv_address draw = anv_address_add(buffer->address, offset);
3995 
3996       if (vs_prog_data->uses_firstvertex ||
3997           vs_prog_data->uses_baseinstance)
3998          emit_base_vertex_instance_bo(cmd_buffer, anv_address_add(draw, 8));
3999       if (vs_prog_data->uses_drawid)
4000          emit_draw_index(cmd_buffer, i);
4001 
4002       /* Emitting draw index or vertex index BOs may result in needing
4003        * additional VF cache flushes.
4004        */
4005       genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
4006 
4007       load_indirect_parameters(cmd_buffer, draw, false);
4008 
4009       anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
4010          prim.IndirectParameterEnable  = true;
4011          prim.PredicateEnable          = cmd_buffer->state.conditional_render_enabled;
4012          prim.VertexAccessType         = SEQUENTIAL;
4013          prim.PrimitiveTopologyType    = cmd_buffer->state.gfx.primitive_topology;
4014       }
4015 
4016       update_dirty_vbs_for_gen8_vb_flush(cmd_buffer, SEQUENTIAL);
4017 
4018       offset += stride;
4019    }
4020 }
4021 
4022 void genX(CmdDrawIndexedIndirect)(
4023     VkCommandBuffer                             commandBuffer,
4024     VkBuffer                                    _buffer,
4025     VkDeviceSize                                offset,
4026     uint32_t                                    drawCount,
4027     uint32_t                                    stride)
4028 {
4029    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
4030    ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
4031    struct anv_graphics_pipeline *pipeline = cmd_buffer->state.gfx.pipeline;
4032    const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
4033 
4034    if (anv_batch_has_error(&cmd_buffer->batch))
4035       return;
4036 
4037    genX(cmd_buffer_flush_state)(cmd_buffer);
4038 
4039    if (cmd_buffer->state.conditional_render_enabled)
4040       genX(cmd_emit_conditional_render_predicate)(cmd_buffer);
4041 
4042    for (uint32_t i = 0; i < drawCount; i++) {
4043       struct anv_address draw = anv_address_add(buffer->address, offset);
4044 
4045       /* TODO: We need to stomp base vertex to 0 somehow */
4046       if (vs_prog_data->uses_firstvertex ||
4047           vs_prog_data->uses_baseinstance)
4048          emit_base_vertex_instance_bo(cmd_buffer, anv_address_add(draw, 12));
4049       if (vs_prog_data->uses_drawid)
4050          emit_draw_index(cmd_buffer, i);
4051 
4052       /* Emitting draw index or vertex index BOs may result in needing
4053        * additional VF cache flushes.
4054        */
4055       genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
4056 
4057       load_indirect_parameters(cmd_buffer, draw, true);
4058 
4059       anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
4060          prim.IndirectParameterEnable  = true;
4061          prim.PredicateEnable          = cmd_buffer->state.conditional_render_enabled;
4062          prim.VertexAccessType         = RANDOM;
4063          prim.PrimitiveTopologyType    = cmd_buffer->state.gfx.primitive_topology;
4064       }
4065 
4066       update_dirty_vbs_for_gen8_vb_flush(cmd_buffer, RANDOM);
4067 
4068       offset += stride;
4069    }
4070 }
4071 
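/* Per-draw predication helpers for vkCmdDraw*IndirectCount.  Two strategies
 * are used below:
 *
 *  - With conditional rendering enabled (HSW/Gen8+ only), the draw count is
 *    kept in a GPR and each draw computes
 *       predicate = (draw_index < draw_count) && conditional_render_result
 *    with the MI builder before programming the MI_PREDICATE state.
 *
 *  - Otherwise, the draw count is loaded once into MI_PREDICATE_SRC0 and each
 *    draw compares it against the draw index in MI_PREDICATE_SRC1 via
 *    MI_PREDICATE (see emit_draw_count_predicate).
 */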
4072 static struct gen_mi_value
4073 prepare_for_draw_count_predicate(struct anv_cmd_buffer *cmd_buffer,
4074                                  struct gen_mi_builder *b,
4075                                  struct anv_address count_address,
4076                                  const bool conditional_render_enabled)
4077 {
4078    struct gen_mi_value ret = gen_mi_imm(0);
4079 
4080    if (conditional_render_enabled) {
4081 #if GEN_GEN >= 8 || GEN_IS_HASWELL
4082       ret = gen_mi_new_gpr(b);
4083       gen_mi_store(b, gen_mi_value_ref(b, ret), gen_mi_mem32(count_address));
4084 #endif
4085    } else {
4086       /* Upload the current draw count from the draw parameters buffer to
4087        * MI_PREDICATE_SRC0.
4088        */
4089       gen_mi_store(b, gen_mi_reg64(MI_PREDICATE_SRC0),
4090                       gen_mi_mem32(count_address));
4091 
4092       gen_mi_store(b, gen_mi_reg32(MI_PREDICATE_SRC1 + 4), gen_mi_imm(0));
4093    }
4094 
4095    return ret;
4096 }
4097 
4098 static void
4099 emit_draw_count_predicate(struct anv_cmd_buffer *cmd_buffer,
4100                           struct gen_mi_builder *b,
4101                           uint32_t draw_index)
4102 {
4103    /* Upload the index of the current primitive to MI_PREDICATE_SRC1. */
4104    gen_mi_store(b, gen_mi_reg32(MI_PREDICATE_SRC1), gen_mi_imm(draw_index));
4105 
4106    if (draw_index == 0) {
4107       anv_batch_emit(&cmd_buffer->batch, GENX(MI_PREDICATE), mip) {
4108          mip.LoadOperation    = LOAD_LOADINV;
4109          mip.CombineOperation = COMBINE_SET;
4110          mip.CompareOperation = COMPARE_SRCS_EQUAL;
4111       }
4112    } else {
4113       /* While draw_index < draw_count the predicate's result will be
4114        *  (draw_index == draw_count) ^ TRUE = TRUE
4115        * When draw_index == draw_count the result is
4116        *  (TRUE) ^ TRUE = FALSE
4117        * After this all results will be:
4118        *  (FALSE) ^ FALSE = FALSE
4119        */
4120       anv_batch_emit(&cmd_buffer->batch, GENX(MI_PREDICATE), mip) {
4121          mip.LoadOperation    = LOAD_LOAD;
4122          mip.CombineOperation = COMBINE_XOR;
4123          mip.CompareOperation = COMPARE_SRCS_EQUAL;
4124       }
4125    }
4126 }
4127 
4128 #if GEN_GEN >= 8 || GEN_IS_HASWELL
4129 static void
4130 emit_draw_count_predicate_with_conditional_render(
4131                           struct anv_cmd_buffer *cmd_buffer,
4132                           struct gen_mi_builder *b,
4133                           uint32_t draw_index,
4134                           struct gen_mi_value max)
4135 {
4136    struct gen_mi_value pred = gen_mi_ult(b, gen_mi_imm(draw_index), max);
4137    pred = gen_mi_iand(b, pred, gen_mi_reg64(ANV_PREDICATE_RESULT_REG));
4138 
4139 #if GEN_GEN >= 8
4140    gen_mi_store(b, gen_mi_reg64(MI_PREDICATE_RESULT), pred);
4141 #else
4142    /* MI_PREDICATE_RESULT is not whitelisted in the i915 command parser,
4143     * so we emit MI_PREDICATE to set it instead.
4144     */
4145 
4146    gen_mi_store(b, gen_mi_reg64(MI_PREDICATE_SRC0), pred);
4147    gen_mi_store(b, gen_mi_reg64(MI_PREDICATE_SRC1), gen_mi_imm(0));
4148 
4149    anv_batch_emit(&cmd_buffer->batch, GENX(MI_PREDICATE), mip) {
4150       mip.LoadOperation    = LOAD_LOADINV;
4151       mip.CombineOperation = COMBINE_SET;
4152       mip.CompareOperation = COMPARE_SRCS_EQUAL;
4153    }
4154 #endif
4155 }
4156 #endif
4157 
4158 void genX(CmdDrawIndirectCount)(
4159     VkCommandBuffer                             commandBuffer,
4160     VkBuffer                                    _buffer,
4161     VkDeviceSize                                offset,
4162     VkBuffer                                    _countBuffer,
4163     VkDeviceSize                                countBufferOffset,
4164     uint32_t                                    maxDrawCount,
4165     uint32_t                                    stride)
4166 {
4167    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
4168    ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
4169    ANV_FROM_HANDLE(anv_buffer, count_buffer, _countBuffer);
4170    struct anv_cmd_state *cmd_state = &cmd_buffer->state;
4171    struct anv_graphics_pipeline *pipeline = cmd_state->gfx.pipeline;
4172    const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
4173 
4174    if (anv_batch_has_error(&cmd_buffer->batch))
4175       return;
4176 
4177    genX(cmd_buffer_flush_state)(cmd_buffer);
4178 
4179    struct gen_mi_builder b;
4180    gen_mi_builder_init(&b, &cmd_buffer->batch);
4181    struct anv_address count_address =
4182       anv_address_add(count_buffer->address, countBufferOffset);
4183    struct gen_mi_value max =
4184       prepare_for_draw_count_predicate(cmd_buffer, &b, count_address,
4185                                        cmd_state->conditional_render_enabled);
4186 
4187    for (uint32_t i = 0; i < maxDrawCount; i++) {
4188       struct anv_address draw = anv_address_add(buffer->address, offset);
4189 
4190 #if GEN_GEN >= 8 || GEN_IS_HASWELL
4191       if (cmd_state->conditional_render_enabled) {
4192          emit_draw_count_predicate_with_conditional_render(
4193             cmd_buffer, &b, i, gen_mi_value_ref(&b, max));
4194       } else {
4195          emit_draw_count_predicate(cmd_buffer, &b, i);
4196       }
4197 #else
4198       emit_draw_count_predicate(cmd_buffer, &b, i);
4199 #endif
4200 
4201       if (vs_prog_data->uses_firstvertex ||
4202           vs_prog_data->uses_baseinstance)
4203          emit_base_vertex_instance_bo(cmd_buffer, anv_address_add(draw, 8));
4204       if (vs_prog_data->uses_drawid)
4205          emit_draw_index(cmd_buffer, i);
4206 
4207       /* Emitting draw index or vertex index BOs may result in needing
4208        * additional VF cache flushes.
4209        */
4210       genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
4211 
4212       load_indirect_parameters(cmd_buffer, draw, false);
4213 
4214       anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
4215          prim.IndirectParameterEnable  = true;
4216          prim.PredicateEnable          = true;
4217          prim.VertexAccessType         = SEQUENTIAL;
4218          prim.PrimitiveTopologyType    = cmd_buffer->state.gfx.primitive_topology;
4219       }
4220 
4221       update_dirty_vbs_for_gen8_vb_flush(cmd_buffer, SEQUENTIAL);
4222 
4223       offset += stride;
4224    }
4225 
4226    gen_mi_value_unref(&b, max);
4227 }
4228 
4229 void genX(CmdDrawIndexedIndirectCount)(
4230     VkCommandBuffer                             commandBuffer,
4231     VkBuffer                                    _buffer,
4232     VkDeviceSize                                offset,
4233     VkBuffer                                    _countBuffer,
4234     VkDeviceSize                                countBufferOffset,
4235     uint32_t                                    maxDrawCount,
4236     uint32_t                                    stride)
4237 {
4238    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
4239    ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
4240    ANV_FROM_HANDLE(anv_buffer, count_buffer, _countBuffer);
4241    struct anv_cmd_state *cmd_state = &cmd_buffer->state;
4242    struct anv_graphics_pipeline *pipeline = cmd_state->gfx.pipeline;
4243    const struct brw_vs_prog_data *vs_prog_data = get_vs_prog_data(pipeline);
4244 
4245    if (anv_batch_has_error(&cmd_buffer->batch))
4246       return;
4247 
4248    genX(cmd_buffer_flush_state)(cmd_buffer);
4249 
4250    struct gen_mi_builder b;
4251    gen_mi_builder_init(&b, &cmd_buffer->batch);
4252    struct anv_address count_address =
4253       anv_address_add(count_buffer->address, countBufferOffset);
4254    struct gen_mi_value max =
4255       prepare_for_draw_count_predicate(cmd_buffer, &b, count_address,
4256                                        cmd_state->conditional_render_enabled);
4257 
4258    for (uint32_t i = 0; i < maxDrawCount; i++) {
4259       struct anv_address draw = anv_address_add(buffer->address, offset);
4260 
4261 #if GEN_GEN >= 8 || GEN_IS_HASWELL
4262       if (cmd_state->conditional_render_enabled) {
4263          emit_draw_count_predicate_with_conditional_render(
4264             cmd_buffer, &b, i, gen_mi_value_ref(&b, max));
4265       } else {
4266          emit_draw_count_predicate(cmd_buffer, &b, i);
4267       }
4268 #else
4269       emit_draw_count_predicate(cmd_buffer, &b, i);
4270 #endif
4271 
4272       /* TODO: We need to stomp base vertex to 0 somehow */
4273       if (vs_prog_data->uses_firstvertex ||
4274           vs_prog_data->uses_baseinstance)
4275          emit_base_vertex_instance_bo(cmd_buffer, anv_address_add(draw, 12));
4276       if (vs_prog_data->uses_drawid)
4277          emit_draw_index(cmd_buffer, i);
4278 
4279       /* Emitting draw index or vertex index BOs may result in needing
4280        * additional VF cache flushes.
4281        */
4282       genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
4283 
4284       load_indirect_parameters(cmd_buffer, draw, true);
4285 
4286       anv_batch_emit(&cmd_buffer->batch, GENX(3DPRIMITIVE), prim) {
4287          prim.IndirectParameterEnable  = true;
4288          prim.PredicateEnable          = true;
4289          prim.VertexAccessType         = RANDOM;
4290          prim.PrimitiveTopologyType    = cmd_buffer->state.gfx.primitive_topology;
4291       }
4292 
4293       update_dirty_vbs_for_gen8_vb_flush(cmd_buffer, RANDOM);
4294 
4295       offset += stride;
4296    }
4297 
4298    gen_mi_value_unref(&b, max);
4299 }
4300 
4301 void genX(CmdBeginTransformFeedbackEXT)(
4302     VkCommandBuffer                             commandBuffer,
4303     uint32_t                                    firstCounterBuffer,
4304     uint32_t                                    counterBufferCount,
4305     const VkBuffer*                             pCounterBuffers,
4306     const VkDeviceSize*                         pCounterBufferOffsets)
4307 {
4308    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
4309 
4310    assert(firstCounterBuffer < MAX_XFB_BUFFERS);
4311    assert(counterBufferCount <= MAX_XFB_BUFFERS);
4312    assert(firstCounterBuffer + counterBufferCount <= MAX_XFB_BUFFERS);
4313 
4314    /* From the SKL PRM Vol. 2c, SO_WRITE_OFFSET:
4315     *
4316     *    "Software must ensure that no HW stream output operations can be in
4317     *    process or otherwise pending at the point that the MI_LOAD/STORE
4318     *    commands are processed. This will likely require a pipeline flush."
4319     */
4320    cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_CS_STALL_BIT;
4321    genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
4322 
4323    for (uint32_t idx = 0; idx < MAX_XFB_BUFFERS; idx++) {
4324       /* If we have a counter buffer, this is a resume so we need to load the
4325        * value into the streamout offset register.  Otherwise, this is a begin
4326        * and we need to reset it to zero.
4327        */
4328       if (pCounterBuffers &&
4329           idx >= firstCounterBuffer &&
4330           idx - firstCounterBuffer < counterBufferCount &&
4331           pCounterBuffers[idx - firstCounterBuffer] != VK_NULL_HANDLE) {
4332          uint32_t cb_idx = idx - firstCounterBuffer;
4333          ANV_FROM_HANDLE(anv_buffer, counter_buffer, pCounterBuffers[cb_idx]);
4334          uint64_t offset = pCounterBufferOffsets ?
4335                            pCounterBufferOffsets[cb_idx] : 0;
4336 
4337          anv_batch_emit(&cmd_buffer->batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
4338             lrm.RegisterAddress  = GENX(SO_WRITE_OFFSET0_num) + idx * 4;
4339             lrm.MemoryAddress    = anv_address_add(counter_buffer->address,
4340                                                    offset);
4341          }
4342       } else {
4343          anv_batch_emit(&cmd_buffer->batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
4344             lri.RegisterOffset   = GENX(SO_WRITE_OFFSET0_num) + idx * 4;
4345             lri.DataDWord        = 0;
4346          }
4347       }
4348    }
4349 
4350    cmd_buffer->state.xfb_enabled = true;
4351    cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_XFB_ENABLE;
4352 }
4353 
4354 void genX(CmdEndTransformFeedbackEXT)(
4355     VkCommandBuffer                             commandBuffer,
4356     uint32_t                                    firstCounterBuffer,
4357     uint32_t                                    counterBufferCount,
4358     const VkBuffer*                             pCounterBuffers,
4359     const VkDeviceSize*                         pCounterBufferOffsets)
4360 {
4361    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
4362 
4363    assert(firstCounterBuffer < MAX_XFB_BUFFERS);
4364    assert(counterBufferCount <= MAX_XFB_BUFFERS);
4365    assert(firstCounterBuffer + counterBufferCount <= MAX_XFB_BUFFERS);
4366 
4367    /* From the SKL PRM Vol. 2c, SO_WRITE_OFFSET:
4368     *
4369     *    "Software must ensure that no HW stream output operations can be in
4370     *    process or otherwise pending at the point that the MI_LOAD/STORE
4371     *    commands are processed. This will likely require a pipeline flush."
4372     */
4373    cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_CS_STALL_BIT;
4374    genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
4375 
4376    for (uint32_t cb_idx = 0; cb_idx < counterBufferCount; cb_idx++) {
4377       unsigned idx = firstCounterBuffer + cb_idx;
4378 
4379       /* If we have a counter buffer, this is a pause/end so we need to store
4380        * the current streamout offset register value into the counter buffer so
4381        * that a later resume can reload it.  Otherwise, there is nothing to save.
4382        */
4383       if (pCounterBuffers &&
4384           cb_idx < counterBufferCount &&
4385           pCounterBuffers[cb_idx] != VK_NULL_HANDLE) {
4386          ANV_FROM_HANDLE(anv_buffer, counter_buffer, pCounterBuffers[cb_idx]);
4387          uint64_t offset = pCounterBufferOffsets ?
4388                            pCounterBufferOffsets[cb_idx] : 0;
4389 
4390          anv_batch_emit(&cmd_buffer->batch, GENX(MI_STORE_REGISTER_MEM), srm) {
4391             srm.MemoryAddress    = anv_address_add(counter_buffer->address,
4392                                                    offset);
4393             srm.RegisterAddress  = GENX(SO_WRITE_OFFSET0_num) + idx * 4;
4394          }
4395       }
4396    }
4397 
4398    cmd_buffer->state.xfb_enabled = false;
4399    cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_XFB_ENABLE;
4400 }
4401 
4402 void
4403 genX(cmd_buffer_flush_compute_state)(struct anv_cmd_buffer *cmd_buffer)
4404 {
4405    struct anv_compute_pipeline *pipeline = cmd_buffer->state.compute.pipeline;
4406 
4407    assert(pipeline->cs);
4408 
4409    genX(cmd_buffer_config_l3)(cmd_buffer, pipeline->base.l3_config);
4410 
4411    genX(flush_pipeline_select_gpgpu)(cmd_buffer);
4412 
4413    /* Apply any pending pipeline flushes we may have.  We want to apply them
4414     * now because, if any of those flushes are for things like push constants,
4415     * the GPU will read the state at weird times.
4416     */
4417    genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
4418 
4419    if (cmd_buffer->state.compute.pipeline_dirty) {
4420       /* From the Sky Lake PRM Vol 2a, MEDIA_VFE_STATE:
4421        *
4422        *    "A stalling PIPE_CONTROL is required before MEDIA_VFE_STATE unless
4423        *    the only bits that are changed are scoreboard related: Scoreboard
4424        *    Enable, Scoreboard Type, Scoreboard Mask, Scoreboard * Delta. For
4425        *    these scoreboard related states, a MEDIA_STATE_FLUSH is
4426        *    sufficient."
4427        */
4428       cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_CS_STALL_BIT;
4429       genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
4430 
4431       anv_batch_emit_batch(&cmd_buffer->batch, &pipeline->base.batch);
4432 
4433       /* The workgroup size of the pipeline affects our push constant layout
4434        * so flag push constants as dirty if we change the pipeline.
4435        */
4436       cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
4437    }
4438 
4439    if ((cmd_buffer->state.descriptors_dirty & VK_SHADER_STAGE_COMPUTE_BIT) ||
4440        cmd_buffer->state.compute.pipeline_dirty) {
4441       flush_descriptor_sets(cmd_buffer,
4442                             &cmd_buffer->state.compute.base,
4443                             &pipeline->cs, 1);
4444 
4445       uint32_t iface_desc_data_dw[GENX(INTERFACE_DESCRIPTOR_DATA_length)];
4446       struct GENX(INTERFACE_DESCRIPTOR_DATA) desc = {
4447          .BindingTablePointer =
4448             cmd_buffer->state.binding_tables[MESA_SHADER_COMPUTE].offset,
4449          .SamplerStatePointer =
4450             cmd_buffer->state.samplers[MESA_SHADER_COMPUTE].offset,
4451       };
4452       GENX(INTERFACE_DESCRIPTOR_DATA_pack)(NULL, iface_desc_data_dw, &desc);
4453 
4454       struct anv_state state =
4455          anv_cmd_buffer_merge_dynamic(cmd_buffer, iface_desc_data_dw,
4456                                       pipeline->interface_descriptor_data,
4457                                       GENX(INTERFACE_DESCRIPTOR_DATA_length),
4458                                       64);
4459 
4460       uint32_t size = GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t);
4461       anv_batch_emit(&cmd_buffer->batch,
4462                      GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD), mid) {
4463          mid.InterfaceDescriptorTotalLength        = size;
4464          mid.InterfaceDescriptorDataStartAddress   = state.offset;
4465       }
4466    }
4467 
4468    if (cmd_buffer->state.push_constants_dirty & VK_SHADER_STAGE_COMPUTE_BIT) {
4469       struct anv_state push_state =
4470          anv_cmd_buffer_cs_push_constants(cmd_buffer);
4471 
4472       if (push_state.alloc_size) {
4473          anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_CURBE_LOAD), curbe) {
4474             curbe.CURBETotalDataLength    = push_state.alloc_size;
4475             curbe.CURBEDataStartAddress   = push_state.offset;
4476          }
4477       }
4478 
4479       cmd_buffer->state.push_constants_dirty &= ~VK_SHADER_STAGE_COMPUTE_BIT;
4480    }
4481 
4482    cmd_buffer->state.compute.pipeline_dirty = false;
4483 
4484    genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
4485 }
4486 
4487 #if GEN_GEN == 7
4488 
4489 static VkResult
4490 verify_cmd_parser(const struct anv_device *device,
4491                   int required_version,
4492                   const char *function)
4493 {
4494    if (device->physical->cmd_parser_version < required_version) {
4495       return vk_errorf(device, device->physical,
4496                        VK_ERROR_FEATURE_NOT_PRESENT,
4497                        "cmd parser version %d is required for %s",
4498                        required_version, function);
4499    } else {
4500       return VK_SUCCESS;
4501    }
4502 }
4503 
4504 #endif
4505 
4506 static void
4507 anv_cmd_buffer_push_base_group_id(struct anv_cmd_buffer *cmd_buffer,
4508                                   uint32_t baseGroupX,
4509                                   uint32_t baseGroupY,
4510                                   uint32_t baseGroupZ)
4511 {
4512    if (anv_batch_has_error(&cmd_buffer->batch))
4513       return;
4514 
4515    struct anv_push_constants *push =
4516       &cmd_buffer->state.compute.base.push_constants;
4517    if (push->cs.base_work_group_id[0] != baseGroupX ||
4518        push->cs.base_work_group_id[1] != baseGroupY ||
4519        push->cs.base_work_group_id[2] != baseGroupZ) {
4520       push->cs.base_work_group_id[0] = baseGroupX;
4521       push->cs.base_work_group_id[1] = baseGroupY;
4522       push->cs.base_work_group_id[2] = baseGroupZ;
4523 
4524       cmd_buffer->state.push_constants_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
4525    }
4526 }
4527 
4528 void genX(CmdDispatch)(
4529     VkCommandBuffer                             commandBuffer,
4530     uint32_t                                    x,
4531     uint32_t                                    y,
4532     uint32_t                                    z)
4533 {
4534    genX(CmdDispatchBase)(commandBuffer, 0, 0, 0, x, y, z);
4535 }
4536 
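/* Emits GPGPU_WALKER for a direct or indirect dispatch.  Note that SIMDSize
 * encodes the dispatch width as simd_size / 16 (assuming the usual encoding:
 * SIMD8 -> 0, SIMD16 -> 1, SIMD32 -> 2).
 */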
4537 static inline void
4538 emit_gpgpu_walker(struct anv_cmd_buffer *cmd_buffer,
4539                   const struct anv_compute_pipeline *pipeline, bool indirect,
4540                   const struct brw_cs_prog_data *prog_data,
4541                   uint32_t groupCountX, uint32_t groupCountY,
4542                   uint32_t groupCountZ)
4543 {
4544    bool predicate = (GEN_GEN <= 7 && indirect) ||
4545       cmd_buffer->state.conditional_render_enabled;
4546    const struct anv_cs_parameters cs_params = anv_cs_parameters(pipeline);
4547 
4548    anv_batch_emit(&cmd_buffer->batch, GENX(GPGPU_WALKER), ggw) {
4549       ggw.IndirectParameterEnable      = indirect;
4550       ggw.PredicateEnable              = predicate;
4551       ggw.SIMDSize                     = cs_params.simd_size / 16;
4552       ggw.ThreadDepthCounterMaximum    = 0;
4553       ggw.ThreadHeightCounterMaximum   = 0;
4554       ggw.ThreadWidthCounterMaximum    = cs_params.threads - 1;
4555       ggw.ThreadGroupIDXDimension      = groupCountX;
4556       ggw.ThreadGroupIDYDimension      = groupCountY;
4557       ggw.ThreadGroupIDZDimension      = groupCountZ;
4558       ggw.RightExecutionMask           = pipeline->cs_right_mask;
4559       ggw.BottomExecutionMask          = 0xffffffff;
4560    }
4561 
4562    anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_STATE_FLUSH), msf);
4563 }
4564 
4565 void genX(CmdDispatchBase)(
4566     VkCommandBuffer                             commandBuffer,
4567     uint32_t                                    baseGroupX,
4568     uint32_t                                    baseGroupY,
4569     uint32_t                                    baseGroupZ,
4570     uint32_t                                    groupCountX,
4571     uint32_t                                    groupCountY,
4572     uint32_t                                    groupCountZ)
4573 {
4574    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
4575    struct anv_compute_pipeline *pipeline = cmd_buffer->state.compute.pipeline;
4576    const struct brw_cs_prog_data *prog_data = get_cs_prog_data(pipeline);
4577 
4578    anv_cmd_buffer_push_base_group_id(cmd_buffer, baseGroupX,
4579                                      baseGroupY, baseGroupZ);
4580 
4581    if (anv_batch_has_error(&cmd_buffer->batch))
4582       return;
4583 
4584    if (prog_data->uses_num_work_groups) {
4585       struct anv_state state =
4586          anv_cmd_buffer_alloc_dynamic_state(cmd_buffer, 12, 4);
4587       uint32_t *sizes = state.map;
4588       sizes[0] = groupCountX;
4589       sizes[1] = groupCountY;
4590       sizes[2] = groupCountZ;
4591       cmd_buffer->state.compute.num_workgroups = (struct anv_address) {
4592          .bo = cmd_buffer->device->dynamic_state_pool.block_pool.bo,
4593          .offset = state.offset,
4594       };
4595 
4596       /* The num_workgroups buffer goes in the binding table */
4597       cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
4598    }
4599 
4600    genX(cmd_buffer_flush_compute_state)(cmd_buffer);
4601 
4602    if (cmd_buffer->state.conditional_render_enabled)
4603       genX(cmd_emit_conditional_render_predicate)(cmd_buffer);
4604 
4605    emit_gpgpu_walker(cmd_buffer, pipeline, false, prog_data, groupCountX,
4606                      groupCountY, groupCountZ);
4607 }
4608 
4609 #define GPGPU_DISPATCHDIMX 0x2500
4610 #define GPGPU_DISPATCHDIMY 0x2504
4611 #define GPGPU_DISPATCHDIMZ 0x2508
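/* VkDispatchIndirectCommand is three packed dwords (x, y, z), so the indirect
 * dispatch path below copies the dwords at offsets 0/4/8 of the buffer into
 * these registers using the MI builder.
 */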
4612 
4613 void genX(CmdDispatchIndirect)(
4614     VkCommandBuffer                             commandBuffer,
4615     VkBuffer                                    _buffer,
4616     VkDeviceSize                                offset)
4617 {
4618    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
4619    ANV_FROM_HANDLE(anv_buffer, buffer, _buffer);
4620    struct anv_compute_pipeline *pipeline = cmd_buffer->state.compute.pipeline;
4621    const struct brw_cs_prog_data *prog_data = get_cs_prog_data(pipeline);
4622    struct anv_address addr = anv_address_add(buffer->address, offset);
4623    UNUSED struct anv_batch *batch = &cmd_buffer->batch;
4624 
4625    anv_cmd_buffer_push_base_group_id(cmd_buffer, 0, 0, 0);
4626 
4627 #if GEN_GEN == 7
4628    /* Linux 4.4 added command parser version 5 which allows the GPGPU
4629     * indirect dispatch registers to be written.
4630     */
4631    if (verify_cmd_parser(cmd_buffer->device, 5,
4632                          "vkCmdDispatchIndirect") != VK_SUCCESS)
4633       return;
4634 #endif
4635 
4636    if (prog_data->uses_num_work_groups) {
4637       cmd_buffer->state.compute.num_workgroups = addr;
4638 
4639       /* The num_workgroups buffer goes in the binding table */
4640       cmd_buffer->state.descriptors_dirty |= VK_SHADER_STAGE_COMPUTE_BIT;
4641    }
4642 
4643    genX(cmd_buffer_flush_compute_state)(cmd_buffer);
4644 
4645    struct gen_mi_builder b;
4646    gen_mi_builder_init(&b, &cmd_buffer->batch);
4647 
4648    struct gen_mi_value size_x = gen_mi_mem32(anv_address_add(addr, 0));
4649    struct gen_mi_value size_y = gen_mi_mem32(anv_address_add(addr, 4));
4650    struct gen_mi_value size_z = gen_mi_mem32(anv_address_add(addr, 8));
4651 
4652    gen_mi_store(&b, gen_mi_reg32(GPGPU_DISPATCHDIMX), size_x);
4653    gen_mi_store(&b, gen_mi_reg32(GPGPU_DISPATCHDIMY), size_y);
4654    gen_mi_store(&b, gen_mi_reg32(GPGPU_DISPATCHDIMZ), size_z);
4655 
4656 #if GEN_GEN <= 7
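   /* The MI_PREDICATE sequence below ORs together the three (dimension == 0)
    * checks and then inverts the result so that the GPGPU_WALKER can be
    * predicated off when any indirect dispatch dimension is zero.
    */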
4657    /* predicate = (compute_dispatch_indirect_x_size == 0); */
4658    gen_mi_store(&b, gen_mi_reg64(MI_PREDICATE_SRC0), size_x);
4659    gen_mi_store(&b, gen_mi_reg64(MI_PREDICATE_SRC1), gen_mi_imm(0));
4660    anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
4661       mip.LoadOperation    = LOAD_LOAD;
4662       mip.CombineOperation = COMBINE_SET;
4663       mip.CompareOperation = COMPARE_SRCS_EQUAL;
4664    }
4665 
4666    /* predicate |= (compute_dispatch_indirect_y_size == 0); */
4667    gen_mi_store(&b, gen_mi_reg32(MI_PREDICATE_SRC0), size_y);
4668    anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
4669       mip.LoadOperation    = LOAD_LOAD;
4670       mip.CombineOperation = COMBINE_OR;
4671       mip.CompareOperation = COMPARE_SRCS_EQUAL;
4672    }
4673 
4674    /* predicate |= (compute_dispatch_indirect_z_size == 0); */
4675    gen_mi_store(&b, gen_mi_reg32(MI_PREDICATE_SRC0), size_z);
4676    anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
4677       mip.LoadOperation    = LOAD_LOAD;
4678       mip.CombineOperation = COMBINE_OR;
4679       mip.CompareOperation = COMPARE_SRCS_EQUAL;
4680    }
4681 
4682    /* predicate = !predicate; */
4683    anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
4684       mip.LoadOperation    = LOAD_LOADINV;
4685       mip.CombineOperation = COMBINE_OR;
4686       mip.CompareOperation = COMPARE_FALSE;
4687    }
4688 
4689 #if GEN_IS_HASWELL
4690    if (cmd_buffer->state.conditional_render_enabled) {
4691       /* predicate &= !(conditional_rendering_predicate == 0); */
4692       gen_mi_store(&b, gen_mi_reg32(MI_PREDICATE_SRC0),
4693                        gen_mi_reg32(ANV_PREDICATE_RESULT_REG));
4694       anv_batch_emit(batch, GENX(MI_PREDICATE), mip) {
4695          mip.LoadOperation    = LOAD_LOADINV;
4696          mip.CombineOperation = COMBINE_AND;
4697          mip.CompareOperation = COMPARE_SRCS_EQUAL;
4698       }
4699    }
4700 #endif
4701 
4702 #else /* GEN_GEN > 7 */
4703    if (cmd_buffer->state.conditional_render_enabled)
4704       genX(cmd_emit_conditional_render_predicate)(cmd_buffer);
4705 #endif
4706 
4707    emit_gpgpu_walker(cmd_buffer, pipeline, true, prog_data, 0, 0, 0);
4708 }
4709 
4710 static void
4711 genX(flush_pipeline_select)(struct anv_cmd_buffer *cmd_buffer,
4712                             uint32_t pipeline)
4713 {
4714    UNUSED const struct gen_device_info *devinfo = &cmd_buffer->device->info;
4715 
4716    if (cmd_buffer->state.current_pipeline == pipeline)
4717       return;
4718 
4719 #if GEN_GEN >= 8 && GEN_GEN < 10
4720    /* From the Broadwell PRM, Volume 2a: Instructions, PIPELINE_SELECT:
4721     *
4722     *   Software must clear the COLOR_CALC_STATE Valid field in
4723     *   3DSTATE_CC_STATE_POINTERS command prior to send a PIPELINE_SELECT
4724     *   with Pipeline Select set to GPGPU.
4725     *
4726     * The internal hardware docs recommend the same workaround for Gen9
4727     * hardware too.
4728     */
4729    if (pipeline == GPGPU)
4730       anv_batch_emit(&cmd_buffer->batch, GENX(3DSTATE_CC_STATE_POINTERS), t);
4731 #endif
4732 
4733 #if GEN_GEN == 9
4734    if (pipeline == _3D) {
4735       /* There is a mid-object preemption workaround which requires you to
4736        * re-emit MEDIA_VFE_STATE after switching from GPGPU to 3D.  However,
4737        * even without preemption, we have issues with geometry flickering when
4738        * GPGPU and 3D are back-to-back and this seems to fix it.  We don't
4739        * really know why.
4740        */
4741       const uint32_t subslices =
4742          MAX2(cmd_buffer->device->physical->subslice_total, 1);
4743       anv_batch_emit(&cmd_buffer->batch, GENX(MEDIA_VFE_STATE), vfe) {
4744          vfe.MaximumNumberofThreads =
4745             devinfo->max_cs_threads * subslices - 1;
4746          vfe.NumberofURBEntries     = 2;
4747          vfe.URBEntryAllocationSize = 2;
4748       }
4749 
4750       /* We just emitted a dummy MEDIA_VFE_STATE so now that packet is
4751        * invalid. Set the compute pipeline to dirty to force a re-emit of the
4752        * pipeline in case we get back-to-back dispatch calls with the same
4753        * pipeline and a PIPELINE_SELECT in between.
4754        */
4755       cmd_buffer->state.compute.pipeline_dirty = true;
4756    }
4757 #endif
4758 
4759    /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
4760     * PIPELINE_SELECT [DevBWR+]":
4761     *
4762     *   Project: DEVSNB+
4763     *
4764     *   Software must ensure all the write caches are flushed through a
4765     *   stalling PIPE_CONTROL command followed by another PIPE_CONTROL
4766     *   command to invalidate read only caches prior to programming
4767     *   MI_PIPELINE_SELECT command to change the Pipeline Select Mode.
4768     */
4769    anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
4770       pc.RenderTargetCacheFlushEnable  = true;
4771       pc.DepthCacheFlushEnable         = true;
4772       pc.DCFlushEnable                 = true;
4773       pc.PostSyncOperation             = NoWrite;
4774       pc.CommandStreamerStallEnable    = true;
4775 #if GEN_GEN >= 12
4776       pc.TileCacheFlushEnable = true;
4777 
4778       /* GEN:BUG:1409600907: "PIPE_CONTROL with Depth Stall Enable bit must be
4779        * set with any PIPE_CONTROL with Depth Flush Enable bit set."
4780        */
4781       pc.DepthStallEnable = true;
4782 #endif
4783    }
4784 
4785    anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
4786       pc.TextureCacheInvalidationEnable   = true;
4787       pc.ConstantCacheInvalidationEnable  = true;
4788       pc.StateCacheInvalidationEnable     = true;
4789       pc.InstructionCacheInvalidateEnable = true;
4790       pc.PostSyncOperation                = NoWrite;
4791 #if GEN_GEN >= 12
4792       pc.TileCacheFlushEnable = true;
4793 #endif
4794    }
4795 
4796    anv_batch_emit(&cmd_buffer->batch, GENX(PIPELINE_SELECT), ps) {
4797 #if GEN_GEN >= 9
4798       ps.MaskBits = GEN_GEN >= 12 ? 0x13 : 3;
4799       ps.MediaSamplerDOPClockGateEnable = GEN_GEN >= 12;
4800 #endif
4801       ps.PipelineSelection = pipeline;
4802    }
4803 
4804 #if GEN_GEN == 9
4805    if (devinfo->is_geminilake) {
4806       /* Project: DevGLK
4807        *
4808        * "This chicken bit works around a hardware issue with barrier logic
4809        *  encountered when switching between GPGPU and 3D pipelines.  To
4810        *  workaround the issue, this mode bit should be set after a pipeline
4811        *  is selected."
4812        */
4813       uint32_t scec;
4814       anv_pack_struct(&scec, GENX(SLICE_COMMON_ECO_CHICKEN1),
4815                       .GLKBarrierMode =
4816                           pipeline == GPGPU ? GLK_BARRIER_MODE_GPGPU
4817                                             : GLK_BARRIER_MODE_3D_HULL,
4818                       .GLKBarrierModeMask = 1);
4819       emit_lri(&cmd_buffer->batch, GENX(SLICE_COMMON_ECO_CHICKEN1_num), scec);
4820    }
4821 #endif
4822 
4823    cmd_buffer->state.current_pipeline = pipeline;
4824 }
4825 
4826 void
4827 genX(flush_pipeline_select_3d)(struct anv_cmd_buffer *cmd_buffer)
4828 {
4829    genX(flush_pipeline_select)(cmd_buffer, _3D);
4830 }
4831 
4832 void
4833 genX(flush_pipeline_select_gpgpu)(struct anv_cmd_buffer *cmd_buffer)
4834 {
4835    genX(flush_pipeline_select)(cmd_buffer, GPGPU);
4836 }
4837 
4838 void
4839 genX(cmd_buffer_emit_gen7_depth_flush)(struct anv_cmd_buffer *cmd_buffer)
4840 {
4841    if (GEN_GEN >= 8)
4842       return;
4843 
4844    /* From the Haswell PRM, documentation for 3DSTATE_DEPTH_BUFFER:
4845     *
4846     *    "Restriction: Prior to changing Depth/Stencil Buffer state (i.e., any
4847     *    combination of 3DSTATE_DEPTH_BUFFER, 3DSTATE_CLEAR_PARAMS,
4848     *    3DSTATE_STENCIL_BUFFER, 3DSTATE_HIER_DEPTH_BUFFER) SW must first
4849     *    issue a pipelined depth stall (PIPE_CONTROL with Depth Stall bit
4850     *    set), followed by a pipelined depth cache flush (PIPE_CONTROL with
4851     *    Depth Flush Bit set, followed by another pipelined depth stall
4852     *    (PIPE_CONTROL with Depth Stall Bit set), unless SW can otherwise
4853     *    guarantee that the pipeline from WM onwards is already flushed (e.g.,
4854     *    via a preceding MI_FLUSH)."
4855     */
4856    anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pipe) {
4857       pipe.DepthStallEnable = true;
4858    }
4859    anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pipe) {
4860       pipe.DepthCacheFlushEnable = true;
4861 #if GEN_GEN >= 12
4862       pipe.TileCacheFlushEnable = true;
4863 #endif
4864    }
4865    anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pipe) {
4866       pipe.DepthStallEnable = true;
4867    }
4868 }
4869 
4870 /* From the Skylake PRM, 3DSTATE_VERTEX_BUFFERS:
4871  *
4872  *    "The VF cache needs to be invalidated before binding and then using
4873  *    Vertex Buffers that overlap with any previously bound Vertex Buffer
4874  *    (at a 64B granularity) since the last invalidation.  A VF cache
4875  *    invalidate is performed by setting the "VF Cache Invalidation Enable"
4876  *    bit in PIPE_CONTROL."
4877  *
4878  * This is implemented by carefully tracking all vertex and index buffer
4879  * bindings and flushing if the cache ever ends up with a range in the cache
4880  * that would exceed 4 GiB.  This is implemented in three parts:
4881  *
4882  *    1. genX(cmd_buffer_set_binding_for_gen8_vb_flush)() which must be called
4883  *       every time a 3DSTATE_VERTEX_BUFFER packet is emitted and informs the
4884  *       tracking code of the new binding.  If this new binding would cause
4885  *       the cache to have a too-large range on the next draw call, a pipeline
4886  *       stall and VF cache invalidate are added to pending_pipeline_bits.
4887  *
4888  *    2. genX(cmd_buffer_apply_pipe_flushes)() resets the cache tracking to
4889  *       empty whenever we emit a VF invalidate.
4890  *
4891  *    3. genX(cmd_buffer_update_dirty_vbs_for_gen8_vb_flush)() must be called
4892  *       after every 3DPRIMITIVE and copies the bound range into the dirty
4893  *       range for each used buffer.  This has to be a separate step because
4894  *       we don't always re-bind all buffers and so 1. can't know which
4895  *       buffers are actually bound.
4896  */
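/* Illustrative only: a minimal sketch of the call pattern the three hooks
 * described above assume.  emit_vertex_buffer_state() and emit_3dprimitive()
 * are hypothetical placeholders for the real packet-emission code, not
 * functions in this driver; SEQUENTIAL is the non-indexed counterpart of the
 * RANDOM access type handled below.
 *
 *    // 1. Inform the tracker whenever a vertex buffer binding is emitted.
 *    genX(cmd_buffer_set_binding_for_gen8_vb_flush)(cmd_buffer, vb_index,
 *                                                   vb_address, vb_size);
 *    emit_vertex_buffer_state(cmd_buffer, vb_index, vb_address, vb_size);
 *
 *    // 2. Any pending CS stall / VF cache invalidate is applied here, which
 *    //    also resets the bound-range tracking.
 *    genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
 *
 *    emit_3dprimitive(cmd_buffer);
 *
 *    // 3. After the draw, fold the bound ranges of the buffers that were
 *    //    actually used into their dirty ranges.
 *    genX(cmd_buffer_update_dirty_vbs_for_gen8_vb_flush)(cmd_buffer,
 *                                                        SEQUENTIAL, vb_used);
 */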
4897 void
4898 genX(cmd_buffer_set_binding_for_gen8_vb_flush)(struct anv_cmd_buffer *cmd_buffer,
4899                                                int vb_index,
4900                                                struct anv_address vb_address,
4901                                                uint32_t vb_size)
4902 {
4903    if (GEN_GEN < 8 || GEN_GEN > 9 ||
4904        !cmd_buffer->device->physical->use_softpin)
4905       return;
4906 
4907    struct anv_vb_cache_range *bound, *dirty;
4908    if (vb_index == -1) {
4909       bound = &cmd_buffer->state.gfx.ib_bound_range;
4910       dirty = &cmd_buffer->state.gfx.ib_dirty_range;
4911    } else {
4912       assert(vb_index >= 0);
4913       assert(vb_index < ARRAY_SIZE(cmd_buffer->state.gfx.vb_bound_ranges));
4914       assert(vb_index < ARRAY_SIZE(cmd_buffer->state.gfx.vb_dirty_ranges));
4915       bound = &cmd_buffer->state.gfx.vb_bound_ranges[vb_index];
4916       dirty = &cmd_buffer->state.gfx.vb_dirty_ranges[vb_index];
4917    }
4918 
4919    if (vb_size == 0) {
4920       bound->start = 0;
4921       bound->end = 0;
4922       return;
4923    }
4924 
4925    assert(vb_address.bo && (vb_address.bo->flags & EXEC_OBJECT_PINNED));
4926    bound->start = gen_48b_address(anv_address_physical(vb_address));
4927    bound->end = bound->start + vb_size;
4928    assert(bound->end > bound->start); /* No overflow */
4929 
4930    /* Align everything to a cache line */
4931    bound->start &= ~(64ull - 1ull);
4932    bound->end = align_u64(bound->end, 64);
4933 
4934    /* Compute the dirty range */
4935    dirty->start = MIN2(dirty->start, bound->start);
4936    dirty->end = MAX2(dirty->end, bound->end);
4937 
4938    /* If our range is larger than 32 bits, we have to flush */
4939    assert(bound->end - bound->start <= (1ull << 32));
4940    if (dirty->end - dirty->start > (1ull << 32)) {
4941       cmd_buffer->state.pending_pipe_bits |=
4942          ANV_PIPE_CS_STALL_BIT | ANV_PIPE_VF_CACHE_INVALIDATE_BIT;
4943    }
4944 }
4945 
4946 void
4947 genX(cmd_buffer_update_dirty_vbs_for_gen8_vb_flush)(struct anv_cmd_buffer *cmd_buffer,
4948                                                     uint32_t access_type,
4949                                                     uint64_t vb_used)
4950 {
4951    if (GEN_GEN < 8 || GEN_GEN > 9 ||
4952        !cmd_buffer->device->physical->use_softpin)
4953       return;
4954 
4955    if (access_type == RANDOM) {
4956       /* We have an index buffer */
4957       struct anv_vb_cache_range *bound = &cmd_buffer->state.gfx.ib_bound_range;
4958       struct anv_vb_cache_range *dirty = &cmd_buffer->state.gfx.ib_dirty_range;
4959 
4960       if (bound->end > bound->start) {
4961          dirty->start = MIN2(dirty->start, bound->start);
4962          dirty->end = MAX2(dirty->end, bound->end);
4963       }
4964    }
4965 
4966    uint64_t mask = vb_used;
4967    while (mask) {
4968       int i = u_bit_scan64(&mask);
4969       assert(i >= 0);
4970       assert(i < ARRAY_SIZE(cmd_buffer->state.gfx.vb_bound_ranges));
4971       assert(i < ARRAY_SIZE(cmd_buffer->state.gfx.vb_dirty_ranges));
4972 
4973       struct anv_vb_cache_range *bound, *dirty;
4974       bound = &cmd_buffer->state.gfx.vb_bound_ranges[i];
4975       dirty = &cmd_buffer->state.gfx.vb_dirty_ranges[i];
4976 
4977       if (bound->end > bound->start) {
4978          dirty->start = MIN2(dirty->start, bound->start);
4979          dirty->end = MAX2(dirty->end, bound->end);
4980       }
4981    }
4982 }
4983 
4984 /**
4985  * Update the pixel hashing modes that determine the balancing of PS threads
4986  * across subslices and slices.
4987  *
4988  * \param width Width bound of the rendering area (already scaled down if \p
4989  *              scale is greater than 1).
4990  * \param height Height bound of the rendering area (already scaled down if \p
4991  *               scale is greater than 1).
4992  * \param scale The number of framebuffer samples that could potentially be
4993  *              affected by an individual channel of the PS thread.  This is
4994  *              typically one for single-sampled rendering, but for operations
4995  *              like CCS resolves and fast clears a single PS invocation may
4996  *              update a huge number of pixels, in which case a finer
4997  *              balancing is desirable in order to maximally utilize the
4998  *              bandwidth available.  UINT_MAX can be used as shorthand for
4999  *              "finest hashing mode available".
5000  */
5001 void
5002 genX(cmd_buffer_emit_hashing_mode)(struct anv_cmd_buffer *cmd_buffer,
5003                                    unsigned width, unsigned height,
5004                                    unsigned scale)
5005 {
5006 #if GEN_GEN == 9
5007    const struct gen_device_info *devinfo = &cmd_buffer->device->info;
5008    const unsigned slice_hashing[] = {
5009       /* Because all Gen9 platforms with more than one slice require
5010        * three-way subslice hashing, a single "normal" 16x16 slice hashing
5011        * block is guaranteed to suffer from substantial imbalance, with one
5012        * subslice receiving twice as much work as the other two in the
5013        * slice.
5014        *
5015        * The performance impact of that would be particularly severe when
5016        * three-way hashing is also in use for slice balancing (which is the
5017        * case for all Gen9 GT4 platforms), because one of the slices
5018        * receives one every three 16x16 blocks in either direction, which
5019        * is roughly the periodicity of the underlying subslice imbalance
5020        * pattern ("roughly" because in reality the hardware's
5021        * implementation of three-way hashing doesn't do exact modulo 3
5022        * arithmetic, which somewhat decreases the magnitude of this effect
5023        * in practice).  This leads to a systematic subslice imbalance
5024        * within that slice regardless of the size of the primitive.  The
5025        * 32x32 hashing mode guarantees that the subslice imbalance within a
5026        * single slice hashing block is minimal, largely eliminating this
5027        * effect.
5028        */
5029       _32x32,
5030       /* Finest slice hashing mode available. */
5031       NORMAL
5032    };
5033    const unsigned subslice_hashing[] = {
5034       /* 16x16 would provide a slight cache locality benefit especially
5035        * visible in the sampler L1 cache efficiency of low-bandwidth
5036        * non-LLC platforms, but it comes at the cost of greater subslice
5037        * imbalance for primitives of dimensions approximately intermediate
5038        * between 16x4 and 16x16.
5039        */
5040       _16x4,
5041       /* Finest subslice hashing mode available. */
5042       _8x4
5043    };
5044    /* Dimensions of the smallest hashing block of a given hashing mode.  If
5045     * the rendering area is smaller than this there can't possibly be any
5046     * benefit from switching to this mode, so we optimize out the
5047     * transition.
5048     */
5049    const unsigned min_size[][2] = {
5050          { 16, 4 },
5051          { 8, 4 }
5052    };
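   /* Index 0 selects the coarse modes above (normal rendering); index 1
    * selects the finest modes, used whenever a single PS channel may cover
    * more than one sample (scale > 1).
    */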
5053    const unsigned idx = scale > 1;
5054 
5055    if (cmd_buffer->state.current_hash_scale != scale &&
5056        (width > min_size[idx][0] || height > min_size[idx][1])) {
5057       uint32_t gt_mode;
5058 
5059       anv_pack_struct(&gt_mode, GENX(GT_MODE),
5060                       .SliceHashing = (devinfo->num_slices > 1 ? slice_hashing[idx] : 0),
5061                       .SliceHashingMask = (devinfo->num_slices > 1 ? -1 : 0),
5062                       .SubsliceHashing = subslice_hashing[idx],
5063                       .SubsliceHashingMask = -1);
5064 
5065       cmd_buffer->state.pending_pipe_bits |=
5066          ANV_PIPE_CS_STALL_BIT | ANV_PIPE_STALL_AT_SCOREBOARD_BIT;
5067       genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
5068 
5069       emit_lri(&cmd_buffer->batch, GENX(GT_MODE_num), gt_mode);
5070 
5071       cmd_buffer->state.current_hash_scale = scale;
5072    }
5073 #endif
5074 }
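/* Illustrative only: a hypothetical caller could request the finest hashing
 * granularity around an operation where a single PS invocation covers many
 * samples (e.g. a CCS fast clear) and then drop back to the default for
 * normal rendering.  The fb_width/fb_height names are placeholders, not
 * actual call sites in this file:
 *
 *    genX(cmd_buffer_emit_hashing_mode)(cmd_buffer, UINT_MAX, UINT_MAX,
 *                                       UINT_MAX);
 *    ... emit the fast-clear operation ...
 *    genX(cmd_buffer_emit_hashing_mode)(cmd_buffer, fb_width, fb_height, 1);
 */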
5075 
5076 static void
5077 cmd_buffer_emit_depth_stencil(struct anv_cmd_buffer *cmd_buffer)
5078 {
5079    struct anv_device *device = cmd_buffer->device;
5080    const struct anv_image_view *iview =
5081       anv_cmd_buffer_get_depth_stencil_view(cmd_buffer);
5082    const struct anv_image *image = iview ? iview->image : NULL;
5083 
5084    /* FIXME: Width and Height are wrong */
5085 
5086    genX(cmd_buffer_emit_gen7_depth_flush)(cmd_buffer);
5087 
5088    uint32_t *dw = anv_batch_emit_dwords(&cmd_buffer->batch,
5089                                         device->isl_dev.ds.size / 4);
5090    if (dw == NULL)
5091       return;
5092 
5093    struct isl_depth_stencil_hiz_emit_info info = { };
5094 
5095    if (iview)
5096       info.view = &iview->planes[0].isl;
5097 
5098    if (image && (image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT)) {
5099       uint32_t depth_plane =
5100          anv_image_aspect_to_plane(image->aspects, VK_IMAGE_ASPECT_DEPTH_BIT);
5101       const struct anv_surface *surface = &image->planes[depth_plane].surface;
5102 
5103       info.depth_surf = &surface->isl;
5104 
5105       info.depth_address =
5106          anv_batch_emit_reloc(&cmd_buffer->batch,
5107                               dw + device->isl_dev.ds.depth_offset / 4,
5108                               image->planes[depth_plane].address.bo,
5109                               image->planes[depth_plane].address.offset +
5110                               surface->offset);
5111       info.mocs =
5112          anv_mocs(device, image->planes[depth_plane].address.bo,
5113                   ISL_SURF_USAGE_DEPTH_BIT);
5114 
5115       const uint32_t ds =
5116          cmd_buffer->state.subpass->depth_stencil_attachment->attachment;
5117       info.hiz_usage = cmd_buffer->state.attachments[ds].aux_usage;
5118       if (info.hiz_usage != ISL_AUX_USAGE_NONE) {
5119          assert(isl_aux_usage_has_hiz(info.hiz_usage));
5120          info.hiz_surf = &image->planes[depth_plane].aux_surface.isl;
5121 
5122          info.hiz_address =
5123             anv_batch_emit_reloc(&cmd_buffer->batch,
5124                                  dw + device->isl_dev.ds.hiz_offset / 4,
5125                                  image->planes[depth_plane].address.bo,
5126                                  image->planes[depth_plane].address.offset +
5127                                  image->planes[depth_plane].aux_surface.offset);
5128 
5129          info.depth_clear_value = ANV_HZ_FC_VAL;
5130       }
5131    }
5132 
5133    if (image && (image->aspects & VK_IMAGE_ASPECT_STENCIL_BIT)) {
5134       uint32_t stencil_plane =
5135          anv_image_aspect_to_plane(image->aspects, VK_IMAGE_ASPECT_STENCIL_BIT);
5136       const struct anv_surface *surface = &image->planes[stencil_plane].surface;
5137 
5138       info.stencil_surf = &surface->isl;
5139 
5140       info.stencil_aux_usage = image->planes[stencil_plane].aux_usage;
5141       info.stencil_address =
5142          anv_batch_emit_reloc(&cmd_buffer->batch,
5143                               dw + device->isl_dev.ds.stencil_offset / 4,
5144                               image->planes[stencil_plane].address.bo,
5145                               image->planes[stencil_plane].address.offset +
5146                               surface->offset);
5147       info.mocs =
5148          anv_mocs(device, image->planes[stencil_plane].address.bo,
5149                   ISL_SURF_USAGE_STENCIL_BIT);
5150    }
5151 
5152    isl_emit_depth_stencil_hiz_s(&device->isl_dev, dw, &info);
5153 
5154    if (GEN_GEN >= 12) {
5155       cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_POST_SYNC_BIT;
5156       genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
5157 
5158       /* GEN:BUG:1408224581
5159        *
5160        * Workaround: Gen12LP A-step only.  An additional pipe control with
5161        * post-sync = store dword operation is required.  (The workaround is
5162        * to have an additional pipe control after the stencil state whenever
5163        * the surface state bits of this state change.)
5164        */
5165       anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
5166          pc.PostSyncOperation = WriteImmediateData;
5167          pc.Address = cmd_buffer->device->workaround_address;
5168       }
5169    }
5170    cmd_buffer->state.hiz_enabled = isl_aux_usage_has_hiz(info.hiz_usage);
5171 }
5172 
5173 /**
5174  * This ANDs the view mask of the current subpass with the pending clear
5175  * views in the attachment to get the mask of views active in the subpass
5176  * that still need to be cleared.
5177  */
5178 static inline uint32_t
5179 get_multiview_subpass_clear_mask(const struct anv_cmd_state *cmd_state,
5180                                  const struct anv_attachment_state *att_state)
5181 {
5182    return cmd_state->subpass->view_mask & att_state->pending_clear_views;
5183 }
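/* Worked example: with subpass->view_mask == 0x6 (views 1 and 2 active) and
 * att_state->pending_clear_views == 0x4 (only view 2 still needs clearing),
 * the returned mask is 0x4.  Bit 0 is not set, so do_first_layer_clear()
 * below returns false and only view 2 is cleared via the multiview path.
 */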
5184 
5185 static inline bool
5186 do_first_layer_clear(const struct anv_cmd_state *cmd_state,
5187                      const struct anv_attachment_state *att_state)
5188 {
5189    if (!cmd_state->subpass->view_mask)
5190       return true;
5191 
5192    uint32_t pending_clear_mask =
5193       get_multiview_subpass_clear_mask(cmd_state, att_state);
5194 
5195    return pending_clear_mask & 1;
5196 }
5197 
5198 static inline bool
5199 current_subpass_is_last_for_attachment(const struct anv_cmd_state *cmd_state,
5200                                        uint32_t att_idx)
5201 {
5202    const uint32_t last_subpass_idx =
5203       cmd_state->pass->attachments[att_idx].last_subpass_idx;
5204    const struct anv_subpass *last_subpass =
5205       &cmd_state->pass->subpasses[last_subpass_idx];
5206    return last_subpass == cmd_state->subpass;
5207 }
5208 
5209 static void
5210 cmd_buffer_begin_subpass(struct anv_cmd_buffer *cmd_buffer,
5211                          uint32_t subpass_id)
5212 {
5213    struct anv_cmd_state *cmd_state = &cmd_buffer->state;
5214    struct anv_render_pass *pass = cmd_state->pass;
5215    struct anv_subpass *subpass = &pass->subpasses[subpass_id];
5216    cmd_state->subpass = subpass;
5217 
5218    cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_RENDER_TARGETS;
5219 
5220    /* Our implementation of VK_KHR_multiview uses instancing to draw the
5221     * different views.  If the client asks for instancing, we need to use the
5222     * Instance Data Step Rate to ensure that we repeat the client's
5223     * per-instance data once for each view.  Since this bit is in
5224     * VERTEX_BUFFER_STATE on gen7, we need to dirty vertex buffers at the top
5225     * of each subpass.
5226     */
5227    if (GEN_GEN == 7)
5228       cmd_buffer->state.gfx.vb_dirty |= ~0;
5229 
5230    /* It is possible to start a render pass with an old pipeline.  Because the
5231     * render pass and subpass index are both baked into the pipeline, this is
5232     * highly unlikely.  In order to do so, it requires that you have a render
5233     * pass with a single subpass and that you use that render pass twice
5234     * back-to-back and use the same pipeline at the start of the second render
5235     * pass as at the end of the first.  In order to avoid unpredictable issues
5236     * with this edge case, we just dirty the pipeline at the start of every
5237     * subpass.
5238     */
5239    cmd_buffer->state.gfx.dirty |= ANV_CMD_DIRTY_PIPELINE;
5240 
5241    /* Accumulate any subpass flushes that need to happen before the subpass */
5242    cmd_buffer->state.pending_pipe_bits |=
5243       cmd_buffer->state.pass->subpass_flushes[subpass_id];
5244 
5245    VkRect2D render_area = cmd_buffer->state.render_area;
5246    struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
5247 
5248    bool is_multiview = subpass->view_mask != 0;
5249 
5250    for (uint32_t i = 0; i < subpass->attachment_count; ++i) {
5251       const uint32_t a = subpass->attachments[i].attachment;
5252       if (a == VK_ATTACHMENT_UNUSED)
5253          continue;
5254 
5255       assert(a < cmd_state->pass->attachment_count);
5256       struct anv_attachment_state *att_state = &cmd_state->attachments[a];
5257 
5258       struct anv_image_view *iview = cmd_state->attachments[a].image_view;
5259       const struct anv_image *image = iview->image;
5260 
5261       VkImageLayout target_layout = subpass->attachments[i].layout;
5262       VkImageLayout target_stencil_layout =
5263          subpass->attachments[i].stencil_layout;
5264 
5265       uint32_t level = iview->planes[0].isl.base_level;
5266       uint32_t width = anv_minify(iview->image->extent.width, level);
5267       uint32_t height = anv_minify(iview->image->extent.height, level);
5268       bool full_surface_draw =
5269          render_area.offset.x == 0 && render_area.offset.y == 0 &&
5270          render_area.extent.width == width &&
5271          render_area.extent.height == height;
5272 
5273       uint32_t base_layer, layer_count;
5274       if (image->type == VK_IMAGE_TYPE_3D) {
5275          base_layer = 0;
5276          layer_count = anv_minify(iview->image->extent.depth, level);
5277       } else {
5278          base_layer = iview->planes[0].isl.base_array_layer;
5279          layer_count = fb->layers;
5280       }
5281 
5282       if (image->aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) {
5283          bool will_full_fast_clear =
5284             (att_state->pending_clear_aspects & VK_IMAGE_ASPECT_COLOR_BIT) &&
5285             att_state->fast_clear && full_surface_draw;
5286 
5287          assert(image->aspects == VK_IMAGE_ASPECT_COLOR_BIT);
5288          transition_color_buffer(cmd_buffer, image, VK_IMAGE_ASPECT_COLOR_BIT,
5289                                  level, 1, base_layer, layer_count,
5290                                  att_state->current_layout, target_layout,
5291                                  will_full_fast_clear);
5292          att_state->aux_usage =
5293             anv_layout_to_aux_usage(&cmd_buffer->device->info, image,
5294                                     VK_IMAGE_ASPECT_COLOR_BIT,
5295                                     VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT,
5296                                     target_layout);
5297       }
5298 
5299       if (image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT) {
5300          bool will_full_fast_clear =
5301             (att_state->pending_clear_aspects & VK_IMAGE_ASPECT_DEPTH_BIT) &&
5302             att_state->fast_clear && full_surface_draw;
5303 
5304          transition_depth_buffer(cmd_buffer, image,
5305                                  base_layer, layer_count,
5306                                  att_state->current_layout, target_layout,
5307                                  will_full_fast_clear);
5308          att_state->aux_usage =
5309             anv_layout_to_aux_usage(&cmd_buffer->device->info, image,
5310                                     VK_IMAGE_ASPECT_DEPTH_BIT,
5311                                     VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT,
5312                                     target_layout);
5313       }
5314 
5315       if (image->aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
5316          bool will_full_fast_clear =
5317             (att_state->pending_clear_aspects & VK_IMAGE_ASPECT_STENCIL_BIT) &&
5318             att_state->fast_clear && full_surface_draw;
5319 
5320          transition_stencil_buffer(cmd_buffer, image,
5321                                    level, 1, base_layer, layer_count,
5322                                    att_state->current_stencil_layout,
5323                                    target_stencil_layout,
5324                                    will_full_fast_clear);
5325       }
5326       att_state->current_layout = target_layout;
5327       att_state->current_stencil_layout = target_stencil_layout;
5328 
5329       if (att_state->pending_clear_aspects & VK_IMAGE_ASPECT_COLOR_BIT) {
5330          assert(att_state->pending_clear_aspects == VK_IMAGE_ASPECT_COLOR_BIT);
5331 
5332          /* Multi-planar images are not supported as attachments */
5333          assert(image->aspects == VK_IMAGE_ASPECT_COLOR_BIT);
5334          assert(image->n_planes == 1);
5335 
5336          uint32_t base_clear_layer = iview->planes[0].isl.base_array_layer;
5337          uint32_t clear_layer_count = fb->layers;
5338 
5339          if (att_state->fast_clear &&
5340              do_first_layer_clear(cmd_state, att_state)) {
5341             /* We only support fast-clears on the first layer */
5342             assert(level == 0 && base_layer == 0);
5343 
5344             union isl_color_value clear_color = {};
5345             anv_clear_color_from_att_state(&clear_color, att_state, iview);
5346             if (iview->image->samples == 1) {
5347                anv_image_ccs_op(cmd_buffer, image,
5348                                 iview->planes[0].isl.format,
5349                                 iview->planes[0].isl.swizzle,
5350                                 VK_IMAGE_ASPECT_COLOR_BIT,
5351                                 0, 0, 1, ISL_AUX_OP_FAST_CLEAR,
5352                                 &clear_color,
5353                                 false);
5354             } else {
5355                anv_image_mcs_op(cmd_buffer, image,
5356                                 iview->planes[0].isl.format,
5357                                 iview->planes[0].isl.swizzle,
5358                                 VK_IMAGE_ASPECT_COLOR_BIT,
5359                                 0, 1, ISL_AUX_OP_FAST_CLEAR,
5360                                 &clear_color,
5361                                 false);
5362             }
5363             base_clear_layer++;
5364             clear_layer_count--;
5365             if (is_multiview)
5366                att_state->pending_clear_views &= ~1;
5367 
5368             if (isl_color_value_is_zero(clear_color,
5369                                         iview->planes[0].isl.format)) {
5370                /* This image has the auxiliary buffer enabled. We can mark the
5371                 * subresource as not needing a resolve because the clear color
5372                 * will match what's in every RENDER_SURFACE_STATE object when
5373                 * it's being used for sampling.
5374                 */
5375                set_image_fast_clear_state(cmd_buffer, iview->image,
5376                                           VK_IMAGE_ASPECT_COLOR_BIT,
5377                                           ANV_FAST_CLEAR_DEFAULT_VALUE);
5378             } else {
5379                set_image_fast_clear_state(cmd_buffer, iview->image,
5380                                           VK_IMAGE_ASPECT_COLOR_BIT,
5381                                           ANV_FAST_CLEAR_ANY);
5382             }
5383          }
5384 
5385          /* From the VkFramebufferCreateInfo spec:
5386           *
5387           * "If the render pass uses multiview, then layers must be one and each
5388           *  attachment requires a number of layers that is greater than the
5389           *  maximum bit index set in the view mask in the subpasses in which it
5390           *  is used."
5391           *
5392           * So if multiview is active we ignore the number of layers in the
5393           * framebuffer and instead we honor the view mask from the subpass.
5394           */
5395          if (is_multiview) {
5396             assert(image->n_planes == 1);
5397             uint32_t pending_clear_mask =
5398                get_multiview_subpass_clear_mask(cmd_state, att_state);
5399 
5400             uint32_t layer_idx;
5401             for_each_bit(layer_idx, pending_clear_mask) {
5402                uint32_t layer =
5403                   iview->planes[0].isl.base_array_layer + layer_idx;
5404 
5405                anv_image_clear_color(cmd_buffer, image,
5406                                      VK_IMAGE_ASPECT_COLOR_BIT,
5407                                      att_state->aux_usage,
5408                                      iview->planes[0].isl.format,
5409                                      iview->planes[0].isl.swizzle,
5410                                      level, layer, 1,
5411                                      render_area,
5412                                      vk_to_isl_color(att_state->clear_value.color));
5413             }
5414 
5415             att_state->pending_clear_views &= ~pending_clear_mask;
5416          } else if (clear_layer_count > 0) {
5417             assert(image->n_planes == 1);
5418             anv_image_clear_color(cmd_buffer, image, VK_IMAGE_ASPECT_COLOR_BIT,
5419                                   att_state->aux_usage,
5420                                   iview->planes[0].isl.format,
5421                                   iview->planes[0].isl.swizzle,
5422                                   level, base_clear_layer, clear_layer_count,
5423                                   render_area,
5424                                   vk_to_isl_color(att_state->clear_value.color));
5425          }
5426       } else if (att_state->pending_clear_aspects & (VK_IMAGE_ASPECT_DEPTH_BIT |
5427                                                      VK_IMAGE_ASPECT_STENCIL_BIT)) {
5428          if (att_state->fast_clear &&
5429              (att_state->pending_clear_aspects & VK_IMAGE_ASPECT_DEPTH_BIT)) {
5430             /* We currently only support HiZ for single-LOD images */
5431             assert(isl_aux_usage_has_hiz(iview->image->planes[0].aux_usage));
5432             assert(iview->planes[0].isl.base_level == 0);
5433             assert(iview->planes[0].isl.levels == 1);
5434          }
5435 
5436          if (is_multiview) {
5437             uint32_t pending_clear_mask =
5438               get_multiview_subpass_clear_mask(cmd_state, att_state);
5439 
5440             uint32_t layer_idx;
5441             for_each_bit(layer_idx, pending_clear_mask) {
5442                uint32_t layer =
5443                   iview->planes[0].isl.base_array_layer + layer_idx;
5444 
5445                if (att_state->fast_clear) {
5446                   anv_image_hiz_clear(cmd_buffer, image,
5447                                       att_state->pending_clear_aspects,
5448                                       level, layer, 1, render_area,
5449                                       att_state->clear_value.depthStencil.stencil);
5450                } else {
5451                   anv_image_clear_depth_stencil(cmd_buffer, image,
5452                                                 att_state->pending_clear_aspects,
5453                                                 att_state->aux_usage,
5454                                                 level, layer, 1, render_area,
5455                                                 att_state->clear_value.depthStencil.depth,
5456                                                 att_state->clear_value.depthStencil.stencil);
5457                }
5458             }
5459 
5460             att_state->pending_clear_views &= ~pending_clear_mask;
5461          } else {
5462             if (att_state->fast_clear) {
5463                anv_image_hiz_clear(cmd_buffer, image,
5464                                    att_state->pending_clear_aspects,
5465                                    level, base_layer, layer_count,
5466                                    render_area,
5467                                    att_state->clear_value.depthStencil.stencil);
5468             } else {
5469                anv_image_clear_depth_stencil(cmd_buffer, image,
5470                                              att_state->pending_clear_aspects,
5471                                              att_state->aux_usage,
5472                                              level, base_layer, layer_count,
5473                                              render_area,
5474                                              att_state->clear_value.depthStencil.depth,
5475                                              att_state->clear_value.depthStencil.stencil);
5476             }
5477          }
5478       } else {
5479          assert(att_state->pending_clear_aspects == 0);
5480       }
5481 
5482       /* If multiview is enabled, then we are only done clearing when we no
5483        * longer have pending layers to clear, or when we have processed the
5484        * last subpass that uses this attachment.
5485        */
5486       if (!is_multiview ||
5487           att_state->pending_clear_views == 0 ||
5488           current_subpass_is_last_for_attachment(cmd_state, a)) {
5489          att_state->pending_clear_aspects = 0;
5490       }
5491 
5492       att_state->pending_load_aspects = 0;
5493    }
5494 
5495    /* We've transitioned all our images, possibly fast clearing them.  Now we
5496     * can fill out the surface states that we will use as render targets
5497     * during actual subpass rendering.
5498     */
5499    VkResult result = genX(cmd_buffer_alloc_att_surf_states)(cmd_buffer,
5500                                                             pass, subpass);
5501    if (result != VK_SUCCESS)
5502       return;
5503 
5504    isl_null_fill_state(&cmd_buffer->device->isl_dev,
5505                        cmd_state->null_surface_state.map,
5506                        isl_extent3d(fb->width, fb->height, fb->layers));
5507 
5508    for (uint32_t i = 0; i < subpass->attachment_count; ++i) {
5509       const uint32_t att = subpass->attachments[i].attachment;
5510       if (att == VK_ATTACHMENT_UNUSED)
5511          continue;
5512 
5513       assert(att < cmd_state->pass->attachment_count);
5514       struct anv_render_pass_attachment *pass_att = &pass->attachments[att];
5515       struct anv_attachment_state *att_state = &cmd_state->attachments[att];
5516       struct anv_image_view *iview = att_state->image_view;
5517 
5518       if (!vk_format_is_color(pass_att->format))
5519          continue;
5520 
5521       const VkImageUsageFlagBits att_usage = subpass->attachments[i].usage;
5522       assert(util_bitcount(att_usage) == 1);
5523 
5524       struct anv_surface_state *surface_state;
5525       isl_surf_usage_flags_t isl_surf_usage;
5526       enum isl_aux_usage isl_aux_usage;
5527       if (att_usage == VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) {
5528          surface_state = &att_state->color;
5529          isl_surf_usage = ISL_SURF_USAGE_RENDER_TARGET_BIT;
5530          isl_aux_usage = att_state->aux_usage;
5531       } else if (att_usage == VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT) {
5532          surface_state = &att_state->input;
5533          isl_surf_usage = ISL_SURF_USAGE_TEXTURE_BIT;
5534          isl_aux_usage =
5535             anv_layout_to_aux_usage(&cmd_buffer->device->info, iview->image,
5536                                     VK_IMAGE_ASPECT_COLOR_BIT,
5537                                     VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT,
5538                                     att_state->current_layout);
5539       } else {
5540          continue;
5541       }
5542 
5543       /* We had better have a surface state when we get here */
5544       assert(surface_state->state.map);
5545 
5546       union isl_color_value clear_color = { .u32 = { 0, } };
5547       if (pass_att->load_op == VK_ATTACHMENT_LOAD_OP_CLEAR &&
5548           att_state->fast_clear)
5549          anv_clear_color_from_att_state(&clear_color, att_state, iview);
5550 
5551       anv_image_fill_surface_state(cmd_buffer->device,
5552                                    iview->image,
5553                                    VK_IMAGE_ASPECT_COLOR_BIT,
5554                                    &iview->planes[0].isl,
5555                                    isl_surf_usage,
5556                                    isl_aux_usage,
5557                                    &clear_color,
5558                                    0,
5559                                    surface_state,
5560                                    NULL);
5561 
5562       add_surface_state_relocs(cmd_buffer, *surface_state);
5563 
5564       if (GEN_GEN < 10 &&
5565           pass_att->load_op == VK_ATTACHMENT_LOAD_OP_LOAD &&
5566           iview->image->planes[0].aux_usage != ISL_AUX_USAGE_NONE &&
5567           iview->planes[0].isl.base_level == 0 &&
5568           iview->planes[0].isl.base_array_layer == 0) {
5569          genX(copy_fast_clear_dwords)(cmd_buffer, surface_state->state,
5570                                       iview->image,
5571                                       VK_IMAGE_ASPECT_COLOR_BIT,
5572                                       false /* copy to ss */);
5573       }
5574    }
5575 
5576 #if GEN_GEN >= 11
5577    /* The PIPE_CONTROL command description says:
5578     *
5579     *    "Whenever a Binding Table Index (BTI) used by a Render Target Message
5580     *     points to a different RENDER_SURFACE_STATE, SW must issue a Render
5581     *     Target Cache Flush by enabling this bit. When render target flush
5582     *     is set due to new association of BTI, PS Scoreboard Stall bit must
5583     *     be set in this packet."
5584     */
5585    cmd_buffer->state.pending_pipe_bits |=
5586       ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT |
5587       ANV_PIPE_STALL_AT_SCOREBOARD_BIT;
5588 #endif
5589 
5590 #if GEN_GEN == 12
5591    /* GEN:BUG:14010455700
5592     *
5593     * ISL will change some CHICKEN registers depending on the depth surface
5594     * format, along with emitting the depth and stencil packets. In that case,
5595     * we want to do a depth flush and stall, so the pipeline is not using these
5596     * settings while we change the registers.
5597     */
5598    cmd_buffer->state.pending_pipe_bits |=
5599       ANV_PIPE_DEPTH_CACHE_FLUSH_BIT |
5600       ANV_PIPE_DEPTH_STALL_BIT |
5601       ANV_PIPE_END_OF_PIPE_SYNC_BIT;
5602    genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
5603 #endif
5604 
5605    cmd_buffer_emit_depth_stencil(cmd_buffer);
5606 }
5607 
5608 static enum blorp_filter
5609 vk_to_blorp_resolve_mode(VkResolveModeFlagBitsKHR vk_mode)
5610 {
5611    switch (vk_mode) {
5612    case VK_RESOLVE_MODE_SAMPLE_ZERO_BIT_KHR:
5613       return BLORP_FILTER_SAMPLE_0;
5614    case VK_RESOLVE_MODE_AVERAGE_BIT_KHR:
5615       return BLORP_FILTER_AVERAGE;
5616    case VK_RESOLVE_MODE_MIN_BIT_KHR:
5617       return BLORP_FILTER_MIN_SAMPLE;
5618    case VK_RESOLVE_MODE_MAX_BIT_KHR:
5619       return BLORP_FILTER_MAX_SAMPLE;
5620    default:
5621       return BLORP_FILTER_NONE;
5622    }
5623 }
5624 
5625 static void
5626 cmd_buffer_end_subpass(struct anv_cmd_buffer *cmd_buffer)
5627 {
5628    struct anv_cmd_state *cmd_state = &cmd_buffer->state;
5629    struct anv_subpass *subpass = cmd_state->subpass;
5630    uint32_t subpass_id = anv_get_subpass_id(&cmd_buffer->state);
5631    struct anv_framebuffer *fb = cmd_buffer->state.framebuffer;
5632 
5633    /* We are done with the previous subpass and all rendering directly to that
5634     * subpass is now complete.  Zero out all the surface states so we don't
5635     * accidentally use them between now and the next subpass.
5636     */
5637    for (uint32_t i = 0; i < cmd_state->pass->attachment_count; ++i) {
5638       memset(&cmd_state->attachments[i].color, 0,
5639              sizeof(cmd_state->attachments[i].color));
5640       memset(&cmd_state->attachments[i].input, 0,
5641              sizeof(cmd_state->attachments[i].input));
5642    }
5643    cmd_state->null_surface_state = ANV_STATE_NULL;
5644    cmd_state->attachment_states = ANV_STATE_NULL;
5645 
5646    for (uint32_t i = 0; i < subpass->attachment_count; ++i) {
5647       const uint32_t a = subpass->attachments[i].attachment;
5648       if (a == VK_ATTACHMENT_UNUSED)
5649          continue;
5650 
5651       assert(a < cmd_state->pass->attachment_count);
5652       struct anv_attachment_state *att_state = &cmd_state->attachments[a];
5653       struct anv_image_view *iview = att_state->image_view;
5654 
5655       assert(util_bitcount(subpass->attachments[i].usage) == 1);
5656       if (subpass->attachments[i].usage ==
5657           VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) {
5658          /* We assume that if we're ending a subpass, we did do some rendering
5659           * so we may end up with compressed data.
5660           */
5661          genX(cmd_buffer_mark_image_written)(cmd_buffer, iview->image,
5662                                              VK_IMAGE_ASPECT_COLOR_BIT,
5663                                              att_state->aux_usage,
5664                                              iview->planes[0].isl.base_level,
5665                                              iview->planes[0].isl.base_array_layer,
5666                                              fb->layers);
5667       } else if (subpass->attachments[i].usage ==
5668                  VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) {
5669          /* We may be writing depth or stencil so we need to mark the surface.
5670           * Unfortunately, there's no way to know at this point whether the
5671           * depth or stencil tests used will actually write to the surface.
5672           *
5673           * Even though stencil may be plane 1, it always shares a base_level
5674           * with depth.
5675           */
5676          const struct isl_view *ds_view = &iview->planes[0].isl;
5677          if (iview->aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT) {
5678             genX(cmd_buffer_mark_image_written)(cmd_buffer, iview->image,
5679                                                 VK_IMAGE_ASPECT_DEPTH_BIT,
5680                                                 att_state->aux_usage,
5681                                                 ds_view->base_level,
5682                                                 ds_view->base_array_layer,
5683                                                 fb->layers);
5684          }
5685          if (iview->aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT) {
5686             /* Even though stencil may be plane 1, it always shares a
5687              * base_level with depth.
5688              */
5689             genX(cmd_buffer_mark_image_written)(cmd_buffer, iview->image,
5690                                                 VK_IMAGE_ASPECT_STENCIL_BIT,
5691                                                 ISL_AUX_USAGE_NONE,
5692                                                 ds_view->base_level,
5693                                                 ds_view->base_array_layer,
5694                                                 fb->layers);
5695          }
5696       }
5697    }
5698 
5699    if (subpass->has_color_resolve) {
5700       /* We are about to do some MSAA resolves.  We need to flush so that the
5701        * results of writes to the MSAA color attachments show up in the sampler
5702        * when we blit to the single-sampled resolve target.
5703        */
5704       cmd_buffer->state.pending_pipe_bits |=
5705          ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT |
5706          ANV_PIPE_RENDER_TARGET_CACHE_FLUSH_BIT;
5707 
5708       for (uint32_t i = 0; i < subpass->color_count; ++i) {
5709          uint32_t src_att = subpass->color_attachments[i].attachment;
5710          uint32_t dst_att = subpass->resolve_attachments[i].attachment;
5711 
5712          if (dst_att == VK_ATTACHMENT_UNUSED)
5713             continue;
5714 
5715          assert(src_att < cmd_buffer->state.pass->attachment_count);
5716          assert(dst_att < cmd_buffer->state.pass->attachment_count);
5717 
5718          if (cmd_buffer->state.attachments[dst_att].pending_clear_aspects) {
5719             /* From the Vulkan 1.0 spec:
5720              *
5721              *    If the first use of an attachment in a render pass is as a
5722              *    resolve attachment, then the loadOp is effectively ignored
5723              *    as the resolve is guaranteed to overwrite all pixels in the
5724              *    render area.
5725              */
5726             cmd_buffer->state.attachments[dst_att].pending_clear_aspects = 0;
5727          }
5728 
5729          struct anv_image_view *src_iview = cmd_state->attachments[src_att].image_view;
5730          struct anv_image_view *dst_iview = cmd_state->attachments[dst_att].image_view;
5731 
5732          const VkRect2D render_area = cmd_buffer->state.render_area;
5733 
5734          enum isl_aux_usage src_aux_usage =
5735             cmd_buffer->state.attachments[src_att].aux_usage;
5736          enum isl_aux_usage dst_aux_usage =
5737             cmd_buffer->state.attachments[dst_att].aux_usage;
5738 
5739          assert(src_iview->aspect_mask == VK_IMAGE_ASPECT_COLOR_BIT &&
5740                 dst_iview->aspect_mask == VK_IMAGE_ASPECT_COLOR_BIT);
5741 
5742          anv_image_msaa_resolve(cmd_buffer,
5743                                 src_iview->image, src_aux_usage,
5744                                 src_iview->planes[0].isl.base_level,
5745                                 src_iview->planes[0].isl.base_array_layer,
5746                                 dst_iview->image, dst_aux_usage,
5747                                 dst_iview->planes[0].isl.base_level,
5748                                 dst_iview->planes[0].isl.base_array_layer,
5749                                 VK_IMAGE_ASPECT_COLOR_BIT,
5750                                 render_area.offset.x, render_area.offset.y,
5751                                 render_area.offset.x, render_area.offset.y,
5752                                 render_area.extent.width,
5753                                 render_area.extent.height,
5754                                 fb->layers, BLORP_FILTER_NONE);
5755       }
5756    }
5757 
5758    if (subpass->ds_resolve_attachment) {
5759       /* We are about to do some MSAA resolves.  We need to flush so that the
5760        * results of writes to the MSAA depth attachments show up in the sampler
5761        * when we blit to the single-sampled resolve target.
5762        */
5763       cmd_buffer->state.pending_pipe_bits |=
5764          ANV_PIPE_TEXTURE_CACHE_INVALIDATE_BIT |
5765          ANV_PIPE_DEPTH_CACHE_FLUSH_BIT;
5766 
5767       uint32_t src_att = subpass->depth_stencil_attachment->attachment;
5768       uint32_t dst_att = subpass->ds_resolve_attachment->attachment;
5769 
5770       assert(src_att < cmd_buffer->state.pass->attachment_count);
5771       assert(dst_att < cmd_buffer->state.pass->attachment_count);
5772 
5773       if (cmd_buffer->state.attachments[dst_att].pending_clear_aspects) {
5774          /* From the Vulkan 1.0 spec:
5775           *
5776           *    If the first use of an attachment in a render pass is as a
5777           *    resolve attachment, then the loadOp is effectively ignored
5778           *    as the resolve is guaranteed to overwrite all pixels in the
5779           *    render area.
5780           */
5781          cmd_buffer->state.attachments[dst_att].pending_clear_aspects = 0;
5782       }
5783 
5784       struct anv_image_view *src_iview = cmd_state->attachments[src_att].image_view;
5785       struct anv_image_view *dst_iview = cmd_state->attachments[dst_att].image_view;
5786 
5787       const VkRect2D render_area = cmd_buffer->state.render_area;
5788 
5789       struct anv_attachment_state *src_state =
5790          &cmd_state->attachments[src_att];
5791       struct anv_attachment_state *dst_state =
5792          &cmd_state->attachments[dst_att];
5793 
5794       if ((src_iview->image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT) &&
5795           subpass->depth_resolve_mode != VK_RESOLVE_MODE_NONE_KHR) {
5796 
5797          /* MSAA resolves sample from the source attachment.  Transition the
5798           * depth attachment first to get rid of any HiZ that we may not be
5799           * able to handle.
5800           */
5801          transition_depth_buffer(cmd_buffer, src_iview->image,
5802                                  src_iview->planes[0].isl.base_array_layer,
5803                                  fb->layers,
5804                                  src_state->current_layout,
5805                                  VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
5806                                  false /* will_full_fast_clear */);
5807          src_state->aux_usage =
5808             anv_layout_to_aux_usage(&cmd_buffer->device->info, src_iview->image,
5809                                     VK_IMAGE_ASPECT_DEPTH_BIT,
5810                                     VK_IMAGE_USAGE_TRANSFER_SRC_BIT,
5811                                     VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL);
5812          src_state->current_layout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
5813 
5814          /* MSAA resolves write to the resolve attachment as if it were any
5815           * other transfer op.  Transition the resolve attachment accordingly.
5816           */
5817          VkImageLayout dst_initial_layout = dst_state->current_layout;
5818 
5819          /* If our render area is the entire size of the image, we're going to
5820           * blow it all away so we can claim the initial layout is UNDEFINED
5821           * and we'll get a HiZ ambiguate instead of a resolve.
5822           */
5823          if (dst_iview->image->type != VK_IMAGE_TYPE_3D &&
5824              render_area.offset.x == 0 && render_area.offset.y == 0 &&
5825              render_area.extent.width == dst_iview->extent.width &&
5826              render_area.extent.height == dst_iview->extent.height)
5827             dst_initial_layout = VK_IMAGE_LAYOUT_UNDEFINED;
5828 
5829          transition_depth_buffer(cmd_buffer, dst_iview->image,
5830                                  dst_iview->planes[0].isl.base_array_layer,
5831                                  fb->layers,
5832                                  dst_initial_layout,
5833                                  VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
5834                                  false /* will_full_fast_clear */);
5835          dst_state->aux_usage =
5836             anv_layout_to_aux_usage(&cmd_buffer->device->info, dst_iview->image,
5837                                     VK_IMAGE_ASPECT_DEPTH_BIT,
5838                                     VK_IMAGE_USAGE_TRANSFER_DST_BIT,
5839                                     VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL);
5840          dst_state->current_layout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
5841 
5842          enum blorp_filter filter =
5843             vk_to_blorp_resolve_mode(subpass->depth_resolve_mode);
5844 
5845          anv_image_msaa_resolve(cmd_buffer,
5846                                 src_iview->image, src_state->aux_usage,
5847                                 src_iview->planes[0].isl.base_level,
5848                                 src_iview->planes[0].isl.base_array_layer,
5849                                 dst_iview->image, dst_state->aux_usage,
5850                                 dst_iview->planes[0].isl.base_level,
5851                                 dst_iview->planes[0].isl.base_array_layer,
5852                                 VK_IMAGE_ASPECT_DEPTH_BIT,
5853                                 render_area.offset.x, render_area.offset.y,
5854                                 render_area.offset.x, render_area.offset.y,
5855                                 render_area.extent.width,
5856                                 render_area.extent.height,
5857                                 fb->layers, filter);
5858       }
5859 
5860       if ((src_iview->image->aspects & VK_IMAGE_ASPECT_STENCIL_BIT) &&
5861           subpass->stencil_resolve_mode != VK_RESOLVE_MODE_NONE_KHR) {
5862 
5863          src_state->current_stencil_layout = VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL;
5864          dst_state->current_stencil_layout = VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL;
5865 
5866          enum isl_aux_usage src_aux_usage = ISL_AUX_USAGE_NONE;
5867          uint32_t plane = anv_image_aspect_to_plane(dst_iview->image->aspects,
5868                                                     VK_IMAGE_ASPECT_STENCIL_BIT);
5869          enum isl_aux_usage dst_aux_usage =
5870             dst_iview->image->planes[plane].aux_usage;
5871 
5872          enum blorp_filter filter =
5873             vk_to_blorp_resolve_mode(subpass->stencil_resolve_mode);
5874 
5875          anv_image_msaa_resolve(cmd_buffer,
5876                                 src_iview->image, src_aux_usage,
5877                                 src_iview->planes[0].isl.base_level,
5878                                 src_iview->planes[0].isl.base_array_layer,
5879                                 dst_iview->image, dst_aux_usage,
5880                                 dst_iview->planes[0].isl.base_level,
5881                                 dst_iview->planes[0].isl.base_array_layer,
5882                                 VK_IMAGE_ASPECT_STENCIL_BIT,
5883                                 render_area.offset.x, render_area.offset.y,
5884                                 render_area.offset.x, render_area.offset.y,
5885                                 render_area.extent.width,
5886                                 render_area.extent.height,
5887                                 fb->layers, filter);
5888       }
5889    }
5890 
5891 #if GEN_GEN == 7
5892    /* On gen7, we have to store a texturable version of the stencil buffer in
5893     * a shadow whenever VK_IMAGE_USAGE_SAMPLED_BIT is set and copy back and
5894     * forth at strategic points. Stencil writes are only allowed in the
5895     * following layouts:
5896     *
5897     *  - VK_IMAGE_LAYOUT_GENERAL
5898     *  - VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL
5899     *  - VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL
5900     *  - VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL
5901     *  - VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL_KHR
5902     *
5903     * For general, we have no nice opportunity to transition so we do the copy
5904     * to the shadow unconditionally at the end of the subpass. For transfer
5905     * destinations, we can update it as part of the transfer op. For the other
5906     * layouts, we delay the copy until a transition into some other layout.
5907     */
5908    if (subpass->depth_stencil_attachment) {
5909       uint32_t a = subpass->depth_stencil_attachment->attachment;
5910       assert(a != VK_ATTACHMENT_UNUSED);
5911 
5912       struct anv_attachment_state *att_state = &cmd_state->attachments[a];
5913       struct anv_image_view *iview = cmd_state->attachments[a].image_view;
5914       const struct anv_image *image = iview->image;
5915 
5916       if (image->aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
5917          uint32_t plane = anv_image_aspect_to_plane(image->aspects,
5918                                                     VK_IMAGE_ASPECT_STENCIL_BIT);
5919 
5920          if (image->planes[plane].shadow_surface.isl.size_B > 0 &&
5921              att_state->current_stencil_layout == VK_IMAGE_LAYOUT_GENERAL) {
5922             assert(image->aspects & VK_IMAGE_ASPECT_STENCIL_BIT);
5923             anv_image_copy_to_shadow(cmd_buffer, image,
5924                                      VK_IMAGE_ASPECT_STENCIL_BIT,
5925                                      iview->planes[plane].isl.base_level, 1,
5926                                      iview->planes[plane].isl.base_array_layer,
5927                                      fb->layers);
5928          }
5929       }
5930    }
5931 #endif /* GEN_GEN == 7 */
5932 
5933    for (uint32_t i = 0; i < subpass->attachment_count; ++i) {
5934       const uint32_t a = subpass->attachments[i].attachment;
5935       if (a == VK_ATTACHMENT_UNUSED)
5936          continue;
5937 
5938       if (cmd_state->pass->attachments[a].last_subpass_idx != subpass_id)
5939          continue;
5940 
5941       assert(a < cmd_state->pass->attachment_count);
5942       struct anv_attachment_state *att_state = &cmd_state->attachments[a];
5943       struct anv_image_view *iview = cmd_state->attachments[a].image_view;
5944       const struct anv_image *image = iview->image;
5945 
5946       /* Transition the image into the final layout for this render pass */
5947       VkImageLayout target_layout =
5948          cmd_state->pass->attachments[a].final_layout;
5949       VkImageLayout target_stencil_layout =
5950          cmd_state->pass->attachments[a].stencil_final_layout;
5951 
5952       uint32_t base_layer, layer_count;
5953       if (image->type == VK_IMAGE_TYPE_3D) {
5954          base_layer = 0;
5955          layer_count = anv_minify(iview->image->extent.depth,
5956                                   iview->planes[0].isl.base_level);
5957       } else {
5958          base_layer = iview->planes[0].isl.base_array_layer;
5959          layer_count = fb->layers;
5960       }
5961 
5962       if (image->aspects & VK_IMAGE_ASPECT_ANY_COLOR_BIT_ANV) {
5963          assert(image->aspects == VK_IMAGE_ASPECT_COLOR_BIT);
5964          transition_color_buffer(cmd_buffer, image, VK_IMAGE_ASPECT_COLOR_BIT,
5965                                  iview->planes[0].isl.base_level, 1,
5966                                  base_layer, layer_count,
5967                                  att_state->current_layout, target_layout,
5968                                  false /* will_full_fast_clear */);
5969       }
5970 
5971       if (image->aspects & VK_IMAGE_ASPECT_DEPTH_BIT) {
5972          transition_depth_buffer(cmd_buffer, image,
5973                                  base_layer, layer_count,
5974                                  att_state->current_layout, target_layout,
5975                                  false /* will_full_fast_clear */);
5976       }
5977 
5978       if (image->aspects & VK_IMAGE_ASPECT_STENCIL_BIT) {
5979          transition_stencil_buffer(cmd_buffer, image,
5980                                    iview->planes[0].isl.base_level, 1,
5981                                    base_layer, layer_count,
5982                                    att_state->current_stencil_layout,
5983                                    target_stencil_layout,
5984                                    false /* will_full_fast_clear */);
5985       }
5986    }
5987 
5988    /* Accumulate any subpass flushes that need to happen after the subpass.
5989     * Yes, they do get accumulated twice in the NextSubpass case but since
5990     * genX_CmdNextSubpass just calls end/begin back-to-back, we just end up
5991     * ORing the bits in twice so it's harmless.
5992     */
5993    cmd_buffer->state.pending_pipe_bits |=
5994       cmd_buffer->state.pass->subpass_flushes[subpass_id + 1];
5995 }
5996 
5997 void genX(CmdBeginRenderPass)(
5998     VkCommandBuffer                             commandBuffer,
5999     const VkRenderPassBeginInfo*                pRenderPassBegin,
6000     VkSubpassContents                           contents)
6001 {
6002    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
6003    ANV_FROM_HANDLE(anv_render_pass, pass, pRenderPassBegin->renderPass);
6004    ANV_FROM_HANDLE(anv_framebuffer, framebuffer, pRenderPassBegin->framebuffer);
6005    VkResult result;
6006 
6007    cmd_buffer->state.framebuffer = framebuffer;
6008    cmd_buffer->state.pass = pass;
6009    cmd_buffer->state.render_area = pRenderPassBegin->renderArea;
6010 
6011    result = genX(cmd_buffer_setup_attachments)(cmd_buffer, pass,
6012                                                framebuffer,
6013                                                pRenderPassBegin);
6014    if (result != VK_SUCCESS) {
6015       assert(anv_batch_has_error(&cmd_buffer->batch));
6016       return;
6017    }
6018 
6019    genX(flush_pipeline_select_3d)(cmd_buffer);
6020 
6021    cmd_buffer_begin_subpass(cmd_buffer, 0);
6022 }
6023 
6024 void genX(CmdBeginRenderPass2)(
6025     VkCommandBuffer                             commandBuffer,
6026     const VkRenderPassBeginInfo*                pRenderPassBeginInfo,
6027     const VkSubpassBeginInfoKHR*                pSubpassBeginInfo)
6028 {
6029    genX(CmdBeginRenderPass)(commandBuffer, pRenderPassBeginInfo,
6030                             pSubpassBeginInfo->contents);
6031 }
6032 
6033 void genX(CmdNextSubpass)(
6034     VkCommandBuffer                             commandBuffer,
6035     VkSubpassContents                           contents)
6036 {
6037    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
6038 
6039    if (anv_batch_has_error(&cmd_buffer->batch))
6040       return;
6041 
6042    assert(cmd_buffer->level == VK_COMMAND_BUFFER_LEVEL_PRIMARY);
6043 
6044    uint32_t prev_subpass = anv_get_subpass_id(&cmd_buffer->state);
6045    cmd_buffer_end_subpass(cmd_buffer);
6046    cmd_buffer_begin_subpass(cmd_buffer, prev_subpass + 1);
6047 }
6048 
6049 void genX(CmdNextSubpass2)(
6050     VkCommandBuffer                             commandBuffer,
6051     const VkSubpassBeginInfoKHR*                pSubpassBeginInfo,
6052     const VkSubpassEndInfoKHR*                  pSubpassEndInfo)
6053 {
6054    genX(CmdNextSubpass)(commandBuffer, pSubpassBeginInfo->contents);
6055 }
6056 
6057 void genX(CmdEndRenderPass)(
6058     VkCommandBuffer                             commandBuffer)
6059 {
6060    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
6061 
6062    if (anv_batch_has_error(&cmd_buffer->batch))
6063       return;
6064 
6065    cmd_buffer_end_subpass(cmd_buffer);
6066 
6067    cmd_buffer->state.hiz_enabled = false;
6068 
6069 #ifndef NDEBUG
6070    anv_dump_add_attachments(cmd_buffer);
6071 #endif
6072 
6073    /* Remove references to render-pass-specific state. This enables us to
6074     * detect whether or not we're in a render pass.
6075     */
6076    cmd_buffer->state.framebuffer = NULL;
6077    cmd_buffer->state.pass = NULL;
6078    cmd_buffer->state.subpass = NULL;
6079 }
6080 
6081 void genX(CmdEndRenderPass2)(
6082     VkCommandBuffer                             commandBuffer,
6083     const VkSubpassEndInfoKHR*                  pSubpassEndInfo)
6084 {
6085    genX(CmdEndRenderPass)(commandBuffer);
6086 }
6087 
6088 void
6089 genX(cmd_emit_conditional_render_predicate)(struct anv_cmd_buffer *cmd_buffer)
6090 {
6091 #if GEN_GEN >= 8 || GEN_IS_HASWELL
6092    struct gen_mi_builder b;
6093    gen_mi_builder_init(&b, &cmd_buffer->batch);
6094 
6095    gen_mi_store(&b, gen_mi_reg64(MI_PREDICATE_SRC0),
6096                     gen_mi_reg32(ANV_PREDICATE_RESULT_REG));
6097    gen_mi_store(&b, gen_mi_reg64(MI_PREDICATE_SRC1), gen_mi_imm(0));
6098 
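   /* MI_PREDICATE compares SRC0 (the precomputed ANV_PREDICATE_RESULT_REG)
    * against SRC1 (zero).  COMPARE_SRCS_EQUAL yields (SRC0 == 0) and
    * LOAD_LOADINV inverts it, so the predicate ends up set exactly when the
    * precomputed result is non-zero.
    */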
6099    anv_batch_emit(&cmd_buffer->batch, GENX(MI_PREDICATE), mip) {
6100       mip.LoadOperation    = LOAD_LOADINV;
6101       mip.CombineOperation = COMBINE_SET;
6102       mip.CompareOperation = COMPARE_SRCS_EQUAL;
6103    }
6104 #endif
6105 }
6106 
6107 #if GEN_GEN >= 8 || GEN_IS_HASWELL
6108 void genX(CmdBeginConditionalRenderingEXT)(
6109    VkCommandBuffer                             commandBuffer,
6110    const VkConditionalRenderingBeginInfoEXT*   pConditionalRenderingBegin)
6111 {
6112    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
6113    ANV_FROM_HANDLE(anv_buffer, buffer, pConditionalRenderingBegin->buffer);
6114    struct anv_cmd_state *cmd_state = &cmd_buffer->state;
6115    struct anv_address value_address =
6116       anv_address_add(buffer->address, pConditionalRenderingBegin->offset);
6117 
6118    const bool isInverted = pConditionalRenderingBegin->flags &
6119                            VK_CONDITIONAL_RENDERING_INVERTED_BIT_EXT;
6120 
6121    cmd_state->conditional_render_enabled = true;
6122 
6123    genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
6124 
6125    struct gen_mi_builder b;
6126    gen_mi_builder_init(&b, &cmd_buffer->batch);
6127 
6128    /* Section 19.4 of the Vulkan 1.1.85 spec says:
6129     *
6130     *    If the value of the predicate in buffer memory changes
6131     *    while conditional rendering is active, the rendering commands
6132     *    may be discarded in an implementation-dependent way.
6133     *    Some implementations may latch the value of the predicate
6134     *    upon beginning conditional rendering while others
6135     *    may read it before every rendering command.
6136     *
6137     * So it's perfectly fine to read a value from the buffer once.
6138     */
6139    struct gen_mi_value value = gen_mi_mem32(value_address);
6140 
6141    /* Precompute the predicate result; this is needed for secondary command
6142     * buffers, since it is unknown whether conditional rendering is inverted
6143     * when they are populated (a usage sketch follows this function).
6144     */
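   /* gen_mi_ult(0, value) is non-zero exactly when value != 0, while
    * gen_mi_uge(0, value) is non-zero exactly when value == 0, covering the
    * normal and inverted cases respectively.
    */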
6145    gen_mi_store(&b, gen_mi_reg64(ANV_PREDICATE_RESULT_REG),
6146                     isInverted ? gen_mi_uge(&b, gen_mi_imm(0), value) :
6147                                  gen_mi_ult(&b, gen_mi_imm(0), value));
6148 }
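
/* For reference, a minimal application-side sketch of the extension this
 * entry point implements.  Names such as predicate_buffer are illustrative
 * placeholders, not part of this driver:
 *
 *    VkConditionalRenderingBeginInfoEXT begin = {
 *       .sType = VK_STRUCTURE_TYPE_CONDITIONAL_RENDERING_BEGIN_INFO_EXT,
 *       .buffer = predicate_buffer,   // holds a 32-bit predicate value
 *       .offset = 0,                  // must be a multiple of 4
 *       .flags = 0,                   // or VK_CONDITIONAL_RENDERING_INVERTED_BIT_EXT
 *    };
 *    vkCmdBeginConditionalRenderingEXT(cmd, &begin);
 *    vkCmdDraw(cmd, 3, 1, 0, 0);      // discarded when the predicate is zero
 *    vkCmdEndConditionalRenderingEXT(cmd);
 */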
6149 
6150 void genX(CmdEndConditionalRenderingEXT)(
6151     VkCommandBuffer                             commandBuffer)
6152 {
6153    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
6154    struct anv_cmd_state *cmd_state = &cmd_buffer->state;
6155 
6156    cmd_state->conditional_render_enabled = false;
6157 }
6158 #endif
6159 
6160 /* Set of stage bits that are pipelined, i.e. they get queued by the
6161  * command streamer for later execution.
6162  */
6163 #define ANV_PIPELINE_STAGE_PIPELINED_BITS \
6164    (VK_PIPELINE_STAGE_VERTEX_INPUT_BIT | \
6165     VK_PIPELINE_STAGE_VERTEX_SHADER_BIT | \
6166     VK_PIPELINE_STAGE_TESSELLATION_CONTROL_SHADER_BIT | \
6167     VK_PIPELINE_STAGE_TESSELLATION_EVALUATION_SHADER_BIT | \
6168     VK_PIPELINE_STAGE_GEOMETRY_SHADER_BIT | \
6169     VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT | \
6170     VK_PIPELINE_STAGE_EARLY_FRAGMENT_TESTS_BIT | \
6171     VK_PIPELINE_STAGE_LATE_FRAGMENT_TESTS_BIT | \
6172     VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | \
6173     VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT | \
6174     VK_PIPELINE_STAGE_TRANSFER_BIT | \
6175     VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT | \
6176     VK_PIPELINE_STAGE_ALL_GRAPHICS_BIT | \
6177     VK_PIPELINE_STAGE_ALL_COMMANDS_BIT)
6178 
6179 void genX(CmdSetEvent)(
6180     VkCommandBuffer                             commandBuffer,
6181     VkEvent                                     _event,
6182     VkPipelineStageFlags                        stageMask)
6183 {
6184    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
6185    ANV_FROM_HANDLE(anv_event, event, _event);
6186 
6187    cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_POST_SYNC_BIT;
6188    genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
6189 
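   /* The event is backed by a dword in the dynamic state pool; PIPE_CONTROL's
    * post-sync operation writes VK_EVENT_SET into it.  When the stage mask
    * contains pipelined stages, we also stall (command streamer and pixel
    * scoreboard) so the write is not performed ahead of prior pipelined work.
    */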
6190    anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
6191       if (stageMask & ANV_PIPELINE_STAGE_PIPELINED_BITS) {
6192          pc.StallAtPixelScoreboard = true;
6193          pc.CommandStreamerStallEnable = true;
6194       }
6195 
6196       pc.DestinationAddressType  = DAT_PPGTT;
6197       pc.PostSyncOperation       = WriteImmediateData;
6198       pc.Address = (struct anv_address) {
6199          cmd_buffer->device->dynamic_state_pool.block_pool.bo,
6200          event->state.offset
6201       };
6202       pc.ImmediateData           = VK_EVENT_SET;
6203    }
6204 }
6205 
6206 void genX(CmdResetEvent)(
6207     VkCommandBuffer                             commandBuffer,
6208     VkEvent                                     _event,
6209     VkPipelineStageFlags                        stageMask)
6210 {
6211    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
6212    ANV_FROM_HANDLE(anv_event, event, _event);
6213 
6214    cmd_buffer->state.pending_pipe_bits |= ANV_PIPE_POST_SYNC_BIT;
6215    genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
6216 
6217    anv_batch_emit(&cmd_buffer->batch, GENX(PIPE_CONTROL), pc) {
6218       if (stageMask & ANV_PIPELINE_STAGE_PIPELINED_BITS) {
6219          pc.StallAtPixelScoreboard = true;
6220          pc.CommandStreamerStallEnable = true;
6221       }
6222 
6223       pc.DestinationAddressType  = DAT_PPGTT;
6224       pc.PostSyncOperation       = WriteImmediateData;
6225       pc.Address = (struct anv_address) {
6226          cmd_buffer->device->dynamic_state_pool.block_pool.bo,
6227          event->state.offset
6228       };
6229       pc.ImmediateData           = VK_EVENT_RESET;
6230    }
6231 }
6232 
6233 void genX(CmdWaitEvents)(
6234     VkCommandBuffer                             commandBuffer,
6235     uint32_t                                    eventCount,
6236     const VkEvent*                              pEvents,
6237     VkPipelineStageFlags                        srcStageMask,
6238     VkPipelineStageFlags                        destStageMask,
6239     uint32_t                                    memoryBarrierCount,
6240     const VkMemoryBarrier*                      pMemoryBarriers,
6241     uint32_t                                    bufferMemoryBarrierCount,
6242     const VkBufferMemoryBarrier*                pBufferMemoryBarriers,
6243     uint32_t                                    imageMemoryBarrierCount,
6244     const VkImageMemoryBarrier*                 pImageMemoryBarriers)
6245 {
6246 #if GEN_GEN >= 8
6247    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
6248 
6249    for (uint32_t i = 0; i < eventCount; i++) {
6250       ANV_FROM_HANDLE(anv_event, event, pEvents[i]);
6251 
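      /* Have the command streamer poll the event's dword until it reads
       * VK_EVENT_SET: COMPARE_SAD_EQUAL_SDD waits until the data at
       * SemaphoreAddress equals SemaphoreDataDword.
       */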
6252       anv_batch_emit(&cmd_buffer->batch, GENX(MI_SEMAPHORE_WAIT), sem) {
6253          sem.WaitMode            = PollingMode;
6254          sem.CompareOperation    = COMPARE_SAD_EQUAL_SDD;
6255          sem.SemaphoreDataDword  = VK_EVENT_SET;
6256          sem.SemaphoreAddress = (struct anv_address) {
6257             cmd_buffer->device->dynamic_state_pool.block_pool.bo,
6258             event->state.offset
6259          };
6260       }
6261    }
6262 #else
6263    anv_finishme("Implement events on gen7");
6264 #endif
6265 
6266    genX(CmdPipelineBarrier)(commandBuffer, srcStageMask, destStageMask,
6267                             false, /* byRegion */
6268                             memoryBarrierCount, pMemoryBarriers,
6269                             bufferMemoryBarrierCount, pBufferMemoryBarriers,
6270                             imageMemoryBarrierCount, pImageMemoryBarriers);
6271 }
6272 
6273 VkResult genX(CmdSetPerformanceOverrideINTEL)(
6274     VkCommandBuffer                             commandBuffer,
6275     const VkPerformanceOverrideInfoINTEL*       pOverrideInfo)
6276 {
6277    ANV_FROM_HANDLE(anv_cmd_buffer, cmd_buffer, commandBuffer);
6278 
6279    switch (pOverrideInfo->type) {
6280    case VK_PERFORMANCE_OVERRIDE_TYPE_NULL_HARDWARE_INTEL: {
6281       uint32_t dw;
6282 
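      /* Null-hardware override: CS_DEBUG_MODE2 (INSTPM on older gens) is a
       * masked register, so the disable bits are written together with their
       * mask bits.  With 3D and media instructions disabled, submitted work
       * becomes a no-op, which lets tools measure the non-GPU overhead of a
       * workload.
       */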
6283 #if GEN_GEN >= 9
6284       anv_pack_struct(&dw, GENX(CS_DEBUG_MODE2),
6285                       ._3DRenderingInstructionDisable = pOverrideInfo->enable,
6286                       .MediaInstructionDisable = pOverrideInfo->enable,
6287                       ._3DRenderingInstructionDisableMask = true,
6288                       .MediaInstructionDisableMask = true);
6289       emit_lri(&cmd_buffer->batch, GENX(CS_DEBUG_MODE2_num), dw);
6290 #else
6291       anv_pack_struct(&dw, GENX(INSTPM),
6292                       ._3DRenderingInstructionDisable = pOverrideInfo->enable,
6293                       .MediaInstructionDisable = pOverrideInfo->enable,
6294                       ._3DRenderingInstructionDisableMask = true,
6295                       .MediaInstructionDisableMask = true);
6296       emit_lri(&cmd_buffer->batch, GENX(INSTPM_num), dw);
6297 #endif
6298       break;
6299    }
6300 
6301    case VK_PERFORMANCE_OVERRIDE_TYPE_FLUSH_GPU_CACHES_INTEL:
6302       if (pOverrideInfo->enable) {
6303          /* FLUSH ALL THE THINGS! As requested by the MDAPI team. */
6304          cmd_buffer->state.pending_pipe_bits |=
6305             ANV_PIPE_FLUSH_BITS |
6306             ANV_PIPE_INVALIDATE_BITS;
6307          genX(cmd_buffer_apply_pipe_flushes)(cmd_buffer);
6308       }
6309       break;
6310 
6311    default:
6312       unreachable("Invalid override");
6313    }
6314 
6315    return VK_SUCCESS;
6316 }
6317 
6318 VkResult genX(CmdSetPerformanceStreamMarkerINTEL)(
6319     VkCommandBuffer                             commandBuffer,
6320     const VkPerformanceStreamMarkerInfoINTEL*   pMarkerInfo)
6321 {
6322    /* TODO: Wait on the register write; this might depend on the generation. */
6323 
6324    return VK_SUCCESS;
6325 }
6326