1 /*
2  * Copyright © 2017 Intel Corporation
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included
12  * in all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15  * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
18  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
19  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
20  * DEALINGS IN THE SOFTWARE.
21  */
22 
23 /**
24  * @file iris_state.c
25  *
26  * ============================= GENXML CODE =============================
27  *              [This file is compiled once per generation.]
28  * =======================================================================
29  *
30  * This is the main state upload code.
31  *
32  * Gallium uses Constant State Objects, or CSOs, for most state.  Large,
33  * complex, or highly reusable state can be created once, and bound and
34  * rebound multiple times.  This is modeled with the pipe->create_*_state()
35  * and pipe->bind_*_state() hooks.  Highly dynamic or inexpensive state is
36  * streamed out on the fly, via pipe->set_*_state() hooks.
37  *
38  * OpenGL involves frequently mutating context state, which is mirrored in
39  * core Mesa by highly mutable data structures.  However, most applications
40  * typically draw the same things over and over - from frame to frame, most
41  * of the same objects are still visible and need to be redrawn.  So, rather
42  * than inventing new state all the time, applications usually mutate to swap
43  * between known states that we've seen before.
44  *
45  * Gallium isolates us from this mutation by tracking API state, and
46  * distilling it into a set of Constant State Objects, or CSOs.  Large,
47  * complex, or typically reusable state can be created once, then reused
48  * multiple times.  Drivers can create and store their own associated data.
49  * This create/bind model corresponds to the pipe->create_*_state() and
50  * pipe->bind_*_state() driver hooks.
51  *
52  * Some state is cheap to create, or expected to be highly dynamic.  Rather
53  * than creating and caching piles of CSOs for these, Gallium simply streams
54  * them out, via the pipe->set_*_state() driver hooks.
55  *
56  * To reduce draw time overhead, we try to compute as much state at create
57  * time as possible.  Wherever possible, we translate the Gallium pipe state
58  * to 3DSTATE commands, and store those commands in the CSO.  At draw time,
59  * we can simply memcpy them into a batch buffer.
60  *
61  * No hardware matches the abstraction perfectly, so some commands require
62  * information from multiple CSOs.  In this case, we can store two copies
63  * of the packet (one in each CSO), and simply | together their DWords at
64  * draw time.  Sometimes the second set is trivial (one or two fields), so
65  * we simply pack it at draw time.
66  *
67  * There are two main components in the file below.  First, the CSO hooks
68  * create/bind/track state.  Second, the draw-time upload functions,
69  * iris_upload_render_state() and iris_upload_compute_state(), read the
70  * context state and emit the commands into the actual batch.
71  */
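
/*
 * As a rough illustration of the create-time packing described above
 * (3DSTATE_FOO is a placeholder packet name, not an actual command):
 *
 *    iris_pack_command(GENX(3DSTATE_FOO), cso->foo, foo) {
 *       foo.FieldKnownAtCreateTime = value;
 *    }
 *
 * Draw-time code can then memcpy cso->foo into the batch directly, or OR
 * its DWords together with another CSO's partial copy of the same packet
 * before emitting it.
 */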
72 
73 #include <stdio.h>
74 #include <errno.h>
75 
76 #if HAVE_VALGRIND
77 #include <valgrind.h>
78 #include <memcheck.h>
79 #define VG(x) x
80 #ifdef DEBUG
81 #define __gen_validate_value(x) VALGRIND_CHECK_MEM_IS_DEFINED(&(x), sizeof(x))
82 #endif
83 #else
84 #define VG(x)
85 #endif
86 
87 #include "pipe/p_defines.h"
88 #include "pipe/p_state.h"
89 #include "pipe/p_context.h"
90 #include "pipe/p_screen.h"
91 #include "util/u_dual_blend.h"
92 #include "util/u_inlines.h"
93 #include "util/format/u_format.h"
94 #include "util/u_framebuffer.h"
95 #include "util/u_transfer.h"
96 #include "util/u_upload_mgr.h"
97 #include "util/u_viewport.h"
98 #include "util/u_memory.h"
99 #include "drm-uapi/i915_drm.h"
100 #include "nir.h"
101 #include "intel/compiler/brw_compiler.h"
102 #include "intel/common/gen_aux_map.h"
103 #include "intel/common/gen_l3_config.h"
104 #include "intel/common/gen_sample_positions.h"
105 #include "iris_batch.h"
106 #include "iris_context.h"
107 #include "iris_defines.h"
108 #include "iris_pipe.h"
109 #include "iris_resource.h"
110 
111 #include "iris_genx_macros.h"
112 #include "intel/common/gen_guardband.h"
113 
114 /**
115  * Statically assert that PIPE_* enums match the hardware packets.
116  * (As long as they match, we don't need to translate them.)
117  */
118 UNUSED static void pipe_asserts()
119 {
120 #define PIPE_ASSERT(x) STATIC_ASSERT((int)x)
121 
122    /* pipe_logicop happens to match the hardware. */
123    PIPE_ASSERT(PIPE_LOGICOP_CLEAR == LOGICOP_CLEAR);
124    PIPE_ASSERT(PIPE_LOGICOP_NOR == LOGICOP_NOR);
125    PIPE_ASSERT(PIPE_LOGICOP_AND_INVERTED == LOGICOP_AND_INVERTED);
126    PIPE_ASSERT(PIPE_LOGICOP_COPY_INVERTED == LOGICOP_COPY_INVERTED);
127    PIPE_ASSERT(PIPE_LOGICOP_AND_REVERSE == LOGICOP_AND_REVERSE);
128    PIPE_ASSERT(PIPE_LOGICOP_INVERT == LOGICOP_INVERT);
129    PIPE_ASSERT(PIPE_LOGICOP_XOR == LOGICOP_XOR);
130    PIPE_ASSERT(PIPE_LOGICOP_NAND == LOGICOP_NAND);
131    PIPE_ASSERT(PIPE_LOGICOP_AND == LOGICOP_AND);
132    PIPE_ASSERT(PIPE_LOGICOP_EQUIV == LOGICOP_EQUIV);
133    PIPE_ASSERT(PIPE_LOGICOP_NOOP == LOGICOP_NOOP);
134    PIPE_ASSERT(PIPE_LOGICOP_OR_INVERTED == LOGICOP_OR_INVERTED);
135    PIPE_ASSERT(PIPE_LOGICOP_COPY == LOGICOP_COPY);
136    PIPE_ASSERT(PIPE_LOGICOP_OR_REVERSE == LOGICOP_OR_REVERSE);
137    PIPE_ASSERT(PIPE_LOGICOP_OR == LOGICOP_OR);
138    PIPE_ASSERT(PIPE_LOGICOP_SET == LOGICOP_SET);
139 
140    /* pipe_blendfactor happens to match the hardware. */
141    PIPE_ASSERT(PIPE_BLENDFACTOR_ONE == BLENDFACTOR_ONE);
142    PIPE_ASSERT(PIPE_BLENDFACTOR_SRC_COLOR == BLENDFACTOR_SRC_COLOR);
143    PIPE_ASSERT(PIPE_BLENDFACTOR_SRC_ALPHA == BLENDFACTOR_SRC_ALPHA);
144    PIPE_ASSERT(PIPE_BLENDFACTOR_DST_ALPHA == BLENDFACTOR_DST_ALPHA);
145    PIPE_ASSERT(PIPE_BLENDFACTOR_DST_COLOR == BLENDFACTOR_DST_COLOR);
146    PIPE_ASSERT(PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE == BLENDFACTOR_SRC_ALPHA_SATURATE);
147    PIPE_ASSERT(PIPE_BLENDFACTOR_CONST_COLOR == BLENDFACTOR_CONST_COLOR);
148    PIPE_ASSERT(PIPE_BLENDFACTOR_CONST_ALPHA == BLENDFACTOR_CONST_ALPHA);
149    PIPE_ASSERT(PIPE_BLENDFACTOR_SRC1_COLOR == BLENDFACTOR_SRC1_COLOR);
150    PIPE_ASSERT(PIPE_BLENDFACTOR_SRC1_ALPHA == BLENDFACTOR_SRC1_ALPHA);
151    PIPE_ASSERT(PIPE_BLENDFACTOR_ZERO == BLENDFACTOR_ZERO);
152    PIPE_ASSERT(PIPE_BLENDFACTOR_INV_SRC_COLOR == BLENDFACTOR_INV_SRC_COLOR);
153    PIPE_ASSERT(PIPE_BLENDFACTOR_INV_SRC_ALPHA == BLENDFACTOR_INV_SRC_ALPHA);
154    PIPE_ASSERT(PIPE_BLENDFACTOR_INV_DST_ALPHA == BLENDFACTOR_INV_DST_ALPHA);
155    PIPE_ASSERT(PIPE_BLENDFACTOR_INV_DST_COLOR == BLENDFACTOR_INV_DST_COLOR);
156    PIPE_ASSERT(PIPE_BLENDFACTOR_INV_CONST_COLOR == BLENDFACTOR_INV_CONST_COLOR);
157    PIPE_ASSERT(PIPE_BLENDFACTOR_INV_CONST_ALPHA == BLENDFACTOR_INV_CONST_ALPHA);
158    PIPE_ASSERT(PIPE_BLENDFACTOR_INV_SRC1_COLOR == BLENDFACTOR_INV_SRC1_COLOR);
159    PIPE_ASSERT(PIPE_BLENDFACTOR_INV_SRC1_ALPHA == BLENDFACTOR_INV_SRC1_ALPHA);
160 
161    /* pipe_blend_func happens to match the hardware. */
162    PIPE_ASSERT(PIPE_BLEND_ADD == BLENDFUNCTION_ADD);
163    PIPE_ASSERT(PIPE_BLEND_SUBTRACT == BLENDFUNCTION_SUBTRACT);
164    PIPE_ASSERT(PIPE_BLEND_REVERSE_SUBTRACT == BLENDFUNCTION_REVERSE_SUBTRACT);
165    PIPE_ASSERT(PIPE_BLEND_MIN == BLENDFUNCTION_MIN);
166    PIPE_ASSERT(PIPE_BLEND_MAX == BLENDFUNCTION_MAX);
167 
168    /* pipe_stencil_op happens to match the hardware. */
169    PIPE_ASSERT(PIPE_STENCIL_OP_KEEP == STENCILOP_KEEP);
170    PIPE_ASSERT(PIPE_STENCIL_OP_ZERO == STENCILOP_ZERO);
171    PIPE_ASSERT(PIPE_STENCIL_OP_REPLACE == STENCILOP_REPLACE);
172    PIPE_ASSERT(PIPE_STENCIL_OP_INCR == STENCILOP_INCRSAT);
173    PIPE_ASSERT(PIPE_STENCIL_OP_DECR == STENCILOP_DECRSAT);
174    PIPE_ASSERT(PIPE_STENCIL_OP_INCR_WRAP == STENCILOP_INCR);
175    PIPE_ASSERT(PIPE_STENCIL_OP_DECR_WRAP == STENCILOP_DECR);
176    PIPE_ASSERT(PIPE_STENCIL_OP_INVERT == STENCILOP_INVERT);
177 
178    /* pipe_sprite_coord_mode happens to match 3DSTATE_SBE */
179    PIPE_ASSERT(PIPE_SPRITE_COORD_UPPER_LEFT == UPPERLEFT);
180    PIPE_ASSERT(PIPE_SPRITE_COORD_LOWER_LEFT == LOWERLEFT);
181 #undef PIPE_ASSERT
182 }
183 
184 static unsigned
185 translate_prim_type(enum pipe_prim_type prim, uint8_t verts_per_patch)
186 {
187    static const unsigned map[] = {
188       [PIPE_PRIM_POINTS]                   = _3DPRIM_POINTLIST,
189       [PIPE_PRIM_LINES]                    = _3DPRIM_LINELIST,
190       [PIPE_PRIM_LINE_LOOP]                = _3DPRIM_LINELOOP,
191       [PIPE_PRIM_LINE_STRIP]               = _3DPRIM_LINESTRIP,
192       [PIPE_PRIM_TRIANGLES]                = _3DPRIM_TRILIST,
193       [PIPE_PRIM_TRIANGLE_STRIP]           = _3DPRIM_TRISTRIP,
194       [PIPE_PRIM_TRIANGLE_FAN]             = _3DPRIM_TRIFAN,
195       [PIPE_PRIM_QUADS]                    = _3DPRIM_QUADLIST,
196       [PIPE_PRIM_QUAD_STRIP]               = _3DPRIM_QUADSTRIP,
197       [PIPE_PRIM_POLYGON]                  = _3DPRIM_POLYGON,
198       [PIPE_PRIM_LINES_ADJACENCY]          = _3DPRIM_LINELIST_ADJ,
199       [PIPE_PRIM_LINE_STRIP_ADJACENCY]     = _3DPRIM_LINESTRIP_ADJ,
200       [PIPE_PRIM_TRIANGLES_ADJACENCY]      = _3DPRIM_TRILIST_ADJ,
201       [PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY] = _3DPRIM_TRISTRIP_ADJ,
202       [PIPE_PRIM_PATCHES]                  = _3DPRIM_PATCHLIST_1 - 1,
203    };
204 
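   /* The hardware's 3DPRIM_PATCHLIST_n topologies are numbered consecutively,
    * so for patches we start one before _3DPRIM_PATCHLIST_1 and add the
    * number of vertices per patch to select the matching patchlist topology.
    */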
205    return map[prim] + (prim == PIPE_PRIM_PATCHES ? verts_per_patch : 0);
206 }
207 
208 static unsigned
209 translate_compare_func(enum pipe_compare_func pipe_func)
210 {
211    static const unsigned map[] = {
212       [PIPE_FUNC_NEVER]    = COMPAREFUNCTION_NEVER,
213       [PIPE_FUNC_LESS]     = COMPAREFUNCTION_LESS,
214       [PIPE_FUNC_EQUAL]    = COMPAREFUNCTION_EQUAL,
215       [PIPE_FUNC_LEQUAL]   = COMPAREFUNCTION_LEQUAL,
216       [PIPE_FUNC_GREATER]  = COMPAREFUNCTION_GREATER,
217       [PIPE_FUNC_NOTEQUAL] = COMPAREFUNCTION_NOTEQUAL,
218       [PIPE_FUNC_GEQUAL]   = COMPAREFUNCTION_GEQUAL,
219       [PIPE_FUNC_ALWAYS]   = COMPAREFUNCTION_ALWAYS,
220    };
221    return map[pipe_func];
222 }
223 
224 static unsigned
225 translate_shadow_func(enum pipe_compare_func pipe_func)
226 {
227    /* Gallium specifies the result of shadow comparisons as:
228     *
229     *    1 if ref <op> texel,
230     *    0 otherwise.
231     *
232     * The hardware does:
233     *
234     *    0 if texel <op> ref,
235     *    1 otherwise.
236     *
237     * So we need to flip the operator and also negate.
238     */
239    static const unsigned map[] = {
240       [PIPE_FUNC_NEVER]    = PREFILTEROPALWAYS,
241       [PIPE_FUNC_LESS]     = PREFILTEROPLEQUAL,
242       [PIPE_FUNC_EQUAL]    = PREFILTEROPNOTEQUAL,
243       [PIPE_FUNC_LEQUAL]   = PREFILTEROPLESS,
244       [PIPE_FUNC_GREATER]  = PREFILTEROPGEQUAL,
245       [PIPE_FUNC_NOTEQUAL] = PREFILTEROPEQUAL,
246       [PIPE_FUNC_GEQUAL]   = PREFILTEROPGREATER,
247       [PIPE_FUNC_ALWAYS]   = PREFILTEROPNEVER,
248    };
249    return map[pipe_func];
250 }
251 
252 static unsigned
253 translate_cull_mode(unsigned pipe_face)
254 {
255    static const unsigned map[4] = {
256       [PIPE_FACE_NONE]           = CULLMODE_NONE,
257       [PIPE_FACE_FRONT]          = CULLMODE_FRONT,
258       [PIPE_FACE_BACK]           = CULLMODE_BACK,
259       [PIPE_FACE_FRONT_AND_BACK] = CULLMODE_BOTH,
260    };
261    return map[pipe_face];
262 }
263 
264 static unsigned
265 translate_fill_mode(unsigned pipe_polymode)
266 {
267    static const unsigned map[4] = {
268       [PIPE_POLYGON_MODE_FILL]           = FILL_MODE_SOLID,
269       [PIPE_POLYGON_MODE_LINE]           = FILL_MODE_WIREFRAME,
270       [PIPE_POLYGON_MODE_POINT]          = FILL_MODE_POINT,
271       [PIPE_POLYGON_MODE_FILL_RECTANGLE] = FILL_MODE_SOLID,
272    };
273    return map[pipe_polymode];
274 }
275 
276 static unsigned
277 translate_mip_filter(enum pipe_tex_mipfilter pipe_mip)
278 {
279    static const unsigned map[] = {
280       [PIPE_TEX_MIPFILTER_NEAREST] = MIPFILTER_NEAREST,
281       [PIPE_TEX_MIPFILTER_LINEAR]  = MIPFILTER_LINEAR,
282       [PIPE_TEX_MIPFILTER_NONE]    = MIPFILTER_NONE,
283    };
284    return map[pipe_mip];
285 }
286 
287 static uint32_t
288 translate_wrap(unsigned pipe_wrap)
289 {
290    static const unsigned map[] = {
291       [PIPE_TEX_WRAP_REPEAT]                 = TCM_WRAP,
292       [PIPE_TEX_WRAP_CLAMP]                  = TCM_HALF_BORDER,
293       [PIPE_TEX_WRAP_CLAMP_TO_EDGE]          = TCM_CLAMP,
294       [PIPE_TEX_WRAP_CLAMP_TO_BORDER]        = TCM_CLAMP_BORDER,
295       [PIPE_TEX_WRAP_MIRROR_REPEAT]          = TCM_MIRROR,
296       [PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE]   = TCM_MIRROR_ONCE,
297 
298       /* These are unsupported. */
299       [PIPE_TEX_WRAP_MIRROR_CLAMP]           = -1,
300       [PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER] = -1,
301    };
302    return map[pipe_wrap];
303 }
304 
305 /**
306  * Allocate space for some indirect state.
307  *
308  * Return a pointer to the map (to fill it out) and a state ref (for
309  * referring to the state in GPU commands).
310  */
311 static void *
312 upload_state(struct u_upload_mgr *uploader,
313              struct iris_state_ref *ref,
314              unsigned size,
315              unsigned alignment)
316 {
317    void *p = NULL;
318    u_upload_alloc(uploader, 0, size, alignment, &ref->offset, &ref->res, &p);
319    return p;
320 }
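
/* A typical caller pattern (an illustrative sketch, not code from this file;
 * SOME_STATE is a placeholder name):
 *
 *    struct iris_state_ref ref;
 *    uint32_t *map = upload_state(ice->state.dynamic_uploader, &ref,
 *                                 4 * GENX(SOME_STATE_length), 64);
 *    if (map) {
 *       // fill out the state in *map, then point a hardware packet at
 *       // ref.offset / ref.res
 *    }
 */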
321 
322 /**
323  * Stream out temporary/short-lived state.
324  *
325  * This allocates space, pins the BO, and includes the BO address in the
326  * returned offset (which works because all state lives in 32-bit memory
327  * zones).
328  */
329 static uint32_t *
330 stream_state(struct iris_batch *batch,
331              struct u_upload_mgr *uploader,
332              struct pipe_resource **out_res,
333              unsigned size,
334              unsigned alignment,
335              uint32_t *out_offset)
336 {
337    void *ptr = NULL;
338 
339    u_upload_alloc(uploader, 0, size, alignment, out_offset, out_res, &ptr);
340 
341    struct iris_bo *bo = iris_resource_bo(*out_res);
342    iris_use_pinned_bo(batch, bo, false, IRIS_DOMAIN_NONE);
343 
344    iris_record_state_size(batch->state_sizes,
345                           bo->gtt_offset + *out_offset, size);
346 
347    *out_offset += iris_bo_offset_from_base_address(bo);
348 
349    return ptr;
350 }
351 
352 /**
353  * stream_state() + memcpy.
354  */
355 static uint32_t
356 emit_state(struct iris_batch *batch,
357            struct u_upload_mgr *uploader,
358            struct pipe_resource **out_res,
359            const void *data,
360            unsigned size,
361            unsigned alignment)
362 {
363    unsigned offset = 0;
364    uint32_t *map =
365       stream_state(batch, uploader, out_res, size, alignment, &offset);
366 
367    if (map)
368       memcpy(map, data, size);
369 
370    return offset;
371 }
372 
373 /**
374  * Did field 'x' change between 'old_cso' and 'new_cso'?
375  *
376  * (If so, we may want to set some dirty flags.)
377  */
378 #define cso_changed(x) (!old_cso || (old_cso->x != new_cso->x))
379 #define cso_changed_memcmp(x) \
380    (!old_cso || memcmp(old_cso->x, new_cso->x, sizeof(old_cso->x)) != 0)
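
/* For example (an illustrative sketch only; the field and dirty bit here are
 * placeholders, not taken from a specific bind hook below):
 *
 *    if (cso_changed(line_width))
 *       ice->state.dirty |= IRIS_DIRTY_RASTER;
 */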
381 
382 static void
383 flush_before_state_base_change(struct iris_batch *batch)
384 {
385    const struct gen_device_info *devinfo = &batch->screen->devinfo;
386 
387    /* Flush before emitting STATE_BASE_ADDRESS.
388     *
389     * This isn't documented anywhere in the PRM.  However, it seems to be
390     * necessary prior to changing the surface state base address.  We've
391     * seen issues in Vulkan where we get GPU hangs when using multi-level
392     * command buffers which clear depth, reset state base address, and then
393     * go render stuff.
394     *
395     * Normally, in GL, we would trust the kernel to do sufficient stalls
396     * and flushes prior to executing our batch.  However, it doesn't seem
397     * as if the kernel's flushing is always sufficient and we don't want to
398     * rely on it.
399     *
400     * We make this an end-of-pipe sync instead of a normal flush because we
401     * do not know the current status of the GPU.  On Haswell at least,
402     * having a fast-clear operation in flight at the same time as a normal
403     * rendering operation can cause hangs.  Since the kernel's flushing is
404     * insufficient, we need to ensure that any rendering operations from
405     * other processes are definitely complete before we try to do our own
406     * rendering.  It's a bit of a big hammer but it appears to work.
407     */
408    iris_emit_end_of_pipe_sync(batch,
409                               "change STATE_BASE_ADDRESS (flushes)",
410                               PIPE_CONTROL_RENDER_TARGET_FLUSH |
411                               PIPE_CONTROL_DEPTH_CACHE_FLUSH |
412                               PIPE_CONTROL_DATA_CACHE_FLUSH |
413                               /* GEN:BUG:1606662791:
414                                *
415                                *   Software must program PIPE_CONTROL command
416                                *   with "HDC Pipeline Flush" prior to
417                                *   programming of the below two non-pipeline
418                                *   state :
419                                *      * STATE_BASE_ADDRESS
420                                *      * 3DSTATE_BINDING_TABLE_POOL_ALLOC
421                                */
422                               ((GEN_GEN == 12 && devinfo->revision == 0 /* A0 */ ?
423                                 PIPE_CONTROL_FLUSH_HDC : 0)));
424 }
425 
426 static void
427 flush_after_state_base_change(struct iris_batch *batch)
428 {
429    /* After re-setting the surface state base address, we have to do some
430     * cache flushing so that the sampler engine will pick up the new
431     * SURFACE_STATE objects and binding tables. From the Broadwell PRM,
432     * Shared Function > 3D Sampler > State > State Caching (page 96):
433     *
434     *    Coherency with system memory in the state cache, like the texture
435     *    cache is handled partially by software. It is expected that the
436     *    command stream or shader will issue Cache Flush operation or
437     *    Cache_Flush sampler message to ensure that the L1 cache remains
438     *    coherent with system memory.
439     *
440     *    [...]
441     *
442     *    Whenever the value of the Dynamic_State_Base_Addr,
443     *    Surface_State_Base_Addr are altered, the L1 state cache must be
444     *    invalidated to ensure the new surface or sampler state is fetched
445     *    from system memory.
446     *
447     * The PIPE_CONTROL command has a "State Cache Invalidation Enable" bit
448     * which, according to the PIPE_CONTROL instruction documentation in the
449     * Broadwell PRM:
450     *
451     *    Setting this bit is independent of any other bit in this packet.
452     *    This bit controls the invalidation of the L1 and L2 state caches
453     *    at the top of the pipe i.e. at the parsing time.
454     *
455     * Unfortunately, experimentation seems to indicate that state cache
456     * invalidation through a PIPE_CONTROL does nothing whatsoever in
457     * regard to surface state and binding tables.  Instead, it seems that
458     * invalidating the texture cache is what is actually needed.
459     *
460     * XXX:  As far as we have been able to determine through
461     * experimentation, flushing the texture cache appears to be
462     * sufficient.  The theory here is that all of the sampling/rendering
463     * units cache the binding table in the texture cache.  However, we have
464     * yet to be able to actually confirm this.
465     */
466    iris_emit_end_of_pipe_sync(batch,
467                               "change STATE_BASE_ADDRESS (invalidates)",
468                               PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
469                               PIPE_CONTROL_CONST_CACHE_INVALIDATE |
470                               PIPE_CONTROL_STATE_CACHE_INVALIDATE);
471 }
472 
473 static void
474 _iris_emit_lri(struct iris_batch *batch, uint32_t reg, uint32_t val)
475 {
476    iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
477       lri.RegisterOffset = reg;
478       lri.DataDWord      = val;
479    }
480 }
481 #define iris_emit_lri(b, r, v) _iris_emit_lri(b, GENX(r##_num), v)
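
/* The wrapper macro takes a genxml register name: GENX(r##_num) expands to
 * that register's MMIO offset, so iris_emit_lri(batch, CACHE_MODE_1, val)
 * programs the CACHE_MODE_1 register (see its uses later in this file).
 */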
482 
483 static void
484 _iris_emit_lrr(struct iris_batch *batch, uint32_t dst, uint32_t src)
485 {
486    iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_REG), lrr) {
487       lrr.SourceRegisterAddress = src;
488       lrr.DestinationRegisterAddress = dst;
489    }
490 }
491 
492 static void
493 iris_load_register_reg32(struct iris_batch *batch, uint32_t dst,
494                          uint32_t src)
495 {
496    _iris_emit_lrr(batch, dst, src);
497 }
498 
499 static void
500 iris_load_register_reg64(struct iris_batch *batch, uint32_t dst,
501                          uint32_t src)
502 {
503    _iris_emit_lrr(batch, dst, src);
504    _iris_emit_lrr(batch, dst + 4, src + 4);
505 }
506 
507 static void
508 iris_load_register_imm32(struct iris_batch *batch, uint32_t reg,
509                          uint32_t val)
510 {
511    _iris_emit_lri(batch, reg, val);
512 }
513 
514 static void
515 iris_load_register_imm64(struct iris_batch *batch, uint32_t reg,
516                          uint64_t val)
517 {
518    _iris_emit_lri(batch, reg + 0, val & 0xffffffff);
519    _iris_emit_lri(batch, reg + 4, val >> 32);
520 }
521 
522 /**
523  * Emit MI_LOAD_REGISTER_MEM to load a 32-bit MMIO register from a buffer.
524  */
525 static void
526 iris_load_register_mem32(struct iris_batch *batch, uint32_t reg,
527                          struct iris_bo *bo, uint32_t offset)
528 {
529    iris_batch_sync_region_start(batch);
530    iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
531       lrm.RegisterAddress = reg;
532       lrm.MemoryAddress = ro_bo(bo, offset);
533    }
534    iris_batch_sync_region_end(batch);
535 }
536 
537 /**
538  * Load a 64-bit value from a buffer into a MMIO register via
539  * two MI_LOAD_REGISTER_MEM commands.
540  */
541 static void
542 iris_load_register_mem64(struct iris_batch *batch, uint32_t reg,
543                          struct iris_bo *bo, uint32_t offset)
544 {
545    iris_load_register_mem32(batch, reg + 0, bo, offset + 0);
546    iris_load_register_mem32(batch, reg + 4, bo, offset + 4);
547 }
548 
549 static void
550 iris_store_register_mem32(struct iris_batch *batch, uint32_t reg,
551                           struct iris_bo *bo, uint32_t offset,
552                           bool predicated)
553 {
554    iris_batch_sync_region_start(batch);
555    iris_emit_cmd(batch, GENX(MI_STORE_REGISTER_MEM), srm) {
556       srm.RegisterAddress = reg;
557       srm.MemoryAddress = rw_bo(bo, offset, IRIS_DOMAIN_OTHER_WRITE);
558       srm.PredicateEnable = predicated;
559    }
560    iris_batch_sync_region_end(batch);
561 }
562 
563 static void
564 iris_store_register_mem64(struct iris_batch *batch, uint32_t reg,
565                           struct iris_bo *bo, uint32_t offset,
566                           bool predicated)
567 {
568    iris_store_register_mem32(batch, reg + 0, bo, offset + 0, predicated);
569    iris_store_register_mem32(batch, reg + 4, bo, offset + 4, predicated);
570 }
571 
572 static void
573 iris_store_data_imm32(struct iris_batch *batch,
574                       struct iris_bo *bo, uint32_t offset,
575                       uint32_t imm)
576 {
577    iris_batch_sync_region_start(batch);
578    iris_emit_cmd(batch, GENX(MI_STORE_DATA_IMM), sdi) {
579       sdi.Address = rw_bo(bo, offset, IRIS_DOMAIN_OTHER_WRITE);
580       sdi.ImmediateData = imm;
581    }
582    iris_batch_sync_region_end(batch);
583 }
584 
585 static void
586 iris_store_data_imm64(struct iris_batch *batch,
587                       struct iris_bo *bo, uint32_t offset,
588                       uint64_t imm)
589 {
590    /* Can't use iris_emit_cmd because MI_STORE_DATA_IMM has a length of
591     * 2 in genxml but it's actually variable length and we need 5 DWords.
592     */
593    void *map = iris_get_command_space(batch, 4 * 5);
594    iris_batch_sync_region_start(batch);
595    _iris_pack_command(batch, GENX(MI_STORE_DATA_IMM), map, sdi) {
596       sdi.DWordLength = 5 - 2;
597       sdi.Address = rw_bo(bo, offset, IRIS_DOMAIN_OTHER_WRITE);
598       sdi.ImmediateData = imm;
599    }
600    iris_batch_sync_region_end(batch);
601 }
602 
603 static void
604 iris_copy_mem_mem(struct iris_batch *batch,
605                   struct iris_bo *dst_bo, uint32_t dst_offset,
606                   struct iris_bo *src_bo, uint32_t src_offset,
607                   unsigned bytes)
608 {
609    /* MI_COPY_MEM_MEM operates on DWords. */
610    assert(bytes % 4 == 0);
611    assert(dst_offset % 4 == 0);
612    assert(src_offset % 4 == 0);
613    iris_batch_sync_region_start(batch);
614 
615    for (unsigned i = 0; i < bytes; i += 4) {
616       iris_emit_cmd(batch, GENX(MI_COPY_MEM_MEM), cp) {
617          cp.DestinationMemoryAddress = rw_bo(dst_bo, dst_offset + i,
618                                              IRIS_DOMAIN_OTHER_WRITE);
619          cp.SourceMemoryAddress = ro_bo(src_bo, src_offset + i);
620       }
621    }
622 
623    iris_batch_sync_region_end(batch);
624 }
625 
626 static void
627 emit_pipeline_select(struct iris_batch *batch, uint32_t pipeline)
628 {
629 #if GEN_GEN >= 8 && GEN_GEN < 10
630    /* From the Broadwell PRM, Volume 2a: Instructions, PIPELINE_SELECT:
631     *
632     *   Software must clear the COLOR_CALC_STATE Valid field in
633     *   3DSTATE_CC_STATE_POINTERS command prior to send a PIPELINE_SELECT
634     *   with Pipeline Select set to GPGPU.
635     *
636     * The internal hardware docs recommend the same workaround for Gen9
637     * hardware too.
638     */
639    if (pipeline == GPGPU)
640       iris_emit_cmd(batch, GENX(3DSTATE_CC_STATE_POINTERS), t);
641 #endif
642 
643 
644    /* From "BXML » GT » MI » vol1a GPU Overview » [Instruction]
645     * PIPELINE_SELECT [DevBWR+]":
646     *
647     *    "Project: DEVSNB+
648     *
649     *     Software must ensure all the write caches are flushed through a
650     *     stalling PIPE_CONTROL command followed by another PIPE_CONTROL
651     *     command to invalidate read only caches prior to programming
652     *     MI_PIPELINE_SELECT command to change the Pipeline Select Mode."
653     */
654     iris_emit_pipe_control_flush(batch,
655                                  "workaround: PIPELINE_SELECT flushes (1/2)",
656                                  PIPE_CONTROL_RENDER_TARGET_FLUSH |
657                                  PIPE_CONTROL_DEPTH_CACHE_FLUSH |
658                                  PIPE_CONTROL_DATA_CACHE_FLUSH |
659                                  PIPE_CONTROL_CS_STALL);
660 
661     iris_emit_pipe_control_flush(batch,
662                                  "workaround: PIPELINE_SELECT flushes (2/2)",
663                                  PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
664                                  PIPE_CONTROL_CONST_CACHE_INVALIDATE |
665                                  PIPE_CONTROL_STATE_CACHE_INVALIDATE |
666                                  PIPE_CONTROL_INSTRUCTION_INVALIDATE);
667 
668    iris_emit_cmd(batch, GENX(PIPELINE_SELECT), sel) {
669 #if GEN_GEN >= 9
670       sel.MaskBits = GEN_GEN >= 12 ? 0x13 : 3;
671       sel.MediaSamplerDOPClockGateEnable = GEN_GEN >= 12;
672 #endif
673       sel.PipelineSelection = pipeline;
674    }
675 }
676 
677 UNUSED static void
678 init_glk_barrier_mode(struct iris_batch *batch, uint32_t value)
679 {
680 #if GEN_GEN == 9
681    /* Project: DevGLK
682     *
683     *    "This chicken bit works around a hardware issue with barrier
684     *     logic encountered when switching between GPGPU and 3D pipelines.
685     *     To workaround the issue, this mode bit should be set after a
686     *     pipeline is selected."
687     */
688    uint32_t reg_val;
689    iris_pack_state(GENX(SLICE_COMMON_ECO_CHICKEN1), &reg_val, reg) {
690       reg.GLKBarrierMode = value;
691       reg.GLKBarrierModeMask = 1;
692    }
693    iris_emit_lri(batch, SLICE_COMMON_ECO_CHICKEN1, reg_val);
694 #endif
695 }
696 
697 static void
698 init_state_base_address(struct iris_batch *batch)
699 {
700    struct isl_device *isl_dev = &batch->screen->isl_dev;
701    uint32_t mocs = isl_mocs(isl_dev, 0);
702    flush_before_state_base_change(batch);
703 
704    /* We program most base addresses once at context initialization time.
705     * Each base address points at a 4GB memory zone, and never needs to
706     * change.  See iris_bufmgr.h for a description of the memory zones.
707     *
708     * The one exception is Surface State Base Address, which needs to be
709     * updated occasionally.  See iris_binder.c for the details there.
710     */
711    iris_emit_cmd(batch, GENX(STATE_BASE_ADDRESS), sba) {
712       sba.GeneralStateMOCS            = mocs;
713       sba.StatelessDataPortAccessMOCS = mocs;
714       sba.DynamicStateMOCS            = mocs;
715       sba.IndirectObjectMOCS          = mocs;
716       sba.InstructionMOCS             = mocs;
717       sba.SurfaceStateMOCS            = mocs;
718 
719       sba.GeneralStateBaseAddressModifyEnable   = true;
720       sba.DynamicStateBaseAddressModifyEnable   = true;
721       sba.IndirectObjectBaseAddressModifyEnable = true;
722       sba.InstructionBaseAddressModifyEnable    = true;
723       sba.GeneralStateBufferSizeModifyEnable    = true;
724       sba.DynamicStateBufferSizeModifyEnable    = true;
725 #if (GEN_GEN >= 9)
726       sba.BindlessSurfaceStateBaseAddressModifyEnable = true;
727       sba.BindlessSurfaceStateMOCS    = mocs;
728 #endif
729       sba.IndirectObjectBufferSizeModifyEnable  = true;
730       sba.InstructionBuffersizeModifyEnable     = true;
731 
732       sba.InstructionBaseAddress  = ro_bo(NULL, IRIS_MEMZONE_SHADER_START);
733       sba.DynamicStateBaseAddress = ro_bo(NULL, IRIS_MEMZONE_DYNAMIC_START);
734 
735       sba.GeneralStateBufferSize   = 0xfffff;
736       sba.IndirectObjectBufferSize = 0xfffff;
737       sba.InstructionBufferSize    = 0xfffff;
738       sba.DynamicStateBufferSize   = 0xfffff;
739    }
740 
741    flush_after_state_base_change(batch);
742 }
743 
744 static void
745 iris_emit_l3_config(struct iris_batch *batch,
746                     const struct gen_l3_config *cfg)
747 {
748    uint32_t reg_val;
749    assert(cfg || GEN_GEN >= 12);
750 
751 #if GEN_GEN >= 12
752 #define L3_ALLOCATION_REG GENX(L3ALLOC)
753 #define L3_ALLOCATION_REG_num GENX(L3ALLOC_num)
754 #else
755 #define L3_ALLOCATION_REG GENX(L3CNTLREG)
756 #define L3_ALLOCATION_REG_num GENX(L3CNTLREG_num)
757 #endif
758 
759    iris_pack_state(L3_ALLOCATION_REG, &reg_val, reg) {
760 #if GEN_GEN < 11
761       reg.SLMEnable = cfg->n[GEN_L3P_SLM] > 0;
762 #endif
763 #if GEN_GEN == 11
764       /* WA_1406697149: Bit 9 "Error Detection Behavior Control" must be set
765        * in L3CNTLREG register. The default setting of the bit is not the
766        * desirable behavior.
767        */
768       reg.ErrorDetectionBehaviorControl = true;
769       reg.UseFullWays = true;
770 #endif
771       if (GEN_GEN < 12 || cfg) {
772          reg.URBAllocation = cfg->n[GEN_L3P_URB];
773          reg.ROAllocation = cfg->n[GEN_L3P_RO];
774          reg.DCAllocation = cfg->n[GEN_L3P_DC];
775          reg.AllAllocation = cfg->n[GEN_L3P_ALL];
776       } else {
777 #if GEN_GEN >= 12
778          reg.L3FullWayAllocationEnable = true;
779 #endif
780       }
781    }
782    _iris_emit_lri(batch, L3_ALLOCATION_REG_num, reg_val);
783 }
784 
785 #if GEN_GEN == 9
786 static void
787 iris_enable_obj_preemption(struct iris_batch *batch, bool enable)
788 {
789    uint32_t reg_val;
790 
791    /* A fixed function pipe flush is required before modifying this field */
792    iris_emit_end_of_pipe_sync(batch, enable ? "enable preemption"
793                                             : "disable preemption",
794                               PIPE_CONTROL_RENDER_TARGET_FLUSH);
795 
796    /* enable object level preemption */
797    iris_pack_state(GENX(CS_CHICKEN1), &reg_val, reg) {
798       reg.ReplayMode = enable;
799       reg.ReplayModeMask = true;
800    }
801    iris_emit_lri(batch, CS_CHICKEN1, reg_val);
802 }
803 #endif
804 
805 #if GEN_GEN == 11
806 static void
807 iris_upload_slice_hashing_state(struct iris_batch *batch)
808 {
809    const struct gen_device_info *devinfo = &batch->screen->devinfo;
810    int subslices_delta =
811       devinfo->ppipe_subslices[0] - devinfo->ppipe_subslices[1];
812    if (subslices_delta == 0)
813       return;
814 
815    struct iris_context *ice = NULL;
816    ice = container_of(batch, ice, batches[IRIS_BATCH_RENDER]);
817    assert(&ice->batches[IRIS_BATCH_RENDER] == batch);
818 
819    unsigned size = GENX(SLICE_HASH_TABLE_length) * 4;
820    uint32_t hash_address;
821    struct pipe_resource *tmp = NULL;
822    uint32_t *map =
823       stream_state(batch, ice->state.dynamic_uploader, &tmp,
824                    size, 64, &hash_address);
825    pipe_resource_reference(&tmp, NULL);
826 
827    struct GENX(SLICE_HASH_TABLE) table0 = {
828       .Entry = {
829          { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 },
830          { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 },
831          { 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0 },
832          { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 },
833          { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 },
834          { 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0 },
835          { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 },
836          { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 },
837          { 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0 },
838          { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 },
839          { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 },
840          { 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0 },
841          { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 },
842          { 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1 },
843          { 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0 },
844          { 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1, 1, 0, 1 }
845       }
846    };
847 
848    struct GENX(SLICE_HASH_TABLE) table1 = {
849       .Entry = {
850          { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 },
851          { 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 },
852          { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1 },
853          { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 },
854          { 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 },
855          { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1 },
856          { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 },
857          { 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 },
858          { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1 },
859          { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 },
860          { 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 },
861          { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1 },
862          { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 },
863          { 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0 },
864          { 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1 },
865          { 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1, 0 }
866       }
867    };
868 
869    const struct GENX(SLICE_HASH_TABLE) *table =
870       subslices_delta < 0 ? &table0 : &table1;
871    GENX(SLICE_HASH_TABLE_pack)(NULL, map, table);
872 
873    iris_emit_cmd(batch, GENX(3DSTATE_SLICE_TABLE_STATE_POINTERS), ptr) {
874       ptr.SliceHashStatePointerValid = true;
875       ptr.SliceHashTableStatePointer = hash_address;
876    }
877 
878    iris_emit_cmd(batch, GENX(3DSTATE_3D_MODE), mode) {
879       mode.SliceHashingTableEnable = true;
880    }
881 }
882 #endif
883 
884 static void
885 iris_alloc_push_constants(struct iris_batch *batch)
886 {
887    /* For now, we set a static partitioning of the push constant area,
888     * assuming that all stages could be in use.
889     *
890     * TODO: Try lazily allocating the HS/DS/GS sections as needed, and
891     *       see if that improves performance by offering more space to
892     *       the VS/FS when those aren't in use.  Also, try dynamically
893     *       enabling/disabling it like i965 does.  This would cause more
894     *       stalls and may not actually help; we don't know yet.
895     */
896    for (int i = 0; i <= MESA_SHADER_FRAGMENT; i++) {
897       iris_emit_cmd(batch, GENX(3DSTATE_PUSH_CONSTANT_ALLOC_VS), alloc) {
898          alloc._3DCommandSubOpcode = 18 + i;
899          alloc.ConstantBufferOffset = 6 * i;
900          alloc.ConstantBufferSize = i == MESA_SHADER_FRAGMENT ? 8 : 6;
901       }
902    }
903 }
904 
905 #if GEN_GEN >= 12
906 static void
907 init_aux_map_state(struct iris_batch *batch);
908 #endif
909 
910 /**
911  * Upload initial GPU state for any kind of context.
912  *
913  * These need to happen for both render and compute.
914  */
915 static void
916 iris_init_common_context(struct iris_batch *batch)
917 {
918 #if GEN_GEN == 11
919    uint32_t reg_val;
920 
921    iris_pack_state(GENX(SAMPLER_MODE), &reg_val, reg) {
922       reg.HeaderlessMessageforPreemptableContexts = 1;
923       reg.HeaderlessMessageforPreemptableContextsMask = 1;
924    }
925    iris_emit_lri(batch, SAMPLER_MODE, reg_val);
926 
927    /* Bit 1 must be set in HALF_SLICE_CHICKEN7. */
928    iris_pack_state(GENX(HALF_SLICE_CHICKEN7), &reg_val, reg) {
929       reg.EnabledTexelOffsetPrecisionFix = 1;
930       reg.EnabledTexelOffsetPrecisionFixMask = 1;
931    }
932    iris_emit_lri(batch, HALF_SLICE_CHICKEN7, reg_val);
933 #endif
934 }
935 
936 /**
937  * Upload the initial GPU state for a render context.
938  *
939  * This sets some invariant state that needs to be programmed a particular
940  * way, but we never actually change.
941  */
942 static void
943 iris_init_render_context(struct iris_batch *batch)
944 {
945    UNUSED const struct gen_device_info *devinfo = &batch->screen->devinfo;
946    uint32_t reg_val;
947 
948    iris_batch_sync_region_start(batch);
949 
950    emit_pipeline_select(batch, _3D);
951 
952    iris_emit_l3_config(batch, batch->screen->l3_config_3d);
953 
954    init_state_base_address(batch);
955 
956    iris_init_common_context(batch);
957 
958 #if GEN_GEN >= 9
959    iris_pack_state(GENX(CS_DEBUG_MODE2), &reg_val, reg) {
960       reg.CONSTANT_BUFFERAddressOffsetDisable = true;
961       reg.CONSTANT_BUFFERAddressOffsetDisableMask = true;
962    }
963    iris_emit_lri(batch, CS_DEBUG_MODE2, reg_val);
964 #else
965    iris_pack_state(GENX(INSTPM), &reg_val, reg) {
966       reg.CONSTANT_BUFFERAddressOffsetDisable = true;
967       reg.CONSTANT_BUFFERAddressOffsetDisableMask = true;
968    }
969    iris_emit_lri(batch, INSTPM, reg_val);
970 #endif
971 
972 #if GEN_GEN == 9
973    iris_pack_state(GENX(CACHE_MODE_1), &reg_val, reg) {
974       reg.FloatBlendOptimizationEnable = true;
975       reg.FloatBlendOptimizationEnableMask = true;
976       reg.MSCRAWHazardAvoidanceBit = true;
977       reg.MSCRAWHazardAvoidanceBitMask = true;
978       reg.PartialResolveDisableInVC = true;
979       reg.PartialResolveDisableInVCMask = true;
980    }
981    iris_emit_lri(batch, CACHE_MODE_1, reg_val);
982 
983    if (devinfo->is_geminilake)
984       init_glk_barrier_mode(batch, GLK_BARRIER_MODE_3D_HULL);
985 #endif
986 
987 #if GEN_GEN == 11
988    iris_pack_state(GENX(TCCNTLREG), &reg_val, reg) {
989       reg.L3DataPartialWriteMergingEnable = true;
990       reg.ColorZPartialWriteMergingEnable = true;
991       reg.URBPartialWriteMergingEnable = true;
992       reg.TCDisable = true;
993    }
994    iris_emit_lri(batch, TCCNTLREG, reg_val);
995 
996    /* The hardware specification recommends disabling repacking for
997     * compatibility with the decompression mechanism in the display controller.
998     */
999    if (devinfo->disable_ccs_repack) {
1000       iris_pack_state(GENX(CACHE_MODE_0), &reg_val, reg) {
1001          reg.DisableRepackingforCompression = true;
1002          reg.DisableRepackingforCompressionMask = true;
1003       }
1004       iris_emit_lri(batch, CACHE_MODE_0, reg_val);
1005    }
1006 
1007    iris_upload_slice_hashing_state(batch);
1008 #endif
1009 
1010    /* 3DSTATE_DRAWING_RECTANGLE is non-pipelined, so we want to avoid
1011     * changing it dynamically.  We set it to the maximum size here, and
1012     * instead include the render target dimensions in the viewport, so
1013     * viewport extents clipping takes care of pruning stray geometry.
1014     */
1015    iris_emit_cmd(batch, GENX(3DSTATE_DRAWING_RECTANGLE), rect) {
1016       rect.ClippedDrawingRectangleXMax = UINT16_MAX;
1017       rect.ClippedDrawingRectangleYMax = UINT16_MAX;
1018    }
1019 
1020    /* Set the initial MSAA sample positions. */
1021    iris_emit_cmd(batch, GENX(3DSTATE_SAMPLE_PATTERN), pat) {
1022       GEN_SAMPLE_POS_1X(pat._1xSample);
1023       GEN_SAMPLE_POS_2X(pat._2xSample);
1024       GEN_SAMPLE_POS_4X(pat._4xSample);
1025       GEN_SAMPLE_POS_8X(pat._8xSample);
1026 #if GEN_GEN >= 9
1027       GEN_SAMPLE_POS_16X(pat._16xSample);
1028 #endif
1029    }
1030 
1031    /* Use the legacy AA line coverage computation. */
1032    iris_emit_cmd(batch, GENX(3DSTATE_AA_LINE_PARAMETERS), foo);
1033 
1034    /* Disable chromakeying (it's for media) */
1035    iris_emit_cmd(batch, GENX(3DSTATE_WM_CHROMAKEY), foo);
1036 
1037    /* We want regular rendering, not special HiZ operations. */
1038    iris_emit_cmd(batch, GENX(3DSTATE_WM_HZ_OP), foo);
1039 
1040    /* No polygon stippling offsets are necessary. */
1041    /* TODO: may need to set an offset for origin-UL framebuffers */
1042    iris_emit_cmd(batch, GENX(3DSTATE_POLY_STIPPLE_OFFSET), foo);
1043 
1044    iris_alloc_push_constants(batch);
1045 
1046 
1047 #if GEN_GEN >= 12
1048    init_aux_map_state(batch);
1049 #endif
1050 
1051    iris_batch_sync_region_end(batch);
1052 }
1053 
1054 static void
1055 iris_init_compute_context(struct iris_batch *batch)
1056 {
1057    UNUSED const struct gen_device_info *devinfo = &batch->screen->devinfo;
1058 
1059    iris_batch_sync_region_start(batch);
1060 
1061    /* GEN:BUG:1607854226:
1062     *
1063     *  Start with pipeline in 3D mode to set the STATE_BASE_ADDRESS.
1064     */
1065 #if GEN_GEN == 12
1066    emit_pipeline_select(batch, _3D);
1067 #else
1068    emit_pipeline_select(batch, GPGPU);
1069 #endif
1070 
1071    iris_emit_l3_config(batch, batch->screen->l3_config_cs);
1072 
1073    init_state_base_address(batch);
1074 
1075    iris_init_common_context(batch);
1076 
1077 #if GEN_GEN == 12
1078    emit_pipeline_select(batch, GPGPU);
1079 #endif
1080 
1081 #if GEN_GEN == 9
1082    if (devinfo->is_geminilake)
1083       init_glk_barrier_mode(batch, GLK_BARRIER_MODE_GPGPU);
1084 #endif
1085 
1086 #if GEN_GEN >= 12
1087    init_aux_map_state(batch);
1088 #endif
1089 
1090    iris_batch_sync_region_end(batch);
1091 }
1092 
1093 struct iris_vertex_buffer_state {
1094    /** The VERTEX_BUFFER_STATE hardware structure. */
1095    uint32_t state[GENX(VERTEX_BUFFER_STATE_length)];
1096 
1097    /** The resource to source vertex data from. */
1098    struct pipe_resource *resource;
1099 
1100    int offset;
1101 };
1102 
1103 struct iris_depth_buffer_state {
1104    /* Depth/HiZ/Stencil related hardware packets. */
1105    uint32_t packets[GENX(3DSTATE_DEPTH_BUFFER_length) +
1106                     GENX(3DSTATE_STENCIL_BUFFER_length) +
1107                     GENX(3DSTATE_HIER_DEPTH_BUFFER_length) +
1108                     GENX(3DSTATE_CLEAR_PARAMS_length) +
1109                     GENX(MI_LOAD_REGISTER_IMM_length) * 2];
1110 };
1111 
1112 /**
1113  * Generation-specific context state (ice->state.genx->...).
1114  *
1115  * Most state can go in iris_context directly, but these encode hardware
1116  * packets which vary by generation.
1117  */
1118 struct iris_genx_state {
1119    struct iris_vertex_buffer_state vertex_buffers[33];
1120    uint32_t last_index_buffer[GENX(3DSTATE_INDEX_BUFFER_length)];
1121 
1122    struct iris_depth_buffer_state depth_buffer;
1123 
1124    uint32_t so_buffers[4 * GENX(3DSTATE_SO_BUFFER_length)];
1125 
1126 #if GEN_GEN == 8
1127    bool pma_fix_enabled;
1128 #endif
1129 
1130 #if GEN_GEN == 9
1131    /* Is object level preemption enabled? */
1132    bool object_preemption;
1133 #endif
1134 
1135    struct {
1136 #if GEN_GEN == 8
1137       struct brw_image_param image_param[PIPE_MAX_SHADER_IMAGES];
1138 #endif
1139    } shaders[MESA_SHADER_STAGES];
1140 };
1141 
1142 /**
1143  * The pipe->set_blend_color() driver hook.
1144  *
1145  * This corresponds to our COLOR_CALC_STATE.
1146  */
1147 static void
1148 iris_set_blend_color(struct pipe_context *ctx,
1149                      const struct pipe_blend_color *state)
1150 {
1151    struct iris_context *ice = (struct iris_context *) ctx;
1152 
1153    /* Our COLOR_CALC_STATE is exactly pipe_blend_color, so just memcpy */
1154    memcpy(&ice->state.blend_color, state, sizeof(struct pipe_blend_color));
1155    ice->state.dirty |= IRIS_DIRTY_COLOR_CALC_STATE;
1156 }
1157 
1158 /**
1159  * Gallium CSO for blend state (see pipe_blend_state).
1160  */
1161 struct iris_blend_state {
1162    /** Partial 3DSTATE_PS_BLEND */
1163    uint32_t ps_blend[GENX(3DSTATE_PS_BLEND_length)];
1164 
1165    /** Partial BLEND_STATE */
1166    uint32_t blend_state[GENX(BLEND_STATE_length) +
1167                         BRW_MAX_DRAW_BUFFERS * GENX(BLEND_STATE_ENTRY_length)];
1168 
1169    bool alpha_to_coverage; /* for shader key */
1170 
1171    /** Bitfield of whether blending is enabled for RT[i] - for aux resolves */
1172    uint8_t blend_enables;
1173 
1174    /** Bitfield of whether color writes are enabled for RT[i] */
1175    uint8_t color_write_enables;
1176 
1177    /** Does RT[0] use dual color blending? */
1178    bool dual_color_blending;
1179 };
1180 
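/**
 * Adjust blend factors that reference dual-source (SRC1) alpha when
 * alpha-to-one is enabled.  Presumably because alpha-to-one forces source
 * alpha values to 1.0, a SRC1_ALPHA factor can be folded to ONE and
 * INV_SRC1_ALPHA to ZERO at state creation time.
 */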
1181 static enum pipe_blendfactor
1182 fix_blendfactor(enum pipe_blendfactor f, bool alpha_to_one)
1183 {
1184    if (alpha_to_one) {
1185       if (f == PIPE_BLENDFACTOR_SRC1_ALPHA)
1186          return PIPE_BLENDFACTOR_ONE;
1187 
1188       if (f == PIPE_BLENDFACTOR_INV_SRC1_ALPHA)
1189          return PIPE_BLENDFACTOR_ZERO;
1190    }
1191 
1192    return f;
1193 }
1194 
1195 /**
1196  * The pipe->create_blend_state() driver hook.
1197  *
1198  * Translates a pipe_blend_state into iris_blend_state.
1199  */
1200 static void *
1201 iris_create_blend_state(struct pipe_context *ctx,
1202                         const struct pipe_blend_state *state)
1203 {
1204    struct iris_blend_state *cso = malloc(sizeof(struct iris_blend_state));
1205    uint32_t *blend_entry = cso->blend_state + GENX(BLEND_STATE_length);
1206 
1207    cso->blend_enables = 0;
1208    cso->color_write_enables = 0;
1209    STATIC_ASSERT(BRW_MAX_DRAW_BUFFERS <= 8);
1210 
1211    cso->alpha_to_coverage = state->alpha_to_coverage;
1212 
1213    bool indep_alpha_blend = false;
1214 
1215    for (int i = 0; i < BRW_MAX_DRAW_BUFFERS; i++) {
1216       const struct pipe_rt_blend_state *rt =
1217          &state->rt[state->independent_blend_enable ? i : 0];
1218 
1219       enum pipe_blendfactor src_rgb =
1220          fix_blendfactor(rt->rgb_src_factor, state->alpha_to_one);
1221       enum pipe_blendfactor src_alpha =
1222          fix_blendfactor(rt->alpha_src_factor, state->alpha_to_one);
1223       enum pipe_blendfactor dst_rgb =
1224          fix_blendfactor(rt->rgb_dst_factor, state->alpha_to_one);
1225       enum pipe_blendfactor dst_alpha =
1226          fix_blendfactor(rt->alpha_dst_factor, state->alpha_to_one);
1227 
1228       if (rt->rgb_func != rt->alpha_func ||
1229           src_rgb != src_alpha || dst_rgb != dst_alpha)
1230          indep_alpha_blend = true;
1231 
1232       if (rt->blend_enable)
1233          cso->blend_enables |= 1u << i;
1234 
1235       if (rt->colormask)
1236          cso->color_write_enables |= 1u << i;
1237 
1238       iris_pack_state(GENX(BLEND_STATE_ENTRY), blend_entry, be) {
1239          be.LogicOpEnable = state->logicop_enable;
1240          be.LogicOpFunction = state->logicop_func;
1241 
1242          be.PreBlendSourceOnlyClampEnable = false;
1243          be.ColorClampRange = COLORCLAMP_RTFORMAT;
1244          be.PreBlendColorClampEnable = true;
1245          be.PostBlendColorClampEnable = true;
1246 
1247          be.ColorBufferBlendEnable = rt->blend_enable;
1248 
1249          be.ColorBlendFunction          = rt->rgb_func;
1250          be.AlphaBlendFunction          = rt->alpha_func;
1251          be.SourceBlendFactor           = src_rgb;
1252          be.SourceAlphaBlendFactor      = src_alpha;
1253          be.DestinationBlendFactor      = dst_rgb;
1254          be.DestinationAlphaBlendFactor = dst_alpha;
1255 
1256          be.WriteDisableRed   = !(rt->colormask & PIPE_MASK_R);
1257          be.WriteDisableGreen = !(rt->colormask & PIPE_MASK_G);
1258          be.WriteDisableBlue  = !(rt->colormask & PIPE_MASK_B);
1259          be.WriteDisableAlpha = !(rt->colormask & PIPE_MASK_A);
1260       }
1261       blend_entry += GENX(BLEND_STATE_ENTRY_length);
1262    }
1263 
1264    iris_pack_command(GENX(3DSTATE_PS_BLEND), cso->ps_blend, pb) {
1265       /* pb.HasWriteableRT is filled in at draw time.
1266        * pb.AlphaTestEnable is filled in at draw time.
1267        *
1268        * pb.ColorBufferBlendEnable is filled in at draw time so we can avoid
1269        * setting it when dual color blending without an appropriate shader.
1270        */
1271 
1272       pb.AlphaToCoverageEnable = state->alpha_to_coverage;
1273       pb.IndependentAlphaBlendEnable = indep_alpha_blend;
1274 
1275       pb.SourceBlendFactor =
1276          fix_blendfactor(state->rt[0].rgb_src_factor, state->alpha_to_one);
1277       pb.SourceAlphaBlendFactor =
1278          fix_blendfactor(state->rt[0].alpha_src_factor, state->alpha_to_one);
1279       pb.DestinationBlendFactor =
1280          fix_blendfactor(state->rt[0].rgb_dst_factor, state->alpha_to_one);
1281       pb.DestinationAlphaBlendFactor =
1282          fix_blendfactor(state->rt[0].alpha_dst_factor, state->alpha_to_one);
1283    }
1284 
1285    iris_pack_state(GENX(BLEND_STATE), cso->blend_state, bs) {
1286       bs.AlphaToCoverageEnable = state->alpha_to_coverage;
1287       bs.IndependentAlphaBlendEnable = indep_alpha_blend;
1288       bs.AlphaToOneEnable = state->alpha_to_one;
1289       bs.AlphaToCoverageDitherEnable = state->alpha_to_coverage;
1290       bs.ColorDitherEnable = state->dither;
1291       /* bs.AlphaTestEnable and bs.AlphaTestFunction are filled in later. */
1292    }
1293 
1294    cso->dual_color_blending = util_blend_state_is_dual(state, 0);
1295 
1296    return cso;
1297 }
1298 
1299 /**
1300  * The pipe->bind_blend_state() driver hook.
1301  *
1302  * Bind a blending CSO and flag related dirty bits.
1303  */
1304 static void
1305 iris_bind_blend_state(struct pipe_context *ctx, void *state)
1306 {
1307    struct iris_context *ice = (struct iris_context *) ctx;
1308    struct iris_blend_state *cso = state;
1309 
1310    ice->state.cso_blend = cso;
1311 
1312    ice->state.dirty |= IRIS_DIRTY_PS_BLEND;
1313    ice->state.dirty |= IRIS_DIRTY_BLEND_STATE;
1314    ice->state.stage_dirty |= ice->state.stage_dirty_for_nos[IRIS_NOS_BLEND];
1315 
1316    if (GEN_GEN == 8)
1317       ice->state.dirty |= IRIS_DIRTY_PMA_FIX;
1318 }
1319 
1320 /**
1321  * Return true if the FS writes to any color outputs which are not disabled
1322  * via color masking.
1323  */
1324 static bool
1325 has_writeable_rt(const struct iris_blend_state *cso_blend,
1326                  const struct shader_info *fs_info)
1327 {
1328    if (!fs_info)
1329       return false;
1330 
1331    unsigned rt_outputs = fs_info->outputs_written >> FRAG_RESULT_DATA0;
1332 
1333    if (fs_info->outputs_written & BITFIELD64_BIT(FRAG_RESULT_COLOR))
1334       rt_outputs = (1 << BRW_MAX_DRAW_BUFFERS) - 1;
1335 
1336    return cso_blend->color_write_enables & rt_outputs;
1337 }
1338 
1339 /**
1340  * Gallium CSO for depth, stencil, and alpha testing state.
1341  */
1342 struct iris_depth_stencil_alpha_state {
1343    /** Partial 3DSTATE_WM_DEPTH_STENCIL. */
1344    uint32_t wmds[GENX(3DSTATE_WM_DEPTH_STENCIL_length)];
1345 
1346 #if GEN_GEN >= 12
1347    uint32_t depth_bounds[GENX(3DSTATE_DEPTH_BOUNDS_length)];
1348 #endif
1349 
1350    /** Outbound to BLEND_STATE, 3DSTATE_PS_BLEND, COLOR_CALC_STATE. */
1351    struct pipe_alpha_state alpha;
1352 
1353    /** Outbound to resolve and cache set tracking. */
1354    bool depth_writes_enabled;
1355    bool stencil_writes_enabled;
1356 
1357    /** Outbound to Gen8-9 PMA stall equations */
1358    bool depth_test_enabled;
1359 };
1360 
1361 /**
1362  * The pipe->create_depth_stencil_alpha_state() driver hook.
1363  *
1364  * We encode most of 3DSTATE_WM_DEPTH_STENCIL, and just save off the alpha
1365  * testing state since we need pieces of it in a variety of places.
1366  */
1367 static void *
1368 iris_create_zsa_state(struct pipe_context *ctx,
1369                       const struct pipe_depth_stencil_alpha_state *state)
1370 {
1371    struct iris_depth_stencil_alpha_state *cso =
1372       malloc(sizeof(struct iris_depth_stencil_alpha_state));
1373 
1374    bool two_sided_stencil = state->stencil[1].enabled;
1375 
1376    cso->alpha = state->alpha;
1377    cso->depth_writes_enabled = state->depth.writemask;
1378    cso->depth_test_enabled = state->depth.enabled;
1379    cso->stencil_writes_enabled =
1380       state->stencil[0].writemask != 0 ||
1381       (two_sided_stencil && state->stencil[1].writemask != 0);
1382 
1383    /* gallium frontends need to optimize away EQUAL writes for us. */
1384    assert(!(state->depth.func == PIPE_FUNC_EQUAL && state->depth.writemask));
1385 
1386    iris_pack_command(GENX(3DSTATE_WM_DEPTH_STENCIL), cso->wmds, wmds) {
1387       wmds.StencilFailOp = state->stencil[0].fail_op;
1388       wmds.StencilPassDepthFailOp = state->stencil[0].zfail_op;
1389       wmds.StencilPassDepthPassOp = state->stencil[0].zpass_op;
1390       wmds.StencilTestFunction =
1391          translate_compare_func(state->stencil[0].func);
1392       wmds.BackfaceStencilFailOp = state->stencil[1].fail_op;
1393       wmds.BackfaceStencilPassDepthFailOp = state->stencil[1].zfail_op;
1394       wmds.BackfaceStencilPassDepthPassOp = state->stencil[1].zpass_op;
1395       wmds.BackfaceStencilTestFunction =
1396          translate_compare_func(state->stencil[1].func);
1397       wmds.DepthTestFunction = translate_compare_func(state->depth.func);
1398       wmds.DoubleSidedStencilEnable = two_sided_stencil;
1399       wmds.StencilTestEnable = state->stencil[0].enabled;
1400       wmds.StencilBufferWriteEnable =
1401          state->stencil[0].writemask != 0 ||
1402          (two_sided_stencil && state->stencil[1].writemask != 0);
1403       wmds.DepthTestEnable = state->depth.enabled;
1404       wmds.DepthBufferWriteEnable = state->depth.writemask;
1405       wmds.StencilTestMask = state->stencil[0].valuemask;
1406       wmds.StencilWriteMask = state->stencil[0].writemask;
1407       wmds.BackfaceStencilTestMask = state->stencil[1].valuemask;
1408       wmds.BackfaceStencilWriteMask = state->stencil[1].writemask;
1409       /* wmds.[Backface]StencilReferenceValue are merged later */
1410 #if GEN_GEN >= 12
1411       wmds.StencilReferenceValueModifyDisable = true;
1412 #endif
1413    }
1414 
1415 #if GEN_GEN >= 12
1416    iris_pack_command(GENX(3DSTATE_DEPTH_BOUNDS), cso->depth_bounds, depth_bounds) {
1417       depth_bounds.DepthBoundsTestValueModifyDisable = false;
1418       depth_bounds.DepthBoundsTestEnableModifyDisable = false;
1419       depth_bounds.DepthBoundsTestEnable = state->depth.bounds_test;
1420       depth_bounds.DepthBoundsTestMinValue = state->depth.bounds_min;
1421       depth_bounds.DepthBoundsTestMaxValue = state->depth.bounds_max;
1422    }
1423 #endif
1424 
1425    return cso;
1426 }
1427 
1428 /**
1429  * The pipe->bind_depth_stencil_alpha_state() driver hook.
1430  *
1431  * Bind a depth/stencil/alpha CSO and flag related dirty bits.
1432  */
1433 static void
1434 iris_bind_zsa_state(struct pipe_context *ctx, void *state)
1435 {
1436    struct iris_context *ice = (struct iris_context *) ctx;
1437    struct iris_depth_stencil_alpha_state *old_cso = ice->state.cso_zsa;
1438    struct iris_depth_stencil_alpha_state *new_cso = state;
1439 
1440    if (new_cso) {
1441       if (cso_changed(alpha.ref_value))
1442          ice->state.dirty |= IRIS_DIRTY_COLOR_CALC_STATE;
1443 
1444       if (cso_changed(alpha.enabled))
1445          ice->state.dirty |= IRIS_DIRTY_PS_BLEND | IRIS_DIRTY_BLEND_STATE;
1446 
1447       if (cso_changed(alpha.func))
1448          ice->state.dirty |= IRIS_DIRTY_BLEND_STATE;
1449 
1450       if (cso_changed(depth_writes_enabled))
1451          ice->state.dirty |= IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES;
1452 
1453       ice->state.depth_writes_enabled = new_cso->depth_writes_enabled;
1454       ice->state.stencil_writes_enabled = new_cso->stencil_writes_enabled;
1455 
1456 #if GEN_GEN >= 12
1457       if (cso_changed(depth_bounds))
1458          ice->state.dirty |= IRIS_DIRTY_DEPTH_BOUNDS;
1459 #endif
1460    }
1461 
1462    ice->state.cso_zsa = new_cso;
1463    ice->state.dirty |= IRIS_DIRTY_CC_VIEWPORT;
1464    ice->state.dirty |= IRIS_DIRTY_WM_DEPTH_STENCIL;
1465    ice->state.stage_dirty |=
1466       ice->state.stage_dirty_for_nos[IRIS_NOS_DEPTH_STENCIL_ALPHA];
1467 
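   /* Depth test enable and depth/stencil write enables feed the Gen8 PMA
    * fix equations (see want_pma_fix), so the workaround needs to be
    * re-evaluated.
    */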
1468    if (GEN_GEN == 8)
1469       ice->state.dirty |= IRIS_DIRTY_PMA_FIX;
1470 }
1471 
1472 #if GEN_GEN == 8
1473 static bool
1474 want_pma_fix(struct iris_context *ice)
1475 {
1476    UNUSED struct iris_screen *screen = (void *) ice->ctx.screen;
1477    UNUSED const struct gen_device_info *devinfo = &screen->devinfo;
1478    const struct brw_wm_prog_data *wm_prog_data = (void *)
1479       ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data;
1480    const struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
1481    const struct iris_depth_stencil_alpha_state *cso_zsa = ice->state.cso_zsa;
1482    const struct iris_blend_state *cso_blend = ice->state.cso_blend;
1483 
1484    /* In very specific combinations of state, we can instruct Gen8-9 hardware
1485     * to avoid stalling at the pixel mask array.  The state equations are
1486     * documented in these places:
1487     *
1488     * - Gen8 Depth PMA Fix:   CACHE_MODE_1::NP_PMA_FIX_ENABLE
1489     * - Gen9 Stencil PMA Fix: CACHE_MODE_0::STC PMA Optimization Enable
1490     *
1491     * Both equations share some common elements:
1492     *
1493     *    no_hiz_op =
1494     *       !(3DSTATE_WM_HZ_OP::DepthBufferClear ||
1495     *         3DSTATE_WM_HZ_OP::DepthBufferResolve ||
1496     *         3DSTATE_WM_HZ_OP::Hierarchical Depth Buffer Resolve Enable ||
1497     *         3DSTATE_WM_HZ_OP::StencilBufferClear) &&
1498     *
1499     *    killpixels =
1500     *       3DSTATE_WM::ForceKillPix != ForceOff &&
1501     *       (3DSTATE_PS_EXTRA::PixelShaderKillsPixels ||
1502     *        3DSTATE_PS_EXTRA::oMask Present to RenderTarget ||
1503     *        3DSTATE_PS_BLEND::AlphaToCoverageEnable ||
1504     *        3DSTATE_PS_BLEND::AlphaTestEnable ||
1505     *        3DSTATE_WM_CHROMAKEY::ChromaKeyKillEnable)
1506     *
1507     *    (Technically the stencil PMA treats ForceKillPix differently,
1508     *     but I think this is a documentation oversight, and we don't
1509     *     ever use it in this way, so it doesn't matter).
1510     *
1511     *    common_pma_fix =
1512     *       3DSTATE_WM::ForceThreadDispatch != 1 &&
1513     *       3DSTATE_RASTER::ForceSampleCount == NUMRASTSAMPLES_0 &&
1514     *       3DSTATE_DEPTH_BUFFER::SURFACE_TYPE != NULL &&
1515     *       3DSTATE_DEPTH_BUFFER::HIZ Enable &&
1516     *       3DSTATE_WM::EDSC_Mode != EDSC_PREPS &&
1517     *       3DSTATE_PS_EXTRA::PixelShaderValid &&
1518     *       no_hiz_op
1519     *
1520     * These are always true:
1521     *
1522     *    3DSTATE_RASTER::ForceSampleCount == NUMRASTSAMPLES_0
1523     *    3DSTATE_PS_EXTRA::PixelShaderValid
1524     *
1525     * Also, we never use the normal drawing path for HiZ ops; these are true:
1526     *
1527     *    !(3DSTATE_WM_HZ_OP::DepthBufferClear ||
1528     *      3DSTATE_WM_HZ_OP::DepthBufferResolve ||
1529     *      3DSTATE_WM_HZ_OP::Hierarchical Depth Buffer Resolve Enable ||
1530     *      3DSTATE_WM_HZ_OP::StencilBufferClear)
1531     *
1532     * This happens sometimes:
1533     *
1534     *    3DSTATE_WM::ForceThreadDispatch != 1
1535     *
1536     * However, we choose to ignore it as it either agrees with the signal
1537     * (dispatch was already enabled, so nothing out of the ordinary), or
1538     * there are no framebuffer attachments (so no depth or HiZ anyway,
1539     * meaning the PMA signal will already be disabled).
1540     */
1541 
1542    if (!cso_fb->zsbuf)
1543       return false;
1544 
1545    struct iris_resource *zres, *sres;
1546    iris_get_depth_stencil_resources(cso_fb->zsbuf->texture, &zres, &sres);
1547 
1548    /* 3DSTATE_DEPTH_BUFFER::SURFACE_TYPE != NULL &&
1549     * 3DSTATE_DEPTH_BUFFER::HIZ Enable &&
1550     */
1551    if (!zres || !iris_resource_level_has_hiz(zres, cso_fb->zsbuf->u.tex.level))
1552       return false;
1553 
1554    /* 3DSTATE_WM::EDSC_Mode != EDSC_PREPS */
1555    if (wm_prog_data->early_fragment_tests)
1556       return false;
1557 
1558    /* 3DSTATE_WM::ForceKillPix != ForceOff &&
1559     * (3DSTATE_PS_EXTRA::PixelShaderKillsPixels ||
1560     *  3DSTATE_PS_EXTRA::oMask Present to RenderTarget ||
1561     *  3DSTATE_PS_BLEND::AlphaToCoverageEnable ||
1562     *  3DSTATE_PS_BLEND::AlphaTestEnable ||
1563     *  3DSTATE_WM_CHROMAKEY::ChromaKeyKillEnable)
1564     */
1565    bool killpixels = wm_prog_data->uses_kill || wm_prog_data->uses_omask ||
1566                      cso_blend->alpha_to_coverage || cso_zsa->alpha.enabled;
1567 
1568    /* The Gen8 depth PMA equation becomes:
1569     *
1570     *    depth_writes =
1571     *       3DSTATE_WM_DEPTH_STENCIL::DepthWriteEnable &&
1572     *       3DSTATE_DEPTH_BUFFER::DEPTH_WRITE_ENABLE
1573     *
1574     *    stencil_writes =
1575     *       3DSTATE_WM_DEPTH_STENCIL::Stencil Buffer Write Enable &&
1576     *       3DSTATE_DEPTH_BUFFER::STENCIL_WRITE_ENABLE &&
1577     *       3DSTATE_STENCIL_BUFFER::STENCIL_BUFFER_ENABLE
1578     *
1579     *    Z_PMA_OPT =
1580     *       common_pma_fix &&
1581     *       3DSTATE_WM_DEPTH_STENCIL::DepthTestEnable &&
1582     *       ((killpixels && (depth_writes || stencil_writes)) ||
1583     *        3DSTATE_PS_EXTRA::PixelShaderComputedDepthMode != PSCDEPTH_OFF)
1584     *
1585     */
1586    if (!cso_zsa->depth_test_enabled)
1587       return false;
1588 
1589    return wm_prog_data->computed_depth_mode != PSCDEPTH_OFF ||
1590           (killpixels && (cso_zsa->depth_writes_enabled ||
1591                           (sres && cso_zsa->stencil_writes_enabled)));
1592 }
1593 #endif
1594 
1595 void
1596 genX(update_pma_fix)(struct iris_context *ice,
1597                      struct iris_batch *batch,
1598                      bool enable)
1599 {
1600 #if GEN_GEN == 8
1601    struct iris_genx_state *genx = ice->state.genx;
1602 
1603    if (genx->pma_fix_enabled == enable)
1604       return;
1605 
1606    genx->pma_fix_enabled = enable;
1607 
1608    /* According to the Broadwell PIPE_CONTROL documentation, software should
1609     * emit a PIPE_CONTROL with the CS Stall and Depth Cache Flush bits set
1610     * prior to the LRI.  If stencil buffer writes are enabled, then a Render
     * Cache Flush is also necessary.
1611     *
1612     * The Gen9 docs say to use a depth stall rather than a command streamer
1613     * stall.  However, the hardware seems to violently disagree.  A full
1614     * command streamer stall seems to be needed in both cases.
1615     */
1616    iris_emit_pipe_control_flush(batch, "PMA fix change (1/2)",
1617                                 PIPE_CONTROL_CS_STALL |
1618                                 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
1619                                 PIPE_CONTROL_RENDER_TARGET_FLUSH);
1620 
1621    uint32_t reg_val;
1622    iris_pack_state(GENX(CACHE_MODE_1), &reg_val, reg) {
1623       reg.NPPMAFixEnable = enable;
1624       reg.NPEarlyZFailsDisable = enable;
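      /* CACHE_MODE_1 is a masked register; setting the mask bits ensures the
       * LRI only updates the two fields above.
       */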
1625       reg.NPPMAFixEnableMask = true;
1626       reg.NPEarlyZFailsDisableMask = true;
1627    }
1628    iris_emit_lri(batch, CACHE_MODE_1, reg_val);
1629 
1630    /* After the LRI, a PIPE_CONTROL with both the Depth Stall and Depth Cache
1631     * Flush bits is often necessary.  We do it regardless because it's easier.
1632     * The render cache flush is also necessary if stencil writes are enabled.
1633     *
1634     * Again, the Gen9 docs give a different set of flushes but the Broadwell
1635     * flushes seem to work just as well.
1636     */
1637    iris_emit_pipe_control_flush(batch, "PMA fix change (2/2)",
1638                                 PIPE_CONTROL_DEPTH_STALL |
1639                                 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
1640                                 PIPE_CONTROL_RENDER_TARGET_FLUSH);
1641 #endif
1642 }
1643 
1644 /**
1645  * Gallium CSO for rasterizer state.
1646  */
1647 struct iris_rasterizer_state {
1648    uint32_t sf[GENX(3DSTATE_SF_length)];
1649    uint32_t clip[GENX(3DSTATE_CLIP_length)];
1650    uint32_t raster[GENX(3DSTATE_RASTER_length)];
1651    uint32_t wm[GENX(3DSTATE_WM_length)];
1652    uint32_t line_stipple[GENX(3DSTATE_LINE_STIPPLE_length)];
1653 
1654    uint8_t num_clip_plane_consts;
1655    bool clip_halfz; /* for CC_VIEWPORT */
1656    bool depth_clip_near; /* for CC_VIEWPORT */
1657    bool depth_clip_far; /* for CC_VIEWPORT */
1658    bool flatshade; /* for shader state */
1659    bool flatshade_first; /* for stream output */
1660    bool clamp_fragment_color; /* for shader state */
1661    bool light_twoside; /* for shader state */
1662    bool rasterizer_discard; /* for 3DSTATE_STREAMOUT and 3DSTATE_CLIP */
1663    bool half_pixel_center; /* for 3DSTATE_MULTISAMPLE */
1664    bool line_stipple_enable;
1665    bool poly_stipple_enable;
1666    bool multisample;
1667    bool force_persample_interp;
1668    bool conservative_rasterization;
1669    bool fill_mode_point;
1670    bool fill_mode_line;
1671    bool fill_mode_point_or_line;
1672    enum pipe_sprite_coord_mode sprite_coord_mode; /* PIPE_SPRITE_* */
1673    uint16_t sprite_coord_enable;
1674 };
1675 
1676 static float
1677 get_line_width(const struct pipe_rasterizer_state *state)
1678 {
1679    float line_width = state->line_width;
1680 
1681    /* From the OpenGL 4.4 spec:
1682     *
1683     * "The actual width of non-antialiased lines is determined by rounding
1684     *  the supplied width to the nearest integer, then clamping it to the
1685     *  implementation-dependent maximum non-antialiased line width."
1686     */
1687    if (!state->multisample && !state->line_smooth)
1688       line_width = roundf(state->line_width);
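   /* For example, an aliased width of 1.4 px rounds down to 1.0 px, while
    * 1.5 px rounds up to 2.0 px.
    */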
1689 
1690    if (!state->multisample && state->line_smooth && line_width < 1.5f) {
1691       /* For 1 pixel line thickness or less, the general anti-aliasing
1692        * algorithm gives up, and a garbage line is generated.  Setting a
1693        * Line Width of 0.0 specifies the rasterization of the "thinnest"
1694        * (one-pixel-wide), non-antialiased lines.
1695        *
1696        * Lines rendered with zero Line Width are rasterized using the
1697        * "Grid Intersection Quantization" rules as specified by the
1698        * "Zero-Width (Cosmetic) Line Rasterization" section of the docs.
1699        */
1700       line_width = 0.0f;
1701    }
1702 
1703    return line_width;
1704 }
1705 
1706 /**
1707  * The pipe->create_rasterizer_state() driver hook.
1708  */
1709 static void *
1710 iris_create_rasterizer_state(struct pipe_context *ctx,
1711                              const struct pipe_rasterizer_state *state)
1712 {
1713    struct iris_rasterizer_state *cso =
1714       malloc(sizeof(struct iris_rasterizer_state));
1715 
1716    cso->multisample = state->multisample;
1717    cso->force_persample_interp = state->force_persample_interp;
1718    cso->clip_halfz = state->clip_halfz;
1719    cso->depth_clip_near = state->depth_clip_near;
1720    cso->depth_clip_far = state->depth_clip_far;
1721    cso->flatshade = state->flatshade;
1722    cso->flatshade_first = state->flatshade_first;
1723    cso->clamp_fragment_color = state->clamp_fragment_color;
1724    cso->light_twoside = state->light_twoside;
1725    cso->rasterizer_discard = state->rasterizer_discard;
1726    cso->half_pixel_center = state->half_pixel_center;
1727    cso->sprite_coord_mode = state->sprite_coord_mode;
1728    cso->sprite_coord_enable = state->sprite_coord_enable;
1729    cso->line_stipple_enable = state->line_stipple_enable;
1730    cso->poly_stipple_enable = state->poly_stipple_enable;
1731    cso->conservative_rasterization =
1732       state->conservative_raster_mode == PIPE_CONSERVATIVE_RASTER_POST_SNAP;
1733 
1734    cso->fill_mode_point =
1735       state->fill_front == PIPE_POLYGON_MODE_POINT ||
1736       state->fill_back == PIPE_POLYGON_MODE_POINT;
1737    cso->fill_mode_line =
1738       state->fill_front == PIPE_POLYGON_MODE_LINE ||
1739       state->fill_back == PIPE_POLYGON_MODE_LINE;
1740    cso->fill_mode_point_or_line =
1741       cso->fill_mode_point ||
1742       cso->fill_mode_line;
1743 
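   /* Upload clip plane constants up through the highest enabled plane; the
    * enable bitmask may be sparse.
    */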
1744    if (state->clip_plane_enable != 0)
1745       cso->num_clip_plane_consts = util_logbase2(state->clip_plane_enable) + 1;
1746    else
1747       cso->num_clip_plane_consts = 0;
1748 
1749    float line_width = get_line_width(state);
1750 
1751    iris_pack_command(GENX(3DSTATE_SF), cso->sf, sf) {
1752       sf.StatisticsEnable = true;
1753       sf.AALineDistanceMode = AALINEDISTANCE_TRUE;
1754       sf.LineEndCapAntialiasingRegionWidth =
1755          state->line_smooth ? _10pixels : _05pixels;
1756       sf.LastPixelEnable = state->line_last_pixel;
1757       sf.LineWidth = line_width;
1758       sf.SmoothPointEnable = (state->point_smooth || state->multisample) &&
1759                              !state->point_quad_rasterization;
1760       sf.PointWidthSource = state->point_size_per_vertex ? Vertex : State;
1761       sf.PointWidth = state->point_size;
1762 
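      /* Provoking vertex selection: with the first-vertex convention, the
       * hardware defaults (vertex 0) already work for strips and lists, but
       * fans need vertex 1 since element 0 is the shared center.  Otherwise
       * select the last vertex of each primitive.
       */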
1763       if (state->flatshade_first) {
1764          sf.TriangleFanProvokingVertexSelect = 1;
1765       } else {
1766          sf.TriangleStripListProvokingVertexSelect = 2;
1767          sf.TriangleFanProvokingVertexSelect = 2;
1768          sf.LineStripListProvokingVertexSelect = 1;
1769       }
1770    }
1771 
1772    iris_pack_command(GENX(3DSTATE_RASTER), cso->raster, rr) {
1773       rr.FrontWinding = state->front_ccw ? CounterClockwise : Clockwise;
1774       rr.CullMode = translate_cull_mode(state->cull_face);
1775       rr.FrontFaceFillMode = translate_fill_mode(state->fill_front);
1776       rr.BackFaceFillMode = translate_fill_mode(state->fill_back);
1777       rr.DXMultisampleRasterizationEnable = state->multisample;
1778       rr.GlobalDepthOffsetEnableSolid = state->offset_tri;
1779       rr.GlobalDepthOffsetEnableWireframe = state->offset_line;
1780       rr.GlobalDepthOffsetEnablePoint = state->offset_point;
1781       rr.GlobalDepthOffsetConstant = state->offset_units * 2;
1782       rr.GlobalDepthOffsetScale = state->offset_scale;
1783       rr.GlobalDepthOffsetClamp = state->offset_clamp;
1784       rr.SmoothPointEnable = state->point_smooth;
1785       rr.AntialiasingEnable = state->line_smooth;
1786       rr.ScissorRectangleEnable = state->scissor;
1787 #if GEN_GEN >= 9
1788       rr.ViewportZNearClipTestEnable = state->depth_clip_near;
1789       rr.ViewportZFarClipTestEnable = state->depth_clip_far;
1790       rr.ConservativeRasterizationEnable =
1791          cso->conservative_rasterization;
1792 #else
1793       rr.ViewportZClipTestEnable = (state->depth_clip_near || state->depth_clip_far);
1794 #endif
1795    }
1796 
1797    iris_pack_command(GENX(3DSTATE_CLIP), cso->clip, cl) {
1798       /* cl.NonPerspectiveBarycentricEnable is filled in at draw time from
1799        * the FS program; cl.ForceZeroRTAIndexEnable is filled in from the FB.
1800        */
1801       cl.EarlyCullEnable = true;
1802       cl.UserClipDistanceClipTestEnableBitmask = state->clip_plane_enable;
1803       cl.ForceUserClipDistanceClipTestEnableBitmask = true;
1804       cl.APIMode = state->clip_halfz ? APIMODE_D3D : APIMODE_OGL;
1805       cl.GuardbandClipTestEnable = true;
1806       cl.ClipEnable = true;
1807       cl.MinimumPointWidth = 0.125;
1808       cl.MaximumPointWidth = 255.875;
1809 
1810       if (state->flatshade_first) {
1811          cl.TriangleFanProvokingVertexSelect = 1;
1812       } else {
1813          cl.TriangleStripListProvokingVertexSelect = 2;
1814          cl.TriangleFanProvokingVertexSelect = 2;
1815          cl.LineStripListProvokingVertexSelect = 1;
1816       }
1817    }
1818 
1819    iris_pack_command(GENX(3DSTATE_WM), cso->wm, wm) {
1820       /* wm.BarycentricInterpolationMode and wm.EarlyDepthStencilControl are
1821        * filled in at draw time from the FS program.
1822        */
1823       wm.LineAntialiasingRegionWidth = _10pixels;
1824       wm.LineEndCapAntialiasingRegionWidth = _05pixels;
1825       wm.PointRasterizationRule = RASTRULE_UPPER_RIGHT;
1826       wm.LineStippleEnable = state->line_stipple_enable;
1827       wm.PolygonStippleEnable = state->poly_stipple_enable;
1828    }
1829 
1830    /* Remap from 0..255 back to 1..256 */
1831    const unsigned line_stipple_factor = state->line_stipple_factor + 1;
1832 
1833    iris_pack_command(GENX(3DSTATE_LINE_STIPPLE), cso->line_stipple, line) {
1834       if (state->line_stipple_enable) {
1835          line.LineStipplePattern = state->line_stipple_pattern;
1836          line.LineStippleInverseRepeatCount = 1.0f / line_stipple_factor;
1837          line.LineStippleRepeatCount = line_stipple_factor;
1838       }
1839    }
1840 
1841    return cso;
1842 }
1843 
1844 /**
1845  * The pipe->bind_rasterizer_state() driver hook.
1846  *
1847  * Bind a rasterizer CSO and flag related dirty bits.
1848  */
1849 static void
1850 iris_bind_rasterizer_state(struct pipe_context *ctx, void *state)
1851 {
1852    struct iris_context *ice = (struct iris_context *) ctx;
1853    struct iris_rasterizer_state *old_cso = ice->state.cso_rast;
1854    struct iris_rasterizer_state *new_cso = state;
1855 
1856    if (new_cso) {
1857       /* Try to avoid re-emitting 3DSTATE_LINE_STIPPLE, it's non-pipelined */
1858       if (cso_changed_memcmp(line_stipple))
1859          ice->state.dirty |= IRIS_DIRTY_LINE_STIPPLE;
1860 
1861       if (cso_changed(half_pixel_center))
1862          ice->state.dirty |= IRIS_DIRTY_MULTISAMPLE;
1863 
1864       if (cso_changed(line_stipple_enable) || cso_changed(poly_stipple_enable))
1865          ice->state.dirty |= IRIS_DIRTY_WM;
1866 
1867       if (cso_changed(rasterizer_discard))
1868          ice->state.dirty |= IRIS_DIRTY_STREAMOUT | IRIS_DIRTY_CLIP;
1869 
1870       if (cso_changed(flatshade_first))
1871          ice->state.dirty |= IRIS_DIRTY_STREAMOUT;
1872 
1873       if (cso_changed(depth_clip_near) || cso_changed(depth_clip_far) ||
1874           cso_changed(clip_halfz))
1875          ice->state.dirty |= IRIS_DIRTY_CC_VIEWPORT;
1876 
1877       if (cso_changed(sprite_coord_enable) ||
1878           cso_changed(sprite_coord_mode) ||
1879           cso_changed(light_twoside))
1880          ice->state.dirty |= IRIS_DIRTY_SBE;
1881 
1882       if (cso_changed(conservative_rasterization))
1883          ice->state.stage_dirty |= IRIS_STAGE_DIRTY_FS;
1884    }
1885 
1886    ice->state.cso_rast = new_cso;
1887    ice->state.dirty |= IRIS_DIRTY_RASTER;
1888    ice->state.dirty |= IRIS_DIRTY_CLIP;
1889    ice->state.stage_dirty |=
1890       ice->state.stage_dirty_for_nos[IRIS_NOS_RASTERIZER];
1891 }
1892 
1893 /**
1894  * Return true if the given wrap mode requires the border color to exist.
1895  *
1896  * (We can skip uploading it if the sampler isn't going to use it.)
1897  */
1898 static bool
1899 wrap_mode_needs_border_color(unsigned wrap_mode)
1900 {
1901    return wrap_mode == TCM_CLAMP_BORDER || wrap_mode == TCM_HALF_BORDER;
1902 }
1903 
1904 /**
1905  * Gallium CSO for sampler state.
1906  */
1907 struct iris_sampler_state {
1908    union pipe_color_union border_color;
1909    bool needs_border_color;
1910 
1911    uint32_t sampler_state[GENX(SAMPLER_STATE_length)];
1912 };
1913 
1914 /**
1915  * The pipe->create_sampler_state() driver hook.
1916  *
1917  * We fill out SAMPLER_STATE (except for the border color pointer), and
1918  * store that on the CPU.  It doesn't make sense to upload it to a GPU
1919  * buffer object yet, because 3DSTATE_SAMPLER_STATE_POINTERS requires
1920  * all bound sampler states to be in contiguous memory.
1921  */
1922 static void *
1923 iris_create_sampler_state(struct pipe_context *ctx,
1924                           const struct pipe_sampler_state *state)
1925 {
1926    struct iris_sampler_state *cso = CALLOC_STRUCT(iris_sampler_state);
1927 
1928    if (!cso)
1929       return NULL;
1930 
1931    STATIC_ASSERT(PIPE_TEX_FILTER_NEAREST == MAPFILTER_NEAREST);
1932    STATIC_ASSERT(PIPE_TEX_FILTER_LINEAR == MAPFILTER_LINEAR);
1933 
1934    unsigned wrap_s = translate_wrap(state->wrap_s);
1935    unsigned wrap_t = translate_wrap(state->wrap_t);
1936    unsigned wrap_r = translate_wrap(state->wrap_r);
1937 
1938    memcpy(&cso->border_color, &state->border_color, sizeof(cso->border_color));
1939 
1940    cso->needs_border_color = wrap_mode_needs_border_color(wrap_s) ||
1941                              wrap_mode_needs_border_color(wrap_t) ||
1942                              wrap_mode_needs_border_color(wrap_r);
1943 
1944    float min_lod = state->min_lod;
1945    unsigned mag_img_filter = state->mag_img_filter;
1946 
1947    // XXX: explain this code ported from ilo...I don't get it at all...
1948    if (state->min_mip_filter == PIPE_TEX_MIPFILTER_NONE &&
1949        state->min_lod > 0.0f) {
1950       min_lod = 0.0f;
1951       mag_img_filter = state->min_img_filter;
1952    }
1953 
1954    iris_pack_state(GENX(SAMPLER_STATE), cso->sampler_state, samp) {
1955       samp.TCXAddressControlMode = wrap_s;
1956       samp.TCYAddressControlMode = wrap_t;
1957       samp.TCZAddressControlMode = wrap_r;
1958       samp.CubeSurfaceControlMode = state->seamless_cube_map;
1959       samp.NonnormalizedCoordinateEnable = !state->normalized_coords;
1960       samp.MinModeFilter = state->min_img_filter;
1961       samp.MagModeFilter = mag_img_filter;
1962       samp.MipModeFilter = translate_mip_filter(state->min_mip_filter);
1963       samp.MaximumAnisotropy = RATIO21;
1964 
1965       if (state->max_anisotropy >= 2) {
1966          if (state->min_img_filter == PIPE_TEX_FILTER_LINEAR) {
1967             samp.MinModeFilter = MAPFILTER_ANISOTROPIC;
1968             samp.AnisotropicAlgorithm = EWAApproximation;
1969          }
1970 
1971          if (state->mag_img_filter == PIPE_TEX_FILTER_LINEAR)
1972             samp.MagModeFilter = MAPFILTER_ANISOTROPIC;
1973 
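         /* The hardware encodes the ratio as 2*(n+1):1, so 2..16 maps to
          * RATIO21..RATIO161.
          */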
1974          samp.MaximumAnisotropy =
1975             MIN2((state->max_anisotropy - 2) / 2, RATIO161);
1976       }
1977 
1978       /* Set address rounding bits if not using nearest filtering. */
1979       if (state->min_img_filter != PIPE_TEX_FILTER_NEAREST) {
1980          samp.UAddressMinFilterRoundingEnable = true;
1981          samp.VAddressMinFilterRoundingEnable = true;
1982          samp.RAddressMinFilterRoundingEnable = true;
1983       }
1984 
1985       if (state->mag_img_filter != PIPE_TEX_FILTER_NEAREST) {
1986          samp.UAddressMagFilterRoundingEnable = true;
1987          samp.VAddressMagFilterRoundingEnable = true;
1988          samp.RAddressMagFilterRoundingEnable = true;
1989       }
1990 
1991       if (state->compare_mode == PIPE_TEX_COMPARE_R_TO_TEXTURE)
1992          samp.ShadowFunction = translate_shadow_func(state->compare_func);
1993 
1994       const float hw_max_lod = GEN_GEN >= 7 ? 14 : 13;
1995 
1996       samp.LODPreClampMode = CLAMP_MODE_OGL;
1997       samp.MinLOD = CLAMP(min_lod, 0, hw_max_lod);
1998       samp.MaxLOD = CLAMP(state->max_lod, 0, hw_max_lod);
1999       samp.TextureLODBias = CLAMP(state->lod_bias, -16, 15);
2000 
2001       /* .BorderColorPointer is filled in by iris_bind_sampler_states. */
2002    }
2003 
2004    return cso;
2005 }
2006 
2007 /**
2008  * The pipe->bind_sampler_states() driver hook.
2009  */
2010 static void
2011 iris_bind_sampler_states(struct pipe_context *ctx,
2012                          enum pipe_shader_type p_stage,
2013                          unsigned start, unsigned count,
2014                          void **states)
2015 {
2016    struct iris_context *ice = (struct iris_context *) ctx;
2017    gl_shader_stage stage = stage_from_pipe(p_stage);
2018    struct iris_shader_state *shs = &ice->state.shaders[stage];
2019 
2020    assert(start + count <= IRIS_MAX_TEXTURE_SAMPLERS);
2021 
2022    bool dirty = false;
2023 
2024    for (int i = 0; i < count; i++) {
2025       if (shs->samplers[start + i] != states[i]) {
2026          shs->samplers[start + i] = states[i];
2027          dirty = true;
2028       }
2029    }
2030 
2031    if (dirty)
2032       ice->state.stage_dirty |= IRIS_STAGE_DIRTY_SAMPLER_STATES_VS << stage;
2033 }
2034 
2035 /**
2036  * Upload the sampler states into a contiguous area of GPU memory, for
2037  * 3DSTATE_SAMPLER_STATE_POINTERS_*.
2038  *
2039  * Also fill out the border color state pointers.
2040  */
2041 static void
2042 iris_upload_sampler_states(struct iris_context *ice, gl_shader_stage stage)
2043 {
2044    struct iris_shader_state *shs = &ice->state.shaders[stage];
2045    const struct shader_info *info = iris_get_shader_info(ice, stage);
2046 
2047    /* We assume gallium frontends will call pipe->bind_sampler_states()
2048     * if the program's number of textures changes.
2049     */
2050    unsigned count = info ? util_last_bit(info->textures_used) : 0;
2051 
2052    if (!count)
2053       return;
2054 
2055    /* Assemble the SAMPLER_STATEs into a contiguous table that lives
2056     * in the dynamic state memory zone, so we can point to it via the
2057     * 3DSTATE_SAMPLER_STATE_POINTERS_* commands.
2058     */
2059    unsigned size = count * 4 * GENX(SAMPLER_STATE_length);
2060    uint32_t *map =
2061       upload_state(ice->state.dynamic_uploader, &shs->sampler_table, size, 32);
2062    if (unlikely(!map))
2063       return;
2064 
2065    struct pipe_resource *res = shs->sampler_table.res;
2066    struct iris_bo *bo = iris_resource_bo(res);
2067 
2068    iris_record_state_size(ice->state.sizes,
2069                           bo->gtt_offset + shs->sampler_table.offset, size);
2070 
2071    shs->sampler_table.offset += iris_bo_offset_from_base_address(bo);
2072 
2073    /* Make sure all land in the same BO */
2074    iris_border_color_pool_reserve(ice, IRIS_MAX_TEXTURE_SAMPLERS);
2075 
2076    ice->state.need_border_colors &= ~(1 << stage);
2077 
2078    for (int i = 0; i < count; i++) {
2079       struct iris_sampler_state *state = shs->samplers[i];
2080       struct iris_sampler_view *tex = shs->textures[i];
2081 
2082       if (!state) {
2083          memset(map, 0, 4 * GENX(SAMPLER_STATE_length));
2084       } else if (!state->needs_border_color) {
2085          memcpy(map, state->sampler_state, 4 * GENX(SAMPLER_STATE_length));
2086       } else {
2087          ice->state.need_border_colors |= 1 << stage;
2088 
2089          /* We may need to swizzle the border color for format faking.
2090           * A/LA formats are faked as R/RG with 000R or R00G swizzles.
2091           * This means we need to move the border color's A channel into
2092           * the R or G channels so that those read swizzles will move it
2093           * back into A.
2094           */
2095          union pipe_color_union *color = &state->border_color;
2096          union pipe_color_union tmp;
2097          if (tex) {
2098             enum pipe_format internal_format = tex->res->internal_format;
2099 
2100             if (util_format_is_alpha(internal_format)) {
2101                unsigned char swz[4] = {
2102                   PIPE_SWIZZLE_W, PIPE_SWIZZLE_0,
2103                   PIPE_SWIZZLE_0, PIPE_SWIZZLE_0
2104                };
2105                util_format_apply_color_swizzle(&tmp, color, swz, true);
2106                color = &tmp;
2107             } else if (util_format_is_luminance_alpha(internal_format) &&
2108                        internal_format != PIPE_FORMAT_L8A8_SRGB) {
2109                unsigned char swz[4] = {
2110                   PIPE_SWIZZLE_X, PIPE_SWIZZLE_W,
2111                   PIPE_SWIZZLE_0, PIPE_SWIZZLE_0
2112                };
2113                util_format_apply_color_swizzle(&tmp, color, swz, true);
2114                color = &tmp;
2115             }
2116          }
2117 
2118          /* Stream out the border color and merge the pointer. */
2119          uint32_t offset = iris_upload_border_color(ice, color);
2120 
2121          uint32_t dynamic[GENX(SAMPLER_STATE_length)];
2122          iris_pack_state(GENX(SAMPLER_STATE), dynamic, dyns) {
2123             dyns.BorderColorPointer = offset;
2124          }
2125 
2126          for (uint32_t j = 0; j < GENX(SAMPLER_STATE_length); j++)
2127             map[j] = state->sampler_state[j] | dynamic[j];
2128       }
2129 
2130       map += GENX(SAMPLER_STATE_length);
2131    }
2132 }
2133 
2134 static enum isl_channel_select
2135 fmt_swizzle(const struct iris_format_info *fmt, enum pipe_swizzle swz)
2136 {
2137    switch (swz) {
2138    case PIPE_SWIZZLE_X: return fmt->swizzle.r;
2139    case PIPE_SWIZZLE_Y: return fmt->swizzle.g;
2140    case PIPE_SWIZZLE_Z: return fmt->swizzle.b;
2141    case PIPE_SWIZZLE_W: return fmt->swizzle.a;
2142    case PIPE_SWIZZLE_1: return SCS_ONE;
2143    case PIPE_SWIZZLE_0: return SCS_ZERO;
2144    default: unreachable("invalid swizzle");
2145    }
2146 }
2147 
2148 static void
2149 fill_buffer_surface_state(struct isl_device *isl_dev,
2150                           struct iris_resource *res,
2151                           void *map,
2152                           enum isl_format format,
2153                           struct isl_swizzle swizzle,
2154                           unsigned offset,
2155                           unsigned size,
2156                           isl_surf_usage_flags_t usage)
2157 {
2158    const struct isl_format_layout *fmtl = isl_format_get_layout(format);
2159    const unsigned cpp = format == ISL_FORMAT_RAW ? 1 : fmtl->bpb / 8;
2160 
2161    /* The ARB_texture_buffer_specification says:
2162     *
2163     *    "The number of texels in the buffer texture's texel array is given by
2164     *
2165     *       floor(<buffer_size> / (<components> * sizeof(<base_type>)),
2166     *
2167     *     where <buffer_size> is the size of the buffer object, in basic
2168     *     machine units and <components> and <base_type> are the element count
2169     *     and base data type for elements, as specified in Table X.1.  The
2170     *     number of texels in the texel array is then clamped to the
2171     *     implementation-dependent limit MAX_TEXTURE_BUFFER_SIZE_ARB."
2172     *
2173     * We need to clamp the size in bytes to MAX_TEXTURE_BUFFER_SIZE * stride,
2174     * so that when ISL divides by stride to obtain the number of texels, that
2175     * texel count is clamped to MAX_TEXTURE_BUFFER_SIZE.
2176     */
2177    unsigned final_size =
2178       MIN3(size, res->bo->size - res->offset - offset,
2179            IRIS_MAX_TEXTURE_BUFFER_SIZE * cpp);
2180 
2181    isl_buffer_fill_state(isl_dev, map,
2182                          .address = res->bo->gtt_offset + res->offset + offset,
2183                          .size_B = final_size,
2184                          .format = format,
2185                          .swizzle = swizzle,
2186                          .stride_B = cpp,
2187                          .mocs = iris_mocs(res->bo, isl_dev, usage));
2188 }
2189 
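/* RENDER_SURFACE_STATE is 16 DWords (64 bytes), so this alignment keeps each
 * aux-usage copy of a surface state at a fixed stride from the first.
 */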
2190 #define SURFACE_STATE_ALIGNMENT 64
2191 
2192 /**
2193  * Allocate several contiguous SURFACE_STATE structures, one for each
2194  * supported auxiliary surface mode.  This only allocates the CPU-side
2195  * copy, they will need to be uploaded later after they're filled in.
2196  */
2197 static void
2198 alloc_surface_states(struct iris_surface_state *surf_state,
2199                      unsigned aux_usages)
2200 {
2201    const unsigned surf_size = 4 * GENX(RENDER_SURFACE_STATE_length);
2202 
2203    /* If this changes, update this to explicitly align pointers */
2204    STATIC_ASSERT(surf_size == SURFACE_STATE_ALIGNMENT);
2205 
2206    assert(aux_usages != 0);
2207 
2208    /* In case we're re-allocating them... */
2209    free(surf_state->cpu);
2210 
2211    surf_state->num_states = util_bitcount(aux_usages);
2212    surf_state->cpu = calloc(surf_state->num_states, surf_size);
2213    surf_state->ref.offset = 0;
2214    pipe_resource_reference(&surf_state->ref.res, NULL);
2215 
2216    assert(surf_state->cpu);
2217 }
2218 
2219 /**
2220  * Upload the CPU side SURFACE_STATEs into a GPU buffer.
2221  */
2222 static void
2223 upload_surface_states(struct u_upload_mgr *mgr,
2224                       struct iris_surface_state *surf_state)
2225 {
2226    const unsigned surf_size = 4 * GENX(RENDER_SURFACE_STATE_length);
2227    const unsigned bytes = surf_state->num_states * surf_size;
2228 
2229    void *map =
2230       upload_state(mgr, &surf_state->ref, bytes, SURFACE_STATE_ALIGNMENT);
2231 
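   /* SURFACE_STATE pointers are relative to Surface State Base Address, so
    * fold in the BO's offset from that base.
    */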
2232    surf_state->ref.offset +=
2233       iris_bo_offset_from_base_address(iris_resource_bo(surf_state->ref.res));
2234 
2235    if (map)
2236       memcpy(map, surf_state->cpu, bytes);
2237 }
2238 
2239 /**
2240  * Update resource addresses in a set of SURFACE_STATE descriptors,
2241  * and re-upload them if necessary.
2242  */
2243 static bool
2244 update_surface_state_addrs(struct u_upload_mgr *mgr,
2245                            struct iris_surface_state *surf_state,
2246                            struct iris_bo *bo)
2247 {
2248    if (surf_state->bo_address == bo->gtt_offset)
2249       return false;
2250 
2251    STATIC_ASSERT(GENX(RENDER_SURFACE_STATE_SurfaceBaseAddress_start) % 64 == 0);
2252    STATIC_ASSERT(GENX(RENDER_SURFACE_STATE_SurfaceBaseAddress_bits) == 64);
2253 
2254    uint64_t *ss_addr = (uint64_t *) &surf_state->cpu[GENX(RENDER_SURFACE_STATE_SurfaceBaseAddress_start) / 32];
2255 
2256    /* First, update the CPU copies.  We assume no other fields exist in
2257     * the QWord containing Surface Base Address.
2258     */
2259    for (unsigned i = 0; i < surf_state->num_states; i++) {
2260       *ss_addr = *ss_addr - surf_state->bo_address + bo->gtt_offset;
2261       ss_addr = ((void *) ss_addr) + SURFACE_STATE_ALIGNMENT;
2262    }
2263 
2264    /* Next, upload the updated copies to a GPU buffer. */
2265    upload_surface_states(mgr, surf_state);
2266 
2267    surf_state->bo_address = bo->gtt_offset;
2268 
2269    return true;
2270 }
2271 
2272 #if GEN_GEN == 8
2273 /**
2274  * Return an ISL surface for use with non-coherent render target reads.
2275  *
2276  * In a few complex cases, we can't use the SURFACE_STATE for normal render
2277  * target writes.  We need to make a separate one for sampling which refers
2278  * to the single slice of the texture being read.
2279  */
2280 static void
2281 get_rt_read_isl_surf(const struct gen_device_info *devinfo,
2282                      struct iris_resource *res,
2283                      enum pipe_texture_target target,
2284                      struct isl_view *view,
2285                      uint32_t *offset_to_tile,
2286                      uint32_t *tile_x_sa,
2287                      uint32_t *tile_y_sa,
2288                      struct isl_surf *surf)
2289 {
2290    *surf = res->surf;
2291 
2292    const enum isl_dim_layout dim_layout =
2293       iris_get_isl_dim_layout(devinfo, res->surf.tiling, target);
2294 
2295    surf->dim = target_to_isl_surf_dim(target);
2296 
2297    if (surf->dim_layout == dim_layout)
2298       return;
2299 
2300    /* The layout of the specified texture target is not compatible with the
2301     * actual layout of the miptree structure in memory -- You're entering
2302     * dangerous territory, this can only possibly work if you only intended
2303     * to access a single level and slice of the texture, and the hardware
2304     * supports the tile offset feature in order to allow non-tile-aligned
2305     * base offsets, since we'll have to point the hardware to the first
2306     * texel of the level instead of relying on the usual base level/layer
2307     * controls.
2308     */
2309    assert(view->levels == 1 && view->array_len == 1);
2310    assert(*tile_x_sa == 0 && *tile_y_sa == 0);
2311 
2312    *offset_to_tile = iris_resource_get_tile_offsets(res, view->base_level,
2313                                                     view->base_array_layer,
2314                                                     tile_x_sa, tile_y_sa);
2315    const unsigned l = view->base_level;
2316 
2317    surf->logical_level0_px.width = minify(surf->logical_level0_px.width, l);
2318    surf->logical_level0_px.height = surf->dim <= ISL_SURF_DIM_1D ? 1 :
2319       minify(surf->logical_level0_px.height, l);
2320    surf->logical_level0_px.depth = surf->dim <= ISL_SURF_DIM_2D ? 1 :
2321       minify(surf->logical_level0_px.depth, l);
2322 
2323    surf->logical_level0_px.array_len = 1;
2324    surf->levels = 1;
2325    surf->dim_layout = dim_layout;
2326 
2327    view->base_level = 0;
2328    view->base_array_layer = 0;
2329 }
2330 #endif
2331 
2332 static void
2333 fill_surface_state(struct isl_device *isl_dev,
2334                    void *map,
2335                    struct iris_resource *res,
2336                    struct isl_surf *surf,
2337                    struct isl_view *view,
2338                    unsigned aux_usage,
2339                    uint32_t extra_main_offset,
2340                    uint32_t tile_x_sa,
2341                    uint32_t tile_y_sa)
2342 {
2343    struct isl_surf_fill_state_info f = {
2344       .surf = surf,
2345       .view = view,
2346       .mocs = iris_mocs(res->bo, isl_dev, view->usage),
2347       .address = res->bo->gtt_offset + res->offset + extra_main_offset,
2348       .x_offset_sa = tile_x_sa,
2349       .y_offset_sa = tile_y_sa,
2350    };
2351 
2352    assert(!iris_resource_unfinished_aux_import(res));
2353 
2354    if (aux_usage != ISL_AUX_USAGE_NONE) {
2355       f.aux_surf = &res->aux.surf;
2356       f.aux_usage = aux_usage;
2357       f.aux_address = res->aux.bo->gtt_offset + res->aux.offset;
2358 
2359       struct iris_bo *clear_bo = NULL;
2360       uint64_t clear_offset = 0;
2361       f.clear_color =
2362          iris_resource_get_clear_color(res, &clear_bo, &clear_offset);
2363       if (clear_bo) {
2364          f.clear_address = clear_bo->gtt_offset + clear_offset;
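         /* Gen10+ can read the clear color directly from memory; older gens
          * need it packed into the surface state instead.
          */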
2365          f.use_clear_address = isl_dev->info->gen > 9;
2366       }
2367    }
2368 
2369    isl_surf_fill_state_s(isl_dev, map, &f);
2370 }
2371 
2372 /**
2373  * The pipe->create_sampler_view() driver hook.
2374  */
2375 static struct pipe_sampler_view *
2376 iris_create_sampler_view(struct pipe_context *ctx,
2377                          struct pipe_resource *tex,
2378                          const struct pipe_sampler_view *tmpl)
2379 {
2380    struct iris_context *ice = (struct iris_context *) ctx;
2381    struct iris_screen *screen = (struct iris_screen *)ctx->screen;
2382    const struct gen_device_info *devinfo = &screen->devinfo;
2383    struct iris_sampler_view *isv = calloc(1, sizeof(struct iris_sampler_view));
2384 
2385    if (!isv)
2386       return NULL;
2387 
2388    /* initialize base object */
2389    isv->base = *tmpl;
2390    isv->base.context = ctx;
2391    isv->base.texture = NULL;
2392    pipe_reference_init(&isv->base.reference, 1);
2393    pipe_resource_reference(&isv->base.texture, tex);
2394 
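   /* For combined depth/stencil resources, sample from whichever half
    * (depth or stencil) the requested view format selects.
    */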
2395    if (util_format_is_depth_or_stencil(tmpl->format)) {
2396       struct iris_resource *zres, *sres;
2397       const struct util_format_description *desc =
2398          util_format_description(tmpl->format);
2399 
2400       iris_get_depth_stencil_resources(tex, &zres, &sres);
2401 
2402       tex = util_format_has_depth(desc) ? &zres->base : &sres->base;
2403    }
2404 
2405    isv->res = (struct iris_resource *) tex;
2406 
2407    alloc_surface_states(&isv->surface_state, isv->res->aux.sampler_usages);
2408 
2409    isv->surface_state.bo_address = isv->res->bo->gtt_offset;
2410 
2411    isl_surf_usage_flags_t usage = ISL_SURF_USAGE_TEXTURE_BIT;
2412 
2413    if (isv->base.target == PIPE_TEXTURE_CUBE ||
2414        isv->base.target == PIPE_TEXTURE_CUBE_ARRAY)
2415       usage |= ISL_SURF_USAGE_CUBE_BIT;
2416 
2417    const struct iris_format_info fmt =
2418       iris_format_for_usage(devinfo, tmpl->format, usage);
2419 
2420    isv->clear_color = isv->res->aux.clear_color;
2421 
2422    isv->view = (struct isl_view) {
2423       .format = fmt.fmt,
2424       .swizzle = (struct isl_swizzle) {
2425          .r = fmt_swizzle(&fmt, tmpl->swizzle_r),
2426          .g = fmt_swizzle(&fmt, tmpl->swizzle_g),
2427          .b = fmt_swizzle(&fmt, tmpl->swizzle_b),
2428          .a = fmt_swizzle(&fmt, tmpl->swizzle_a),
2429       },
2430       .usage = usage,
2431    };
2432 
2433    void *map = isv->surface_state.cpu;
2434 
2435    /* Fill out SURFACE_STATE for this view. */
2436    if (tmpl->target != PIPE_BUFFER) {
2437       isv->view.base_level = tmpl->u.tex.first_level;
2438       isv->view.levels = tmpl->u.tex.last_level - tmpl->u.tex.first_level + 1;
2439       // XXX: do I need to port f9fd0cf4790cb2a530e75d1a2206dbb9d8af7cb2?
2440       isv->view.base_array_layer = tmpl->u.tex.first_layer;
2441       isv->view.array_len =
2442          tmpl->u.tex.last_layer - tmpl->u.tex.first_layer + 1;
2443 
2444       if (iris_resource_unfinished_aux_import(isv->res))
2445          iris_resource_finish_aux_import(&screen->base, isv->res);
2446 
2447       unsigned aux_modes = isv->res->aux.sampler_usages;
2448       while (aux_modes) {
2449          enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes);
2450 
2451          /* If we have a multisampled depth buffer, do not create a sampler
2452           * surface state with HiZ.
2453           */
2454          fill_surface_state(&screen->isl_dev, map, isv->res, &isv->res->surf,
2455                             &isv->view, aux_usage, 0, 0, 0);
2456 
2457          map += SURFACE_STATE_ALIGNMENT;
2458       }
2459    } else {
2460       fill_buffer_surface_state(&screen->isl_dev, isv->res, map,
2461                                 isv->view.format, isv->view.swizzle,
2462                                 tmpl->u.buf.offset, tmpl->u.buf.size,
2463                                 ISL_SURF_USAGE_TEXTURE_BIT);
2464    }
2465 
2466    upload_surface_states(ice->state.surface_uploader, &isv->surface_state);
2467 
2468    return &isv->base;
2469 }
2470 
2471 static void
2472 iris_sampler_view_destroy(struct pipe_context *ctx,
2473                           struct pipe_sampler_view *state)
2474 {
2475    struct iris_sampler_view *isv = (void *) state;
2476    pipe_resource_reference(&state->texture, NULL);
2477    pipe_resource_reference(&isv->surface_state.ref.res, NULL);
2478    free(isv->surface_state.cpu);
2479    free(isv);
2480 }
2481 
2482 /**
2483  * The pipe->create_surface() driver hook.
2484  *
2485  * In Gallium nomenclature, "surfaces" are a view of a resource that
2486  * can be bound as a render target or depth/stencil buffer.
2487  */
2488 static struct pipe_surface *
2489 iris_create_surface(struct pipe_context *ctx,
2490                     struct pipe_resource *tex,
2491                     const struct pipe_surface *tmpl)
2492 {
2493    struct iris_context *ice = (struct iris_context *) ctx;
2494    struct iris_screen *screen = (struct iris_screen *)ctx->screen;
2495    const struct gen_device_info *devinfo = &screen->devinfo;
2496 
2497    isl_surf_usage_flags_t usage = 0;
2498    if (tmpl->writable)
2499       usage = ISL_SURF_USAGE_STORAGE_BIT;
2500    else if (util_format_is_depth_or_stencil(tmpl->format))
2501       usage = ISL_SURF_USAGE_DEPTH_BIT;
2502    else
2503       usage = ISL_SURF_USAGE_RENDER_TARGET_BIT;
2504 
2505    const struct iris_format_info fmt =
2506       iris_format_for_usage(devinfo, tmpl->format, usage);
2507 
2508    if ((usage & ISL_SURF_USAGE_RENDER_TARGET_BIT) &&
2509        !isl_format_supports_rendering(devinfo, fmt.fmt)) {
2510       /* Framebuffer validation will reject this invalid case, but it
2511        * hasn't had the opportunity yet.  In the meantime, we need to
2512        * avoid hitting ISL asserts about unsupported formats below.
2513        */
2514       return NULL;
2515    }
2516 
2517    struct iris_surface *surf = calloc(1, sizeof(struct iris_surface));
2518    struct pipe_surface *psurf = &surf->base;
2519    struct iris_resource *res = (struct iris_resource *) tex;
2520 
2521    if (!surf)
2522       return NULL;
2523 
2524    pipe_reference_init(&psurf->reference, 1);
2525    pipe_resource_reference(&psurf->texture, tex);
2526    psurf->context = ctx;
2527    psurf->format = tmpl->format;
2528    psurf->width = tex->width0;
2529    psurf->height = tex->height0;
2530    psurf->texture = tex;
2531    psurf->u.tex.first_layer = tmpl->u.tex.first_layer;
2532    psurf->u.tex.last_layer = tmpl->u.tex.last_layer;
2533    psurf->u.tex.level = tmpl->u.tex.level;
2534 
2535    uint32_t array_len = tmpl->u.tex.last_layer - tmpl->u.tex.first_layer + 1;
2536 
2537    struct isl_view *view = &surf->view;
2538    *view = (struct isl_view) {
2539       .format = fmt.fmt,
2540       .base_level = tmpl->u.tex.level,
2541       .levels = 1,
2542       .base_array_layer = tmpl->u.tex.first_layer,
2543       .array_len = array_len,
2544       .swizzle = ISL_SWIZZLE_IDENTITY,
2545       .usage = usage,
2546    };
2547 
2548 #if GEN_GEN == 8
2549    enum pipe_texture_target target = (tex->target == PIPE_TEXTURE_3D &&
2550                                       array_len == 1) ? PIPE_TEXTURE_2D :
2551                                      tex->target == PIPE_TEXTURE_1D_ARRAY ?
2552                                      PIPE_TEXTURE_2D_ARRAY : tex->target;
2553 
2554    struct isl_view *read_view = &surf->read_view;
2555    *read_view = (struct isl_view) {
2556       .format = fmt.fmt,
2557       .base_level = tmpl->u.tex.level,
2558       .levels = 1,
2559       .base_array_layer = tmpl->u.tex.first_layer,
2560       .array_len = array_len,
2561       .swizzle = ISL_SWIZZLE_IDENTITY,
2562       .usage = ISL_SURF_USAGE_TEXTURE_BIT,
2563    };
2564 #endif
2565 
2566    surf->clear_color = res->aux.clear_color;
2567 
2568    /* Bail early for depth/stencil - we don't want SURFACE_STATE for them. */
2569    if (res->surf.usage & (ISL_SURF_USAGE_DEPTH_BIT |
2570                           ISL_SURF_USAGE_STENCIL_BIT))
2571       return psurf;
2572 
2573 
2574    alloc_surface_states(&surf->surface_state, res->aux.possible_usages);
2575    surf->surface_state.bo_address = res->bo->gtt_offset;
2576 
2577 #if GEN_GEN == 8
2578    alloc_surface_states(&surf->surface_state_read, res->aux.possible_usages);
2579    surf->surface_state_read.bo_address = res->bo->gtt_offset;
2580 #endif
2581 
2582    if (!isl_format_is_compressed(res->surf.format)) {
2583       if (iris_resource_unfinished_aux_import(res))
2584          iris_resource_finish_aux_import(&screen->base, res);
2585 
2586       void *map = surf->surface_state.cpu;
2587       UNUSED void *map_read = surf->surface_state_read.cpu;
2588 
2589       /* This is a normal surface.  Fill out a SURFACE_STATE for each possible
2590        * auxiliary surface mode and return the pipe_surface.
2591        */
2592       unsigned aux_modes = res->aux.possible_usages;
2593       while (aux_modes) {
2594          enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes);
2595          fill_surface_state(&screen->isl_dev, map, res, &res->surf,
2596                             view, aux_usage, 0, 0, 0);
2597          map += SURFACE_STATE_ALIGNMENT;
2598 
2599 #if GEN_GEN == 8
2600          struct isl_surf surf;
2601          uint32_t offset_to_tile = 0, tile_x_sa = 0, tile_y_sa = 0;
2602          get_rt_read_isl_surf(devinfo, res, target, read_view,
2603                               &offset_to_tile, &tile_x_sa, &tile_y_sa, &surf);
2604          fill_surface_state(&screen->isl_dev, map_read, res, &surf, read_view,
2605                             aux_usage, offset_to_tile, tile_x_sa, tile_y_sa);
2606          map_read += SURFACE_STATE_ALIGNMENT;
2607 #endif
2608       }
2609 
2610       upload_surface_states(ice->state.surface_uploader, &surf->surface_state);
2611 
2612 #if GEN_GEN == 8
2613       upload_surface_states(ice->state.surface_uploader,
2614                             &surf->surface_state_read);
2615 #endif
2616 
2617       return psurf;
2618    }
2619 
2620    /* The resource has a compressed format, which is not renderable, but we
2621     * have a renderable view format.  We must be attempting to upload blocks
2622     * of compressed data via an uncompressed view.
2623     *
2624     * In this case, we can assume there are no auxiliary buffers, a single
2625     * miplevel, and that the resource is single-sampled.  Gallium may try
2626     * and create an uncompressed view with multiple layers, however.
2627     */
2628    assert(!isl_format_is_compressed(fmt.fmt));
2629    assert(res->aux.possible_usages == 1 << ISL_AUX_USAGE_NONE);
2630    assert(res->surf.samples == 1);
2631    assert(view->levels == 1);
2632 
2633    struct isl_surf isl_surf;
2634    uint32_t offset_B = 0, tile_x_sa = 0, tile_y_sa = 0;
2635 
2636    if (view->base_level > 0) {
2637       /* We can't rely on the hardware's miplevel selection with such
2638        * a substantial lie about the format, so we select a single image
2639        * using the Tile X/Y Offset fields.  In this case, we can't handle
2640        * multiple array slices.
2641        *
2642        * On Broadwell, HALIGN and VALIGN are specified in pixels and are
2643        * hard-coded to align to exactly the block size of the compressed
2644        * texture.  This means that, when reinterpreted as a non-compressed
2645        * texture, the tile offsets may be anything and we can't rely on
2646        * X/Y Offset.
2647        *
2648        * Return NULL to force gallium frontends to take fallback paths.
2649        */
2650       if (view->array_len > 1 || GEN_GEN == 8)
2651          return NULL;
2652 
2653       const bool is_3d = res->surf.dim == ISL_SURF_DIM_3D;
2654       isl_surf_get_image_surf(&screen->isl_dev, &res->surf,
2655                               view->base_level,
2656                               is_3d ? 0 : view->base_array_layer,
2657                               is_3d ? view->base_array_layer : 0,
2658                               &isl_surf,
2659                               &offset_B, &tile_x_sa, &tile_y_sa);
2660 
2661       /* We use address and tile offsets to access a single level/layer
2662        * as a subimage, so reset level/layer so it doesn't offset again.
2663        */
2664       view->base_array_layer = 0;
2665       view->base_level = 0;
2666    } else {
2667       /* Level 0 doesn't require tile offsets, and the hardware can find
2668        * array slices using QPitch even with the format override, so we
2669        * can allow layers in this case.  Copy the original ISL surface.
2670        */
2671       memcpy(&isl_surf, &res->surf, sizeof(isl_surf));
2672    }
2673 
2674    /* Scale down the image dimensions by the block size. */
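   /* For example (a hypothetical case): BC1 uses 4x4 blocks, so a 512x512
    * compressed miplevel is exposed to the view as 128x128, and any tile
    * X/Y offsets are divided by 4 as well.
    */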
2675    const struct isl_format_layout *fmtl =
2676       isl_format_get_layout(res->surf.format);
2677    isl_surf.format = fmt.fmt;
2678    isl_surf.logical_level0_px = isl_surf_get_logical_level0_el(&isl_surf);
2679    isl_surf.phys_level0_sa = isl_surf_get_phys_level0_el(&isl_surf);
2680    tile_x_sa /= fmtl->bw;
2681    tile_y_sa /= fmtl->bh;
2682 
2683    psurf->width = isl_surf.logical_level0_px.width;
2684    psurf->height = isl_surf.logical_level0_px.height;
2685 
2686    struct isl_surf_fill_state_info f = {
2687       .surf = &isl_surf,
2688       .view = view,
2689       .mocs = iris_mocs(res->bo, &screen->isl_dev,
2690                         ISL_SURF_USAGE_RENDER_TARGET_BIT),
2691       .address = res->bo->gtt_offset + offset_B,
2692       .x_offset_sa = tile_x_sa,
2693       .y_offset_sa = tile_y_sa,
2694    };
2695 
2696    isl_surf_fill_state_s(&screen->isl_dev, surf->surface_state.cpu, &f);
2697 
2698    upload_surface_states(ice->state.surface_uploader, &surf->surface_state);
2699 
2700    return psurf;
2701 }
2702 
2703 #if GEN_GEN < 9
2704 static void
2705 fill_default_image_param(struct brw_image_param *param)
2706 {
2707    memset(param, 0, sizeof(*param));
2708    /* Set the swizzling shifts to all-ones to effectively disable swizzling --
2709     * See emit_address_calculation() in brw_fs_surface_builder.cpp for a more
2710     * detailed explanation of these parameters.
2711     */
2712    param->swizzling[0] = 0xff;
2713    param->swizzling[1] = 0xff;
2714 }
2715 
2716 static void
2717 fill_buffer_image_param(struct brw_image_param *param,
2718                         enum pipe_format pfmt,
2719                         unsigned size)
2720 {
2721    const unsigned cpp = util_format_get_blocksize(pfmt);
2722 
2723    fill_default_image_param(param);
2724    param->size[0] = size / cpp;
2725    param->stride[0] = cpp;
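   /* For example (hypothetical numbers): a 256-byte buffer viewed as
    * PIPE_FORMAT_R32G32B32A32_FLOAT has cpp = 16, so the shader-visible
    * image is size[0] = 16 texels with a 16-byte stride.
    */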
2726 }
2727 #else
2728 #define isl_surf_fill_image_param(x, ...)
2729 #define fill_default_image_param(x, ...)
2730 #define fill_buffer_image_param(x, ...)
2731 #endif
2732 
2733 /**
2734  * The pipe->set_shader_images() driver hook.
2735  */
2736 static void
2737 iris_set_shader_images(struct pipe_context *ctx,
2738                        enum pipe_shader_type p_stage,
2739                        unsigned start_slot, unsigned count,
2740                        const struct pipe_image_view *p_images)
2741 {
2742    struct iris_context *ice = (struct iris_context *) ctx;
2743    struct iris_screen *screen = (struct iris_screen *)ctx->screen;
2744    gl_shader_stage stage = stage_from_pipe(p_stage);
2745    struct iris_shader_state *shs = &ice->state.shaders[stage];
2746 #if GEN_GEN == 8
2747    struct iris_genx_state *genx = ice->state.genx;
2748    struct brw_image_param *image_params = genx->shaders[stage].image_param;
2749 #endif
2750 
2751    shs->bound_image_views &= ~u_bit_consecutive(start_slot, count);
2752 
2753    for (unsigned i = 0; i < count; i++) {
2754       struct iris_image_view *iv = &shs->image[start_slot + i];
2755 
2756       if (p_images && p_images[i].resource) {
2757          const struct pipe_image_view *img = &p_images[i];
2758          struct iris_resource *res = (void *) img->resource;
2759 
2760          util_copy_image_view(&iv->base, img);
2761 
2762          shs->bound_image_views |= 1 << (start_slot + i);
2763 
2764          res->bind_history |= PIPE_BIND_SHADER_IMAGE;
2765          res->bind_stages |= 1 << stage;
2766 
2767          enum isl_format isl_fmt = iris_image_view_get_format(ice, img);
2768 
2769          /* Render compression with images is only supported on gen12+. */
2770          unsigned aux_usages = GEN_GEN >= 12 ? res->aux.possible_usages :
2771             1 << ISL_AUX_USAGE_NONE;
2772 
2773          alloc_surface_states(&iv->surface_state, aux_usages);
2774          iv->surface_state.bo_address = res->bo->gtt_offset;
2775 
2776          void *map = iv->surface_state.cpu;
2777 
2778          if (res->base.target != PIPE_BUFFER) {
2779             struct isl_view view = {
2780                .format = isl_fmt,
2781                .base_level = img->u.tex.level,
2782                .levels = 1,
2783                .base_array_layer = img->u.tex.first_layer,
2784                .array_len = img->u.tex.last_layer - img->u.tex.first_layer + 1,
2785                .swizzle = ISL_SWIZZLE_IDENTITY,
2786                .usage = ISL_SURF_USAGE_STORAGE_BIT,
2787             };
2788 
2789             /* Using the untyped fallback: make a raw buffer surface over the whole BO. */
2790             if (isl_fmt == ISL_FORMAT_RAW) {
2791                fill_buffer_surface_state(&screen->isl_dev, res, map,
2792                                          isl_fmt, ISL_SWIZZLE_IDENTITY,
2793                                          0, res->bo->size,
2794                                          ISL_SURF_USAGE_STORAGE_BIT);
2795             } else {
2796                unsigned aux_modes = aux_usages;
2797                while (aux_modes) {
2798                   enum isl_aux_usage usage = u_bit_scan(&aux_modes);
2799 
2800                   fill_surface_state(&screen->isl_dev, map, res, &res->surf,
2801                                      &view, usage, 0, 0, 0);
2802 
2803                   map += SURFACE_STATE_ALIGNMENT;
2804                }
2805             }
2806 
2807             isl_surf_fill_image_param(&screen->isl_dev,
2808                                       &image_params[start_slot + i],
2809                                       &res->surf, &view);
2810          } else {
2811             util_range_add(&res->base, &res->valid_buffer_range, img->u.buf.offset,
2812                            img->u.buf.offset + img->u.buf.size);
2813 
2814             fill_buffer_surface_state(&screen->isl_dev, res, map,
2815                                       isl_fmt, ISL_SWIZZLE_IDENTITY,
2816                                       img->u.buf.offset, img->u.buf.size,
2817                                       ISL_SURF_USAGE_STORAGE_BIT);
2818             fill_buffer_image_param(&image_params[start_slot + i],
2819                                     img->format, img->u.buf.size);
2820          }
2821 
2822          upload_surface_states(ice->state.surface_uploader, &iv->surface_state);
2823       } else {
2824          pipe_resource_reference(&iv->base.resource, NULL);
2825          pipe_resource_reference(&iv->surface_state.ref.res, NULL);
2826          fill_default_image_param(&image_params[start_slot + i]);
2827       }
2828    }
2829 
2830    ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_VS << stage;
2831    ice->state.dirty |=
2832       stage == MESA_SHADER_COMPUTE ? IRIS_DIRTY_COMPUTE_RESOLVES_AND_FLUSHES
2833                                    : IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES;
2834 
2835    /* Broadwell also needs brw_image_params re-uploaded */
2836    if (GEN_GEN < 9) {
2837       ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CONSTANTS_VS << stage;
2838       shs->sysvals_need_upload = true;
2839    }
2840 }
2841 
2842 
2843 /**
2844  * The pipe->set_sampler_views() driver hook.
2845  */
2846 static void
2847 iris_set_sampler_views(struct pipe_context *ctx,
2848                        enum pipe_shader_type p_stage,
2849                        unsigned start, unsigned count,
2850                        struct pipe_sampler_view **views)
2851 {
2852    struct iris_context *ice = (struct iris_context *) ctx;
2853    gl_shader_stage stage = stage_from_pipe(p_stage);
2854    struct iris_shader_state *shs = &ice->state.shaders[stage];
2855 
2856    shs->bound_sampler_views &= ~u_bit_consecutive(start, count);
2857 
2858    for (unsigned i = 0; i < count; i++) {
2859       struct pipe_sampler_view *pview = views ? views[i] : NULL;
2860       pipe_sampler_view_reference((struct pipe_sampler_view **)
2861                                   &shs->textures[start + i], pview);
2862       struct iris_sampler_view *view = (void *) pview;
2863       if (view) {
2864          view->res->bind_history |= PIPE_BIND_SAMPLER_VIEW;
2865          view->res->bind_stages |= 1 << stage;
2866 
2867          shs->bound_sampler_views |= 1 << (start + i);
2868 
2869          update_surface_state_addrs(ice->state.surface_uploader,
2870                                     &view->surface_state, view->res->bo);
2871       }
2872    }
2873 
2874    ice->state.stage_dirty |= (IRIS_STAGE_DIRTY_BINDINGS_VS << stage);
2875    ice->state.dirty |=
2876       stage == MESA_SHADER_COMPUTE ? IRIS_DIRTY_COMPUTE_RESOLVES_AND_FLUSHES
2877                                    : IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES;
2878 }
2879 
2880 static void
2881 iris_set_compute_resources(struct pipe_context *ctx,
2882                            unsigned start, unsigned count,
2883                            struct pipe_surface **resources)
2884 {
2885    assert(count == 0);
2886 }
2887 
2888 static void
2889 iris_set_global_binding(struct pipe_context *ctx,
2890                         unsigned start_slot, unsigned count,
2891                         struct pipe_resource **resources,
2892                         uint32_t **handles)
2893 {
2894    struct iris_context *ice = (struct iris_context *) ctx;
2895 
2896    assert(start_slot + count <= IRIS_MAX_GLOBAL_BINDINGS);
2897    for (unsigned i = 0; i < count; i++) {
2898       if (resources && resources[i]) {
2899          pipe_resource_reference(&ice->state.global_bindings[start_slot + i],
2900                                  resources[i]);
2901          struct iris_resource *res = (void *) resources[i];
2902          uint64_t addr = res->bo->gtt_offset;
2903          memcpy(handles[i], &addr, sizeof(addr));
2904       } else {
2905          pipe_resource_reference(&ice->state.global_bindings[start_slot + i],
2906                                  NULL);
2907       }
2908    }
2909 
2910    ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_CS;
2911 }
2912 
2913 /**
2914  * The pipe->set_tess_state() driver hook.
2915  */
2916 static void
2917 iris_set_tess_state(struct pipe_context *ctx,
2918                     const float default_outer_level[4],
2919                     const float default_inner_level[2])
2920 {
2921    struct iris_context *ice = (struct iris_context *) ctx;
2922    struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_TESS_CTRL];
2923 
2924    memcpy(&ice->state.default_outer_level[0], &default_outer_level[0], 4 * sizeof(float));
2925    memcpy(&ice->state.default_inner_level[0], &default_inner_level[0], 2 * sizeof(float));
2926 
2927    ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CONSTANTS_TCS;
2928    shs->sysvals_need_upload = true;
2929 }
2930 
2931 static void
2932 iris_surface_destroy(struct pipe_context *ctx, struct pipe_surface *p_surf)
2933 {
2934    struct iris_surface *surf = (void *) p_surf;
2935    pipe_resource_reference(&p_surf->texture, NULL);
2936    pipe_resource_reference(&surf->surface_state.ref.res, NULL);
2937    pipe_resource_reference(&surf->surface_state_read.ref.res, NULL);
2938    free(surf->surface_state.cpu);
2939    free(surf);
2940 }
2941 
2942 static void
2943 iris_set_clip_state(struct pipe_context *ctx,
2944                     const struct pipe_clip_state *state)
2945 {
2946    struct iris_context *ice = (struct iris_context *) ctx;
2947    struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_VERTEX];
2948    struct iris_shader_state *gshs = &ice->state.shaders[MESA_SHADER_GEOMETRY];
2949    struct iris_shader_state *tshs = &ice->state.shaders[MESA_SHADER_TESS_EVAL];
2950 
2951    memcpy(&ice->state.clip_planes, state, sizeof(*state));
2952 
2953    ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CONSTANTS_VS |
2954                              IRIS_STAGE_DIRTY_CONSTANTS_GS |
2955                              IRIS_STAGE_DIRTY_CONSTANTS_TES;
2956    shs->sysvals_need_upload = true;
2957    gshs->sysvals_need_upload = true;
2958    tshs->sysvals_need_upload = true;
2959 }
2960 
2961 /**
2962  * The pipe->set_polygon_stipple() driver hook.
2963  */
2964 static void
2965 iris_set_polygon_stipple(struct pipe_context *ctx,
2966                          const struct pipe_poly_stipple *state)
2967 {
2968    struct iris_context *ice = (struct iris_context *) ctx;
2969    memcpy(&ice->state.poly_stipple, state, sizeof(*state));
2970    ice->state.dirty |= IRIS_DIRTY_POLYGON_STIPPLE;
2971 }
2972 
2973 /**
2974  * The pipe->set_sample_mask() driver hook.
2975  */
2976 static void
2977 iris_set_sample_mask(struct pipe_context *ctx, unsigned sample_mask)
2978 {
2979    struct iris_context *ice = (struct iris_context *) ctx;
2980 
2981    /* We only support 16x MSAA, so we have 16 bits of sample mask.
2982     * st/mesa may pass us 0xffffffff though, meaning "enable all samples".
2983     */
2984    ice->state.sample_mask = sample_mask & 0xffff;
2985    ice->state.dirty |= IRIS_DIRTY_SAMPLE_MASK;
2986 }
2987 
2988 /**
2989  * The pipe->set_scissor_states() driver hook.
2990  *
2991  * This corresponds to our SCISSOR_RECT state structures.  It's an
2992  * exact match, so we just store them, and memcpy them out later.
2993  */
2994 static void
2995 iris_set_scissor_states(struct pipe_context *ctx,
2996                         unsigned start_slot,
2997                         unsigned num_scissors,
2998                         const struct pipe_scissor_state *rects)
2999 {
3000    struct iris_context *ice = (struct iris_context *) ctx;
3001 
3002    for (unsigned i = 0; i < num_scissors; i++) {
3003       if (rects[i].minx == rects[i].maxx || rects[i].miny == rects[i].maxy) {
3004          /* If the scissor was out of bounds and got clamped to 0 width/height
3005           * at the bounds, the subtraction of 1 from maximums could produce a
3006           * negative number and thus not clip anything.  Instead, just provide
3007           * a min > max scissor inside the bounds, which produces the expected
3008           * result of no rendering.
3009           */
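         /* For example (hypothetical values): a scissor clamped to
          * minx = maxx = 0 would otherwise give maxx - 1 wrapping around to
          * a huge unsigned value, so nothing would be clipped at all.
          */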
3010          ice->state.scissors[start_slot + i] = (struct pipe_scissor_state) {
3011             .minx = 1, .maxx = 0, .miny = 1, .maxy = 0,
3012          };
3013       } else {
3014          ice->state.scissors[start_slot + i] = (struct pipe_scissor_state) {
3015             .minx = rects[i].minx,     .miny = rects[i].miny,
3016             .maxx = rects[i].maxx - 1, .maxy = rects[i].maxy - 1,
3017          };
3018       }
3019    }
3020 
3021    ice->state.dirty |= IRIS_DIRTY_SCISSOR_RECT;
3022 }
3023 
3024 /**
3025  * The pipe->set_stencil_ref() driver hook.
3026  *
3027  * This is added to 3DSTATE_WM_DEPTH_STENCIL dynamically at draw time.
3028  */
3029 static void
3030 iris_set_stencil_ref(struct pipe_context *ctx,
3031                      const struct pipe_stencil_ref *state)
3032 {
3033    struct iris_context *ice = (struct iris_context *) ctx;
3034    memcpy(&ice->state.stencil_ref, state, sizeof(*state));
3035    if (GEN_GEN >= 12)
3036       ice->state.dirty |= IRIS_DIRTY_STENCIL_REF;
3037    else if (GEN_GEN >= 9)
3038       ice->state.dirty |= IRIS_DIRTY_WM_DEPTH_STENCIL;
3039    else
3040       ice->state.dirty |= IRIS_DIRTY_COLOR_CALC_STATE;
3041 }
3042 
3043 static float
3044 viewport_extent(const struct pipe_viewport_state *state, int axis, float sign)
3045 {
3046    return copysignf(state->scale[axis], sign) + state->translate[axis];
3047 }
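/* A worked example (assuming the usual GL viewport mapping): a 640-wide
 * viewport starting at x = 0 gives scale[0] = 320 and translate[0] = 320,
 * so viewport_extent(state, 0, -1.0f) returns 0.0f and
 * viewport_extent(state, 0, 1.0f) returns 640.0f -- the viewport's
 * screen-space X extents.
 */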
3048 
3049 /**
3050  * The pipe->set_viewport_states() driver hook.
3051  *
3052  * This corresponds to our SF_CLIP_VIEWPORT states.  We can't calculate
3053  * the guardband yet, as we need the framebuffer dimensions, but we can
3054  * at least fill out the rest.
3055  */
3056 static void
3057 iris_set_viewport_states(struct pipe_context *ctx,
3058                          unsigned start_slot,
3059                          unsigned count,
3060                          const struct pipe_viewport_state *states)
3061 {
3062    struct iris_context *ice = (struct iris_context *) ctx;
3063 
3064    memcpy(&ice->state.viewports[start_slot], states, sizeof(*states) * count);
3065 
3066    ice->state.dirty |= IRIS_DIRTY_SF_CL_VIEWPORT;
3067 
3068    if (ice->state.cso_rast && (!ice->state.cso_rast->depth_clip_near ||
3069                                !ice->state.cso_rast->depth_clip_far))
3070       ice->state.dirty |= IRIS_DIRTY_CC_VIEWPORT;
3071 }
3072 
3073 /**
3074  * The pipe->set_framebuffer_state() driver hook.
3075  *
3076  * Sets the current draw FBO, including color render targets, depth,
3077  * and stencil buffers.
3078  */
3079 static void
3080 iris_set_framebuffer_state(struct pipe_context *ctx,
3081                            const struct pipe_framebuffer_state *state)
3082 {
3083    struct iris_context *ice = (struct iris_context *) ctx;
3084    struct iris_screen *screen = (struct iris_screen *)ctx->screen;
3085    struct isl_device *isl_dev = &screen->isl_dev;
3086    struct pipe_framebuffer_state *cso = &ice->state.framebuffer;
3087    struct iris_resource *zres;
3088    struct iris_resource *stencil_res;
3089 
3090    unsigned samples = util_framebuffer_get_num_samples(state);
3091    unsigned layers = util_framebuffer_get_num_layers(state);
3092 
3093    if (cso->samples != samples) {
3094       ice->state.dirty |= IRIS_DIRTY_MULTISAMPLE;
3095 
3096       /* We need to toggle 3DSTATE_PS::32 Pixel Dispatch Enable */
3097       if (GEN_GEN >= 9 && (cso->samples == 16 || samples == 16))
3098          ice->state.stage_dirty |= IRIS_STAGE_DIRTY_FS;
3099    }
3100 
3101    if (cso->nr_cbufs != state->nr_cbufs) {
3102       ice->state.dirty |= IRIS_DIRTY_BLEND_STATE;
3103    }
3104 
3105    if ((cso->layers == 0) != (layers == 0)) {
3106       ice->state.dirty |= IRIS_DIRTY_CLIP;
3107    }
3108 
3109    if (cso->width != state->width || cso->height != state->height) {
3110       ice->state.dirty |= IRIS_DIRTY_SF_CL_VIEWPORT;
3111    }
3112 
3113    if (cso->zsbuf || state->zsbuf) {
3114       ice->state.dirty |= IRIS_DIRTY_DEPTH_BUFFER;
3115    }
3116 
3117    util_copy_framebuffer_state(cso, state);
3118    cso->samples = samples;
3119    cso->layers = layers;
3120 
3121    struct iris_depth_buffer_state *cso_z = &ice->state.genx->depth_buffer;
3122 
3123    struct isl_view view = {
3124       .base_level = 0,
3125       .levels = 1,
3126       .base_array_layer = 0,
3127       .array_len = 1,
3128       .swizzle = ISL_SWIZZLE_IDENTITY,
3129    };
3130 
3131    struct isl_depth_stencil_hiz_emit_info info = { .view = &view };
3132 
3133    if (cso->zsbuf) {
3134       iris_get_depth_stencil_resources(cso->zsbuf->texture, &zres,
3135                                        &stencil_res);
3136 
3137       view.base_level = cso->zsbuf->u.tex.level;
3138       view.base_array_layer = cso->zsbuf->u.tex.first_layer;
3139       view.array_len =
3140          cso->zsbuf->u.tex.last_layer - cso->zsbuf->u.tex.first_layer + 1;
3141 
3142       if (zres) {
3143          view.usage |= ISL_SURF_USAGE_DEPTH_BIT;
3144 
3145          info.depth_surf = &zres->surf;
3146          info.depth_address = zres->bo->gtt_offset + zres->offset;
3147          info.mocs = iris_mocs(zres->bo, isl_dev, view.usage);
3148 
3149          view.format = zres->surf.format;
3150 
3151          if (iris_resource_level_has_hiz(zres, view.base_level)) {
3152             info.hiz_usage = zres->aux.usage;
3153             info.hiz_surf = &zres->aux.surf;
3154             info.hiz_address = zres->aux.bo->gtt_offset + zres->aux.offset;
3155          }
3156       }
3157 
3158       if (stencil_res) {
3159          view.usage |= ISL_SURF_USAGE_STENCIL_BIT;
3160          info.stencil_aux_usage = stencil_res->aux.usage;
3161          info.stencil_surf = &stencil_res->surf;
3162          info.stencil_address = stencil_res->bo->gtt_offset + stencil_res->offset;
3163          if (!zres) {
3164             view.format = stencil_res->surf.format;
3165             info.mocs = iris_mocs(stencil_res->bo, isl_dev, view.usage);
3166          }
3167       }
3168    }
3169 
3170    isl_emit_depth_stencil_hiz_s(isl_dev, cso_z->packets, &info);
3171 
3172    /* Make a null surface for unbound buffers */
3173    void *null_surf_map =
3174       upload_state(ice->state.surface_uploader, &ice->state.null_fb,
3175                    4 * GENX(RENDER_SURFACE_STATE_length), 64);
3176    isl_null_fill_state(&screen->isl_dev, null_surf_map,
3177                        isl_extent3d(MAX2(cso->width, 1),
3178                                     MAX2(cso->height, 1),
3179                                     cso->layers ? cso->layers : 1));
3180    ice->state.null_fb.offset +=
3181       iris_bo_offset_from_base_address(iris_resource_bo(ice->state.null_fb.res));
3182 
3183    /* Render target change */
3184    ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_FS;
3185 
3186    ice->state.dirty |= IRIS_DIRTY_RENDER_BUFFER;
3187 
3188    ice->state.dirty |= IRIS_DIRTY_RENDER_RESOLVES_AND_FLUSHES;
3189 
3190    ice->state.stage_dirty |=
3191       ice->state.stage_dirty_for_nos[IRIS_NOS_FRAMEBUFFER];
3192 
3193    if (GEN_GEN == 8)
3194       ice->state.dirty |= IRIS_DIRTY_PMA_FIX;
3195 }
3196 
3197 /**
3198  * The pipe->set_constant_buffer() driver hook.
3199  *
3200  * This uploads any constant data in user buffers, and references
3201  * any UBO resources containing constant data.
3202  */
3203 static void
3204 iris_set_constant_buffer(struct pipe_context *ctx,
3205                          enum pipe_shader_type p_stage, unsigned index,
3206                          const struct pipe_constant_buffer *input)
3207 {
3208    struct iris_context *ice = (struct iris_context *) ctx;
3209    gl_shader_stage stage = stage_from_pipe(p_stage);
3210    struct iris_shader_state *shs = &ice->state.shaders[stage];
3211    struct pipe_shader_buffer *cbuf = &shs->constbuf[index];
3212 
3213    /* TODO: Only do this if the buffer changes? */
3214    pipe_resource_reference(&shs->constbuf_surf_state[index].res, NULL);
3215 
3216    if (input && input->buffer_size && (input->buffer || input->user_buffer)) {
3217       shs->bound_cbufs |= 1u << index;
3218 
3219       if (input->user_buffer) {
3220          void *map = NULL;
3221          pipe_resource_reference(&cbuf->buffer, NULL);
3222          u_upload_alloc(ice->ctx.const_uploader, 0, input->buffer_size, 64,
3223                         &cbuf->buffer_offset, &cbuf->buffer, (void **) &map);
3224 
3225          if (!cbuf->buffer) {
3226             /* Allocation was unsuccessful - just unbind */
3227             iris_set_constant_buffer(ctx, p_stage, index, NULL);
3228             return;
3229          }
3230 
3231          assert(map);
3232          memcpy(map, input->user_buffer, input->buffer_size);
3233       } else if (input->buffer) {
3234          pipe_resource_reference(&cbuf->buffer, input->buffer);
3235 
3236          cbuf->buffer_offset = input->buffer_offset;
3237       }
3238 
3239       cbuf->buffer_size =
3240          MIN2(input->buffer_size,
3241               iris_resource_bo(cbuf->buffer)->size - cbuf->buffer_offset);
3242 
3243       struct iris_resource *res = (void *) cbuf->buffer;
3244       res->bind_history |= PIPE_BIND_CONSTANT_BUFFER;
3245       res->bind_stages |= 1 << stage;
3246    } else {
3247       shs->bound_cbufs &= ~(1u << index);
3248       pipe_resource_reference(&cbuf->buffer, NULL);
3249    }
3250 
3251    ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CONSTANTS_VS << stage;
3252 }
3253 
3254 static void
3255 upload_sysvals(struct iris_context *ice,
3256                gl_shader_stage stage,
3257                const struct pipe_grid_info *grid)
3258 {
3259    UNUSED struct iris_genx_state *genx = ice->state.genx;
3260    struct iris_shader_state *shs = &ice->state.shaders[stage];
3261 
3262    struct iris_compiled_shader *shader = ice->shaders.prog[stage];
3263    if (!shader || (shader->num_system_values == 0 &&
3264                    shader->kernel_input_size == 0))
3265       return;
3266 
3267    assert(shader->num_cbufs > 0);
3268 
3269    unsigned sysval_cbuf_index = shader->num_cbufs - 1;
3270    struct pipe_shader_buffer *cbuf = &shs->constbuf[sysval_cbuf_index];
3271    unsigned system_values_start =
3272       ALIGN(shader->kernel_input_size, sizeof(uint32_t));
3273    unsigned upload_size = system_values_start +
3274                           shader->num_system_values * sizeof(uint32_t);
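   /* For example (hypothetical sizes): 8 bytes of kernel inputs and three
    * system values give system_values_start = ALIGN(8, 4) = 8 and
    * upload_size = 8 + 3 * 4 = 20 bytes.
    */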
3275    void *map = NULL;
3276 
3277    assert(sysval_cbuf_index < PIPE_MAX_CONSTANT_BUFFERS);
3278    u_upload_alloc(ice->ctx.const_uploader, 0, upload_size, 64,
3279                   &cbuf->buffer_offset, &cbuf->buffer, &map);
3280 
3281    if (shader->kernel_input_size > 0)
3282       memcpy(map, grid->input, shader->kernel_input_size);
3283 
3284    uint32_t *sysval_map = map + system_values_start;
3285    for (int i = 0; i < shader->num_system_values; i++) {
3286       uint32_t sysval = shader->system_values[i];
3287       uint32_t value = 0;
3288 
3289       if (BRW_PARAM_DOMAIN(sysval) == BRW_PARAM_DOMAIN_IMAGE) {
3290 #if GEN_GEN == 8
3291          unsigned img = BRW_PARAM_IMAGE_IDX(sysval);
3292          unsigned offset = BRW_PARAM_IMAGE_OFFSET(sysval);
3293          struct brw_image_param *param =
3294             &genx->shaders[stage].image_param[img];
3295 
3296          assert(offset < sizeof(struct brw_image_param));
3297          value = ((uint32_t *) param)[offset];
3298 #endif
3299       } else if (sysval == BRW_PARAM_BUILTIN_ZERO) {
3300          value = 0;
3301       } else if (BRW_PARAM_BUILTIN_IS_CLIP_PLANE(sysval)) {
3302          int plane = BRW_PARAM_BUILTIN_CLIP_PLANE_IDX(sysval);
3303          int comp  = BRW_PARAM_BUILTIN_CLIP_PLANE_COMP(sysval);
3304          value = fui(ice->state.clip_planes.ucp[plane][comp]);
3305       } else if (sysval == BRW_PARAM_BUILTIN_PATCH_VERTICES_IN) {
3306          if (stage == MESA_SHADER_TESS_CTRL) {
3307             value = ice->state.vertices_per_patch;
3308          } else {
3309             assert(stage == MESA_SHADER_TESS_EVAL);
3310             const struct shader_info *tcs_info =
3311                iris_get_shader_info(ice, MESA_SHADER_TESS_CTRL);
3312             if (tcs_info)
3313                value = tcs_info->tess.tcs_vertices_out;
3314             else
3315                value = ice->state.vertices_per_patch;
3316          }
3317       } else if (sysval >= BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X &&
3318                  sysval <= BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_W) {
3319          unsigned i = sysval - BRW_PARAM_BUILTIN_TESS_LEVEL_OUTER_X;
3320          value = fui(ice->state.default_outer_level[i]);
3321       } else if (sysval == BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_X) {
3322          value = fui(ice->state.default_inner_level[0]);
3323       } else if (sysval == BRW_PARAM_BUILTIN_TESS_LEVEL_INNER_Y) {
3324          value = fui(ice->state.default_inner_level[1]);
3325       } else if (sysval >= BRW_PARAM_BUILTIN_WORK_GROUP_SIZE_X &&
3326                  sysval <= BRW_PARAM_BUILTIN_WORK_GROUP_SIZE_Z) {
3327          unsigned i = sysval - BRW_PARAM_BUILTIN_WORK_GROUP_SIZE_X;
3328          value = ice->state.last_block[i];
3329       } else if (sysval == BRW_PARAM_BUILTIN_WORK_DIM) {
3330          value = grid->work_dim;
3331       } else {
3332          assert(!"unhandled system value");
3333       }
3334 
3335       *sysval_map++ = value;
3336    }
3337 
3338    cbuf->buffer_size = upload_size;
3339    iris_upload_ubo_ssbo_surf_state(ice, cbuf,
3340                                    &shs->constbuf_surf_state[sysval_cbuf_index],
3341                                    ISL_SURF_USAGE_CONSTANT_BUFFER_BIT);
3342 
3343    shs->sysvals_need_upload = false;
3344 }
3345 
3346 /**
3347  * The pipe->set_shader_buffers() driver hook.
3348  *
3349  * This binds SSBOs and ABOs.  Unfortunately, we need to stream out
3350  * SURFACE_STATE here, as the buffer offset may change each time.
3351  */
3352 static void
3353 iris_set_shader_buffers(struct pipe_context *ctx,
3354                         enum pipe_shader_type p_stage,
3355                         unsigned start_slot, unsigned count,
3356                         const struct pipe_shader_buffer *buffers,
3357                         unsigned writable_bitmask)
3358 {
3359    struct iris_context *ice = (struct iris_context *) ctx;
3360    gl_shader_stage stage = stage_from_pipe(p_stage);
3361    struct iris_shader_state *shs = &ice->state.shaders[stage];
3362 
3363    unsigned modified_bits = u_bit_consecutive(start_slot, count);
3364 
3365    shs->bound_ssbos &= ~modified_bits;
3366    shs->writable_ssbos &= ~modified_bits;
3367    shs->writable_ssbos |= writable_bitmask << start_slot;
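   /* For example (hypothetical arguments): start_slot = 2, count = 2, and
    * writable_bitmask = 0x1 clears bits 2-3 above and then marks only
    * slot 2 as writable again.
    */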
3368 
3369    for (unsigned i = 0; i < count; i++) {
3370       if (buffers && buffers[i].buffer) {
3371          struct iris_resource *res = (void *) buffers[i].buffer;
3372          struct pipe_shader_buffer *ssbo = &shs->ssbo[start_slot + i];
3373          struct iris_state_ref *surf_state =
3374             &shs->ssbo_surf_state[start_slot + i];
3375          pipe_resource_reference(&ssbo->buffer, &res->base);
3376          ssbo->buffer_offset = buffers[i].buffer_offset;
3377          ssbo->buffer_size =
3378             MIN2(buffers[i].buffer_size, res->bo->size - ssbo->buffer_offset);
3379 
3380          shs->bound_ssbos |= 1 << (start_slot + i);
3381 
3382          isl_surf_usage_flags_t usage = ISL_SURF_USAGE_STORAGE_BIT;
3383 
3384          iris_upload_ubo_ssbo_surf_state(ice, ssbo, surf_state, usage);
3385 
3386          res->bind_history |= PIPE_BIND_SHADER_BUFFER;
3387          res->bind_stages |= 1 << stage;
3388 
3389          util_range_add(&res->base, &res->valid_buffer_range, ssbo->buffer_offset,
3390                         ssbo->buffer_offset + ssbo->buffer_size);
3391       } else {
3392          pipe_resource_reference(&shs->ssbo[start_slot + i].buffer, NULL);
3393          pipe_resource_reference(&shs->ssbo_surf_state[start_slot + i].res,
3394                                  NULL);
3395       }
3396    }
3397 
3398    ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_VS << stage;
3399 }
3400 
3401 static void
3402 iris_delete_state(struct pipe_context *ctx, void *state)
3403 {
3404    free(state);
3405 }
3406 
3407 /**
3408  * The pipe->set_vertex_buffers() driver hook.
3409  *
3410  * This translates pipe_vertex_buffer to our 3DSTATE_VERTEX_BUFFERS packet.
3411  */
3412 static void
3413 iris_set_vertex_buffers(struct pipe_context *ctx,
3414                         unsigned start_slot, unsigned count,
3415                         const struct pipe_vertex_buffer *buffers)
3416 {
3417    struct iris_context *ice = (struct iris_context *) ctx;
3418    struct iris_screen *screen = (struct iris_screen *)ctx->screen;
3419    struct iris_genx_state *genx = ice->state.genx;
3420 
3421    ice->state.bound_vertex_buffers &= ~u_bit_consecutive64(start_slot, count);
3422 
3423    for (unsigned i = 0; i < count; i++) {
3424       const struct pipe_vertex_buffer *buffer = buffers ? &buffers[i] : NULL;
3425       struct iris_vertex_buffer_state *state =
3426          &genx->vertex_buffers[start_slot + i];
3427 
3428       if (!buffer) {
3429          pipe_resource_reference(&state->resource, NULL);
3430          continue;
3431       }
3432 
3433       /* We may see user buffers that are NULL bindings. */
3434       assert(!(buffer->is_user_buffer && buffer->buffer.user != NULL));
3435 
3436       pipe_resource_reference(&state->resource, buffer->buffer.resource);
3437       struct iris_resource *res = (void *) state->resource;
3438 
3439       state->offset = (int) buffer->buffer_offset;
3440 
3441       if (res) {
3442          ice->state.bound_vertex_buffers |= 1ull << (start_slot + i);
3443          res->bind_history |= PIPE_BIND_VERTEX_BUFFER;
3444       }
3445 
3446       iris_pack_state(GENX(VERTEX_BUFFER_STATE), state->state, vb) {
3447          vb.VertexBufferIndex = start_slot + i;
3448          vb.AddressModifyEnable = true;
3449          vb.BufferPitch = buffer->stride;
3450          if (res) {
3451             vb.BufferSize = res->base.width0 - (int) buffer->buffer_offset;
3452             vb.BufferStartingAddress =
3453                ro_bo(NULL, res->bo->gtt_offset + (int) buffer->buffer_offset);
3454             vb.MOCS = iris_mocs(res->bo, &screen->isl_dev,
3455                                 ISL_SURF_USAGE_VERTEX_BUFFER_BIT);
3456          } else {
3457             vb.NullVertexBuffer = true;
3458          }
3459       }
3460    }
3461 
3462    ice->state.dirty |= IRIS_DIRTY_VERTEX_BUFFERS;
3463 }
3464 
3465 /**
3466  * Gallium CSO for vertex elements.
3467  */
3468 struct iris_vertex_element_state {
3469    uint32_t vertex_elements[1 + 33 * GENX(VERTEX_ELEMENT_STATE_length)];
3470    uint32_t vf_instancing[33 * GENX(3DSTATE_VF_INSTANCING_length)];
3471    uint32_t edgeflag_ve[GENX(VERTEX_ELEMENT_STATE_length)];
3472    uint32_t edgeflag_vfi[GENX(3DSTATE_VF_INSTANCING_length)];
3473    unsigned count;
3474 };
3475 
3476 /**
3477  * The pipe->create_vertex_elements() driver hook.
3478  *
3479  * This translates pipe_vertex_element to our 3DSTATE_VERTEX_ELEMENTS
3480  * and 3DSTATE_VF_INSTANCING commands. The vertex_elements and vf_instancing
3481  * arrays are ready to be emitted at draw time if no EdgeFlag or SGVs are
3482  * needed.  Otherwise, we need information that is only available at draw
3483  * time.  We set up edgeflag_ve and edgeflag_vfi as alternative versions of
3484  * the last 3DSTATE_VERTEX_ELEMENT and 3DSTATE_VF_INSTANCING, to be used at
3485  * draw time if we detect that EdgeFlag is needed by the Vertex Shader.
3486  */
3487 static void *
3488 iris_create_vertex_elements(struct pipe_context *ctx,
3489                             unsigned count,
3490                             const struct pipe_vertex_element *state)
3491 {
3492    struct iris_screen *screen = (struct iris_screen *)ctx->screen;
3493    const struct gen_device_info *devinfo = &screen->devinfo;
3494    struct iris_vertex_element_state *cso =
3495       malloc(sizeof(struct iris_vertex_element_state));
3496 
3497    cso->count = count;
3498 
3499    iris_pack_command(GENX(3DSTATE_VERTEX_ELEMENTS), cso->vertex_elements, ve) {
3500       ve.DWordLength =
3501          1 + GENX(VERTEX_ELEMENT_STATE_length) * MAX2(count, 1) - 2;
3502    }
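   /* For example (hypothetical count, assuming the 2-DWord
    * VERTEX_ELEMENT_STATE): count = 3 gives DWordLength = 1 + 2 * 3 - 2 = 5.
    */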
3503 
3504    uint32_t *ve_pack_dest = &cso->vertex_elements[1];
3505    uint32_t *vfi_pack_dest = cso->vf_instancing;
3506 
3507    if (count == 0) {
3508       iris_pack_state(GENX(VERTEX_ELEMENT_STATE), ve_pack_dest, ve) {
3509          ve.Valid = true;
3510          ve.SourceElementFormat = ISL_FORMAT_R32G32B32A32_FLOAT;
3511          ve.Component0Control = VFCOMP_STORE_0;
3512          ve.Component1Control = VFCOMP_STORE_0;
3513          ve.Component2Control = VFCOMP_STORE_0;
3514          ve.Component3Control = VFCOMP_STORE_1_FP;
3515       }
3516 
3517       iris_pack_command(GENX(3DSTATE_VF_INSTANCING), vfi_pack_dest, vi) {
3518       }
3519    }
3520 
3521    for (int i = 0; i < count; i++) {
3522       const struct iris_format_info fmt =
3523          iris_format_for_usage(devinfo, state[i].src_format, 0);
3524       unsigned comp[4] = { VFCOMP_STORE_SRC, VFCOMP_STORE_SRC,
3525                            VFCOMP_STORE_SRC, VFCOMP_STORE_SRC };
3526 
3527       switch (isl_format_get_num_channels(fmt.fmt)) {
3528       case 0: comp[0] = VFCOMP_STORE_0; /* fallthrough */
3529       case 1: comp[1] = VFCOMP_STORE_0; /* fallthrough */
3530       case 2: comp[2] = VFCOMP_STORE_0; /* fallthrough */
3531       case 3:
3532          comp[3] = isl_format_has_int_channel(fmt.fmt) ? VFCOMP_STORE_1_INT
3533                                                        : VFCOMP_STORE_1_FP;
3534          break;
3535       }
3536       iris_pack_state(GENX(VERTEX_ELEMENT_STATE), ve_pack_dest, ve) {
3537          ve.EdgeFlagEnable = false;
3538          ve.VertexBufferIndex = state[i].vertex_buffer_index;
3539          ve.Valid = true;
3540          ve.SourceElementOffset = state[i].src_offset;
3541          ve.SourceElementFormat = fmt.fmt;
3542          ve.Component0Control = comp[0];
3543          ve.Component1Control = comp[1];
3544          ve.Component2Control = comp[2];
3545          ve.Component3Control = comp[3];
3546       }
3547 
3548       iris_pack_command(GENX(3DSTATE_VF_INSTANCING), vfi_pack_dest, vi) {
3549          vi.VertexElementIndex = i;
3550          vi.InstancingEnable = state[i].instance_divisor > 0;
3551          vi.InstanceDataStepRate = state[i].instance_divisor;
3552       }
3553 
3554       ve_pack_dest += GENX(VERTEX_ELEMENT_STATE_length);
3555       vfi_pack_dest += GENX(3DSTATE_VF_INSTANCING_length);
3556    }
3557 
3558    /* An alternative version of the last VE and VFI is stored so it
3559     * can be used at draw time in case the Vertex Shader uses EdgeFlag.
3560     */
3561    if (count) {
3562       const unsigned edgeflag_index = count - 1;
3563       const struct iris_format_info fmt =
3564          iris_format_for_usage(devinfo, state[edgeflag_index].src_format, 0);
3565       iris_pack_state(GENX(VERTEX_ELEMENT_STATE), cso->edgeflag_ve, ve) {
3566          ve.EdgeFlagEnable = true;
3567          ve.VertexBufferIndex = state[edgeflag_index].vertex_buffer_index;
3568          ve.Valid = true;
3569          ve.SourceElementOffset = state[edgeflag_index].src_offset;
3570          ve.SourceElementFormat = fmt.fmt;
3571          ve.Component0Control = VFCOMP_STORE_SRC;
3572          ve.Component1Control = VFCOMP_STORE_0;
3573          ve.Component2Control = VFCOMP_STORE_0;
3574          ve.Component3Control = VFCOMP_STORE_0;
3575       }
3576       iris_pack_command(GENX(3DSTATE_VF_INSTANCING), cso->edgeflag_vfi, vi) {
3577          /* The vi.VertexElementIndex of the EdgeFlag Vertex Element is filled
3578           * at draw time, as it should change if SGVs are emitted.
3579           */
3580          vi.InstancingEnable = state[edgeflag_index].instance_divisor > 0;
3581          vi.InstanceDataStepRate = state[edgeflag_index].instance_divisor;
3582       }
3583    }
3584 
3585    return cso;
3586 }
3587 
3588 /**
3589  * The pipe->bind_vertex_elements_state() driver hook.
3590  */
3591 static void
3592 iris_bind_vertex_elements_state(struct pipe_context *ctx, void *state)
3593 {
3594    struct iris_context *ice = (struct iris_context *) ctx;
3595    struct iris_vertex_element_state *old_cso = ice->state.cso_vertex_elements;
3596    struct iris_vertex_element_state *new_cso = state;
3597 
3598    /* 3DSTATE_VF_SGVs overrides the last VE, so if the count is changing,
3599     * we need to re-emit it to ensure we're overriding the right one.
3600     */
3601    if (new_cso && cso_changed(count))
3602       ice->state.dirty |= IRIS_DIRTY_VF_SGVS;
3603 
3604    ice->state.cso_vertex_elements = state;
3605    ice->state.dirty |= IRIS_DIRTY_VERTEX_ELEMENTS;
3606 }
3607 
3608 /**
3609  * The pipe->create_stream_output_target() driver hook.
3610  *
3611  * "Target" here refers to a destination buffer.  We translate this into
3612  * a 3DSTATE_SO_BUFFER packet.  We can handle most fields, but don't yet
3613  * know which buffer this represents, or whether we ought to zero the
3614  * write-offsets, or append.  Those are handled in the set() hook.
3615  */
3616 static struct pipe_stream_output_target *
3617 iris_create_stream_output_target(struct pipe_context *ctx,
3618                                  struct pipe_resource *p_res,
3619                                  unsigned buffer_offset,
3620                                  unsigned buffer_size)
3621 {
3622    struct iris_resource *res = (void *) p_res;
3623    struct iris_stream_output_target *cso = calloc(1, sizeof(*cso));
3624    if (!cso)
3625       return NULL;
3626 
3627    res->bind_history |= PIPE_BIND_STREAM_OUTPUT;
3628 
3629    pipe_reference_init(&cso->base.reference, 1);
3630    pipe_resource_reference(&cso->base.buffer, p_res);
3631    cso->base.buffer_offset = buffer_offset;
3632    cso->base.buffer_size = buffer_size;
3633    cso->base.context = ctx;
3634 
3635    util_range_add(&res->base, &res->valid_buffer_range, buffer_offset,
3636                   buffer_offset + buffer_size);
3637 
3638    upload_state(ctx->stream_uploader, &cso->offset, sizeof(uint32_t), 4);
3639 
3640    return &cso->base;
3641 }
3642 
3643 static void
3644 iris_stream_output_target_destroy(struct pipe_context *ctx,
3645                                   struct pipe_stream_output_target *state)
3646 {
3647    struct iris_stream_output_target *cso = (void *) state;
3648 
3649    pipe_resource_reference(&cso->base.buffer, NULL);
3650    pipe_resource_reference(&cso->offset.res, NULL);
3651 
3652    free(cso);
3653 }
3654 
3655 /**
3656  * The pipe->set_stream_output_targets() driver hook.
3657  *
3658  * At this point, we know which targets are bound to a particular index,
3659  * and also whether we want to append or start over.  We can finish the
3660  * 3DSTATE_SO_BUFFER packets we started earlier.
3661  */
3662 static void
3663 iris_set_stream_output_targets(struct pipe_context *ctx,
3664                                unsigned num_targets,
3665                                struct pipe_stream_output_target **targets,
3666                                const unsigned *offsets)
3667 {
3668    struct iris_context *ice = (struct iris_context *) ctx;
3669    struct iris_genx_state *genx = ice->state.genx;
3670    uint32_t *so_buffers = genx->so_buffers;
3671    struct iris_screen *screen = (struct iris_screen *)ctx->screen;
3672 
3673    const bool active = num_targets > 0;
3674    if (ice->state.streamout_active != active) {
3675       ice->state.streamout_active = active;
3676       ice->state.dirty |= IRIS_DIRTY_STREAMOUT;
3677 
3678       /* We only emit 3DSTATE_SO_DECL_LIST when streamout is active, because
3679        * it's a non-pipelined command.  If we're switching streamout on, we
3680        * may have missed emitting it earlier, so do so now.  (We're already
3681        * taking a stall to update 3DSTATE_SO_BUFFERS anyway...)
3682        */
3683       if (active) {
3684          ice->state.dirty |= IRIS_DIRTY_SO_DECL_LIST;
3685       } else {
3686          uint32_t flush = 0;
3687          for (int i = 0; i < PIPE_MAX_SO_BUFFERS; i++) {
3688             struct iris_stream_output_target *tgt =
3689                (void *) ice->state.so_target[i];
3690             if (tgt) {
3691                struct iris_resource *res = (void *) tgt->base.buffer;
3692 
3693                flush |= iris_flush_bits_for_history(ice, res);
3694                iris_dirty_for_history(ice, res);
3695             }
3696          }
3697          iris_emit_pipe_control_flush(&ice->batches[IRIS_BATCH_RENDER],
3698                                       "make streamout results visible", flush);
3699       }
3700    }
3701 
3702    for (int i = 0; i < 4; i++) {
3703       pipe_so_target_reference(&ice->state.so_target[i],
3704                                i < num_targets ? targets[i] : NULL);
3705    }
3706 
3707    /* No need to update 3DSTATE_SO_BUFFER unless SOL is active. */
3708    if (!active)
3709       return;
3710 
3711    for (unsigned i = 0; i < 4; i++,
3712         so_buffers += GENX(3DSTATE_SO_BUFFER_length)) {
3713 
3714       struct iris_stream_output_target *tgt = (void *) ice->state.so_target[i];
3715       unsigned offset = offsets[i];
3716 
3717       if (!tgt) {
3718          iris_pack_command(GENX(3DSTATE_SO_BUFFER), so_buffers, sob) {
3719 #if GEN_GEN < 12
3720             sob.SOBufferIndex = i;
3721 #else
3722             sob._3DCommandOpcode = 0;
3723             sob._3DCommandSubOpcode = SO_BUFFER_INDEX_0_CMD + i;
3724 #endif
3725          }
3726          continue;
3727       }
3728 
3729       struct iris_resource *res = (void *) tgt->base.buffer;
3730 
3731       /* Note that offsets[i] will either be 0, causing us to zero
3732        * the value in the buffer, or 0xFFFFFFFF, which happens to mean
3733        * "continue appending at the existing offset."
3734        */
3735       assert(offset == 0 || offset == 0xFFFFFFFF);
3736 
3737       /* We might be called by Begin (offset = 0), Pause, then Resume
3738        * (offset = 0xFFFFFFFF) before ever drawing (where these commands
3739        * will actually be sent to the GPU).  In this case, we don't want
3740        * to append - we still want to do our initial zeroing.
3741        */
3742       if (!tgt->zeroed)
3743          offset = 0;
3744 
3745       iris_pack_command(GENX(3DSTATE_SO_BUFFER), so_buffers, sob) {
3746 #if GEN_GEN < 12
3747          sob.SOBufferIndex = i;
3748 #else
3749          sob._3DCommandOpcode = 0;
3750          sob._3DCommandSubOpcode = SO_BUFFER_INDEX_0_CMD + i;
3751 #endif
3752          sob.SurfaceBaseAddress =
3753             rw_bo(NULL, res->bo->gtt_offset + tgt->base.buffer_offset,
3754                   IRIS_DOMAIN_OTHER_WRITE);
3755          sob.SOBufferEnable = true;
3756          sob.StreamOffsetWriteEnable = true;
3757          sob.StreamOutputBufferOffsetAddressEnable = true;
3758          sob.MOCS = iris_mocs(res->bo, &screen->isl_dev, 0);
3759 
3760          sob.SurfaceSize = MAX2(tgt->base.buffer_size / 4, 1) - 1;
3761          sob.StreamOffset = offset;
3762          sob.StreamOutputBufferOffsetAddress =
3763             rw_bo(NULL, iris_resource_bo(tgt->offset.res)->gtt_offset +
3764                         tgt->offset.offset, IRIS_DOMAIN_OTHER_WRITE);
3765       }
3766    }
3767 
3768    ice->state.dirty |= IRIS_DIRTY_SO_BUFFERS;
3769 }
3770 
3771 /**
3772  * An iris-vtable helper for encoding the 3DSTATE_SO_DECL_LIST and
3773  * 3DSTATE_STREAMOUT packets.
3774  *
3775  * 3DSTATE_SO_DECL_LIST is a list of shader outputs we want the streamout
3776  * hardware to record.  We can create it entirely based on the shader, with
3777  * no dynamic state dependencies.
3778  *
3779  * 3DSTATE_STREAMOUT is an annoying mix of shader-based information and
3780  * state-based settings.  We capture the shader-related ones here, and merge
3781  * the rest in at draw time.
3782  */
3783 static uint32_t *
3784 iris_create_so_decl_list(const struct pipe_stream_output_info *info,
3785                          const struct brw_vue_map *vue_map)
3786 {
3787    struct GENX(SO_DECL) so_decl[MAX_VERTEX_STREAMS][128];
3788    int buffer_mask[MAX_VERTEX_STREAMS] = {0, 0, 0, 0};
3789    int next_offset[MAX_VERTEX_STREAMS] = {0, 0, 0, 0};
3790    int decls[MAX_VERTEX_STREAMS] = {0, 0, 0, 0};
3791    int max_decls = 0;
3792    STATIC_ASSERT(ARRAY_SIZE(so_decl[0]) >= MAX_PROGRAM_OUTPUTS);
3793 
3794    memset(so_decl, 0, sizeof(so_decl));
3795 
3796    /* Construct the list of SO_DECLs to be emitted.  The formatting of the
3797     * command feels strange -- each dword pair contains a SO_DECL per stream.
3798     */
3799    for (unsigned i = 0; i < info->num_outputs; i++) {
3800       const struct pipe_stream_output *output = &info->output[i];
3801       const int buffer = output->output_buffer;
3802       const int varying = output->register_index;
3803       const unsigned stream_id = output->stream;
3804       assert(stream_id < MAX_VERTEX_STREAMS);
3805 
3806       buffer_mask[stream_id] |= 1 << buffer;
3807 
3808       assert(vue_map->varying_to_slot[varying] >= 0);
3809 
3810       /* Mesa doesn't store entries for gl_SkipComponents in the Outputs[]
3811        * array.  Instead, it simply increments DstOffset for the following
3812        * input by the number of components that should be skipped.
3813        *
3814        * Our hardware is unusual in that it requires us to program SO_DECLs
3815        * for fake "hole" components, rather than simply taking the offset
3816        * for each real varying.  Each hole can have size 1, 2, 3, or 4; we
3817        * program as many size = 4 holes as we can, then a final hole to
3818        * accommodate the final 1, 2, or 3 remaining.
3819        */
3820       int skip_components = output->dst_offset - next_offset[buffer];
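      /* For example (hypothetical counts): a skip of 7 components becomes
       * one size-4 hole followed by one size-3 hole in the loop below.
       */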
3821 
3822       while (skip_components > 0) {
3823          so_decl[stream_id][decls[stream_id]++] = (struct GENX(SO_DECL)) {
3824             .HoleFlag = 1,
3825             .OutputBufferSlot = output->output_buffer,
3826             .ComponentMask = (1 << MIN2(skip_components, 4)) - 1,
3827          };
3828          skip_components -= 4;
3829       }
3830 
3831       next_offset[buffer] = output->dst_offset + output->num_components;
3832 
3833       so_decl[stream_id][decls[stream_id]++] = (struct GENX(SO_DECL)) {
3834          .OutputBufferSlot = output->output_buffer,
3835          .RegisterIndex = vue_map->varying_to_slot[varying],
3836          .ComponentMask =
3837             ((1 << output->num_components) - 1) << output->start_component,
3838       };
3839 
3840       if (decls[stream_id] > max_decls)
3841          max_decls = decls[stream_id];
3842    }
3843 
3844    unsigned dwords = GENX(3DSTATE_STREAMOUT_length) + (3 + 2 * max_decls);
3845    uint32_t *map = ralloc_size(NULL, sizeof(uint32_t) * dwords);
3846    uint32_t *so_decl_map = map + GENX(3DSTATE_STREAMOUT_length);
3847 
3848    iris_pack_command(GENX(3DSTATE_STREAMOUT), map, sol) {
3849       int urb_entry_read_offset = 0;
3850       int urb_entry_read_length = (vue_map->num_slots + 1) / 2 -
3851          urb_entry_read_offset;
3852 
3853       /* We always read the whole vertex.  This could be reduced at some
3854        * point by reading less and offsetting the register index in the
3855        * SO_DECLs.
3856        */
3857       sol.Stream0VertexReadOffset = urb_entry_read_offset;
3858       sol.Stream0VertexReadLength = urb_entry_read_length - 1;
3859       sol.Stream1VertexReadOffset = urb_entry_read_offset;
3860       sol.Stream1VertexReadLength = urb_entry_read_length - 1;
3861       sol.Stream2VertexReadOffset = urb_entry_read_offset;
3862       sol.Stream2VertexReadLength = urb_entry_read_length - 1;
3863       sol.Stream3VertexReadOffset = urb_entry_read_offset;
3864       sol.Stream3VertexReadLength = urb_entry_read_length - 1;
3865 
3866       /* Set buffer pitches; 0 means unbound. */
3867       sol.Buffer0SurfacePitch = 4 * info->stride[0];
3868       sol.Buffer1SurfacePitch = 4 * info->stride[1];
3869       sol.Buffer2SurfacePitch = 4 * info->stride[2];
3870       sol.Buffer3SurfacePitch = 4 * info->stride[3];
3871    }
3872 
3873    iris_pack_command(GENX(3DSTATE_SO_DECL_LIST), so_decl_map, list) {
3874       list.DWordLength = 3 + 2 * max_decls - 2;
3875       list.StreamtoBufferSelects0 = buffer_mask[0];
3876       list.StreamtoBufferSelects1 = buffer_mask[1];
3877       list.StreamtoBufferSelects2 = buffer_mask[2];
3878       list.StreamtoBufferSelects3 = buffer_mask[3];
3879       list.NumEntries0 = decls[0];
3880       list.NumEntries1 = decls[1];
3881       list.NumEntries2 = decls[2];
3882       list.NumEntries3 = decls[3];
3883    }
3884 
3885    for (int i = 0; i < max_decls; i++) {
3886       iris_pack_state(GENX(SO_DECL_ENTRY), so_decl_map + 3 + i * 2, entry) {
3887          entry.Stream0Decl = so_decl[0][i];
3888          entry.Stream1Decl = so_decl[1][i];
3889          entry.Stream2Decl = so_decl[2][i];
3890          entry.Stream3Decl = so_decl[3][i];
3891       }
3892    }
3893 
3894    return map;
3895 }
3896 
3897 static void
3898 iris_compute_sbe_urb_read_interval(uint64_t fs_input_slots,
3899                                    const struct brw_vue_map *last_vue_map,
3900                                    bool two_sided_color,
3901                                    unsigned *out_offset,
3902                                    unsigned *out_length)
3903 {
3904    /* The compiler computes the first URB slot without considering COL/BFC
3905     * swizzling (because it doesn't know whether it's enabled), so we need
3906     * to do that here too.  This may result in a smaller offset, which
3907     * should be safe.
3908     */
3909    const unsigned first_slot =
3910       brw_compute_first_urb_slot_required(fs_input_slots, last_vue_map);
3911 
3912    /* This becomes the URB read offset (counted in pairs of slots). */
3913    assert(first_slot % 2 == 0);
3914    *out_offset = first_slot / 2;
3915 
3916    /* We need to adjust the inputs read to account for front/back color
3917     * swizzling, as it can make the URB length longer.
3918     */
3919    for (int c = 0; c <= 1; c++) {
3920       if (fs_input_slots & (VARYING_BIT_COL0 << c)) {
3921          /* If two sided color is enabled, the fragment shader's gl_Color
3922           * (COL0) input comes from either the gl_FrontColor (COL0) or
3923           * gl_BackColor (BFC0) input varyings.  Mark BFC as used, too.
3924           */
3925          if (two_sided_color)
3926             fs_input_slots |= (VARYING_BIT_BFC0 << c);
3927 
3928          /* If front color isn't written, we opt to give them back color
3929           * instead of an undefined value.  Switch from COL to BFC.
3930           */
3931          if (last_vue_map->varying_to_slot[VARYING_SLOT_COL0 + c] == -1) {
3932             fs_input_slots &= ~(VARYING_BIT_COL0 << c);
3933             fs_input_slots |= (VARYING_BIT_BFC0 << c);
3934          }
3935       }
3936    }
3937 
3938    /* Compute the minimum URB Read Length necessary for the FS inputs.
3939     *
3940     * From the Sandy Bridge PRM, Volume 2, Part 1, documentation for
3941     * 3DSTATE_SF DWord 1 bits 15:11, "Vertex URB Entry Read Length":
3942     *
3943     * "This field should be set to the minimum length required to read the
3944     *  maximum source attribute.  The maximum source attribute is indicated
3945     *  by the maximum value of the enabled Attribute # Source Attribute if
3946     *  Attribute Swizzle Enable is set, Number of Output Attributes-1 if
3947     *  enable is not set.
3948     *  read_length = ceiling((max_source_attr + 1) / 2)
3949     *
3950     *  [errata] Corruption/Hang possible if length programmed larger than
3951     *  recommended"
3952     *
3953     * Similar text exists for Ivy Bridge.
3954     *
3955     * We find the last URB slot that's actually read by the FS.
3956     */
3957    unsigned last_read_slot = last_vue_map->num_slots - 1;
3958    while (last_read_slot > first_slot && !(fs_input_slots &
3959           (1ull << last_vue_map->slot_to_varying[last_read_slot])))
3960       --last_read_slot;
3961 
3962    /* The URB read length is the difference of the two, counted in pairs. */
3963    *out_length = DIV_ROUND_UP(last_read_slot - first_slot + 1, 2);
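   /* Worked example (illustrative): if first_slot is 2 and last_read_slot
    * is 7, the read offset is 1 pair of slots and the read length is
    * DIV_ROUND_UP(6, 2) = 3 pairs.
    */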
3964 }
3965 
3966 static void
3967 iris_emit_sbe_swiz(struct iris_batch *batch,
3968                    const struct iris_context *ice,
3969                    unsigned urb_read_offset,
3970                    unsigned sprite_coord_enables)
3971 {
3972    struct GENX(SF_OUTPUT_ATTRIBUTE_DETAIL) attr_overrides[16] = {};
3973    const struct brw_wm_prog_data *wm_prog_data = (void *)
3974       ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data;
3975    const struct brw_vue_map *vue_map = ice->shaders.last_vue_map;
3976    const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
3977 
3978    /* XXX: this should be generated when putting programs in place */
3979 
3980    for (uint8_t idx = 0; idx < wm_prog_data->urb_setup_attribs_count; idx++) {
3981       const uint8_t fs_attr = wm_prog_data->urb_setup_attribs[idx];
3982       const int input_index = wm_prog_data->urb_setup[fs_attr];
3983       if (input_index < 0 || input_index >= 16)
3984          continue;
3985 
3986       struct GENX(SF_OUTPUT_ATTRIBUTE_DETAIL) *attr =
3987          &attr_overrides[input_index];
3988       int slot = vue_map->varying_to_slot[fs_attr];
3989 
3990       /* Viewport and Layer are stored in the VUE header.  We need to override
3991        * them to zero if earlier stages didn't write them, as GL requires that
3992        * they read back as zero when not explicitly set.
3993        */
3994       switch (fs_attr) {
3995       case VARYING_SLOT_VIEWPORT:
3996       case VARYING_SLOT_LAYER:
3997          attr->ComponentOverrideX = true;
3998          attr->ComponentOverrideW = true;
3999          attr->ConstantSource = CONST_0000;
4000 
4001          if (!(vue_map->slots_valid & VARYING_BIT_LAYER))
4002             attr->ComponentOverrideY = true;
4003          if (!(vue_map->slots_valid & VARYING_BIT_VIEWPORT))
4004             attr->ComponentOverrideZ = true;
4005          continue;
4006 
4007       case VARYING_SLOT_PRIMITIVE_ID:
4008          /* Override if the previous shader stage didn't write gl_PrimitiveID. */
4009          if (slot == -1) {
4010             attr->ComponentOverrideX = true;
4011             attr->ComponentOverrideY = true;
4012             attr->ComponentOverrideZ = true;
4013             attr->ComponentOverrideW = true;
4014             attr->ConstantSource = PRIM_ID;
4015             continue;
4016          }
4017 
4018       default:
4019          break;
4020       }
4021 
4022       if (sprite_coord_enables & (1 << input_index))
4023          continue;
4024 
4025       /* If there was only a back color written but not front, use back
4026        * as the color instead of undefined.
4027        */
4028       if (slot == -1 && fs_attr == VARYING_SLOT_COL0)
4029          slot = vue_map->varying_to_slot[VARYING_SLOT_BFC0];
4030       if (slot == -1 && fs_attr == VARYING_SLOT_COL1)
4031          slot = vue_map->varying_to_slot[VARYING_SLOT_BFC1];
4032 
4033       /* Not written by the previous stage - undefined. */
4034       if (slot == -1) {
4035          attr->ComponentOverrideX = true;
4036          attr->ComponentOverrideY = true;
4037          attr->ComponentOverrideZ = true;
4038          attr->ComponentOverrideW = true;
4039          attr->ConstantSource = CONST_0001_FLOAT;
4040          continue;
4041       }
4042 
4043       /* Compute the location of the attribute relative to the read offset,
4044        * which is counted in 256-bit increments (two 128-bit VUE slots).
4045        */
4046       const int source_attr = slot - 2 * urb_read_offset;
4047       assert(source_attr >= 0 && source_attr <= 32);
4048       attr->SourceAttribute = source_attr;
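      /* Illustrative example: a read offset of 1 skips one 256-bit row
       * (two VUE slots), so VUE slot 3 becomes source attribute 1.
       */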
4049 
4050       /* If we are doing two-sided color, and the VUE slot following this one
4051        * represents a back-facing color, then we need to instruct the SF unit
4052        * to do back-facing swizzling.
4053        */
4054       if (cso_rast->light_twoside &&
4055           ((vue_map->slot_to_varying[slot] == VARYING_SLOT_COL0 &&
4056             vue_map->slot_to_varying[slot+1] == VARYING_SLOT_BFC0) ||
4057            (vue_map->slot_to_varying[slot] == VARYING_SLOT_COL1 &&
4058             vue_map->slot_to_varying[slot+1] == VARYING_SLOT_BFC1)))
4059          attr->SwizzleSelect = INPUTATTR_FACING;
4060    }
4061 
4062    iris_emit_cmd(batch, GENX(3DSTATE_SBE_SWIZ), sbes) {
4063       for (int i = 0; i < 16; i++)
4064          sbes.Attribute[i] = attr_overrides[i];
4065    }
4066 }
4067 
4068 static bool
4069 iris_is_drawing_points(const struct iris_context *ice)
4070 {
4071    const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
4072 
4073    if (cso_rast->fill_mode_point) {
4074       return true;
4075    }
4076 
4077    if (ice->shaders.prog[MESA_SHADER_GEOMETRY]) {
4078       const struct brw_gs_prog_data *gs_prog_data =
4079          (void *) ice->shaders.prog[MESA_SHADER_GEOMETRY]->prog_data;
4080       return gs_prog_data->output_topology == _3DPRIM_POINTLIST;
4081    } else if (ice->shaders.prog[MESA_SHADER_TESS_EVAL]) {
4082       const struct brw_tes_prog_data *tes_data =
4083          (void *) ice->shaders.prog[MESA_SHADER_TESS_EVAL]->prog_data;
4084       return tes_data->output_topology == BRW_TESS_OUTPUT_TOPOLOGY_POINT;
4085    } else {
4086       return ice->state.prim_mode == PIPE_PRIM_POINTS;
4087    }
4088 }
4089 
4090 static unsigned
4091 iris_calculate_point_sprite_overrides(const struct brw_wm_prog_data *prog_data,
4092                                       const struct iris_rasterizer_state *cso)
4093 {
4094    unsigned overrides = 0;
4095 
4096    if (prog_data->urb_setup[VARYING_SLOT_PNTC] != -1)
4097       overrides |= 1 << prog_data->urb_setup[VARYING_SLOT_PNTC];
4098 
4099    for (int i = 0; i < 8; i++) {
4100       if ((cso->sprite_coord_enable & (1 << i)) &&
4101           prog_data->urb_setup[VARYING_SLOT_TEX0 + i] != -1)
4102          overrides |= 1 << prog_data->urb_setup[VARYING_SLOT_TEX0 + i];
4103    }
4104 
4105    return overrides;
4106 }
4107 
4108 static void
4109 iris_emit_sbe(struct iris_batch *batch, const struct iris_context *ice)
4110 {
4111    const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
4112    const struct brw_wm_prog_data *wm_prog_data = (void *)
4113       ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data;
4114    const struct shader_info *fs_info =
4115       iris_get_shader_info(ice, MESA_SHADER_FRAGMENT);
4116 
4117    unsigned urb_read_offset, urb_read_length;
4118    iris_compute_sbe_urb_read_interval(fs_info->inputs_read,
4119                                       ice->shaders.last_vue_map,
4120                                       cso_rast->light_twoside,
4121                                       &urb_read_offset, &urb_read_length);
4122 
4123    unsigned sprite_coord_overrides =
4124       iris_is_drawing_points(ice) ?
4125       iris_calculate_point_sprite_overrides(wm_prog_data, cso_rast) : 0;
4126 
4127    iris_emit_cmd(batch, GENX(3DSTATE_SBE), sbe) {
4128       sbe.AttributeSwizzleEnable = true;
4129       sbe.NumberofSFOutputAttributes = wm_prog_data->num_varying_inputs;
4130       sbe.PointSpriteTextureCoordinateOrigin = cso_rast->sprite_coord_mode;
4131       sbe.VertexURBEntryReadOffset = urb_read_offset;
4132       sbe.VertexURBEntryReadLength = urb_read_length;
4133       sbe.ForceVertexURBEntryReadOffset = true;
4134       sbe.ForceVertexURBEntryReadLength = true;
4135       sbe.ConstantInterpolationEnable = wm_prog_data->flat_inputs;
4136       sbe.PointSpriteTextureCoordinateEnable = sprite_coord_overrides;
4137 #if GEN_GEN >= 9
4138       for (int i = 0; i < 32; i++) {
4139          sbe.AttributeActiveComponentFormat[i] = ACTIVE_COMPONENT_XYZW;
4140       }
4141 #endif
4142    }
4143 
4144    iris_emit_sbe_swiz(batch, ice, urb_read_offset, sprite_coord_overrides);
4145 }
4146 
4147 /* ------------------------------------------------------------------- */
4148 
4149 /**
4150  * Populate VS program key fields based on the current state.
4151  */
4152 static void
4153 iris_populate_vs_key(const struct iris_context *ice,
4154                      const struct shader_info *info,
4155                      gl_shader_stage last_stage,
4156                      struct iris_vs_prog_key *key)
4157 {
4158    const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
4159 
4160    if (info->clip_distance_array_size == 0 &&
4161        (info->outputs_written & (VARYING_BIT_POS | VARYING_BIT_CLIP_VERTEX)) &&
4162        last_stage == MESA_SHADER_VERTEX)
4163       key->vue.nr_userclip_plane_consts = cso_rast->num_clip_plane_consts;
4164 }
4165 
4166 /**
4167  * Populate TCS program key fields based on the current state.
4168  */
4169 static void
4170 iris_populate_tcs_key(const struct iris_context *ice,
4171                       struct iris_tcs_prog_key *key)
4172 {
4173 }
4174 
4175 /**
4176  * Populate TES program key fields based on the current state.
4177  */
4178 static void
4179 iris_populate_tes_key(const struct iris_context *ice,
4180                       const struct shader_info *info,
4181                       gl_shader_stage last_stage,
4182                       struct iris_tes_prog_key *key)
4183 {
4184    const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
4185 
4186    if (info->clip_distance_array_size == 0 &&
4187        (info->outputs_written & (VARYING_BIT_POS | VARYING_BIT_CLIP_VERTEX)) &&
4188        last_stage == MESA_SHADER_TESS_EVAL)
4189       key->vue.nr_userclip_plane_consts = cso_rast->num_clip_plane_consts;
4190 }
4191 
4192 /**
4193  * Populate GS program key fields based on the current state.
4194  */
4195 static void
4196 iris_populate_gs_key(const struct iris_context *ice,
4197                      const struct shader_info *info,
4198                      gl_shader_stage last_stage,
4199                      struct iris_gs_prog_key *key)
4200 {
4201    const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
4202 
4203    if (info->clip_distance_array_size == 0 &&
4204        (info->outputs_written & (VARYING_BIT_POS | VARYING_BIT_CLIP_VERTEX)) &&
4205        last_stage == MESA_SHADER_GEOMETRY)
4206       key->vue.nr_userclip_plane_consts = cso_rast->num_clip_plane_consts;
4207 }
4208 
4209 /**
4210  * Populate FS program key fields based on the current state.
4211  */
4212 static void
4213 iris_populate_fs_key(const struct iris_context *ice,
4214                      const struct shader_info *info,
4215                      struct iris_fs_prog_key *key)
4216 {
4217    struct iris_screen *screen = (void *) ice->ctx.screen;
4218    const struct pipe_framebuffer_state *fb = &ice->state.framebuffer;
4219    const struct iris_depth_stencil_alpha_state *zsa = ice->state.cso_zsa;
4220    const struct iris_rasterizer_state *rast = ice->state.cso_rast;
4221    const struct iris_blend_state *blend = ice->state.cso_blend;
4222 
4223    key->nr_color_regions = fb->nr_cbufs;
4224 
4225    key->clamp_fragment_color = rast->clamp_fragment_color;
4226 
4227    key->alpha_to_coverage = blend->alpha_to_coverage;
4228 
4229    key->alpha_test_replicate_alpha = fb->nr_cbufs > 1 && zsa->alpha.enabled;
4230 
4231    key->flat_shade = rast->flatshade &&
4232       (info->inputs_read & (VARYING_BIT_COL0 | VARYING_BIT_COL1));
4233 
4234    key->persample_interp = rast->force_persample_interp;
4235    key->multisample_fbo = rast->multisample && fb->samples > 1;
4236 
4237    key->coherent_fb_fetch = GEN_GEN >= 9;
4238 
4239    key->force_dual_color_blend =
4240       screen->driconf.dual_color_blend_by_location &&
4241       (blend->blend_enables & 1) && blend->dual_color_blending;
4242 
4243    /* TODO: Respect glHint for key->high_quality_derivatives */
4244 }
4245 
4246 static void
4247 iris_populate_cs_key(const struct iris_context *ice,
4248                      struct iris_cs_prog_key *key)
4249 {
4250 }
4251 
4252 static uint64_t
4253 KSP(const struct iris_compiled_shader *shader)
4254 {
4255    struct iris_resource *res = (void *) shader->assembly.res;
4256    return iris_bo_offset_from_base_address(res->bo) + shader->assembly.offset;
4257 }
4258 
4259 #define INIT_THREAD_DISPATCH_FIELDS(pkt, prefix, stage)                   \
4260    pkt.KernelStartPointer = KSP(shader);                                  \
4261    pkt.BindingTableEntryCount = shader->bt.size_bytes / 4;                \
4262    pkt.FloatingPointMode = prog_data->use_alt_mode;                       \
4263                                                                           \
4264    pkt.DispatchGRFStartRegisterForURBData =                               \
4265       prog_data->dispatch_grf_start_reg;                                  \
4266    pkt.prefix##URBEntryReadLength = vue_prog_data->urb_read_length;       \
4267    pkt.prefix##URBEntryReadOffset = 0;                                    \
4268                                                                           \
4269    pkt.StatisticsEnable = true;                                           \
4270    pkt.Enable           = true;                                           \
4271                                                                           \
4272    if (prog_data->total_scratch) {                                        \
4273       struct iris_bo *bo =                                                \
4274          iris_get_scratch_space(ice, prog_data->total_scratch, stage);    \
4275       uint32_t scratch_addr = bo->gtt_offset;                             \
4276       pkt.PerThreadScratchSpace = ffs(prog_data->total_scratch) - 11;     \
4277       pkt.ScratchSpaceBasePointer = rw_bo(NULL, scratch_addr,             \
4278                                           IRIS_DOMAIN_NONE);              \
4279    }
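/* Note on the scratch encoding above: PerThreadScratchSpace is a power-of-two
 * encoding relative to 1KB, so (assuming total_scratch is a power of two of at
 * least 1KB) ffs(total_scratch) - 11 yields 0 for 1KB, 1 for 2KB, and so on.
 */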
4280 
4281 /**
4282  * Encode most of 3DSTATE_VS based on the compiled shader.
4283  */
4284 static void
4285 iris_store_vs_state(struct iris_context *ice,
4286                     const struct gen_device_info *devinfo,
4287                     struct iris_compiled_shader *shader)
4288 {
4289    struct brw_stage_prog_data *prog_data = shader->prog_data;
4290    struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
4291 
4292    iris_pack_command(GENX(3DSTATE_VS), shader->derived_data, vs) {
4293       INIT_THREAD_DISPATCH_FIELDS(vs, Vertex, MESA_SHADER_VERTEX);
4294       vs.MaximumNumberofThreads = devinfo->max_vs_threads - 1;
4295       vs.SIMD8DispatchEnable = true;
4296       vs.UserClipDistanceCullTestEnableBitmask =
4297          vue_prog_data->cull_distance_mask;
4298    }
4299 }
4300 
4301 /**
4302  * Encode most of 3DSTATE_HS based on the compiled shader.
4303  */
4304 static void
4305 iris_store_tcs_state(struct iris_context *ice,
4306                      const struct gen_device_info *devinfo,
4307                      struct iris_compiled_shader *shader)
4308 {
4309    struct brw_stage_prog_data *prog_data = shader->prog_data;
4310    struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
4311    struct brw_tcs_prog_data *tcs_prog_data = (void *) prog_data;
4312 
4313    iris_pack_command(GENX(3DSTATE_HS), shader->derived_data, hs) {
4314       INIT_THREAD_DISPATCH_FIELDS(hs, Vertex, MESA_SHADER_TESS_CTRL);
4315 
4316 #if GEN_GEN >= 12
4317       /* GEN:BUG:1604578095:
4318        *
4319        *    Hang occurs when the number of max threads is less than 2 times
4320        *    the number of instance count. The number of max threads must be
4321        *    more than 2 times the number of instance count.
4322        */
4323       assert((devinfo->max_tcs_threads / 2) > tcs_prog_data->instances);
4324       hs.DispatchGRFStartRegisterForURBData = prog_data->dispatch_grf_start_reg & 0x1f;
4325       hs.DispatchGRFStartRegisterForURBData5 = prog_data->dispatch_grf_start_reg >> 5;
4326 #endif
4327 
4328       hs.InstanceCount = tcs_prog_data->instances - 1;
4329       hs.MaximumNumberofThreads = devinfo->max_tcs_threads - 1;
4330       hs.IncludeVertexHandles = true;
4331 
4332 #if GEN_GEN == 12
4333       /* Patch Count threshold specifies the maximum number of patches that
4334        * will be accumulated before a thread dispatch is forced.
4335        */
4336       hs.PatchCountThreshold = tcs_prog_data->patch_count_threshold;
4337 #endif
4338 
4339 #if GEN_GEN >= 9
4340       hs.DispatchMode = vue_prog_data->dispatch_mode;
4341       hs.IncludePrimitiveID = tcs_prog_data->include_primitive_id;
4342 #endif
4343    }
4344 }
4345 
4346 /**
4347  * Encode 3DSTATE_TE and most of 3DSTATE_DS based on the compiled shader.
4348  */
4349 static void
4350 iris_store_tes_state(struct iris_context *ice,
4351                      const struct gen_device_info *devinfo,
4352                      struct iris_compiled_shader *shader)
4353 {
4354    struct brw_stage_prog_data *prog_data = shader->prog_data;
4355    struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
4356    struct brw_tes_prog_data *tes_prog_data = (void *) prog_data;
4357 
4358    uint32_t *te_state = (void *) shader->derived_data;
4359    uint32_t *ds_state = te_state + GENX(3DSTATE_TE_length);
4360 
4361    iris_pack_command(GENX(3DSTATE_TE), te_state, te) {
4362       te.Partitioning = tes_prog_data->partitioning;
4363       te.OutputTopology = tes_prog_data->output_topology;
4364       te.TEDomain = tes_prog_data->domain;
4365       te.TEEnable = true;
4366       te.MaximumTessellationFactorOdd = 63.0;
4367       te.MaximumTessellationFactorNotOdd = 64.0;
4368    }
4369 
4370    iris_pack_command(GENX(3DSTATE_DS), ds_state, ds) {
4371       INIT_THREAD_DISPATCH_FIELDS(ds, Patch, MESA_SHADER_TESS_EVAL);
4372 
4373       ds.DispatchMode = DISPATCH_MODE_SIMD8_SINGLE_PATCH;
4374       ds.MaximumNumberofThreads = devinfo->max_tes_threads - 1;
4375       ds.ComputeWCoordinateEnable =
4376          tes_prog_data->domain == BRW_TESS_DOMAIN_TRI;
4377 
4378       ds.UserClipDistanceCullTestEnableBitmask =
4379          vue_prog_data->cull_distance_mask;
4380    }
4381 
4382 }
4383 
4384 /**
4385  * Encode most of 3DSTATE_GS based on the compiled shader.
4386  */
4387 static void
4388 iris_store_gs_state(struct iris_context *ice,
4389                     const struct gen_device_info *devinfo,
4390                     struct iris_compiled_shader *shader)
4391 {
4392    struct brw_stage_prog_data *prog_data = shader->prog_data;
4393    struct brw_vue_prog_data *vue_prog_data = (void *) prog_data;
4394    struct brw_gs_prog_data *gs_prog_data = (void *) prog_data;
4395 
4396    iris_pack_command(GENX(3DSTATE_GS), shader->derived_data, gs) {
4397       INIT_THREAD_DISPATCH_FIELDS(gs, Vertex, MESA_SHADER_GEOMETRY);
4398 
4399       gs.OutputVertexSize = gs_prog_data->output_vertex_size_hwords * 2 - 1;
4400       gs.OutputTopology = gs_prog_data->output_topology;
4401       gs.ControlDataHeaderSize =
4402          gs_prog_data->control_data_header_size_hwords;
4403       gs.InstanceControl = gs_prog_data->invocations - 1;
4404       gs.DispatchMode = DISPATCH_MODE_SIMD8;
4405       gs.IncludePrimitiveID = gs_prog_data->include_primitive_id;
4406       gs.ControlDataFormat = gs_prog_data->control_data_format;
4407       gs.ReorderMode = TRAILING;
4408       gs.ExpectedVertexCount = gs_prog_data->vertices_in;
4409       gs.MaximumNumberofThreads =
4410          GEN_GEN == 8 ? (devinfo->max_gs_threads / 2 - 1)
4411                       : (devinfo->max_gs_threads - 1);
4412 
4413       if (gs_prog_data->static_vertex_count != -1) {
4414          gs.StaticOutput = true;
4415          gs.StaticOutputVertexCount = gs_prog_data->static_vertex_count;
4416       }
4417       gs.IncludeVertexHandles = vue_prog_data->include_vue_handles;
4418 
4419       gs.UserClipDistanceCullTestEnableBitmask =
4420          vue_prog_data->cull_distance_mask;
4421 
4422       const int urb_entry_write_offset = 1;
4423       const uint32_t urb_entry_output_length =
4424          DIV_ROUND_UP(vue_prog_data->vue_map.num_slots, 2) -
4425          urb_entry_write_offset;
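      /* Illustrative note: the write offset of 1 skips the pair of slots
       * holding the VUE header, and the output length is the remaining
       * number of slot pairs (e.g. 7 slots -> DIV_ROUND_UP(7, 2) - 1 = 3),
       * clamped below to 1.
       */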
4426 
4427       gs.VertexURBEntryOutputReadOffset = urb_entry_write_offset;
4428       gs.VertexURBEntryOutputLength = MAX2(urb_entry_output_length, 1);
4429    }
4430 }
4431 
4432 /**
4433  * Encode most of 3DSTATE_PS and 3DSTATE_PS_EXTRA based on the shader.
4434  */
4435 static void
4436 iris_store_fs_state(struct iris_context *ice,
4437                     const struct gen_device_info *devinfo,
4438                     struct iris_compiled_shader *shader)
4439 {
4440    struct brw_stage_prog_data *prog_data = shader->prog_data;
4441    struct brw_wm_prog_data *wm_prog_data = (void *) shader->prog_data;
4442 
4443    uint32_t *ps_state = (void *) shader->derived_data;
4444    uint32_t *psx_state = ps_state + GENX(3DSTATE_PS_length);
4445 
4446    iris_pack_command(GENX(3DSTATE_PS), ps_state, ps) {
4447       ps.VectorMaskEnable = true;
4448       ps.BindingTableEntryCount = shader->bt.size_bytes / 4;
4449       ps.FloatingPointMode = prog_data->use_alt_mode;
4450       ps.MaximumNumberofThreadsPerPSD = 64 - (GEN_GEN == 8 ? 2 : 1);
4451 
4452       ps.PushConstantEnable = prog_data->ubo_ranges[0].length > 0;
4453 
4454       /* From the documentation for this packet:
4455        * "If the PS kernel does not need the Position XY Offsets to
4456        *  compute a Position Value, then this field should be programmed
4457        *  to POSOFFSET_NONE."
4458        *
4459        * "SW Recommendation: If the PS kernel needs the Position Offsets
4460        *  to compute a Position XY value, this field should match Position
4461        *  ZW Interpolation Mode to ensure a consistent position.xyzw
4462        *  computation."
4463        *
4464        * We only require XY sample offsets, so this recommendation doesn't
4465        * seem useful at the moment.  We may need it in the future.
4466        */
4467       ps.PositionXYOffsetSelect =
4468          wm_prog_data->uses_pos_offset ? POSOFFSET_SAMPLE : POSOFFSET_NONE;
4469 
4470       if (prog_data->total_scratch) {
4471          struct iris_bo *bo =
4472             iris_get_scratch_space(ice, prog_data->total_scratch,
4473                                    MESA_SHADER_FRAGMENT);
4474          uint32_t scratch_addr = bo->gtt_offset;
4475          ps.PerThreadScratchSpace = ffs(prog_data->total_scratch) - 11;
4476          ps.ScratchSpaceBasePointer = rw_bo(NULL, scratch_addr,
4477                                             IRIS_DOMAIN_NONE);
4478       }
4479    }
4480 
4481    iris_pack_command(GENX(3DSTATE_PS_EXTRA), psx_state, psx) {
4482       psx.PixelShaderValid = true;
4483       psx.PixelShaderComputedDepthMode = wm_prog_data->computed_depth_mode;
4484       psx.PixelShaderKillsPixel = wm_prog_data->uses_kill;
4485       psx.AttributeEnable = wm_prog_data->num_varying_inputs != 0;
4486       psx.PixelShaderUsesSourceDepth = wm_prog_data->uses_src_depth;
4487       psx.PixelShaderUsesSourceW = wm_prog_data->uses_src_w;
4488       psx.PixelShaderIsPerSample = wm_prog_data->persample_dispatch;
4489       psx.oMaskPresenttoRenderTarget = wm_prog_data->uses_omask;
4490 
4491 #if GEN_GEN >= 9
4492       psx.PixelShaderPullsBary = wm_prog_data->pulls_bary;
4493       psx.PixelShaderComputesStencil = wm_prog_data->computed_stencil;
4494 #endif
4495    }
4496 }
4497 
4498 /**
4499  * Encode most of INTERFACE_DESCRIPTOR_DATA based on the compiled shader.
4500  *
4501  * (This is the compute counterpart of the 3DSTATE_* packets above.)
4502  */
4503 static void
4504 iris_store_cs_state(struct iris_context *ice,
4505                     const struct gen_device_info *devinfo,
4506                     struct iris_compiled_shader *shader)
4507 {
4508    struct brw_cs_prog_data *cs_prog_data = (void *) shader->prog_data;
4509    void *map = shader->derived_data;
4510 
4511    iris_pack_state(GENX(INTERFACE_DESCRIPTOR_DATA), map, desc) {
4512       desc.ConstantURBEntryReadLength = cs_prog_data->push.per_thread.regs;
4513       desc.BarrierEnable = cs_prog_data->uses_barrier;
4514       desc.CrossThreadConstantDataReadLength =
4515          cs_prog_data->push.cross_thread.regs;
4516 #if GEN_GEN >= 12
4517       /* TODO: Check if we are missing workarounds and enable mid-thread
4518        * preemption.
4519        *
4520        * We still have issues with mid-thread preemption (it was already
4521        * disabled by the kernel on gen11, due to missing workarounds). It's
4522        * possible that we are just missing some workarounds, and could enable
4523        * it later, but for now let's disable it to fix a GPU hang in compute
4524        * in Car Chase (and possibly more).
4525        */
4526       desc.ThreadPreemptionDisable = true;
4527 #endif
4528    }
4529 }
4530 
/**
 * Compute the size of the derived data (shader command packets).
 *
 * This must match the data written by the iris_store_xs_state() functions.
 */
4531 static unsigned
4532 iris_derived_program_state_size(enum iris_program_cache_id cache_id)
4533 {
4534    assert(cache_id <= IRIS_CACHE_BLORP);
4535 
4536    static const unsigned dwords[] = {
4537       [IRIS_CACHE_VS] = GENX(3DSTATE_VS_length),
4538       [IRIS_CACHE_TCS] = GENX(3DSTATE_HS_length),
4539       [IRIS_CACHE_TES] = GENX(3DSTATE_TE_length) + GENX(3DSTATE_DS_length),
4540       [IRIS_CACHE_GS] = GENX(3DSTATE_GS_length),
4541       [IRIS_CACHE_FS] =
4542          GENX(3DSTATE_PS_length) + GENX(3DSTATE_PS_EXTRA_length),
4543       [IRIS_CACHE_CS] = GENX(INTERFACE_DESCRIPTOR_DATA_length),
4544       [IRIS_CACHE_BLORP] = 0,
4545    };
4546 
4547    return sizeof(uint32_t) * dwords[cache_id];
4548 }
4549 
4550 /**
4551  * Create any state packets corresponding to the given shader stage
4552  * (e.g. 3DSTATE_VS) and save them as "derived data" in the shader variant.
4553  * This means that we can look up a program in the in-memory cache and
4554  * get most of the state packet without having to reconstruct it.
4555  */
4556 static void
4557 iris_store_derived_program_state(struct iris_context *ice,
4558                                  enum iris_program_cache_id cache_id,
4559                                  struct iris_compiled_shader *shader)
4560 {
4561    struct iris_screen *screen = (void *) ice->ctx.screen;
4562    const struct gen_device_info *devinfo = &screen->devinfo;
4563 
4564    switch (cache_id) {
4565    case IRIS_CACHE_VS:
4566       iris_store_vs_state(ice, devinfo, shader);
4567       break;
4568    case IRIS_CACHE_TCS:
4569       iris_store_tcs_state(ice, devinfo, shader);
4570       break;
4571    case IRIS_CACHE_TES:
4572       iris_store_tes_state(ice, devinfo, shader);
4573       break;
4574    case IRIS_CACHE_GS:
4575       iris_store_gs_state(ice, devinfo, shader);
4576       break;
4577    case IRIS_CACHE_FS:
4578       iris_store_fs_state(ice, devinfo, shader);
4579       break;
4580    case IRIS_CACHE_CS:
4581       iris_store_cs_state(ice, devinfo, shader);
4582    case IRIS_CACHE_BLORP:
4583       break;
4584    default:
4585       break;
4586    }
4587 }
4588 
4589 /* ------------------------------------------------------------------- */
4590 
4591 static const uint32_t push_constant_opcodes[] = {
4592    [MESA_SHADER_VERTEX]    = 21,
4593    [MESA_SHADER_TESS_CTRL] = 25, /* HS */
4594    [MESA_SHADER_TESS_EVAL] = 26, /* DS */
4595    [MESA_SHADER_GEOMETRY]  = 22,
4596    [MESA_SHADER_FRAGMENT]  = 23,
4597    [MESA_SHADER_COMPUTE]   = 0,
4598 };
4599 
4600 static uint32_t
4601 use_null_surface(struct iris_batch *batch, struct iris_context *ice)
4602 {
4603    struct iris_bo *state_bo = iris_resource_bo(ice->state.unbound_tex.res);
4604 
4605    iris_use_pinned_bo(batch, state_bo, false, IRIS_DOMAIN_NONE);
4606 
4607    return ice->state.unbound_tex.offset;
4608 }
4609 
4610 static uint32_t
4611 use_null_fb_surface(struct iris_batch *batch, struct iris_context *ice)
4612 {
4613    /* If set_framebuffer_state() was never called, fall back to 1x1x1 */
4614    if (!ice->state.null_fb.res)
4615       return use_null_surface(batch, ice);
4616 
4617    struct iris_bo *state_bo = iris_resource_bo(ice->state.null_fb.res);
4618 
4619    iris_use_pinned_bo(batch, state_bo, false, IRIS_DOMAIN_NONE);
4620 
4621    return ice->state.null_fb.offset;
4622 }
4623 
4624 static uint32_t
4625 surf_state_offset_for_aux(struct iris_resource *res,
4626                           unsigned aux_modes,
4627                           enum isl_aux_usage aux_usage)
4628 {
4629    assert(aux_modes & (1 << aux_usage));
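   /* SURFACE_STATEs for the possible aux modes are packed back to back,
    * SURFACE_STATE_ALIGNMENT apart; counting the enabled modes below
    * aux_usage selects the right one.  For example, if aux_modes contains
    * NONE, CCS_D, and CCS_E and aux_usage is CCS_E, we skip past two
    * surface states.
    */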
4630    return SURFACE_STATE_ALIGNMENT *
4631           util_bitcount(aux_modes & ((1 << aux_usage) - 1));
4632 }
4633 
4634 #if GEN_GEN == 9
4635 static void
4636 surf_state_update_clear_value(struct iris_batch *batch,
4637                               struct iris_resource *res,
4638                               struct iris_state_ref *state,
4639                               unsigned aux_modes,
4640                               enum isl_aux_usage aux_usage)
4641 {
4642    struct isl_device *isl_dev = &batch->screen->isl_dev;
4643    struct iris_bo *state_bo = iris_resource_bo(state->res);
4644    uint64_t real_offset = state->offset + IRIS_MEMZONE_BINDER_START;
4645    uint32_t offset_into_bo = real_offset - state_bo->gtt_offset;
4646    uint32_t clear_offset = offset_into_bo +
4647       isl_dev->ss.clear_value_offset +
4648       surf_state_offset_for_aux(res, aux_modes, aux_usage);
4649    uint32_t *color = res->aux.clear_color.u32;
4650 
4651    assert(isl_dev->ss.clear_value_size == 16);
4652 
4653    if (aux_usage == ISL_AUX_USAGE_HIZ) {
4654       iris_emit_pipe_control_write(batch, "update fast clear value (Z)",
4655                                    PIPE_CONTROL_WRITE_IMMEDIATE,
4656                                    state_bo, clear_offset, color[0]);
4657    } else {
4658       iris_emit_pipe_control_write(batch, "update fast clear color (RG__)",
4659                                    PIPE_CONTROL_WRITE_IMMEDIATE,
4660                                    state_bo, clear_offset,
4661                                    (uint64_t) color[0] |
4662                                    (uint64_t) color[1] << 32);
4663       iris_emit_pipe_control_write(batch, "update fast clear color (__BA)",
4664                                    PIPE_CONTROL_WRITE_IMMEDIATE,
4665                                    state_bo, clear_offset + 8,
4666                                    (uint64_t) color[2] |
4667                                    (uint64_t) color[3] << 32);
4668    }
4669 
4670    iris_emit_pipe_control_flush(batch,
4671                                 "update fast clear: state cache invalidate",
4672                                 PIPE_CONTROL_FLUSH_ENABLE |
4673                                 PIPE_CONTROL_STATE_CACHE_INVALIDATE);
4674 }
4675 #endif
4676 
4677 static void
4678 update_clear_value(struct iris_context *ice,
4679                    struct iris_batch *batch,
4680                    struct iris_resource *res,
4681                    struct iris_surface_state *surf_state,
4682                    unsigned all_aux_modes,
4683                    struct isl_view *view)
4684 {
4685    UNUSED struct isl_device *isl_dev = &batch->screen->isl_dev;
4686    UNUSED unsigned aux_modes = all_aux_modes;
4687 
4688    /* We only need to update the clear color in the surface state for gen8 and
4689     * gen9. Newer gens can read it directly from the clear color state buffer.
4690     */
4691 #if GEN_GEN == 9
4692    /* Skip updating the ISL_AUX_USAGE_NONE surface state */
4693    aux_modes &= ~(1 << ISL_AUX_USAGE_NONE);
4694 
4695    while (aux_modes) {
4696       enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes);
4697 
4698       surf_state_update_clear_value(batch, res, &surf_state->ref,
4699                                     all_aux_modes, aux_usage);
4700    }
4701 #elif GEN_GEN == 8
4702    /* TODO: Could update rather than re-filling */
4703    alloc_surface_states(surf_state, all_aux_modes);
4704 
4705    void *map = surf_state->cpu;
4706 
4707    while (aux_modes) {
4708       enum isl_aux_usage aux_usage = u_bit_scan(&aux_modes);
4709       fill_surface_state(isl_dev, map, res, &res->surf, view, aux_usage,
4710                          0, 0, 0);
4711       map += SURFACE_STATE_ALIGNMENT;
4712    }
4713 
4714    upload_surface_states(ice->state.surface_uploader, surf_state);
4715 #endif
4716 }
4717 
4718 /**
4719  * Add a surface to the validation list, as well as the buffer containing
4720  * the corresponding SURFACE_STATE.
4721  *
4722  * Returns the binding table entry (offset to SURFACE_STATE).
4723  */
4724 static uint32_t
4725 use_surface(struct iris_context *ice,
4726             struct iris_batch *batch,
4727             struct pipe_surface *p_surf,
4728             bool writeable,
4729             enum isl_aux_usage aux_usage,
4730             bool is_read_surface,
4731             enum iris_domain access)
4732 {
4733    struct iris_surface *surf = (void *) p_surf;
4734    struct iris_resource *res = (void *) p_surf->texture;
4735    uint32_t offset = 0;
4736 
4737    iris_use_pinned_bo(batch, iris_resource_bo(p_surf->texture),
4738                       writeable, access);
4739    if (GEN_GEN == 8 && is_read_surface) {
4740       iris_use_pinned_bo(batch, iris_resource_bo(surf->surface_state_read.ref.res), false,
4741                          IRIS_DOMAIN_NONE);
4742    } else {
4743       iris_use_pinned_bo(batch, iris_resource_bo(surf->surface_state.ref.res), false,
4744                          IRIS_DOMAIN_NONE);
4745    }
4746 
4747    if (res->aux.bo) {
4748       iris_use_pinned_bo(batch, res->aux.bo, writeable, access);
4749       if (res->aux.clear_color_bo)
4750          iris_use_pinned_bo(batch, res->aux.clear_color_bo, false, access);
4751 
4752       if (memcmp(&res->aux.clear_color, &surf->clear_color,
4753                  sizeof(surf->clear_color)) != 0) {
4754          update_clear_value(ice, batch, res, &surf->surface_state,
4755                             res->aux.possible_usages, &surf->view);
4756          if (GEN_GEN == 8) {
4757             update_clear_value(ice, batch, res, &surf->surface_state_read,
4758                                res->aux.possible_usages, &surf->read_view);
4759          }
4760          surf->clear_color = res->aux.clear_color;
4761       }
4762    }
4763 
4764    offset = (GEN_GEN == 8 && is_read_surface)
4765                ? surf->surface_state_read.ref.offset
4766                : surf->surface_state.ref.offset;
4767 
4768    return offset +
4769           surf_state_offset_for_aux(res, res->aux.possible_usages, aux_usage);
4770 }
4771 
4772 static uint32_t
4773 use_sampler_view(struct iris_context *ice,
4774                  struct iris_batch *batch,
4775                  struct iris_sampler_view *isv)
4776 {
4777    enum isl_aux_usage aux_usage =
4778       iris_resource_texture_aux_usage(ice, isv->res, isv->view.format);
4779 
4780    iris_use_pinned_bo(batch, isv->res->bo, false, IRIS_DOMAIN_OTHER_READ);
4781    iris_use_pinned_bo(batch, iris_resource_bo(isv->surface_state.ref.res), false,
4782                       IRIS_DOMAIN_NONE);
4783 
4784    if (isv->res->aux.bo) {
4785       iris_use_pinned_bo(batch, isv->res->aux.bo,
4786                          false, IRIS_DOMAIN_OTHER_READ);
4787       if (isv->res->aux.clear_color_bo)
4788          iris_use_pinned_bo(batch, isv->res->aux.clear_color_bo,
4789                             false, IRIS_DOMAIN_OTHER_READ);
4790       if (memcmp(&isv->res->aux.clear_color, &isv->clear_color,
4791                  sizeof(isv->clear_color)) != 0) {
4792          update_clear_value(ice, batch, isv->res, &isv->surface_state,
4793                             isv->res->aux.sampler_usages, &isv->view);
4794          isv->clear_color = isv->res->aux.clear_color;
4795       }
4796    }
4797 
4798    return isv->surface_state.ref.offset +
4799           surf_state_offset_for_aux(isv->res, isv->res->aux.sampler_usages,
4800                                     aux_usage);
4801 }
4802 
4803 static uint32_t
4804 use_ubo_ssbo(struct iris_batch *batch,
4805              struct iris_context *ice,
4806              struct pipe_shader_buffer *buf,
4807              struct iris_state_ref *surf_state,
4808              bool writable, enum iris_domain access)
4809 {
4810    if (!buf->buffer || !surf_state->res)
4811       return use_null_surface(batch, ice);
4812 
4813    iris_use_pinned_bo(batch, iris_resource_bo(buf->buffer), writable, access);
4814    iris_use_pinned_bo(batch, iris_resource_bo(surf_state->res), false,
4815                       IRIS_DOMAIN_NONE);
4816 
4817    return surf_state->offset;
4818 }
4819 
4820 static uint32_t
4821 use_image(struct iris_batch *batch, struct iris_context *ice,
4822           struct iris_shader_state *shs, const struct shader_info *info,
4823           int i)
4824 {
4825    struct iris_image_view *iv = &shs->image[i];
4826    struct iris_resource *res = (void *) iv->base.resource;
4827 
4828    if (!res)
4829       return use_null_surface(batch, ice);
4830 
4831    bool write = iv->base.shader_access & PIPE_IMAGE_ACCESS_WRITE;
4832 
4833    iris_use_pinned_bo(batch, res->bo, write, IRIS_DOMAIN_NONE);
4834    iris_use_pinned_bo(batch, iris_resource_bo(iv->surface_state.ref.res),
4835                       false, IRIS_DOMAIN_NONE);
4836 
4837    if (res->aux.bo)
4838       iris_use_pinned_bo(batch, res->aux.bo, write, IRIS_DOMAIN_NONE);
4839 
4840    enum isl_aux_usage aux_usage =
4841       iris_image_view_aux_usage(ice, &iv->base, info);
4842 
4843    return iv->surface_state.ref.offset +
4844       surf_state_offset_for_aux(res, res->aux.possible_usages, aux_usage);
4845 }
4846 
4847 #define push_bt_entry(addr) \
4848    assert(addr >= binder_addr); \
4849    assert(s < shader->bt.size_bytes / sizeof(uint32_t)); \
4850    if (!pin_only) bt_map[s++] = (addr) - binder_addr;
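/* Note: binding table entries are byte offsets of SURFACE_STATEs relative to
 * the binder BO's base; in pin_only mode the surfaces are still added to the
 * validation list but the table itself is not rewritten.
 */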
4851 
4852 #define bt_assert(section) \
4853    if (!pin_only && shader->bt.used_mask[section] != 0) \
4854       assert(shader->bt.offsets[section] == s);
4855 
4856 /**
4857  * Populate the binding table for a given shader stage.
4858  *
4859  * This fills out the table of pointers to surfaces required by the shader,
4860  * and also adds those buffers to the validation list so the kernel can make
4861  * resident before running our batch.
4862  */
4863 static void
4864 iris_populate_binding_table(struct iris_context *ice,
4865                             struct iris_batch *batch,
4866                             gl_shader_stage stage,
4867                             bool pin_only)
4868 {
4869    const struct iris_binder *binder = &ice->state.binder;
4870    struct iris_compiled_shader *shader = ice->shaders.prog[stage];
4871    if (!shader)
4872       return;
4873 
4874    struct iris_binding_table *bt = &shader->bt;
4875    UNUSED struct brw_stage_prog_data *prog_data = shader->prog_data;
4876    struct iris_shader_state *shs = &ice->state.shaders[stage];
4877    uint32_t binder_addr = binder->bo->gtt_offset;
4878 
4879    uint32_t *bt_map = binder->map + binder->bt_offset[stage];
4880    int s = 0;
4881 
4882    const struct shader_info *info = iris_get_shader_info(ice, stage);
4883    if (!info) {
4884       /* TCS passthrough doesn't need a binding table. */
4885       assert(stage == MESA_SHADER_TESS_CTRL);
4886       return;
4887    }
4888 
4889    if (stage == MESA_SHADER_COMPUTE &&
4890        shader->bt.used_mask[IRIS_SURFACE_GROUP_CS_WORK_GROUPS]) {
4891       /* surface for gl_NumWorkGroups */
4892       struct iris_state_ref *grid_data = &ice->state.grid_size;
4893       struct iris_state_ref *grid_state = &ice->state.grid_surf_state;
4894       iris_use_pinned_bo(batch, iris_resource_bo(grid_data->res), false,
4895                          IRIS_DOMAIN_OTHER_READ);
4896       iris_use_pinned_bo(batch, iris_resource_bo(grid_state->res), false,
4897                          IRIS_DOMAIN_NONE);
4898       push_bt_entry(grid_state->offset);
4899    }
4900 
4901    if (stage == MESA_SHADER_FRAGMENT) {
4902       struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
4903       /* Note that cso_fb->nr_cbufs == fs_key->nr_color_regions. */
4904       if (cso_fb->nr_cbufs) {
4905          for (unsigned i = 0; i < cso_fb->nr_cbufs; i++) {
4906             uint32_t addr;
4907             if (cso_fb->cbufs[i]) {
4908                addr = use_surface(ice, batch, cso_fb->cbufs[i], true,
4909                                   ice->state.draw_aux_usage[i], false,
4910                                   IRIS_DOMAIN_RENDER_WRITE);
4911             } else {
4912                addr = use_null_fb_surface(batch, ice);
4913             }
4914             push_bt_entry(addr);
4915          }
4916       } else if (GEN_GEN < 11) {
4917          uint32_t addr = use_null_fb_surface(batch, ice);
4918          push_bt_entry(addr);
4919       }
4920    }
4921 
4922 #define foreach_surface_used(index, group) \
4923    bt_assert(group); \
4924    for (int index = 0; index < bt->sizes[group]; index++) \
4925       if (iris_group_index_to_bti(bt, group, index) != \
4926           IRIS_SURFACE_NOT_USED)
4927 
4928    foreach_surface_used(i, IRIS_SURFACE_GROUP_RENDER_TARGET_READ) {
4929       struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
4930       uint32_t addr;
4931       if (cso_fb->cbufs[i]) {
4932          addr = use_surface(ice, batch, cso_fb->cbufs[i],
4933                             false, ice->state.draw_aux_usage[i], true,
4934                             IRIS_DOMAIN_OTHER_READ);
4935          push_bt_entry(addr);
4936       }
4937    }
4938 
4939    foreach_surface_used(i, IRIS_SURFACE_GROUP_TEXTURE) {
4940       struct iris_sampler_view *view = shs->textures[i];
4941       uint32_t addr = view ? use_sampler_view(ice, batch, view)
4942                            : use_null_surface(batch, ice);
4943       push_bt_entry(addr);
4944    }
4945 
4946    foreach_surface_used(i, IRIS_SURFACE_GROUP_IMAGE) {
4947       uint32_t addr = use_image(batch, ice, shs, info, i);
4948       push_bt_entry(addr);
4949    }
4950 
4951    foreach_surface_used(i, IRIS_SURFACE_GROUP_UBO) {
4952       uint32_t addr = use_ubo_ssbo(batch, ice, &shs->constbuf[i],
4953                                    &shs->constbuf_surf_state[i], false,
4954                                    IRIS_DOMAIN_OTHER_READ);
4955       push_bt_entry(addr);
4956    }
4957 
4958    foreach_surface_used(i, IRIS_SURFACE_GROUP_SSBO) {
4959       uint32_t addr =
4960          use_ubo_ssbo(batch, ice, &shs->ssbo[i], &shs->ssbo_surf_state[i],
4961                       shs->writable_ssbos & (1u << i), IRIS_DOMAIN_NONE);
4962       push_bt_entry(addr);
4963    }
4964 
4965 #if 0
4966       /* XXX: YUV surfaces not implemented yet */
4967       bt_assert(plane_start[1], ...);
4968       bt_assert(plane_start[2], ...);
4969 #endif
4970 }
4971 
4972 static void
4973 iris_use_optional_res(struct iris_batch *batch,
4974                       struct pipe_resource *res,
4975                       bool writeable,
4976                       enum iris_domain access)
4977 {
4978    if (res) {
4979       struct iris_bo *bo = iris_resource_bo(res);
4980       iris_use_pinned_bo(batch, bo, writeable, access);
4981    }
4982 }
4983 
4984 static void
4985 pin_depth_and_stencil_buffers(struct iris_batch *batch,
4986                               struct pipe_surface *zsbuf,
4987                               struct iris_depth_stencil_alpha_state *cso_zsa)
4988 {
4989    if (!zsbuf)
4990       return;
4991 
4992    struct iris_resource *zres, *sres;
4993    iris_get_depth_stencil_resources(zsbuf->texture, &zres, &sres);
4994 
4995    if (zres) {
4996       const enum iris_domain access = cso_zsa->depth_writes_enabled ?
4997          IRIS_DOMAIN_DEPTH_WRITE : IRIS_DOMAIN_OTHER_READ;
4998       iris_use_pinned_bo(batch, zres->bo, cso_zsa->depth_writes_enabled,
4999                          access);
5000       if (zres->aux.bo) {
5001          iris_use_pinned_bo(batch, zres->aux.bo,
5002                             cso_zsa->depth_writes_enabled, access);
5003       }
5004    }
5005 
5006    if (sres) {
5007       const enum iris_domain access = cso_zsa->stencil_writes_enabled ?
5008          IRIS_DOMAIN_DEPTH_WRITE : IRIS_DOMAIN_OTHER_READ;
5009       iris_use_pinned_bo(batch, sres->bo, cso_zsa->stencil_writes_enabled,
5010                          access);
5011    }
5012 }
5013 
5014 /* ------------------------------------------------------------------- */
5015 
5016 /**
5017  * Pin any BOs which were installed by a previous batch, and restored
5018  * via the hardware logical context mechanism.
5019  *
5020  * We don't need to re-emit all state every batch - the hardware context
5021  * mechanism will save and restore it for us.  This includes pointers to
5022  * various BOs...which won't exist unless we ask the kernel to pin them
5023  * by adding them to the validation list.
5024  *
5025  * We can skip buffers if we've re-emitted those packets, as we're
5026  * overwriting those stale pointers with new ones, and don't actually
5027  * refer to the old BOs.
5028  */
5029 static void
5030 iris_restore_render_saved_bos(struct iris_context *ice,
5031                               struct iris_batch *batch,
5032                               const struct pipe_draw_info *draw)
5033 {
5034    struct iris_genx_state *genx = ice->state.genx;
5035 
5036    const uint64_t clean = ~ice->state.dirty;
5037    const uint64_t stage_clean = ~ice->state.stage_dirty;
5038 
5039    if (clean & IRIS_DIRTY_CC_VIEWPORT) {
5040       iris_use_optional_res(batch, ice->state.last_res.cc_vp, false,
5041                             IRIS_DOMAIN_NONE);
5042    }
5043 
5044    if (clean & IRIS_DIRTY_SF_CL_VIEWPORT) {
5045       iris_use_optional_res(batch, ice->state.last_res.sf_cl_vp, false,
5046                             IRIS_DOMAIN_NONE);
5047    }
5048 
5049    if (clean & IRIS_DIRTY_BLEND_STATE) {
5050       iris_use_optional_res(batch, ice->state.last_res.blend, false,
5051                             IRIS_DOMAIN_NONE);
5052    }
5053 
5054    if (clean & IRIS_DIRTY_COLOR_CALC_STATE) {
5055       iris_use_optional_res(batch, ice->state.last_res.color_calc, false,
5056                             IRIS_DOMAIN_NONE);
5057    }
5058 
5059    if (clean & IRIS_DIRTY_SCISSOR_RECT) {
5060       iris_use_optional_res(batch, ice->state.last_res.scissor, false,
5061                             IRIS_DOMAIN_NONE);
5062    }
5063 
5064    if (ice->state.streamout_active && (clean & IRIS_DIRTY_SO_BUFFERS)) {
5065       for (int i = 0; i < 4; i++) {
5066          struct iris_stream_output_target *tgt =
5067             (void *) ice->state.so_target[i];
5068          if (tgt) {
5069             iris_use_pinned_bo(batch, iris_resource_bo(tgt->base.buffer),
5070                                true, IRIS_DOMAIN_OTHER_WRITE);
5071             iris_use_pinned_bo(batch, iris_resource_bo(tgt->offset.res),
5072                                true, IRIS_DOMAIN_OTHER_WRITE);
5073          }
5074       }
5075    }
5076 
5077    for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
5078       if (!(stage_clean & (IRIS_STAGE_DIRTY_CONSTANTS_VS << stage)))
5079          continue;
5080 
5081       struct iris_shader_state *shs = &ice->state.shaders[stage];
5082       struct iris_compiled_shader *shader = ice->shaders.prog[stage];
5083 
5084       if (!shader)
5085          continue;
5086 
5087       struct brw_stage_prog_data *prog_data = (void *) shader->prog_data;
5088 
5089       for (int i = 0; i < 4; i++) {
5090          const struct brw_ubo_range *range = &prog_data->ubo_ranges[i];
5091 
5092          if (range->length == 0)
5093             continue;
5094 
5095          /* Range block is a binding table index, map back to UBO index. */
5096          unsigned block_index = iris_bti_to_group_index(
5097             &shader->bt, IRIS_SURFACE_GROUP_UBO, range->block);
5098          assert(block_index != IRIS_SURFACE_NOT_USED);
5099 
5100          struct pipe_shader_buffer *cbuf = &shs->constbuf[block_index];
5101          struct iris_resource *res = (void *) cbuf->buffer;
5102 
5103          if (res)
5104             iris_use_pinned_bo(batch, res->bo, false, IRIS_DOMAIN_OTHER_READ);
5105          else
5106             iris_use_pinned_bo(batch, batch->screen->workaround_bo, false,
5107                                IRIS_DOMAIN_OTHER_READ);
5108       }
5109    }
5110 
5111    for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
5112       if (stage_clean & (IRIS_STAGE_DIRTY_BINDINGS_VS << stage)) {
5113          /* Re-pin any buffers referred to by the binding table. */
5114          iris_populate_binding_table(ice, batch, stage, true);
5115       }
5116    }
5117 
5118    for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
5119       struct iris_shader_state *shs = &ice->state.shaders[stage];
5120       struct pipe_resource *res = shs->sampler_table.res;
5121       if (res)
5122          iris_use_pinned_bo(batch, iris_resource_bo(res), false,
5123                             IRIS_DOMAIN_NONE);
5124    }
5125 
5126    for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
5127       if (stage_clean & (IRIS_STAGE_DIRTY_VS << stage)) {
5128          struct iris_compiled_shader *shader = ice->shaders.prog[stage];
5129 
5130          if (shader) {
5131             struct iris_bo *bo = iris_resource_bo(shader->assembly.res);
5132             iris_use_pinned_bo(batch, bo, false, IRIS_DOMAIN_NONE);
5133 
5134             struct brw_stage_prog_data *prog_data = shader->prog_data;
5135 
5136             if (prog_data->total_scratch > 0) {
5137                struct iris_bo *bo =
5138                   iris_get_scratch_space(ice, prog_data->total_scratch, stage);
5139                iris_use_pinned_bo(batch, bo, true, IRIS_DOMAIN_NONE);
5140             }
5141          }
5142       }
5143    }
5144 
5145    if ((clean & IRIS_DIRTY_DEPTH_BUFFER) &&
5146        (clean & IRIS_DIRTY_WM_DEPTH_STENCIL)) {
5147       struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
5148       pin_depth_and_stencil_buffers(batch, cso_fb->zsbuf, ice->state.cso_zsa);
5149    }
5150 
5151    iris_use_optional_res(batch, ice->state.last_res.index_buffer, false,
5152                          IRIS_DOMAIN_OTHER_READ);
5153 
5154    if (clean & IRIS_DIRTY_VERTEX_BUFFERS) {
5155       uint64_t bound = ice->state.bound_vertex_buffers;
5156       while (bound) {
5157          const int i = u_bit_scan64(&bound);
5158          struct pipe_resource *res = genx->vertex_buffers[i].resource;
5159          iris_use_pinned_bo(batch, iris_resource_bo(res), false,
5160                             IRIS_DOMAIN_OTHER_READ);
5161       }
5162    }
5163 }
5164 
5165 static void
5166 iris_restore_compute_saved_bos(struct iris_context *ice,
5167                                struct iris_batch *batch,
5168                                const struct pipe_grid_info *grid)
5169 {
5170    const uint64_t stage_clean = ~ice->state.stage_dirty;
5171 
5172    const int stage = MESA_SHADER_COMPUTE;
5173    struct iris_shader_state *shs = &ice->state.shaders[stage];
5174 
5175    if (stage_clean & IRIS_STAGE_DIRTY_BINDINGS_CS) {
5176       /* Re-pin any buffers referred to by the binding table. */
5177       iris_populate_binding_table(ice, batch, stage, true);
5178    }
5179 
5180    struct pipe_resource *sampler_res = shs->sampler_table.res;
5181    if (sampler_res)
5182       iris_use_pinned_bo(batch, iris_resource_bo(sampler_res), false,
5183                          IRIS_DOMAIN_NONE);
5184 
5185    if ((stage_clean & IRIS_STAGE_DIRTY_SAMPLER_STATES_CS) &&
5186        (stage_clean & IRIS_STAGE_DIRTY_BINDINGS_CS) &&
5187        (stage_clean & IRIS_STAGE_DIRTY_CONSTANTS_CS) &&
5188        (stage_clean & IRIS_STAGE_DIRTY_CS)) {
5189       iris_use_optional_res(batch, ice->state.last_res.cs_desc, false,
5190                             IRIS_DOMAIN_NONE);
5191    }
5192 
5193    if (stage_clean & IRIS_STAGE_DIRTY_CS) {
5194       struct iris_compiled_shader *shader = ice->shaders.prog[stage];
5195 
5196       if (shader) {
5197          struct iris_bo *bo = iris_resource_bo(shader->assembly.res);
5198          iris_use_pinned_bo(batch, bo, false, IRIS_DOMAIN_NONE);
5199 
5200          struct iris_bo *curbe_bo =
5201             iris_resource_bo(ice->state.last_res.cs_thread_ids);
5202          iris_use_pinned_bo(batch, curbe_bo, false, IRIS_DOMAIN_NONE);
5203 
5204          struct brw_stage_prog_data *prog_data = shader->prog_data;
5205 
5206          if (prog_data->total_scratch > 0) {
5207             struct iris_bo *bo =
5208                iris_get_scratch_space(ice, prog_data->total_scratch, stage);
5209             iris_use_pinned_bo(batch, bo, true, IRIS_DOMAIN_NONE);
5210          }
5211       }
5212    }
5213 }
5214 
5215 /**
5216  * Possibly emit STATE_BASE_ADDRESS to update Surface State Base Address.
5217  */
5218 static void
5219 iris_update_surface_base_address(struct iris_batch *batch,
5220                                  struct iris_binder *binder)
5221 {
5222    if (batch->last_surface_base_address == binder->bo->gtt_offset)
5223       return;
5224 
5225    struct isl_device *isl_dev = &batch->screen->isl_dev;
5226    uint32_t mocs = isl_mocs(isl_dev, 0);
5227 
5228    iris_batch_sync_region_start(batch);
5229 
5230    flush_before_state_base_change(batch);
5231 
5232 #if GEN_GEN == 12
5233    /* GEN:BUG:1607854226:
5234     *
5235     *  Workaround the non-pipelined state not applying in MEDIA/GPGPU pipeline
5236     *  mode by putting the pipeline temporarily in 3D mode.
5237     */
5238    if (batch->name == IRIS_BATCH_COMPUTE)
5239       emit_pipeline_select(batch, _3D);
5240 #endif
5241 
5242    iris_emit_cmd(batch, GENX(STATE_BASE_ADDRESS), sba) {
5243       sba.SurfaceStateBaseAddressModifyEnable = true;
5244       sba.SurfaceStateBaseAddress = ro_bo(binder->bo, 0);
5245 
5246       /* The hardware appears to pay attention to the MOCS fields even
5247        * if you don't set the "Address Modify Enable" bit for the base.
5248        */
5249       sba.GeneralStateMOCS            = mocs;
5250       sba.StatelessDataPortAccessMOCS = mocs;
5251       sba.DynamicStateMOCS            = mocs;
5252       sba.IndirectObjectMOCS          = mocs;
5253       sba.InstructionMOCS             = mocs;
5254       sba.SurfaceStateMOCS            = mocs;
5255 #if GEN_GEN >= 9
5256       sba.BindlessSurfaceStateMOCS    = mocs;
5257 #endif
5258    }
5259 
5260 #if GEN_GEN == 12
5261    /* GEN:BUG:1607854226:
5262     *
5263     *  Put the pipeline back into compute mode.
5264     */
5265    if (batch->name == IRIS_BATCH_COMPUTE)
5266       emit_pipeline_select(batch, GPGPU);
5267 #endif
5268 
5269    flush_after_state_base_change(batch);
5270    iris_batch_sync_region_end(batch);
5271 
5272    batch->last_surface_base_address = binder->bo->gtt_offset;
5273 }
5274 
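/* Compute the depth range for a viewport.  Window-space positions bypass the
 * viewport transform, so the full [0,1] range is used; otherwise the range is
 * derived from the viewport's Z scale/translate, honoring the half-z clip
 * convention.
 */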
5275 static inline void
5276 iris_viewport_zmin_zmax(const struct pipe_viewport_state *vp, bool halfz,
5277                         bool window_space_position, float *zmin, float *zmax)
5278 {
5279    if (window_space_position) {
5280       *zmin = 0.f;
5281       *zmax = 1.f;
5282       return;
5283    }
5284    util_viewport_zmin_zmax(vp, halfz, zmin, zmax);
5285 }
5286 
5287 #if GEN_GEN >= 12
5288 void
5289 genX(invalidate_aux_map_state)(struct iris_batch *batch)
5290 {
5291    struct iris_screen *screen = batch->screen;
5292    void *aux_map_ctx = iris_bufmgr_get_aux_map_context(screen->bufmgr);
5293    if (!aux_map_ctx)
5294       return;
5295    uint32_t aux_map_state_num = gen_aux_map_get_state_num(aux_map_ctx);
5296    if (batch->last_aux_map_state != aux_map_state_num) {
5297       /* HSD 1209978178: docs say that before programming the aux table:
5298        *
5299        *    "Driver must ensure that the engine is IDLE but ensure it doesn't
5300        *    add extra flushes in the case it knows that the engine is already
5301        *    IDLE."
5302        *
5303        * An end of pipe sync is needed here, otherwise we see GPU hangs in
5304        * dEQP-GLES31.functional.copy_image.* tests.
5305        */
5306       iris_emit_end_of_pipe_sync(batch, "Invalidate aux map table",
5307                                  PIPE_CONTROL_CS_STALL);
5308 
5309       /* If the aux-map state number increased, the translation table has
5310        * changed since this batch last saw it, so we must invalidate any
5311        * previously cached translations by writing the CCS aux invalidation
5312        * register below.
5313        */
5314       iris_load_register_imm32(batch, GENX(GFX_CCS_AUX_INV_num), 1);
5315       batch->last_aux_map_state = aux_map_state_num;
5316    }
5317 }
5318 
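/* Program the aux-map translation table base address at the start of a batch,
 * so the hardware can locate the compression (CCS) metadata for main surfaces.
 */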
5319 static void
5320 init_aux_map_state(struct iris_batch *batch)
5321 {
5322    struct iris_screen *screen = batch->screen;
5323    void *aux_map_ctx = iris_bufmgr_get_aux_map_context(screen->bufmgr);
5324    if (!aux_map_ctx)
5325       return;
5326 
5327    uint64_t base_addr = gen_aux_map_get_base(aux_map_ctx);
5328    assert(base_addr != 0 && align64(base_addr, 32 * 1024) == base_addr);
5329    iris_load_register_imm64(batch, GENX(GFX_AUX_TABLE_BASE_ADDR_num),
5330                             base_addr);
5331 }
5332 #endif
5333 
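/* Scratch storage for gathering one stage's push constant ranges (addresses
 * plus lengths in 32-byte units) before emitting 3DSTATE_CONSTANT_* packets.
 */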
5334 struct push_bos {
5335    struct {
5336       struct iris_address addr;
5337       uint32_t length;
5338    } buffers[4];
5339    int buffer_count;
5340    uint32_t max_length;
5341 };
5342 
5343 static void
5344 setup_constant_buffers(struct iris_context *ice,
5345                        struct iris_batch *batch,
5346                        int stage,
5347                        struct push_bos *push_bos)
5348 {
5349    struct iris_shader_state *shs = &ice->state.shaders[stage];
5350    struct iris_compiled_shader *shader = ice->shaders.prog[stage];
5351    struct brw_stage_prog_data *prog_data = (void *) shader->prog_data;
5352 
5353    uint32_t push_range_sum = 0;
5354 
5355    int n = 0;
5356    for (int i = 0; i < 4; i++) {
5357       const struct brw_ubo_range *range = &prog_data->ubo_ranges[i];
5358 
5359       if (range->length == 0)
5360          continue;
5361 
5362       push_range_sum += range->length;
5363 
5364       if (range->length > push_bos->max_length)
5365          push_bos->max_length = range->length;
5366 
5367       /* Range block is a binding table index, map back to UBO index. */
5368       unsigned block_index = iris_bti_to_group_index(
5369          &shader->bt, IRIS_SURFACE_GROUP_UBO, range->block);
5370       assert(block_index != IRIS_SURFACE_NOT_USED);
5371 
5372       struct pipe_shader_buffer *cbuf = &shs->constbuf[block_index];
5373       struct iris_resource *res = (void *) cbuf->buffer;
5374 
5375       assert(cbuf->buffer_offset % 32 == 0);
5376 
5377       push_bos->buffers[n].length = range->length;
5378       push_bos->buffers[n].addr =
5379          res ? ro_bo(res->bo, range->start * 32 + cbuf->buffer_offset)
5380          : batch->screen->workaround_address;
5381       n++;
5382    }
5383 
5384    /* From the 3DSTATE_CONSTANT_XS and 3DSTATE_CONSTANT_ALL programming notes:
5385     *
5386     *    "The sum of all four read length fields must be less than or
5387     *    equal to the size of 64."
5388     */
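   /* (Each read length unit is 32 bytes, so at most 2 KB of UBO data can be
    * pushed per stage.)
    */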
5389    assert(push_range_sum <= 64);
5390 
5391    push_bos->buffer_count = n;
5392 }
5393 
5394 static void
5395 emit_push_constant_packets(struct iris_context *ice,
5396                            struct iris_batch *batch,
5397                            int stage,
5398                            const struct push_bos *push_bos)
5399 {
5400    UNUSED struct isl_device *isl_dev = &batch->screen->isl_dev;
5401    struct iris_compiled_shader *shader = ice->shaders.prog[stage];
5402    struct brw_stage_prog_data *prog_data = (void *) shader->prog_data;
5403 
5404    iris_emit_cmd(batch, GENX(3DSTATE_CONSTANT_VS), pkt) {
5405       pkt._3DCommandSubOpcode = push_constant_opcodes[stage];
5406 #if GEN_GEN >= 12
5407       pkt.MOCS = isl_mocs(isl_dev, 0);
5408 #endif
5409       if (prog_data) {
5410          /* The Skylake PRM contains the following restriction:
5411           *
5412           *    "The driver must ensure The following case does not occur
5413           *     without a flush to the 3D engine: 3DSTATE_CONSTANT_* with
5414           *     buffer 3 read length equal to zero committed followed by a
5415           *     3DSTATE_CONSTANT_* with buffer 0 read length not equal to
5416           *     zero committed."
5417           *
5418           * To avoid this, we program the buffers in the highest slots.
5419           * This way, slot 0 is only used if slot 3 is also used.
5420           */
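         /* For example, with two ranges this places them in ReadLength[2] and
          * ReadLength[3], leaving slots 0 and 1 unused.
          */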
5421          int n = push_bos->buffer_count;
5422          assert(n <= 4);
5423          const unsigned shift = 4 - n;
5424          for (int i = 0; i < n; i++) {
5425             pkt.ConstantBody.ReadLength[i + shift] =
5426                push_bos->buffers[i].length;
5427             pkt.ConstantBody.Buffer[i + shift] = push_bos->buffers[i].addr;
5428          }
5429       }
5430    }
5431 }
5432 
5433 #if GEN_GEN >= 12
5434 static void
5435 emit_push_constant_packet_all(struct iris_context *ice,
5436                               struct iris_batch *batch,
5437                               uint32_t shader_mask,
5438                               const struct push_bos *push_bos)
5439 {
5440    struct isl_device *isl_dev = &batch->screen->isl_dev;
5441 
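   /* A NULL push_bos means the given stages have no push constants at all; a
    * single CONSTANT_ALL packet with an empty buffer list disables push
    * constants for every stage in shader_mask at once.
    */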
5442    if (!push_bos) {
5443       iris_emit_cmd(batch, GENX(3DSTATE_CONSTANT_ALL), pc) {
5444          pc.ShaderUpdateEnable = shader_mask;
5445       }
5446       return;
5447    }
5448 
5449    const uint32_t n = push_bos->buffer_count;
5450    const uint32_t max_pointers = 4;
5451    const uint32_t num_dwords = 2 + 2 * n;
5452    uint32_t const_all[2 + 2 * max_pointers];
5453    uint32_t *dw = &const_all[0];
5454 
5455    assert(n <= max_pointers);
5456    iris_pack_command(GENX(3DSTATE_CONSTANT_ALL), dw, all) {
5457       all.DWordLength = num_dwords - 2;
5458       all.MOCS = isl_mocs(isl_dev, 0);
5459       all.ShaderUpdateEnable = shader_mask;
5460       all.PointerBufferMask = (1 << n) - 1;
5461    }
5462    dw += 2;
5463 
5464    for (int i = 0; i < n; i++) {
5465       _iris_pack_state(batch, GENX(3DSTATE_CONSTANT_ALL_DATA),
5466                        dw + i * 2, data) {
5467          data.PointerToConstantBuffer = push_bos->buffers[i].addr;
5468          data.ConstantBufferReadLength = push_bos->buffers[i].length;
5469       }
5470    }
5471    iris_batch_emit(batch, const_all, sizeof(uint32_t) * num_dwords);
5472 }
5473 #endif
5474 
5475 static void
5476 iris_upload_dirty_render_state(struct iris_context *ice,
5477                                struct iris_batch *batch,
5478                                const struct pipe_draw_info *draw)
5479 {
5480    const uint64_t dirty = ice->state.dirty;
5481    const uint64_t stage_dirty = ice->state.stage_dirty;
5482 
5483    if (!(dirty & IRIS_ALL_DIRTY_FOR_RENDER) &&
5484        !(stage_dirty & IRIS_ALL_STAGE_DIRTY_FOR_RENDER))
5485       return;
5486 
5487    struct iris_genx_state *genx = ice->state.genx;
5488    struct iris_binder *binder = &ice->state.binder;
5489    struct brw_wm_prog_data *wm_prog_data = (void *)
5490       ice->shaders.prog[MESA_SHADER_FRAGMENT]->prog_data;
5491 
5492    if (dirty & IRIS_DIRTY_CC_VIEWPORT) {
5493       const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
5494       uint32_t cc_vp_address;
5495 
5496       /* XXX: could avoid streaming for depth_clip [0,1] case. */
5497       uint32_t *cc_vp_map =
5498          stream_state(batch, ice->state.dynamic_uploader,
5499                       &ice->state.last_res.cc_vp,
5500                       4 * ice->state.num_viewports *
5501                       GENX(CC_VIEWPORT_length), 32, &cc_vp_address);
5502       for (int i = 0; i < ice->state.num_viewports; i++) {
5503          float zmin, zmax;
5504          iris_viewport_zmin_zmax(&ice->state.viewports[i], cso_rast->clip_halfz,
5505                                  ice->state.window_space_position,
5506                                  &zmin, &zmax);
5507          if (cso_rast->depth_clip_near)
5508             zmin = 0.0;
5509          if (cso_rast->depth_clip_far)
5510             zmax = 1.0;
5511 
5512          iris_pack_state(GENX(CC_VIEWPORT), cc_vp_map, ccv) {
5513             ccv.MinimumDepth = zmin;
5514             ccv.MaximumDepth = zmax;
5515          }
5516 
5517          cc_vp_map += GENX(CC_VIEWPORT_length);
5518       }
5519 
5520       iris_emit_cmd(batch, GENX(3DSTATE_VIEWPORT_STATE_POINTERS_CC), ptr) {
5521          ptr.CCViewportPointer = cc_vp_address;
5522       }
5523    }
5524 
5525    if (dirty & IRIS_DIRTY_SF_CL_VIEWPORT) {
5526       struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
5527       uint32_t sf_cl_vp_address;
5528       uint32_t *vp_map =
5529          stream_state(batch, ice->state.dynamic_uploader,
5530                       &ice->state.last_res.sf_cl_vp,
5531                       4 * ice->state.num_viewports *
5532                       GENX(SF_CLIP_VIEWPORT_length), 64, &sf_cl_vp_address);
5533 
5534       for (unsigned i = 0; i < ice->state.num_viewports; i++) {
5535          const struct pipe_viewport_state *state = &ice->state.viewports[i];
5536          float gb_xmin, gb_xmax, gb_ymin, gb_ymax;
5537 
5538          float vp_xmin = viewport_extent(state, 0, -1.0f);
5539          float vp_xmax = viewport_extent(state, 0,  1.0f);
5540          float vp_ymin = viewport_extent(state, 1, -1.0f);
5541          float vp_ymax = viewport_extent(state, 1,  1.0f);
5542 
5543          gen_calculate_guardband_size(cso_fb->width, cso_fb->height,
5544                                       state->scale[0], state->scale[1],
5545                                       state->translate[0], state->translate[1],
5546                                       &gb_xmin, &gb_xmax, &gb_ymin, &gb_ymax);
5547 
5548          iris_pack_state(GENX(SF_CLIP_VIEWPORT), vp_map, vp) {
5549             vp.ViewportMatrixElementm00 = state->scale[0];
5550             vp.ViewportMatrixElementm11 = state->scale[1];
5551             vp.ViewportMatrixElementm22 = state->scale[2];
5552             vp.ViewportMatrixElementm30 = state->translate[0];
5553             vp.ViewportMatrixElementm31 = state->translate[1];
5554             vp.ViewportMatrixElementm32 = state->translate[2];
5555             vp.XMinClipGuardband = gb_xmin;
5556             vp.XMaxClipGuardband = gb_xmax;
5557             vp.YMinClipGuardband = gb_ymin;
5558             vp.YMaxClipGuardband = gb_ymax;
5559             vp.XMinViewPort = MAX2(vp_xmin, 0);
5560             vp.XMaxViewPort = MIN2(vp_xmax, cso_fb->width) - 1;
5561             vp.YMinViewPort = MAX2(vp_ymin, 0);
5562             vp.YMaxViewPort = MIN2(vp_ymax, cso_fb->height) - 1;
5563          }
5564 
5565          vp_map += GENX(SF_CLIP_VIEWPORT_length);
5566       }
5567 
5568       iris_emit_cmd(batch, GENX(3DSTATE_VIEWPORT_STATE_POINTERS_SF_CLIP), ptr) {
5569          ptr.SFClipViewportPointer = sf_cl_vp_address;
5570       }
5571    }
5572 
5573    if (dirty & IRIS_DIRTY_URB) {
5574       unsigned size[4];
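      /* Gather the URB entry size required by each VUE stage (unused stages
       * get a minimal size of 1) and let the common code partition the URB
       * space between them.
       */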
5575 
5576       for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
5577          if (!ice->shaders.prog[i]) {
5578             size[i] = 1;
5579          } else {
5580             struct brw_vue_prog_data *vue_prog_data =
5581                (void *) ice->shaders.prog[i]->prog_data;
5582             size[i] = vue_prog_data->urb_entry_size;
5583          }
5584          assert(size[i] != 0);
5585       }
5586 
5587       unsigned entries[4], start[4];
5588       gen_get_urb_config(&batch->screen->devinfo,
5589                          batch->screen->l3_config_3d,
5590                          ice->shaders.prog[MESA_SHADER_TESS_EVAL] != NULL,
5591                          ice->shaders.prog[MESA_SHADER_GEOMETRY] != NULL,
5592                          size, entries, start,
5593                          &ice->state.urb_deref_block_size);
5594 
5595       for (int i = MESA_SHADER_VERTEX; i <= MESA_SHADER_GEOMETRY; i++) {
5596          iris_emit_cmd(batch, GENX(3DSTATE_URB_VS), urb) {
5597             urb._3DCommandSubOpcode += i;
5598             urb.VSURBStartingAddress     = start[i];
5599             urb.VSURBEntryAllocationSize = size[i] - 1;
5600             urb.VSNumberofURBEntries     = entries[i];
5601          }
5602       }
5603    }
5604 
5605    if (dirty & IRIS_DIRTY_BLEND_STATE) {
5606       struct iris_blend_state *cso_blend = ice->state.cso_blend;
5607       struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
5608       struct iris_depth_stencil_alpha_state *cso_zsa = ice->state.cso_zsa;
5609       const int header_dwords = GENX(BLEND_STATE_length);
5610 
5611       /* Always write at least one BLEND_STATE - the final RT message will
5612        * reference BLEND_STATE[0] even if there aren't color writes.  There
5613        * may still be alpha testing, computed depth, and so on.
5614        */
5615       const int rt_dwords =
5616          MAX2(cso_fb->nr_cbufs, 1) * GENX(BLEND_STATE_ENTRY_length);
5617 
5618       uint32_t blend_offset;
5619       uint32_t *blend_map =
5620          stream_state(batch, ice->state.dynamic_uploader,
5621                       &ice->state.last_res.blend,
5622                       4 * (header_dwords + rt_dwords), 64, &blend_offset);
5623 
5624       uint32_t blend_state_header;
5625       iris_pack_state(GENX(BLEND_STATE), &blend_state_header, bs) {
5626          bs.AlphaTestEnable = cso_zsa->alpha.enabled;
5627          bs.AlphaTestFunction = translate_compare_func(cso_zsa->alpha.func);
5628       }
5629 
5630       blend_map[0] = blend_state_header | cso_blend->blend_state[0];
5631       memcpy(&blend_map[1], &cso_blend->blend_state[1], 4 * rt_dwords);
5632 
5633       iris_emit_cmd(batch, GENX(3DSTATE_BLEND_STATE_POINTERS), ptr) {
5634          ptr.BlendStatePointer = blend_offset;
5635          ptr.BlendStatePointerValid = true;
5636       }
5637    }
5638 
5639    if (dirty & IRIS_DIRTY_COLOR_CALC_STATE) {
5640       struct iris_depth_stencil_alpha_state *cso = ice->state.cso_zsa;
5641 #if GEN_GEN == 8
5642       struct pipe_stencil_ref *p_stencil_refs = &ice->state.stencil_ref;
5643 #endif
5644       uint32_t cc_offset;
5645       void *cc_map =
5646          stream_state(batch, ice->state.dynamic_uploader,
5647                       &ice->state.last_res.color_calc,
5648                       sizeof(uint32_t) * GENX(COLOR_CALC_STATE_length),
5649                       64, &cc_offset);
5650       iris_pack_state(GENX(COLOR_CALC_STATE), cc_map, cc) {
5651          cc.AlphaTestFormat = ALPHATEST_FLOAT32;
5652          cc.AlphaReferenceValueAsFLOAT32 = cso->alpha.ref_value;
5653          cc.BlendConstantColorRed   = ice->state.blend_color.color[0];
5654          cc.BlendConstantColorGreen = ice->state.blend_color.color[1];
5655          cc.BlendConstantColorBlue  = ice->state.blend_color.color[2];
5656          cc.BlendConstantColorAlpha = ice->state.blend_color.color[3];
5657 #if GEN_GEN == 8
5658          cc.StencilReferenceValue = p_stencil_refs->ref_value[0];
5659          cc.BackfaceStencilReferenceValue = p_stencil_refs->ref_value[1];
5660 #endif
5661       }
5662       iris_emit_cmd(batch, GENX(3DSTATE_CC_STATE_POINTERS), ptr) {
5663          ptr.ColorCalcStatePointer = cc_offset;
5664          ptr.ColorCalcStatePointerValid = true;
5665       }
5666    }
5667 
5668    /* GEN:BUG:1604061319
5669     *
5670     *    3DSTATE_CONSTANT_* needs to be programmed before BTP_*
5671     *
5672     * Testing shows that all the 3DSTATE_CONSTANT_XS need to be emitted if
5673     * any stage has a dirty binding table.
5674     */
5675    const bool emit_const_wa = GEN_GEN >= 11 &&
5676       ((dirty & IRIS_DIRTY_RENDER_BUFFER) ||
5677        (stage_dirty & IRIS_ALL_STAGE_DIRTY_BINDINGS));
5678 
5679 #if GEN_GEN >= 12
5680    uint32_t nobuffer_stages = 0;
5681 #endif
5682 
5683    for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
5684       if (!(stage_dirty & (IRIS_STAGE_DIRTY_CONSTANTS_VS << stage)) &&
5685           !emit_const_wa)
5686          continue;
5687 
5688       struct iris_shader_state *shs = &ice->state.shaders[stage];
5689       struct iris_compiled_shader *shader = ice->shaders.prog[stage];
5690 
5691       if (!shader)
5692          continue;
5693 
5694       if (shs->sysvals_need_upload)
5695          upload_sysvals(ice, stage, NULL);
5696 
5697       struct push_bos push_bos = {};
5698       setup_constant_buffers(ice, batch, stage, &push_bos);
5699 
5700 #if GEN_GEN >= 12
5701       /* If this stage doesn't have any push constants, emit it later in a
5702        * single CONSTANT_ALL packet with all the other stages.
5703        */
5704       if (push_bos.buffer_count == 0) {
5705          nobuffer_stages |= 1 << stage;
5706          continue;
5707       }
5708 
5709       /* The Constant Buffer Read Length field from 3DSTATE_CONSTANT_ALL
5710        * contains only 5 bits, so we can only use it for buffers smaller than
5711        * 32.
5712        */
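      /* (32 units of 32 bytes each is 1 KB; larger ranges fall back to the
       * per-stage 3DSTATE_CONSTANT_XS path below.)
       */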
5713       if (push_bos.max_length < 32) {
5714          emit_push_constant_packet_all(ice, batch, 1 << stage, &push_bos);
5715          continue;
5716       }
5717 #endif
5718       emit_push_constant_packets(ice, batch, stage, &push_bos);
5719    }
5720 
5721 #if GEN_GEN >= 12
5722    if (nobuffer_stages)
5723       emit_push_constant_packet_all(ice, batch, nobuffer_stages, NULL);
5724 #endif
5725 
5726    for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
5727       /* Gen9 requires 3DSTATE_BINDING_TABLE_POINTERS_XS to be re-emitted
5728        * in order to commit constants.  TODO: Investigate "Disable Gather
5729        * at Set Shader" to go back to legacy mode...
5730        */
5731       if (stage_dirty & ((IRIS_STAGE_DIRTY_BINDINGS_VS |
5732                           (GEN_GEN == 9 ? IRIS_STAGE_DIRTY_CONSTANTS_VS : 0))
5733                             << stage)) {
5734          iris_emit_cmd(batch, GENX(3DSTATE_BINDING_TABLE_POINTERS_VS), ptr) {
5735             ptr._3DCommandSubOpcode = 38 + stage;
5736             ptr.PointertoVSBindingTable = binder->bt_offset[stage];
5737          }
5738       }
5739    }
5740 
5741    if (GEN_GEN >= 11 && (dirty & IRIS_DIRTY_RENDER_BUFFER)) {
5742       // XXX: we may want to flag IRIS_DIRTY_MULTISAMPLE (or SAMPLE_MASK?)
5743       // XXX: see commit 979fc1bc9bcc64027ff2cfafd285676f31b930a6
5744 
5745       /* The PIPE_CONTROL command description says:
5746        *
5747        *   "Whenever a Binding Table Index (BTI) used by a Render Target
5748        *    Message points to a different RENDER_SURFACE_STATE, SW must issue a
5749        *    Render Target Cache Flush by enabling this bit. When render target
5750        *    flush is set due to new association of BTI, PS Scoreboard Stall bit
5751        *    must be set in this packet."
5752        */
5753       // XXX: does this need to happen at 3DSTATE_BTP_PS time?
5754       iris_emit_pipe_control_flush(batch, "workaround: RT BTI change [draw]",
5755                                    PIPE_CONTROL_RENDER_TARGET_FLUSH |
5756                                    PIPE_CONTROL_STALL_AT_SCOREBOARD);
5757    }
5758 
5759    for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
5760       if (stage_dirty & (IRIS_STAGE_DIRTY_BINDINGS_VS << stage)) {
5761          iris_populate_binding_table(ice, batch, stage, false);
5762       }
5763    }
5764 
5765    for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
5766       if (!(stage_dirty & (IRIS_STAGE_DIRTY_SAMPLER_STATES_VS << stage)) ||
5767           !ice->shaders.prog[stage])
5768          continue;
5769 
5770       iris_upload_sampler_states(ice, stage);
5771 
5772       struct iris_shader_state *shs = &ice->state.shaders[stage];
5773       struct pipe_resource *res = shs->sampler_table.res;
5774       if (res)
5775          iris_use_pinned_bo(batch, iris_resource_bo(res), false,
5776                             IRIS_DOMAIN_NONE);
5777 
5778       iris_emit_cmd(batch, GENX(3DSTATE_SAMPLER_STATE_POINTERS_VS), ptr) {
5779          ptr._3DCommandSubOpcode = 43 + stage;
5780          ptr.PointertoVSSamplerState = shs->sampler_table.offset;
5781       }
5782    }
5783 
5784    if (ice->state.need_border_colors)
5785       iris_use_pinned_bo(batch, ice->state.border_color_pool.bo, false,
5786                          IRIS_DOMAIN_NONE);
5787 
5788    if (dirty & IRIS_DIRTY_MULTISAMPLE) {
5789       iris_emit_cmd(batch, GENX(3DSTATE_MULTISAMPLE), ms) {
5790          ms.PixelLocation =
5791             ice->state.cso_rast->half_pixel_center ? CENTER : UL_CORNER;
5792          if (ice->state.framebuffer.samples > 0)
5793             ms.NumberofMultisamples = ffs(ice->state.framebuffer.samples) - 1;
5794       }
5795    }
5796 
5797    if (dirty & IRIS_DIRTY_SAMPLE_MASK) {
5798       iris_emit_cmd(batch, GENX(3DSTATE_SAMPLE_MASK), ms) {
5799          ms.SampleMask = ice->state.sample_mask;
5800       }
5801    }
5802 
5803    for (int stage = 0; stage <= MESA_SHADER_FRAGMENT; stage++) {
5804       if (!(stage_dirty & (IRIS_STAGE_DIRTY_VS << stage)))
5805          continue;
5806 
5807       struct iris_compiled_shader *shader = ice->shaders.prog[stage];
5808 
5809       if (shader) {
5810          struct brw_stage_prog_data *prog_data = shader->prog_data;
5811          struct iris_resource *cache = (void *) shader->assembly.res;
5812          iris_use_pinned_bo(batch, cache->bo, false, IRIS_DOMAIN_NONE);
5813 
5814          if (prog_data->total_scratch > 0) {
5815             struct iris_bo *bo =
5816                iris_get_scratch_space(ice, prog_data->total_scratch, stage);
5817             iris_use_pinned_bo(batch, bo, true, IRIS_DOMAIN_NONE);
5818          }
5819 
5820          if (stage == MESA_SHADER_FRAGMENT) {
5821             UNUSED struct iris_rasterizer_state *cso = ice->state.cso_rast;
5822             struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
5823 
5824             uint32_t ps_state[GENX(3DSTATE_PS_length)] = {0};
5825             iris_pack_command(GENX(3DSTATE_PS), ps_state, ps) {
5826                ps._8PixelDispatchEnable = wm_prog_data->dispatch_8;
5827                ps._16PixelDispatchEnable = wm_prog_data->dispatch_16;
5828                ps._32PixelDispatchEnable = wm_prog_data->dispatch_32;
5829 
5830               /* The docs for 3DSTATE_PS::32 Pixel Dispatch Enable say:
5831                *
5832                *    "When NUM_MULTISAMPLES = 16 or FORCE_SAMPLE_COUNT = 16,
5833                *     SIMD32 Dispatch must not be enabled for PER_PIXEL dispatch
5834                *     mode."
5835                *
5836                * 16x MSAA only exists on Gen9+, so we can skip this on Gen8.
5837                */
5838                if (GEN_GEN >= 9 && cso_fb->samples == 16 &&
5839                    !wm_prog_data->persample_dispatch) {
5840                   assert(ps._8PixelDispatchEnable || ps._16PixelDispatchEnable);
5841                   ps._32PixelDispatchEnable = false;
5842                }
5843 
5844                ps.DispatchGRFStartRegisterForConstantSetupData0 =
5845                   brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 0);
5846                ps.DispatchGRFStartRegisterForConstantSetupData1 =
5847                   brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 1);
5848                ps.DispatchGRFStartRegisterForConstantSetupData2 =
5849                   brw_wm_prog_data_dispatch_grf_start_reg(wm_prog_data, ps, 2);
5850 
5851                ps.KernelStartPointer0 = KSP(shader) +
5852                   brw_wm_prog_data_prog_offset(wm_prog_data, ps, 0);
5853                ps.KernelStartPointer1 = KSP(shader) +
5854                   brw_wm_prog_data_prog_offset(wm_prog_data, ps, 1);
5855                ps.KernelStartPointer2 = KSP(shader) +
5856                   brw_wm_prog_data_prog_offset(wm_prog_data, ps, 2);
5857             }
5858 
5859             uint32_t psx_state[GENX(3DSTATE_PS_EXTRA_length)] = {0};
5860             iris_pack_command(GENX(3DSTATE_PS_EXTRA), psx_state, psx) {
5861 #if GEN_GEN >= 9
5862                if (!wm_prog_data->uses_sample_mask)
5863                   psx.InputCoverageMaskState  = ICMS_NONE;
5864                else if (wm_prog_data->post_depth_coverage)
5865                   psx.InputCoverageMaskState = ICMS_DEPTH_COVERAGE;
5866                else if (wm_prog_data->inner_coverage &&
5867                         cso->conservative_rasterization)
5868                   psx.InputCoverageMaskState = ICMS_INNER_CONSERVATIVE;
5869                else
5870                   psx.InputCoverageMaskState = ICMS_NORMAL;
5871 #else
5872                psx.PixelShaderUsesInputCoverageMask =
5873                   wm_prog_data->uses_sample_mask;
5874 #endif
5875             }
5876 
5877             uint32_t *shader_ps = (uint32_t *) shader->derived_data;
5878             uint32_t *shader_psx = shader_ps + GENX(3DSTATE_PS_length);
5879             iris_emit_merge(batch, shader_ps, ps_state,
5880                             GENX(3DSTATE_PS_length));
5881             iris_emit_merge(batch, shader_psx, psx_state,
5882                             GENX(3DSTATE_PS_EXTRA_length));
5883          } else {
5884             iris_batch_emit(batch, shader->derived_data,
5885                             iris_derived_program_state_size(stage));
5886          }
5887       } else {
5888          if (stage == MESA_SHADER_TESS_EVAL) {
5889             iris_emit_cmd(batch, GENX(3DSTATE_HS), hs);
5890             iris_emit_cmd(batch, GENX(3DSTATE_TE), te);
5891             iris_emit_cmd(batch, GENX(3DSTATE_DS), ds);
5892          } else if (stage == MESA_SHADER_GEOMETRY) {
5893             iris_emit_cmd(batch, GENX(3DSTATE_GS), gs);
5894          }
5895       }
5896    }
5897 
5898    if (ice->state.streamout_active) {
5899       if (dirty & IRIS_DIRTY_SO_BUFFERS) {
5900          iris_batch_emit(batch, genx->so_buffers,
5901                          4 * 4 * GENX(3DSTATE_SO_BUFFER_length));
5902          for (int i = 0; i < 4; i++) {
5903             struct iris_stream_output_target *tgt =
5904                (void *) ice->state.so_target[i];
5905             if (tgt) {
5906                tgt->zeroed = true;
5907                iris_use_pinned_bo(batch, iris_resource_bo(tgt->base.buffer),
5908                                   true, IRIS_DOMAIN_OTHER_WRITE);
5909                iris_use_pinned_bo(batch, iris_resource_bo(tgt->offset.res),
5910                                   true, IRIS_DOMAIN_OTHER_WRITE);
5911             }
5912          }
5913       }
5914 
5915       if ((dirty & IRIS_DIRTY_SO_DECL_LIST) && ice->state.streamout) {
5916          uint32_t *decl_list =
5917             ice->state.streamout + GENX(3DSTATE_STREAMOUT_length);
5918          iris_batch_emit(batch, decl_list, 4 * ((decl_list[0] & 0xff) + 2));
5919       }
5920 
5921       if (dirty & IRIS_DIRTY_STREAMOUT) {
5922          const struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
5923 
5924          uint32_t dynamic_sol[GENX(3DSTATE_STREAMOUT_length)];
5925          iris_pack_command(GENX(3DSTATE_STREAMOUT), dynamic_sol, sol) {
5926             sol.SOFunctionEnable = true;
5927             sol.SOStatisticsEnable = true;
5928 
5929             sol.RenderingDisable = cso_rast->rasterizer_discard &&
5930                                    !ice->state.prims_generated_query_active;
5931             sol.ReorderMode = cso_rast->flatshade_first ? LEADING : TRAILING;
5932          }
5933 
5934          assert(ice->state.streamout);
5935 
5936          iris_emit_merge(batch, ice->state.streamout, dynamic_sol,
5937                          GENX(3DSTATE_STREAMOUT_length));
5938       }
5939    } else {
5940       if (dirty & IRIS_DIRTY_STREAMOUT) {
5941          iris_emit_cmd(batch, GENX(3DSTATE_STREAMOUT), sol);
5942       }
5943    }
5944 
5945    if (dirty & IRIS_DIRTY_CLIP) {
5946       struct iris_rasterizer_state *cso_rast = ice->state.cso_rast;
5947       struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
5948 
5949       bool gs_or_tes = ice->shaders.prog[MESA_SHADER_GEOMETRY] ||
5950                        ice->shaders.prog[MESA_SHADER_TESS_EVAL];
5951       bool points_or_lines = cso_rast->fill_mode_point_or_line ||
5952          (gs_or_tes ? ice->shaders.output_topology_is_points_or_lines
5953                     : ice->state.prim_is_points_or_lines);
5954 
5955       uint32_t dynamic_clip[GENX(3DSTATE_CLIP_length)];
5956       iris_pack_command(GENX(3DSTATE_CLIP), &dynamic_clip, cl) {
5957          cl.StatisticsEnable = ice->state.statistics_counters_enabled;
5958          if (cso_rast->rasterizer_discard)
5959             cl.ClipMode = CLIPMODE_REJECT_ALL;
5960          else if (ice->state.window_space_position)
5961             cl.ClipMode = CLIPMODE_ACCEPT_ALL;
5962          else
5963             cl.ClipMode = CLIPMODE_NORMAL;
5964 
5965          cl.PerspectiveDivideDisable = ice->state.window_space_position;
5966          cl.ViewportXYClipTestEnable = !points_or_lines;
5967 
5968          if (wm_prog_data->barycentric_interp_modes &
5969              BRW_BARYCENTRIC_NONPERSPECTIVE_BITS)
5970             cl.NonPerspectiveBarycentricEnable = true;
5971 
5972          cl.ForceZeroRTAIndexEnable = cso_fb->layers <= 1;
5973          cl.MaximumVPIndex = ice->state.num_viewports - 1;
5974       }
5975       iris_emit_merge(batch, cso_rast->clip, dynamic_clip,
5976                       ARRAY_SIZE(cso_rast->clip));
5977    }
5978 
5979    if (dirty & (IRIS_DIRTY_RASTER | IRIS_DIRTY_URB)) {
5980       struct iris_rasterizer_state *cso = ice->state.cso_rast;
5981       iris_batch_emit(batch, cso->raster, sizeof(cso->raster));
5982 
5983       uint32_t dynamic_sf[GENX(3DSTATE_SF_length)];
5984       iris_pack_command(GENX(3DSTATE_SF), &dynamic_sf, sf) {
5985          sf.ViewportTransformEnable = !ice->state.window_space_position;
5986 
5987 #if GEN_GEN >= 12
5988          sf.DerefBlockSize = ice->state.urb_deref_block_size;
5989 #endif
5990       }
5991       iris_emit_merge(batch, cso->sf, dynamic_sf,
5992                       ARRAY_SIZE(dynamic_sf));
5993    }
5994 
5995    if (dirty & IRIS_DIRTY_WM) {
5996       struct iris_rasterizer_state *cso = ice->state.cso_rast;
5997       uint32_t dynamic_wm[GENX(3DSTATE_WM_length)];
5998 
5999       iris_pack_command(GENX(3DSTATE_WM), &dynamic_wm, wm) {
6000          wm.StatisticsEnable = ice->state.statistics_counters_enabled;
6001 
6002          wm.BarycentricInterpolationMode =
6003             wm_prog_data->barycentric_interp_modes;
6004 
6005          if (wm_prog_data->early_fragment_tests)
6006             wm.EarlyDepthStencilControl = EDSC_PREPS;
6007          else if (wm_prog_data->has_side_effects)
6008             wm.EarlyDepthStencilControl = EDSC_PSEXEC;
6009 
6010          /* We could skip this bit if color writes are enabled. */
6011          if (wm_prog_data->has_side_effects || wm_prog_data->uses_kill)
6012             wm.ForceThreadDispatchEnable = ForceON;
6013       }
6014       iris_emit_merge(batch, cso->wm, dynamic_wm, ARRAY_SIZE(cso->wm));
6015    }
6016 
6017    if (dirty & IRIS_DIRTY_SBE) {
6018       iris_emit_sbe(batch, ice);
6019    }
6020 
6021    if (dirty & IRIS_DIRTY_PS_BLEND) {
6022       struct iris_blend_state *cso_blend = ice->state.cso_blend;
6023       struct iris_depth_stencil_alpha_state *cso_zsa = ice->state.cso_zsa;
6024       const struct shader_info *fs_info =
6025          iris_get_shader_info(ice, MESA_SHADER_FRAGMENT);
6026 
6027       uint32_t dynamic_pb[GENX(3DSTATE_PS_BLEND_length)];
6028       iris_pack_command(GENX(3DSTATE_PS_BLEND), &dynamic_pb, pb) {
6029          pb.HasWriteableRT = has_writeable_rt(cso_blend, fs_info);
6030          pb.AlphaTestEnable = cso_zsa->alpha.enabled;
6031 
6032          /* The dual source blending docs caution against using SRC1 factors
6033           * when the shader doesn't use a dual source render target write.
6034           * Empirically, this can lead to GPU hangs, and the results are
6035           * undefined anyway, so simply disable blending to avoid the hang.
6036           */
6037          pb.ColorBufferBlendEnable = (cso_blend->blend_enables & 1) &&
6038             (!cso_blend->dual_color_blending || wm_prog_data->dual_src_blend);
6039       }
6040 
6041       iris_emit_merge(batch, cso_blend->ps_blend, dynamic_pb,
6042                       ARRAY_SIZE(cso_blend->ps_blend));
6043    }
6044 
6045    if (dirty & IRIS_DIRTY_WM_DEPTH_STENCIL) {
6046       struct iris_depth_stencil_alpha_state *cso = ice->state.cso_zsa;
6047 #if GEN_GEN >= 9 && GEN_GEN < 12
6048       struct pipe_stencil_ref *p_stencil_refs = &ice->state.stencil_ref;
6049       uint32_t stencil_refs[GENX(3DSTATE_WM_DEPTH_STENCIL_length)];
6050       iris_pack_command(GENX(3DSTATE_WM_DEPTH_STENCIL), &stencil_refs, wmds) {
6051          wmds.StencilReferenceValue = p_stencil_refs->ref_value[0];
6052          wmds.BackfaceStencilReferenceValue = p_stencil_refs->ref_value[1];
6053       }
6054       iris_emit_merge(batch, cso->wmds, stencil_refs, ARRAY_SIZE(cso->wmds));
6055 #else
6056       /* Use modify disable fields which allow us to emit packets
6057        * directly instead of merging them later.
6058        */
6059       iris_batch_emit(batch, cso->wmds, sizeof(cso->wmds));
6060 #endif
6061 
6062 #if GEN_GEN >= 12
6063       iris_batch_emit(batch, cso->depth_bounds, sizeof(cso->depth_bounds));
6064 #endif
6065    }
6066 
6067    if (dirty & IRIS_DIRTY_STENCIL_REF) {
6068 #if GEN_GEN >= 12
6069       /* Use modify disable fields which allow us to emit packets
6070        * directly instead of merging them later.
6071        */
6072       struct pipe_stencil_ref *p_stencil_refs = &ice->state.stencil_ref;
6073       uint32_t stencil_refs[GENX(3DSTATE_WM_DEPTH_STENCIL_length)];
6074       iris_pack_command(GENX(3DSTATE_WM_DEPTH_STENCIL), &stencil_refs, wmds) {
6075          wmds.StencilReferenceValue = p_stencil_refs->ref_value[0];
6076          wmds.BackfaceStencilReferenceValue = p_stencil_refs->ref_value[1];
6077          wmds.StencilTestMaskModifyDisable = true;
6078          wmds.StencilWriteMaskModifyDisable = true;
6079          wmds.StencilStateModifyDisable = true;
6080          wmds.DepthStateModifyDisable = true;
6081       }
6082       iris_batch_emit(batch, stencil_refs, sizeof(stencil_refs));
6083 #endif
6084    }
6085 
6086    if (dirty & IRIS_DIRTY_SCISSOR_RECT) {
6087       /* GEN:BUG:1409725701:
6088        *    "The viewport-specific state used by the SF unit (SCISSOR_RECT) is
6089        *    stored as an array of up to 16 elements. The location of first
6090        *    element of the array, as specified by Pointer to SCISSOR_RECT,
6091        *    should be aligned to a 64-byte boundary."
6092        */
6093       uint32_t alignment = 64;
6094       uint32_t scissor_offset =
6095          emit_state(batch, ice->state.dynamic_uploader,
6096                     &ice->state.last_res.scissor,
6097                     ice->state.scissors,
6098                     sizeof(struct pipe_scissor_state) *
6099                     ice->state.num_viewports, alignment);
6100 
6101       iris_emit_cmd(batch, GENX(3DSTATE_SCISSOR_STATE_POINTERS), ptr) {
6102          ptr.ScissorRectPointer = scissor_offset;
6103       }
6104    }
6105 
6106    if (dirty & IRIS_DIRTY_DEPTH_BUFFER) {
6107       struct iris_depth_buffer_state *cso_z = &ice->state.genx->depth_buffer;
6108 
6109       /* Do not emit the clear params yet. We need to update the clear value
6110        * first.
6111        */
6112       uint32_t clear_length = GENX(3DSTATE_CLEAR_PARAMS_length) * 4;
6113       uint32_t cso_z_size = batch->screen->isl_dev.ds.size - clear_length;
6114 
6115 #if GEN_GEN == 12
6116       /* GEN:BUG:14010455700
6117        *
6118        * ISL will change some CHICKEN registers depending on the depth surface
6119        * format, along with emitting the depth and stencil packets. In that
6120        * case, we want to do a depth flush and stall, so the pipeline is not
6121        * using these settings while we change the registers.
6122        */
6123       iris_emit_end_of_pipe_sync(batch,
6124                                  "Workaround: Stop pipeline for 14010455700",
6125                                  PIPE_CONTROL_DEPTH_STALL |
6126                                  PIPE_CONTROL_DEPTH_CACHE_FLUSH);
6127 #endif
6128 
6129       iris_batch_emit(batch, cso_z->packets, cso_z_size);
6130       if (GEN_GEN >= 12) {
6131          /* GEN:BUG:1408224581
6132           *
6133           * Workaround (Gen12LP A-step only): an additional pipe control with
6134           * a post-sync store-dword operation is required; i.e., emit an extra
6135           * pipe control after the stencil state whenever the surface state
6136           * bits of this state change.
6137           */
6138          iris_emit_pipe_control_write(batch, "WA for stencil state",
6139                                       PIPE_CONTROL_WRITE_IMMEDIATE,
6140                                       batch->screen->workaround_address.bo,
6141                                       batch->screen->workaround_address.offset, 0);
6142       }
6143 
6144       union isl_color_value clear_value = { .f32 = { 0, } };
6145 
6146       struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
6147       if (cso_fb->zsbuf) {
6148          struct iris_resource *zres, *sres;
6149          iris_get_depth_stencil_resources(cso_fb->zsbuf->texture,
6150                                           &zres, &sres);
6151          if (zres && zres->aux.bo)
6152             clear_value = iris_resource_get_clear_color(zres, NULL, NULL);
6153       }
6154 
6155       uint32_t clear_params[GENX(3DSTATE_CLEAR_PARAMS_length)];
6156       iris_pack_command(GENX(3DSTATE_CLEAR_PARAMS), clear_params, clear) {
6157          clear.DepthClearValueValid = true;
6158          clear.DepthClearValue = clear_value.f32[0];
6159       }
6160       iris_batch_emit(batch, clear_params, clear_length);
6161    }
6162 
6163    if (dirty & (IRIS_DIRTY_DEPTH_BUFFER | IRIS_DIRTY_WM_DEPTH_STENCIL)) {
6164       /* Listen for buffer changes, and also write enable changes. */
6165       struct pipe_framebuffer_state *cso_fb = &ice->state.framebuffer;
6166       pin_depth_and_stencil_buffers(batch, cso_fb->zsbuf, ice->state.cso_zsa);
6167    }
6168 
6169    if (dirty & IRIS_DIRTY_POLYGON_STIPPLE) {
6170       iris_emit_cmd(batch, GENX(3DSTATE_POLY_STIPPLE_PATTERN), poly) {
6171          for (int i = 0; i < 32; i++) {
6172             poly.PatternRow[i] = ice->state.poly_stipple.stipple[i];
6173          }
6174       }
6175    }
6176 
6177    if (dirty & IRIS_DIRTY_LINE_STIPPLE) {
6178       struct iris_rasterizer_state *cso = ice->state.cso_rast;
6179       iris_batch_emit(batch, cso->line_stipple, sizeof(cso->line_stipple));
6180    }
6181 
6182    if (dirty & IRIS_DIRTY_VF_TOPOLOGY) {
6183       iris_emit_cmd(batch, GENX(3DSTATE_VF_TOPOLOGY), topo) {
6184          topo.PrimitiveTopologyType =
6185             translate_prim_type(draw->mode, draw->vertices_per_patch);
6186       }
6187    }
6188 
6189    if (dirty & IRIS_DIRTY_VERTEX_BUFFERS) {
6190       int count = util_bitcount64(ice->state.bound_vertex_buffers);
6191       uint64_t dynamic_bound = ice->state.bound_vertex_buffers;
6192 
6193       if (ice->state.vs_uses_draw_params) {
6194          assert(ice->draw.draw_params.res);
6195 
6196          struct iris_vertex_buffer_state *state =
6197             &(ice->state.genx->vertex_buffers[count]);
6198          pipe_resource_reference(&state->resource, ice->draw.draw_params.res);
6199          struct iris_resource *res = (void *) state->resource;
6200 
6201          iris_pack_state(GENX(VERTEX_BUFFER_STATE), state->state, vb) {
6202             vb.VertexBufferIndex = count;
6203             vb.AddressModifyEnable = true;
6204             vb.BufferPitch = 0;
6205             vb.BufferSize = res->bo->size - ice->draw.draw_params.offset;
6206             vb.BufferStartingAddress =
6207                ro_bo(NULL, res->bo->gtt_offset +
6208                            (int) ice->draw.draw_params.offset);
6209             vb.MOCS = iris_mocs(res->bo, &batch->screen->isl_dev,
6210                                 ISL_SURF_USAGE_VERTEX_BUFFER_BIT);
6211          }
6212          dynamic_bound |= 1ull << count;
6213          count++;
6214       }
6215 
6216       if (ice->state.vs_uses_derived_draw_params) {
6217          struct iris_vertex_buffer_state *state =
6218             &(ice->state.genx->vertex_buffers[count]);
6219          pipe_resource_reference(&state->resource,
6220                                  ice->draw.derived_draw_params.res);
6221          struct iris_resource *res = (void *) ice->draw.derived_draw_params.res;
6222 
6223          iris_pack_state(GENX(VERTEX_BUFFER_STATE), state->state, vb) {
6224             vb.VertexBufferIndex = count;
6225             vb.AddressModifyEnable = true;
6226             vb.BufferPitch = 0;
6227             vb.BufferSize =
6228                res->bo->size - ice->draw.derived_draw_params.offset;
6229             vb.BufferStartingAddress =
6230                ro_bo(NULL, res->bo->gtt_offset +
6231                            (int) ice->draw.derived_draw_params.offset);
6232             vb.MOCS = iris_mocs(res->bo, &batch->screen->isl_dev,
6233                                 ISL_SURF_USAGE_VERTEX_BUFFER_BIT);
6234          }
6235          dynamic_bound |= 1ull << count;
6236          count++;
6237       }
6238 
6239       if (count) {
6240 #if GEN_GEN >= 11
6241          /* Gen11+ doesn't need the cache workaround below */
6242          uint64_t bound = dynamic_bound;
6243          while (bound) {
6244             const int i = u_bit_scan64(&bound);
6245             iris_use_optional_res(batch, genx->vertex_buffers[i].resource,
6246                                   false, IRIS_DOMAIN_OTHER_READ);
6247          }
6248 #else
6249          /* The VF cache designers cut corners, and made the cache key's
6250           * <VertexBufferIndex, Memory Address> tuple only consider the bottom
6251           * 32 bits of the address.  If you have two vertex buffers which get
6252           * placed exactly 4 GiB apart and use them in back-to-back draw calls,
6253           * you can get collisions (even within a single batch).
6254           *
6255           * So, we need to do a VF cache invalidate if the buffer for a VB
6256           * slot changes [48:32] address bits from the previous time.
6257           */
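         /* For example, vertex buffers at 0x100001000 and 0x200001000 share
          * the same low 32 address bits and would collide in the cache, so a
          * change in bits [48:32] forces an invalidate.
          */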
6258          unsigned flush_flags = 0;
6259 
6260          uint64_t bound = dynamic_bound;
6261          while (bound) {
6262             const int i = u_bit_scan64(&bound);
6263             uint16_t high_bits = 0;
6264 
6265             struct iris_resource *res =
6266                (void *) genx->vertex_buffers[i].resource;
6267             if (res) {
6268                iris_use_pinned_bo(batch, res->bo, false, IRIS_DOMAIN_OTHER_READ);
6269 
6270                high_bits = res->bo->gtt_offset >> 32ull;
6271                if (high_bits != ice->state.last_vbo_high_bits[i]) {
6272                   flush_flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE |
6273                                  PIPE_CONTROL_CS_STALL;
6274                   ice->state.last_vbo_high_bits[i] = high_bits;
6275                }
6276             }
6277          }
6278 
6279          if (flush_flags) {
6280             iris_emit_pipe_control_flush(batch,
6281                                          "workaround: VF cache 32-bit key [VB]",
6282                                          flush_flags);
6283          }
6284 #endif
6285 
6286          const unsigned vb_dwords = GENX(VERTEX_BUFFER_STATE_length);
6287 
6288          uint32_t *map =
6289             iris_get_command_space(batch, 4 * (1 + vb_dwords * count));
6290          _iris_pack_command(batch, GENX(3DSTATE_VERTEX_BUFFERS), map, vb) {
6291             vb.DWordLength = (vb_dwords * count + 1) - 2;
6292          }
6293          map += 1;
6294 
6295          bound = dynamic_bound;
6296          while (bound) {
6297             const int i = u_bit_scan64(&bound);
6298             memcpy(map, genx->vertex_buffers[i].state,
6299                    sizeof(uint32_t) * vb_dwords);
6300             map += vb_dwords;
6301          }
6302       }
6303    }
6304 
6305    if (dirty & IRIS_DIRTY_VERTEX_ELEMENTS) {
6306       struct iris_vertex_element_state *cso = ice->state.cso_vertex_elements;
6307       const unsigned entries = MAX2(cso->count, 1);
6308       if (!(ice->state.vs_needs_sgvs_element ||
6309             ice->state.vs_uses_derived_draw_params ||
6310             ice->state.vs_needs_edge_flag)) {
6311          iris_batch_emit(batch, cso->vertex_elements, sizeof(uint32_t) *
6312                          (1 + entries * GENX(VERTEX_ELEMENT_STATE_length)));
6313       } else {
6314          uint32_t dynamic_ves[1 + 33 * GENX(VERTEX_ELEMENT_STATE_length)];
6315          const unsigned dyn_count = cso->count +
6316             ice->state.vs_needs_sgvs_element +
6317             ice->state.vs_uses_derived_draw_params;
6318 
6319          iris_pack_command(GENX(3DSTATE_VERTEX_ELEMENTS),
6320                            &dynamic_ves, ve) {
6321             ve.DWordLength =
6322                1 + GENX(VERTEX_ELEMENT_STATE_length) * dyn_count - 2;
6323          }
6324          memcpy(&dynamic_ves[1], &cso->vertex_elements[1],
6325                 (cso->count - ice->state.vs_needs_edge_flag) *
6326                 GENX(VERTEX_ELEMENT_STATE_length) * sizeof(uint32_t));
6327          uint32_t *ve_pack_dest =
6328             &dynamic_ves[1 + (cso->count - ice->state.vs_needs_edge_flag) *
6329                          GENX(VERTEX_ELEMENT_STATE_length)];
6330 
6331          if (ice->state.vs_needs_sgvs_element) {
6332             uint32_t base_ctrl = ice->state.vs_uses_draw_params ?
6333                                  VFCOMP_STORE_SRC : VFCOMP_STORE_0;
6334             iris_pack_state(GENX(VERTEX_ELEMENT_STATE), ve_pack_dest, ve) {
6335                ve.Valid = true;
6336                ve.VertexBufferIndex =
6337                   util_bitcount64(ice->state.bound_vertex_buffers);
6338                ve.SourceElementFormat = ISL_FORMAT_R32G32_UINT;
6339                ve.Component0Control = base_ctrl;
6340                ve.Component1Control = base_ctrl;
6341                ve.Component2Control = VFCOMP_STORE_0;
6342                ve.Component3Control = VFCOMP_STORE_0;
6343             }
6344             ve_pack_dest += GENX(VERTEX_ELEMENT_STATE_length);
6345          }
6346          if (ice->state.vs_uses_derived_draw_params) {
6347             iris_pack_state(GENX(VERTEX_ELEMENT_STATE), ve_pack_dest, ve) {
6348                ve.Valid = true;
6349                ve.VertexBufferIndex =
6350                   util_bitcount64(ice->state.bound_vertex_buffers) +
6351                   ice->state.vs_uses_draw_params;
6352                ve.SourceElementFormat = ISL_FORMAT_R32G32_UINT;
6353                ve.Component0Control = VFCOMP_STORE_SRC;
6354                ve.Component1Control = VFCOMP_STORE_SRC;
6355                ve.Component2Control = VFCOMP_STORE_0;
6356                ve.Component3Control = VFCOMP_STORE_0;
6357             }
6358             ve_pack_dest += GENX(VERTEX_ELEMENT_STATE_length);
6359          }
6360          if (ice->state.vs_needs_edge_flag) {
6361             for (int i = 0; i < GENX(VERTEX_ELEMENT_STATE_length);  i++)
6362                ve_pack_dest[i] = cso->edgeflag_ve[i];
6363          }
6364 
6365          iris_batch_emit(batch, &dynamic_ves, sizeof(uint32_t) *
6366                          (1 + dyn_count * GENX(VERTEX_ELEMENT_STATE_length)));
6367       }
6368 
6369       if (!ice->state.vs_needs_edge_flag) {
6370          iris_batch_emit(batch, cso->vf_instancing, sizeof(uint32_t) *
6371                          entries * GENX(3DSTATE_VF_INSTANCING_length));
6372       } else {
6373          assert(cso->count > 0);
6374          const unsigned edgeflag_index = cso->count - 1;
6375          uint32_t dynamic_vfi[33 * GENX(3DSTATE_VF_INSTANCING_length)];
6376          memcpy(&dynamic_vfi[0], cso->vf_instancing, edgeflag_index *
6377                 GENX(3DSTATE_VF_INSTANCING_length) * sizeof(uint32_t));
6378 
6379          uint32_t *vfi_pack_dest = &dynamic_vfi[0] +
6380             edgeflag_index * GENX(3DSTATE_VF_INSTANCING_length);
6381          iris_pack_command(GENX(3DSTATE_VF_INSTANCING), vfi_pack_dest, vi) {
6382             vi.VertexElementIndex = edgeflag_index +
6383                ice->state.vs_needs_sgvs_element +
6384                ice->state.vs_uses_derived_draw_params;
6385          }
6386          for (int i = 0; i < GENX(3DSTATE_VF_INSTANCING_length);  i++)
6387             vfi_pack_dest[i] |= cso->edgeflag_vfi[i];
6388 
6389          iris_batch_emit(batch, &dynamic_vfi[0], sizeof(uint32_t) *
6390                          entries * GENX(3DSTATE_VF_INSTANCING_length));
6391       }
6392    }
6393 
6394    if (dirty & IRIS_DIRTY_VF_SGVS) {
6395       const struct brw_vs_prog_data *vs_prog_data = (void *)
6396          ice->shaders.prog[MESA_SHADER_VERTEX]->prog_data;
6397       struct iris_vertex_element_state *cso = ice->state.cso_vertex_elements;
6398 
6399       iris_emit_cmd(batch, GENX(3DSTATE_VF_SGVS), sgv) {
6400          if (vs_prog_data->uses_vertexid) {
6401             sgv.VertexIDEnable = true;
6402             sgv.VertexIDComponentNumber = 2;
6403             sgv.VertexIDElementOffset =
6404                cso->count - ice->state.vs_needs_edge_flag;
6405          }
6406 
6407          if (vs_prog_data->uses_instanceid) {
6408             sgv.InstanceIDEnable = true;
6409             sgv.InstanceIDComponentNumber = 3;
6410             sgv.InstanceIDElementOffset =
6411                cso->count - ice->state.vs_needs_edge_flag;
6412          }
6413       }
6414    }
6415 
6416    if (dirty & IRIS_DIRTY_VF) {
6417       iris_emit_cmd(batch, GENX(3DSTATE_VF), vf) {
6418          if (draw->primitive_restart) {
6419             vf.IndexedDrawCutIndexEnable = true;
6420             vf.CutIndex = draw->restart_index;
6421          }
6422       }
6423    }
6424 
6425    if (dirty & IRIS_DIRTY_VF_STATISTICS) {
6426       iris_emit_cmd(batch, GENX(3DSTATE_VF_STATISTICS), vf) {
6427          vf.StatisticsEnable = true;
6428       }
6429    }
6430 
6431 #if GEN_GEN == 8
6432    if (dirty & IRIS_DIRTY_PMA_FIX) {
6433       bool enable = want_pma_fix(ice);
6434       genX(update_pma_fix)(ice, batch, enable);
6435    }
6436 #endif
6437 
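   /* If an earlier operation changed the pixel hashing mode, restore the
    * default (scale of 1) for normal rendering.
    */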
6438    if (ice->state.current_hash_scale != 1)
6439       genX(emit_hashing_mode)(ice, batch, UINT_MAX, UINT_MAX, 1);
6440 
6441 #if GEN_GEN >= 12
6442    genX(invalidate_aux_map_state)(batch);
6443 #endif
6444 }
6445 
6446 static void
6447 iris_upload_render_state(struct iris_context *ice,
6448                          struct iris_batch *batch,
6449                          const struct pipe_draw_info *draw)
6450 {
6451    bool use_predicate = ice->state.predicate == IRIS_PREDICATE_STATE_USE_BIT;
6452 
6453    iris_batch_sync_region_start(batch);
6454 
6455    /* Always pin the binder.  If we're emitting new binding table pointers,
6456     * we need it.  If not, we're probably inheriting old tables via the
6457     * context, and need it anyway.  Since true zero-bindings cases are
6458     * practically non-existent, just pin it and avoid last_res tracking.
6459     */
6460    iris_use_pinned_bo(batch, ice->state.binder.bo, false,
6461                       IRIS_DOMAIN_NONE);
6462 
6463    if (!batch->contains_draw_with_next_seqno) {
6464       iris_restore_render_saved_bos(ice, batch, draw);
6465       batch->contains_draw_with_next_seqno = batch->contains_draw = true;
6466    }
6467 
6468    iris_upload_dirty_render_state(ice, batch, draw);
6469 
6470    if (draw->index_size > 0) {
6471       unsigned offset;
6472 
6473       if (draw->has_user_indices) {
6474          u_upload_data(ice->ctx.stream_uploader, 0,
6475                        draw->count * draw->index_size, 4, draw->index.user,
6476                        &offset, &ice->state.last_res.index_buffer);
6477       } else {
6478          struct iris_resource *res = (void *) draw->index.resource;
6479          res->bind_history |= PIPE_BIND_INDEX_BUFFER;
6480 
6481          pipe_resource_reference(&ice->state.last_res.index_buffer,
6482                                  draw->index.resource);
6483          offset = 0;
6484       }
6485 
6486       struct iris_genx_state *genx = ice->state.genx;
6487       struct iris_bo *bo = iris_resource_bo(ice->state.last_res.index_buffer);
6488 
6489       uint32_t ib_packet[GENX(3DSTATE_INDEX_BUFFER_length)];
6490       iris_pack_command(GENX(3DSTATE_INDEX_BUFFER), ib_packet, ib) {
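         /* index_size is 1, 2, or 4 bytes; shifting right by one maps that
          * to the hardware's 0/1/2 (byte/word/dword) index format encoding.
          */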
6491          ib.IndexFormat = draw->index_size >> 1;
6492          ib.MOCS = iris_mocs(bo, &batch->screen->isl_dev,
6493                              ISL_SURF_USAGE_INDEX_BUFFER_BIT);
6494          ib.BufferSize = bo->size - offset;
6495          ib.BufferStartingAddress = ro_bo(NULL, bo->gtt_offset + offset);
6496       }
6497 
6498       if (memcmp(genx->last_index_buffer, ib_packet, sizeof(ib_packet)) != 0) {
6499          memcpy(genx->last_index_buffer, ib_packet, sizeof(ib_packet));
6500          iris_batch_emit(batch, ib_packet, sizeof(ib_packet));
6501          iris_use_pinned_bo(batch, bo, false, IRIS_DOMAIN_OTHER_READ);
6502       }
6503 
6504 #if GEN_GEN < 11
6505       /* The VF cache key only uses 32-bits, see vertex buffer comment above */
6506       uint16_t high_bits = bo->gtt_offset >> 32ull;
6507       if (high_bits != ice->state.last_index_bo_high_bits) {
6508          iris_emit_pipe_control_flush(batch,
6509                                       "workaround: VF cache 32-bit key [IB]",
6510                                       PIPE_CONTROL_VF_CACHE_INVALIDATE |
6511                                       PIPE_CONTROL_CS_STALL);
6512          ice->state.last_index_bo_high_bits = high_bits;
6513       }
6514 #endif
6515    }
6516 
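/* Command streamer register offsets that supply 3DPRIMITIVE's draw
 * parameters when "Indirect Parameter Enable" is set.  For indirect draws,
 * MI_LOAD_REGISTER_MEM fills them from the indirect buffer below.
 */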
6517 #define _3DPRIM_END_OFFSET          0x2420
6518 #define _3DPRIM_START_VERTEX        0x2430
6519 #define _3DPRIM_VERTEX_COUNT        0x2434
6520 #define _3DPRIM_INSTANCE_COUNT      0x2438
6521 #define _3DPRIM_START_INSTANCE      0x243C
6522 #define _3DPRIM_BASE_VERTEX         0x2440
6523 
6524    if (draw->indirect) {
6525       if (draw->indirect->indirect_draw_count) {
6526          use_predicate = true;
6527 
6528          struct iris_bo *draw_count_bo =
6529             iris_resource_bo(draw->indirect->indirect_draw_count);
6530          unsigned draw_count_offset =
6531             draw->indirect->indirect_draw_count_offset;
6532 
6533          iris_emit_pipe_control_flush(batch,
6534                                       "ensure indirect draw buffer is flushed",
6535                                       PIPE_CONTROL_FLUSH_ENABLE);
6536 
6537          if (ice->state.predicate == IRIS_PREDICATE_STATE_USE_BIT) {
6538             struct gen_mi_builder b;
6539             gen_mi_builder_init(&b, batch);
6540 
6541             /* comparison = draw id < draw count */
6542             struct gen_mi_value comparison =
6543                gen_mi_ult(&b, gen_mi_imm(draw->drawid),
6544                               gen_mi_mem32(ro_bo(draw_count_bo,
6545                                                  draw_count_offset)));
6546 
6547             /* predicate = comparison & conditional rendering predicate */
6548             gen_mi_store(&b, gen_mi_reg32(MI_PREDICATE_RESULT),
6549                              gen_mi_iand(&b, comparison,
6550                                              gen_mi_reg32(CS_GPR(15))));
6551          } else {
6552             uint32_t mi_predicate;
6553 
6554             /* Upload the id of the current primitive to MI_PREDICATE_SRC1. */
6555             iris_load_register_imm64(batch, MI_PREDICATE_SRC1, draw->drawid);
6556             /* Upload the current draw count from the indirect draw count
6557              * buffer to MI_PREDICATE_SRC0.
6558              */
6559             iris_load_register_mem32(batch, MI_PREDICATE_SRC0,
6560                                      draw_count_bo, draw_count_offset);
6561             /* Zero the top 32-bits of MI_PREDICATE_SRC0 */
6562             iris_load_register_imm32(batch, MI_PREDICATE_SRC0 + 4, 0);
6563 
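            /* For the first draw (drawid == 0), LOADINV + SET makes the
             * predicate simply !(drawid == draw_count), i.e. "the draw
             * count is nonzero".
             */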
6564             if (draw->drawid == 0) {
6565                mi_predicate = MI_PREDICATE | MI_PREDICATE_LOADOP_LOADINV |
6566                               MI_PREDICATE_COMBINEOP_SET |
6567                               MI_PREDICATE_COMPAREOP_SRCS_EQUAL;
6568             } else {
6569                /* While draw_index < draw_count the predicate's result will be
6570                 *  (draw_index == draw_count) ^ TRUE = TRUE
6571                 * When draw_index == draw_count the result is
6572                 *  (TRUE) ^ TRUE = FALSE
6573                 * After this all results will be:
6574                 *  (FALSE) ^ FALSE = FALSE
6575                 */
6576                mi_predicate = MI_PREDICATE | MI_PREDICATE_LOADOP_LOAD |
6577                               MI_PREDICATE_COMBINEOP_XOR |
6578                               MI_PREDICATE_COMPAREOP_SRCS_EQUAL;
6579             }
6580             iris_batch_emit(batch, &mi_predicate, sizeof(uint32_t));
6581          }
6582       }
6583       struct iris_bo *bo = iris_resource_bo(draw->indirect->buffer);
6584       assert(bo);
6585 
6586       iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
6587          lrm.RegisterAddress = _3DPRIM_VERTEX_COUNT;
6588          lrm.MemoryAddress = ro_bo(bo, draw->indirect->offset + 0);
6589       }
6590       iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
6591          lrm.RegisterAddress = _3DPRIM_INSTANCE_COUNT;
6592          lrm.MemoryAddress = ro_bo(bo, draw->indirect->offset + 4);
6593       }
6594       iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
6595          lrm.RegisterAddress = _3DPRIM_START_VERTEX;
6596          lrm.MemoryAddress = ro_bo(bo, draw->indirect->offset + 8);
6597       }
6598       if (draw->index_size) {
6599          iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
6600             lrm.RegisterAddress = _3DPRIM_BASE_VERTEX;
6601             lrm.MemoryAddress = ro_bo(bo, draw->indirect->offset + 12);
6602          }
6603          iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
6604             lrm.RegisterAddress = _3DPRIM_START_INSTANCE;
6605             lrm.MemoryAddress = ro_bo(bo, draw->indirect->offset + 16);
6606          }
6607       } else {
6608          iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
6609             lrm.RegisterAddress = _3DPRIM_START_INSTANCE;
6610             lrm.MemoryAddress = ro_bo(bo, draw->indirect->offset + 12);
6611          }
6612          iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_IMM), lri) {
6613             lri.RegisterOffset = _3DPRIM_BASE_VERTEX;
6614             lri.DataDWord = 0;
6615          }
6616       }
6617    } else if (draw->count_from_stream_output) {
6618       struct iris_stream_output_target *so =
6619          (void *) draw->count_from_stream_output;
6620 
6621       /* XXX: Replace with actual cache tracking */
6622       iris_emit_pipe_control_flush(batch,
6623                                    "draw count from stream output stall",
6624                                    PIPE_CONTROL_CS_STALL);
6625 
6626       struct gen_mi_builder b;
6627       gen_mi_builder_init(&b, batch);
6628 
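      /* Compute the vertex count on the GPU: subtract the buffer's starting
       * offset from the current streamout write offset, then divide by the
       * vertex stride.
       */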
6629       struct iris_address addr =
6630          ro_bo(iris_resource_bo(so->offset.res), so->offset.offset);
6631       struct gen_mi_value offset =
6632          gen_mi_iadd_imm(&b, gen_mi_mem32(addr), -so->base.buffer_offset);
6633 
6634       gen_mi_store(&b, gen_mi_reg32(_3DPRIM_VERTEX_COUNT),
6635                        gen_mi_udiv32_imm(&b, offset, so->stride));
6636 
6637       _iris_emit_lri(batch, _3DPRIM_START_VERTEX, 0);
6638       _iris_emit_lri(batch, _3DPRIM_BASE_VERTEX, 0);
6639       _iris_emit_lri(batch, _3DPRIM_START_INSTANCE, 0);
6640       _iris_emit_lri(batch, _3DPRIM_INSTANCE_COUNT, draw->instance_count);
6641    }
6642 
6643    iris_emit_cmd(batch, GENX(3DPRIMITIVE), prim) {
6644       prim.VertexAccessType = draw->index_size > 0 ? RANDOM : SEQUENTIAL;
6645       prim.PredicateEnable = use_predicate;
6646 
6647       if (draw->indirect || draw->count_from_stream_output) {
6648          prim.IndirectParameterEnable = true;
6649       } else {
6650          prim.StartInstanceLocation = draw->start_instance;
6651          prim.InstanceCount = draw->instance_count;
6652          prim.VertexCountPerInstance = draw->count;
6653 
6654          prim.StartVertexLocation = draw->start;
6655 
6656          if (draw->index_size) {
6657             prim.BaseVertexLocation += draw->index_bias;
6658          } else {
6659             prim.StartVertexLocation += draw->index_bias;
6660          }
6661       }
6662    }
6663 
6664    iris_batch_sync_region_end(batch);
6665 }
6666 
6667 static void
6668 iris_load_indirect_location(struct iris_context *ice,
6669                             struct iris_batch *batch,
6670                             const struct pipe_grid_info *grid)
6671 {
6672 #define GPGPU_DISPATCHDIMX 0x2500
6673 #define GPGPU_DISPATCHDIMY 0x2504
6674 #define GPGPU_DISPATCHDIMZ 0x2508
6675 
6676    assert(grid->indirect);
6677 
6678    struct iris_state_ref *grid_size = &ice->state.grid_size;
6679    struct iris_bo *bo = iris_resource_bo(grid_size->res);
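   /* Load the X/Y/Z thread group counts from the grid size buffer into the
    * dispatch dimension registers read by GPGPU_WALKER.
    */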
6680    iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
6681       lrm.RegisterAddress = GPGPU_DISPATCHDIMX;
6682       lrm.MemoryAddress = ro_bo(bo, grid_size->offset + 0);
6683    }
6684    iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
6685       lrm.RegisterAddress = GPGPU_DISPATCHDIMY;
6686       lrm.MemoryAddress = ro_bo(bo, grid_size->offset + 4);
6687    }
6688    iris_emit_cmd(batch, GENX(MI_LOAD_REGISTER_MEM), lrm) {
6689       lrm.RegisterAddress = GPGPU_DISPATCHDIMZ;
6690       lrm.MemoryAddress = ro_bo(bo, grid_size->offset + 8);
6691    }
6692 }
6693 
6694 static void
6695 iris_upload_gpgpu_walker(struct iris_context *ice,
6696                          struct iris_batch *batch,
6697                          const struct pipe_grid_info *grid)
6698 {
6699    const uint64_t stage_dirty = ice->state.stage_dirty;
6700    struct iris_screen *screen = batch->screen;
6701    const struct gen_device_info *devinfo = &screen->devinfo;
6702    struct iris_binder *binder = &ice->state.binder;
6703    struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_COMPUTE];
6704    struct iris_uncompiled_shader *ish =
6705       ice->shaders.uncompiled[MESA_SHADER_COMPUTE];
6706    struct iris_compiled_shader *shader =
6707       ice->shaders.prog[MESA_SHADER_COMPUTE];
6708    struct brw_stage_prog_data *prog_data = shader->prog_data;
6709    struct brw_cs_prog_data *cs_prog_data = (void *) prog_data;
6710    const uint32_t group_size = grid->block[0] * grid->block[1] * grid->block[2];
6711    const unsigned simd_size =
6712       brw_cs_simd_size_for_group_size(devinfo, cs_prog_data, group_size);
6713    const unsigned threads = DIV_ROUND_UP(group_size, simd_size);
6714 
6715 
6716    if (stage_dirty & IRIS_STAGE_DIRTY_CS) {
6717       /* The MEDIA_VFE_STATE documentation for Gen8+ says:
6718        *
6719        *   "A stalling PIPE_CONTROL is required before MEDIA_VFE_STATE unless
6720        *    the only bits that are changed are scoreboard related: Scoreboard
6721        *    Enable, Scoreboard Type, Scoreboard Mask, Scoreboard Delta.  For
6722        *    these scoreboard related states, a MEDIA_STATE_FLUSH is
6723        *    sufficient."
6724        */
6725       iris_emit_pipe_control_flush(batch,
6726                                    "workaround: stall before MEDIA_VFE_STATE",
6727                                    PIPE_CONTROL_CS_STALL);
6728 
6729       iris_emit_cmd(batch, GENX(MEDIA_VFE_STATE), vfe) {
6730          if (prog_data->total_scratch) {
6731             struct iris_bo *bo =
6732                iris_get_scratch_space(ice, prog_data->total_scratch,
6733                                       MESA_SHADER_COMPUTE);
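            /* The field encodes a power-of-two size: 0 = 1KB, 1 = 2KB, and
             * so on.  total_scratch is a power-of-two multiple of 1KB, so
             * ffs() - 11 gives log2(bytes) - 10.
             */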
6734             vfe.PerThreadScratchSpace = ffs(prog_data->total_scratch) - 11;
6735             vfe.ScratchSpaceBasePointer = rw_bo(bo, 0, IRIS_DOMAIN_NONE);
6736          }
6737 
6738          vfe.MaximumNumberofThreads =
6739             devinfo->max_cs_threads * screen->subslice_total - 1;
6740 #if GEN_GEN < 11
6741          vfe.ResetGatewayTimer =
6742             Resettingrelativetimerandlatchingtheglobaltimestamp;
6743 #endif
6744 #if GEN_GEN == 8
6745          vfe.BypassGatewayControl = true;
6746 #endif
6747          vfe.NumberofURBEntries = 2;
6748          vfe.URBEntryAllocationSize = 2;
6749 
6750          vfe.CURBEAllocationSize =
6751             ALIGN(cs_prog_data->push.per_thread.regs * threads +
6752                   cs_prog_data->push.cross_thread.regs, 2);
6753       }
6754    }
6755 
6756    /* TODO: Combine subgroup-id with cbuf0 so we can push regular uniforms */
6757    if ((stage_dirty & IRIS_STAGE_DIRTY_CS) ||
6758        cs_prog_data->local_size[0] == 0 /* Variable local group size */) {
6759       uint32_t curbe_data_offset = 0;
6760       assert(cs_prog_data->push.cross_thread.dwords == 0 &&
6761              cs_prog_data->push.per_thread.dwords == 1 &&
6762              cs_prog_data->base.param[0] == BRW_PARAM_BUILTIN_SUBGROUP_ID);
6763       const unsigned push_const_size =
6764          brw_cs_push_const_total_size(cs_prog_data, threads);
6765       uint32_t *curbe_data_map =
6766          stream_state(batch, ice->state.dynamic_uploader,
6767                       &ice->state.last_res.cs_thread_ids,
6768                       ALIGN(push_const_size, 64), 64,
6769                       &curbe_data_offset);
6770       assert(curbe_data_map);
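      /* Fill the buffer with a recognizable poison byte first, so any
       * push-constant slots the packing helper below leaves untouched are
       * easy to spot while debugging.
       */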
6771       memset(curbe_data_map, 0x5a, ALIGN(push_const_size, 64));
6772       iris_fill_cs_push_const_buffer(cs_prog_data, threads, curbe_data_map);
6773 
6774       iris_emit_cmd(batch, GENX(MEDIA_CURBE_LOAD), curbe) {
6775          curbe.CURBETotalDataLength = ALIGN(push_const_size, 64);
6776          curbe.CURBEDataStartAddress = curbe_data_offset;
6777       }
6778    }
6779 
6780    for (unsigned i = 0; i < IRIS_MAX_GLOBAL_BINDINGS; i++) {
6781       struct pipe_resource *res = ice->state.global_bindings[i];
6782       if (!res)
6783          continue;
6784 
6785       iris_use_pinned_bo(batch, iris_resource_bo(res),
6786                          true, IRIS_DOMAIN_NONE);
6787    }
6788 
6789    if (stage_dirty & (IRIS_STAGE_DIRTY_SAMPLER_STATES_CS |
6790                       IRIS_STAGE_DIRTY_BINDINGS_CS |
6791                       IRIS_STAGE_DIRTY_CONSTANTS_CS |
6792                       IRIS_STAGE_DIRTY_CS)) {
6793       uint32_t desc[GENX(INTERFACE_DESCRIPTOR_DATA_length)];
6794 
6795       iris_pack_state(GENX(INTERFACE_DESCRIPTOR_DATA), desc, idd) {
6796          idd.SharedLocalMemorySize =
6797             encode_slm_size(GEN_GEN, ish->kernel_shared_size);
6798          idd.KernelStartPointer =
6799             KSP(shader) + brw_cs_prog_data_prog_offset(cs_prog_data, simd_size);
6800          idd.SamplerStatePointer = shs->sampler_table.offset;
6801          idd.BindingTablePointer = binder->bt_offset[MESA_SHADER_COMPUTE];
6802          idd.NumberofThreadsinGPGPUThreadGroup = threads;
6803       }
6804 
6805       for (int i = 0; i < GENX(INTERFACE_DESCRIPTOR_DATA_length); i++)
6806          desc[i] |= ((uint32_t *) shader->derived_data)[i];
6807 
6808       iris_emit_cmd(batch, GENX(MEDIA_INTERFACE_DESCRIPTOR_LOAD), load) {
6809          load.InterfaceDescriptorTotalLength =
6810             GENX(INTERFACE_DESCRIPTOR_DATA_length) * sizeof(uint32_t);
6811          load.InterfaceDescriptorDataStartAddress =
6812             emit_state(batch, ice->state.dynamic_uploader,
6813                        &ice->state.last_res.cs_desc, desc, sizeof(desc), 64);
6814       }
6815    }
6816 
6817    if (grid->indirect)
6818       iris_load_indirect_location(ice, batch, grid);
6819 
6820    const uint32_t right_mask = brw_cs_right_mask(group_size, simd_size);
6821 
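   /* GPGPU_WALKER encodes SIMD8/16/32 as 0/1/2, hence the divide by 16
    * (8/16 truncates to 0).  right_mask disables the channels beyond the
    * group size in the last, possibly partial, thread of each group.
    */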
6822    iris_emit_cmd(batch, GENX(GPGPU_WALKER), ggw) {
6823       ggw.IndirectParameterEnable    = grid->indirect != NULL;
6824       ggw.SIMDSize                   = simd_size / 16;
6825       ggw.ThreadDepthCounterMaximum  = 0;
6826       ggw.ThreadHeightCounterMaximum = 0;
6827       ggw.ThreadWidthCounterMaximum  = threads - 1;
6828       ggw.ThreadGroupIDXDimension    = grid->grid[0];
6829       ggw.ThreadGroupIDYDimension    = grid->grid[1];
6830       ggw.ThreadGroupIDZDimension    = grid->grid[2];
6831       ggw.RightExecutionMask         = right_mask;
6832       ggw.BottomExecutionMask        = 0xffffffff;
6833    }
6834 
6835    iris_emit_cmd(batch, GENX(MEDIA_STATE_FLUSH), msf);
6836 }
6837 
6838 static void
6839 iris_upload_compute_state(struct iris_context *ice,
6840                           struct iris_batch *batch,
6841                           const struct pipe_grid_info *grid)
6842 {
6843    const uint64_t stage_dirty = ice->state.stage_dirty;
6844    struct iris_shader_state *shs = &ice->state.shaders[MESA_SHADER_COMPUTE];
6845    struct iris_compiled_shader *shader =
6846       ice->shaders.prog[MESA_SHADER_COMPUTE];
6847 
6848    iris_batch_sync_region_start(batch);
6849 
6850    /* Always pin the binder.  If we're emitting new binding table pointers,
6851     * we need it.  If not, we're probably inheriting old tables via the
6852     * context, and need it anyway.  Since true zero-bindings cases are
6853     * practically non-existent, just pin it and avoid last_res tracking.
6854     */
6855    iris_use_pinned_bo(batch, ice->state.binder.bo, false, IRIS_DOMAIN_NONE);
6856 
6857    if (((stage_dirty & IRIS_STAGE_DIRTY_CONSTANTS_CS) &&
6858         shs->sysvals_need_upload) ||
6859        shader->kernel_input_size > 0)
6860       upload_sysvals(ice, MESA_SHADER_COMPUTE, grid);
6861 
6862    if (stage_dirty & IRIS_STAGE_DIRTY_BINDINGS_CS)
6863       iris_populate_binding_table(ice, batch, MESA_SHADER_COMPUTE, false);
6864 
6865    if (stage_dirty & IRIS_STAGE_DIRTY_SAMPLER_STATES_CS)
6866       iris_upload_sampler_states(ice, MESA_SHADER_COMPUTE);
6867 
6868    iris_use_optional_res(batch, shs->sampler_table.res, false,
6869                          IRIS_DOMAIN_NONE);
6870    iris_use_pinned_bo(batch, iris_resource_bo(shader->assembly.res), false,
6871                       IRIS_DOMAIN_NONE);
6872 
6873    if (ice->state.need_border_colors)
6874       iris_use_pinned_bo(batch, ice->state.border_color_pool.bo, false,
6875                          IRIS_DOMAIN_NONE);
6876 
6877 #if GEN_GEN >= 12
6878    genX(invalidate_aux_map_state)(batch);
6879 #endif
6880 
6881    iris_upload_gpgpu_walker(ice, batch, grid);
6882 
6883    if (!batch->contains_draw_with_next_seqno) {
6884       iris_restore_compute_saved_bos(ice, batch, grid);
6885       batch->contains_draw_with_next_seqno = batch->contains_draw = true;
6886    }
6887 
6888    iris_batch_sync_region_end(batch);
6889 }
6890 
6891 /**
6892  * State module teardown.
6893  */
6894 static void
6895 iris_destroy_state(struct iris_context *ice)
6896 {
6897    struct iris_genx_state *genx = ice->state.genx;
6898 
6899    pipe_resource_reference(&ice->draw.draw_params.res, NULL);
6900    pipe_resource_reference(&ice->draw.derived_draw_params.res, NULL);
6901 
6902    /* Loop over all VBOs, including ones for draw parameters */
6903    for (unsigned i = 0; i < ARRAY_SIZE(genx->vertex_buffers); i++) {
6904       pipe_resource_reference(&genx->vertex_buffers[i].resource, NULL);
6905    }
6906 
6907    free(ice->state.genx);
6908 
6909    for (int i = 0; i < 4; i++) {
6910       pipe_so_target_reference(&ice->state.so_target[i], NULL);
6911    }
6912 
6913    for (unsigned i = 0; i < ice->state.framebuffer.nr_cbufs; i++) {
6914       pipe_surface_reference(&ice->state.framebuffer.cbufs[i], NULL);
6915    }
6916    pipe_surface_reference(&ice->state.framebuffer.zsbuf, NULL);
6917 
6918    for (int stage = 0; stage < MESA_SHADER_STAGES; stage++) {
6919       struct iris_shader_state *shs = &ice->state.shaders[stage];
6920       pipe_resource_reference(&shs->sampler_table.res, NULL);
6921       for (int i = 0; i < PIPE_MAX_CONSTANT_BUFFERS; i++) {
6922          pipe_resource_reference(&shs->constbuf[i].buffer, NULL);
6923          pipe_resource_reference(&shs->constbuf_surf_state[i].res, NULL);
6924       }
6925       for (int i = 0; i < PIPE_MAX_SHADER_IMAGES; i++) {
6926          pipe_resource_reference(&shs->image[i].base.resource, NULL);
6927          pipe_resource_reference(&shs->image[i].surface_state.ref.res, NULL);
6928          free(shs->image[i].surface_state.cpu);
6929       }
6930       for (int i = 0; i < PIPE_MAX_SHADER_BUFFERS; i++) {
6931          pipe_resource_reference(&shs->ssbo[i].buffer, NULL);
6932          pipe_resource_reference(&shs->ssbo_surf_state[i].res, NULL);
6933       }
6934       for (int i = 0; i < IRIS_MAX_TEXTURE_SAMPLERS; i++) {
6935          pipe_sampler_view_reference((struct pipe_sampler_view **)
6936                                      &shs->textures[i], NULL);
6937       }
6938    }
6939 
6940    pipe_resource_reference(&ice->state.grid_size.res, NULL);
6941    pipe_resource_reference(&ice->state.grid_surf_state.res, NULL);
6942 
6943    pipe_resource_reference(&ice->state.null_fb.res, NULL);
6944    pipe_resource_reference(&ice->state.unbound_tex.res, NULL);
6945 
6946    pipe_resource_reference(&ice->state.last_res.cc_vp, NULL);
6947    pipe_resource_reference(&ice->state.last_res.sf_cl_vp, NULL);
6948    pipe_resource_reference(&ice->state.last_res.color_calc, NULL);
6949    pipe_resource_reference(&ice->state.last_res.scissor, NULL);
6950    pipe_resource_reference(&ice->state.last_res.blend, NULL);
6951    pipe_resource_reference(&ice->state.last_res.index_buffer, NULL);
6952    pipe_resource_reference(&ice->state.last_res.cs_thread_ids, NULL);
6953    pipe_resource_reference(&ice->state.last_res.cs_desc, NULL);
6954 }
6955 
6956 /* ------------------------------------------------------------------- */
6957 
6958 static void
6959 iris_rebind_buffer(struct iris_context *ice,
6960                    struct iris_resource *res)
6961 {
6962    struct pipe_context *ctx = &ice->ctx;
6963    struct iris_genx_state *genx = ice->state.genx;
6964 
6965    assert(res->base.target == PIPE_BUFFER);
6966 
6967    /* Buffers can't be framebuffer attachments, nor display related,
6968     * and we don't have upstream Clover support.
6969     */
6970    assert(!(res->bind_history & (PIPE_BIND_DEPTH_STENCIL |
6971                                  PIPE_BIND_RENDER_TARGET |
6972                                  PIPE_BIND_BLENDABLE |
6973                                  PIPE_BIND_DISPLAY_TARGET |
6974                                  PIPE_BIND_CURSOR |
6975                                  PIPE_BIND_COMPUTE_RESOURCE |
6976                                  PIPE_BIND_GLOBAL)));
6977 
6978    if (res->bind_history & PIPE_BIND_VERTEX_BUFFER) {
6979       uint64_t bound_vbs = ice->state.bound_vertex_buffers;
6980       while (bound_vbs) {
6981          const int i = u_bit_scan64(&bound_vbs);
6982          struct iris_vertex_buffer_state *state = &genx->vertex_buffers[i];
6983 
6984          /* Update the CPU struct */
6985          STATIC_ASSERT(GENX(VERTEX_BUFFER_STATE_BufferStartingAddress_start) == 32);
6986          STATIC_ASSERT(GENX(VERTEX_BUFFER_STATE_BufferStartingAddress_bits) == 64);
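         /* The asserts above guarantee BufferStartingAddress occupies
          * bits [95:32] of the packed state, so we can patch the address
          * with a single 64-bit store at DWord 1.
          */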
6987          uint64_t *addr = (uint64_t *) &state->state[1];
6988          struct iris_bo *bo = iris_resource_bo(state->resource);
6989 
6990          if (*addr != bo->gtt_offset + state->offset) {
6991             *addr = bo->gtt_offset + state->offset;
6992             ice->state.dirty |= IRIS_DIRTY_VERTEX_BUFFERS;
6993          }
6994       }
6995    }
6996 
6997    /* We don't need to handle PIPE_BIND_INDEX_BUFFER here: we re-emit
6998     * the 3DSTATE_INDEX_BUFFER packet whenever the address changes.
6999     *
7000     * There is also no need to handle these:
7001     * - PIPE_BIND_COMMAND_ARGS_BUFFER (emitted for every indirect draw)
7002     * - PIPE_BIND_QUERY_BUFFER (no persistent state references)
7003     */
7004 
7005    if (res->bind_history & PIPE_BIND_STREAM_OUTPUT) {
7006       /* XXX: be careful about resetting vs appending... */
7007       assert(false);
7008    }
7009 
7010    for (int s = MESA_SHADER_VERTEX; s < MESA_SHADER_STAGES; s++) {
7011       struct iris_shader_state *shs = &ice->state.shaders[s];
7012       enum pipe_shader_type p_stage = stage_to_pipe(s);
7013 
7014       if (!(res->bind_stages & (1 << s)))
7015          continue;
7016 
7017       if (res->bind_history & PIPE_BIND_CONSTANT_BUFFER) {
7018          /* Skip constant buffer 0, it's for regular uniforms, not UBOs */
7019          uint32_t bound_cbufs = shs->bound_cbufs & ~1u;
7020          while (bound_cbufs) {
7021             const int i = u_bit_scan(&bound_cbufs);
7022             struct pipe_shader_buffer *cbuf = &shs->constbuf[i];
7023             struct iris_state_ref *surf_state = &shs->constbuf_surf_state[i];
7024 
7025             if (res->bo == iris_resource_bo(cbuf->buffer)) {
7026                pipe_resource_reference(&surf_state->res, NULL);
7027                ice->state.stage_dirty |= IRIS_STAGE_DIRTY_CONSTANTS_VS << s;
7028             }
7029          }
7030       }
7031 
7032       if (res->bind_history & PIPE_BIND_SHADER_BUFFER) {
7033          uint32_t bound_ssbos = shs->bound_ssbos;
7034          while (bound_ssbos) {
7035             const int i = u_bit_scan(&bound_ssbos);
7036             struct pipe_shader_buffer *ssbo = &shs->ssbo[i];
7037 
7038             if (res->bo == iris_resource_bo(ssbo->buffer)) {
7039                struct pipe_shader_buffer buf = {
7040                   .buffer = &res->base,
7041                   .buffer_offset = ssbo->buffer_offset,
7042                   .buffer_size = ssbo->buffer_size,
7043                };
7044                iris_set_shader_buffers(ctx, p_stage, i, 1, &buf,
7045                                        (shs->writable_ssbos >> i) & 1);
7046             }
7047          }
7048       }
7049 
7050       if (res->bind_history & PIPE_BIND_SAMPLER_VIEW) {
7051          uint32_t bound_sampler_views = shs->bound_sampler_views;
7052          while (bound_sampler_views) {
7053             const int i = u_bit_scan(&bound_sampler_views);
7054             struct iris_sampler_view *isv = shs->textures[i];
7055             struct iris_bo *bo = isv->res->bo;
7056 
7057             if (update_surface_state_addrs(ice->state.surface_uploader,
7058                                            &isv->surface_state, bo)) {
7059                ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_VS << s;
7060             }
7061          }
7062       }
7063 
7064       if (res->bind_history & PIPE_BIND_SHADER_IMAGE) {
7065          uint32_t bound_image_views = shs->bound_image_views;
7066          while (bound_image_views) {
7067             const int i = u_bit_scan(&bound_image_views);
7068             struct iris_image_view *iv = &shs->image[i];
7069             struct iris_bo *bo = iris_resource_bo(iv->base.resource);
7070 
7071             if (update_surface_state_addrs(ice->state.surface_uploader,
7072                                            &iv->surface_state, bo)) {
7073                ice->state.stage_dirty |= IRIS_STAGE_DIRTY_BINDINGS_VS << s;
7074             }
7075          }
7076       }
7077    }
7078 }
7079 
7080 /* ------------------------------------------------------------------- */
7081 
7082 /**
7083  * Introduce a batch synchronization boundary, and update its cache coherency
7084  * status to reflect the execution of a PIPE_CONTROL command with the
7085  * specified flags.
7086  */
7087 static void
7088 batch_mark_sync_for_pipe_control(struct iris_batch *batch, uint32_t flags)
7089 {
7090    iris_batch_sync_boundary(batch);
7091 
7092    if ((flags & PIPE_CONTROL_CS_STALL)) {
7093       if ((flags & PIPE_CONTROL_RENDER_TARGET_FLUSH))
7094          iris_batch_mark_flush_sync(batch, IRIS_DOMAIN_RENDER_WRITE);
7095 
7096       if ((flags & PIPE_CONTROL_DEPTH_CACHE_FLUSH))
7097          iris_batch_mark_flush_sync(batch, IRIS_DOMAIN_DEPTH_WRITE);
7098 
7099       if ((flags & PIPE_CONTROL_FLUSH_ENABLE))
7100          iris_batch_mark_flush_sync(batch, IRIS_DOMAIN_OTHER_WRITE);
7101 
7102       if ((flags & (PIPE_CONTROL_CACHE_FLUSH_BITS |
7103                     PIPE_CONTROL_STALL_AT_SCOREBOARD)))
7104          iris_batch_mark_flush_sync(batch, IRIS_DOMAIN_OTHER_READ);
7105    }
7106 
7107    if ((flags & PIPE_CONTROL_RENDER_TARGET_FLUSH))
7108       iris_batch_mark_invalidate_sync(batch, IRIS_DOMAIN_RENDER_WRITE);
7109 
7110    if ((flags & PIPE_CONTROL_DEPTH_CACHE_FLUSH))
7111       iris_batch_mark_invalidate_sync(batch, IRIS_DOMAIN_DEPTH_WRITE);
7112 
7113    if ((flags & PIPE_CONTROL_FLUSH_ENABLE))
7114       iris_batch_mark_invalidate_sync(batch, IRIS_DOMAIN_OTHER_WRITE);
7115 
7116    if ((flags & PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE) &&
7117        (flags & PIPE_CONTROL_CONST_CACHE_INVALIDATE))
7118       iris_batch_mark_invalidate_sync(batch, IRIS_DOMAIN_OTHER_READ);
7119 }
7120 
7121 static unsigned
7122 flags_to_post_sync_op(uint32_t flags)
7123 {
7124    if (flags & PIPE_CONTROL_WRITE_IMMEDIATE)
7125       return WriteImmediateData;
7126 
7127    if (flags & PIPE_CONTROL_WRITE_DEPTH_COUNT)
7128       return WritePSDepthCount;
7129 
7130    if (flags & PIPE_CONTROL_WRITE_TIMESTAMP)
7131       return WriteTimestamp;
7132 
7133    return 0;
7134 }
7135 
7136 /**
7137  * Do the given flags have a Post Sync or LRI Post Sync operation?
7138  */
7139 static enum pipe_control_flags
7140 get_post_sync_flags(enum pipe_control_flags flags)
7141 {
7142    flags &= PIPE_CONTROL_WRITE_IMMEDIATE |
7143             PIPE_CONTROL_WRITE_DEPTH_COUNT |
7144             PIPE_CONTROL_WRITE_TIMESTAMP |
7145             PIPE_CONTROL_LRI_POST_SYNC_OP;
7146 
7147    /* Only one "Post Sync Op" is allowed, and it's mutually exclusive with
7148     * "LRI Post Sync Operation".  So more than one bit set would be illegal.
7149     */
7150    assert(util_bitcount(flags) <= 1);
7151 
7152    return flags;
7153 }
7154 
7155 #define IS_COMPUTE_PIPELINE(batch) (batch->name == IRIS_BATCH_COMPUTE)
7156 
7157 /**
7158  * Emit a series of PIPE_CONTROL commands, taking into account any
7159  * workarounds necessary to actually accomplish the caller's request.
7160  *
7161  * Unless otherwise noted, spec quotations in this function come from:
7162  *
7163  * Synchronization of the 3D Pipeline > PIPE_CONTROL Command > Programming
7164  * Restrictions for PIPE_CONTROL.
7165  *
7166  * You should not use this function directly.  Use the helpers in
7167  * iris_pipe_control.c instead, which may split the pipe control further.
7168  */
7169 static void
7170 iris_emit_raw_pipe_control(struct iris_batch *batch,
7171                            const char *reason,
7172                            uint32_t flags,
7173                            struct iris_bo *bo,
7174                            uint32_t offset,
7175                            uint64_t imm)
7176 {
7177    UNUSED const struct gen_device_info *devinfo = &batch->screen->devinfo;
7178    enum pipe_control_flags post_sync_flags = get_post_sync_flags(flags);
7179    enum pipe_control_flags non_lri_post_sync_flags =
7180       post_sync_flags & ~PIPE_CONTROL_LRI_POST_SYNC_OP;
7181 
7182    /* Recursive PIPE_CONTROL workarounds --------------------------------
7183     * (http://knowyourmeme.com/memes/xzibit-yo-dawg)
7184     *
7185     * We do these first because we want to look at the original operation,
7186     * rather than any workarounds we set.
7187     */
7188    if (GEN_GEN == 9 && (flags & PIPE_CONTROL_VF_CACHE_INVALIDATE)) {
7189       /* The PIPE_CONTROL "VF Cache Invalidation Enable" bit description
7190        * lists several workarounds:
7191        *
7192        *    "Project: SKL, KBL, BXT
7193        *
7194        *     If the VF Cache Invalidation Enable is set to a 1 in a
7195        *     PIPE_CONTROL, a separate Null PIPE_CONTROL, all bitfields
7196        *     sets to 0, with the VF Cache Invalidation Enable set to 0
7197        *     needs to be sent prior to the PIPE_CONTROL with VF Cache
7198        *     Invalidation Enable set to a 1."
7199        */
7200       iris_emit_raw_pipe_control(batch,
7201                                  "workaround: recursive VF cache invalidate",
7202                                  0, NULL, 0, 0);
7203    }
7204 
7205    /* GEN:BUG:1409226450, Wait for EU to be idle before pipe control which
7206     * invalidates the instruction cache
7207     */
7208    if (GEN_GEN == 12 && (flags & PIPE_CONTROL_INSTRUCTION_INVALIDATE)) {
7209       iris_emit_raw_pipe_control(batch,
7210                                  "workaround: CS stall before instruction "
7211                                  "cache invalidate",
7212                                  PIPE_CONTROL_CS_STALL |
7213                                  PIPE_CONTROL_STALL_AT_SCOREBOARD, bo, offset,
7214                                  imm);
7215    }
7216 
7217    if ((GEN_GEN == 9 || (GEN_GEN == 12 && devinfo->revision == 0 /* A0*/)) &&
7218         IS_COMPUTE_PIPELINE(batch) && post_sync_flags) {
7219       /* Project: SKL / Argument: LRI Post Sync Operation [23]
7220        *
7221        * "PIPECONTROL command with “Command Streamer Stall Enable” must be
7222        *  programmed prior to programming a PIPECONTROL command with "LRI
7223        *  Post Sync Operation" in GPGPU mode of operation (i.e when
7224        *  PIPELINE_SELECT command is set to GPGPU mode of operation)."
7225        *
7226        * The same text exists a few rows below for Post Sync Op.
7227        *
7228        * On Gen12 this is GEN:BUG:1607156449.
7229        */
7230       iris_emit_raw_pipe_control(batch,
7231                                  "workaround: CS stall before gpgpu post-sync",
7232                                  PIPE_CONTROL_CS_STALL, bo, offset, imm);
7233    }
7234 
7235    /* "Flush Types" workarounds ---------------------------------------------
7236     * We do these now because they may add post-sync operations or CS stalls.
7237     */
7238 
7239    if (GEN_GEN < 11 && flags & PIPE_CONTROL_VF_CACHE_INVALIDATE) {
7240       /* Project: BDW, SKL+ (stopping at CNL) / Argument: VF Invalidate
7241        *
7242        * "'Post Sync Operation' must be enabled to 'Write Immediate Data' or
7243        *  'Write PS Depth Count' or 'Write Timestamp'."
7244        */
7245       if (!bo) {
7246          flags |= PIPE_CONTROL_WRITE_IMMEDIATE;
7247          post_sync_flags |= PIPE_CONTROL_WRITE_IMMEDIATE;
7248          non_lri_post_sync_flags |= PIPE_CONTROL_WRITE_IMMEDIATE;
7249          bo = batch->screen->workaround_address.bo;
7250          offset = batch->screen->workaround_address.offset;
7251       }
7252    }
7253 
7254    if (flags & PIPE_CONTROL_DEPTH_STALL) {
7255       /* From the PIPE_CONTROL instruction table, bit 13 (Depth Stall Enable):
7256        *
7257        *    "This bit must be DISABLED for operations other than writing
7258        *     PS_DEPTH_COUNT."
7259        *
7260        * This seems like nonsense.  An Ivybridge workaround requires us to
7261        * emit a PIPE_CONTROL with a depth stall and write immediate post-sync
7262        * operation.  Gen8+ requires us to emit depth stalls and depth cache
7263        * flushes together.  So, it's hard to imagine this means anything other
7264        * than "we originally intended this to be used for PS_DEPTH_COUNT".
7265        *
7266        * We ignore the supposed restriction and do nothing.
7267        */
7268    }
7269 
7270    if (flags & (PIPE_CONTROL_RENDER_TARGET_FLUSH |
7271                 PIPE_CONTROL_STALL_AT_SCOREBOARD)) {
7272       /* From the PIPE_CONTROL instruction table, bit 12 and bit 1:
7273        *
7274        *    "This bit must be DISABLED for End-of-pipe (Read) fences,
7275        *     PS_DEPTH_COUNT or TIMESTAMP queries."
7276        *
7277        * TODO: Implement end-of-pipe checking.
7278        */
7279       assert(!(post_sync_flags & (PIPE_CONTROL_WRITE_DEPTH_COUNT |
7280                                   PIPE_CONTROL_WRITE_TIMESTAMP)));
7281    }
7282 
7283    if (GEN_GEN < 11 && (flags & PIPE_CONTROL_STALL_AT_SCOREBOARD)) {
7284       /* From the PIPE_CONTROL instruction table, bit 1:
7285        *
7286        *    "This bit is ignored if Depth Stall Enable is set.
7287        *     Further, the render cache is not flushed even if Write Cache
7288        *     Flush Enable bit is set."
7289        *
7290        * We assert that the caller doesn't do this combination, to try and
7291        * prevent mistakes.  It shouldn't hurt the GPU, though.
7292        *
7293        * We skip this check on Gen11+ as the "Stall at Pixel Scoreboard"
7294        * and "Render Target Flush" combo is explicitly required for BTI
7295        * update workarounds.
7296        */
7297       assert(!(flags & (PIPE_CONTROL_DEPTH_STALL |
7298                         PIPE_CONTROL_RENDER_TARGET_FLUSH)));
7299    }
7300 
7301    /* PIPE_CONTROL page workarounds ------------------------------------- */
7302 
7303    if (GEN_GEN <= 8 && (flags & PIPE_CONTROL_STATE_CACHE_INVALIDATE)) {
7304       /* From the PIPE_CONTROL page itself:
7305        *
7306        *    "IVB, HSW, BDW
7307        *     Restriction: Pipe_control with CS-stall bit set must be issued
7308        *     before a pipe-control command that has the State Cache
7309        *     Invalidate bit set."
7310        */
7311       flags |= PIPE_CONTROL_CS_STALL;
7312    }
7313 
7314    if (flags & PIPE_CONTROL_FLUSH_LLC) {
7315       /* From the PIPE_CONTROL instruction table, bit 26 (Flush LLC):
7316        *
7317        *    "Project: ALL
7318        *     SW must always program Post-Sync Operation to "Write Immediate
7319        *     Data" when Flush LLC is set."
7320        *
7321        * For now, we just require the caller to do it.
7322        */
7323       assert(flags & PIPE_CONTROL_WRITE_IMMEDIATE);
7324    }
7325 
7326    /* "Post-Sync Operation" workarounds -------------------------------- */
7327 
7328    /* Project: All / Argument: Global Snapshot Count Reset [19]
7329     *
7330     * "This bit must not be exercised on any product.
7331     *  Requires stall bit ([20] of DW1) set."
7332     *
7333     * We don't use this, so we just assert that it isn't used.  The
7334     * PIPE_CONTROL instruction page indicates that they intended this
7335     * as a debug feature and don't think it is useful in production,
7336     * but it may actually be usable, should we ever want to.
7337     */
7338    assert((flags & PIPE_CONTROL_GLOBAL_SNAPSHOT_COUNT_RESET) == 0);
7339 
7340    if (flags & (PIPE_CONTROL_MEDIA_STATE_CLEAR |
7341                 PIPE_CONTROL_INDIRECT_STATE_POINTERS_DISABLE)) {
7342       /* Project: All / Arguments:
7343        *
7344        * - Generic Media State Clear [16]
7345        * - Indirect State Pointers Disable [16]
7346        *
7347        *    "Requires stall bit ([20] of DW1) set."
7348        *
7349        * Also, the PIPE_CONTROL instruction table, bit 16 (Generic Media
7350        * State Clear) says:
7351        *
7352        *    "PIPECONTROL command with “Command Streamer Stall Enable” must be
7353        *     programmed prior to programming a PIPECONTROL command with "Media
7354        *     State Clear" set in GPGPU mode of operation"
7355        *
7356        * This is a subset of the earlier rule, so there's nothing to do.
7357        */
7358       flags |= PIPE_CONTROL_CS_STALL;
7359    }
7360 
7361    if (flags & PIPE_CONTROL_STORE_DATA_INDEX) {
7362       /* Project: All / Argument: Store Data Index
7363        *
7364        * "Post-Sync Operation ([15:14] of DW1) must be set to something other
7365        *  than '0'."
7366        *
7367        * For now, we just assert that the caller does this.  We might want to
7368        * automatically add a write to the workaround BO...
7369        */
7370       assert(non_lri_post_sync_flags != 0);
7371    }
7372 
7373    if (flags & PIPE_CONTROL_SYNC_GFDT) {
7374       /* Project: All / Argument: Sync GFDT
7375        *
7376        * "Post-Sync Operation ([15:14] of DW1) must be set to something other
7377        *  than '0' or 0x2520[13] must be set."
7378        *
7379        * For now, we just assert that the caller does this.
7380        */
7381       assert(non_lri_post_sync_flags != 0);
7382    }
7383 
7384    if (flags & PIPE_CONTROL_TLB_INVALIDATE) {
7385       /* Project: IVB+ / Argument: TLB inv
7386        *
7387        *    "Requires stall bit ([20] of DW1) set."
7388        *
7389        * Also, from the PIPE_CONTROL instruction table:
7390        *
7391        *    "Project: SKL+
7392        *     Post Sync Operation or CS stall must be set to ensure a TLB
7393        *     invalidation occurs.  Otherwise no cycle will occur to the TLB
7394        *     cache to invalidate."
7395        *
7396        * This is not a subset of the earlier rule, so there's nothing to do.
7397        */
7398       flags |= PIPE_CONTROL_CS_STALL;
7399    }
7400 
7401    if (GEN_GEN >= 12 && ((flags & PIPE_CONTROL_RENDER_TARGET_FLUSH) ||
7402                          (flags & PIPE_CONTROL_DEPTH_CACHE_FLUSH))) {
7403       /* From the PIPE_CONTROL instruction table, bit 28 (Tile Cache Flush
7404        * Enable):
7405        *
7406        *    Unified Cache (Tile Cache Disabled):
7407        *
7408        *    When the Color and Depth (Z) streams are enabled to be cached in
7409        *    the DC space of L2, Software must use "Render Target Cache Flush
7410        *    Enable" and "Depth Cache Flush Enable" along with "Tile Cache
7411        *    Flush" for getting the color and depth (Z) write data to be
7412        *    globally observable.  In this mode of operation it is not required
7413        *    to set "CS Stall" upon setting "Tile Cache Flush" bit.
7414        */
7415       flags |= PIPE_CONTROL_TILE_CACHE_FLUSH;
7416    }
7417 
7418    if (GEN_GEN == 9 && devinfo->gt == 4) {
7419       /* TODO: The big Skylake GT4 post sync op workaround */
7420    }
7421 
7422    /* "GPGPU specific workarounds" (both post-sync and flush) ------------ */
7423 
7424    if (IS_COMPUTE_PIPELINE(batch)) {
7425       if (GEN_GEN >= 9 && (flags & PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE)) {
7426          /* Project: SKL+ / Argument: Tex Invalidate
7427           * "Requires stall bit ([20] of DW) set for all GPGPU Workloads."
7428           */
7429          flags |= PIPE_CONTROL_CS_STALL;
7430       }
7431 
7432       if (GEN_GEN == 8 && (post_sync_flags ||
7433                            (flags & (PIPE_CONTROL_NOTIFY_ENABLE |
7434                                      PIPE_CONTROL_DEPTH_STALL |
7435                                      PIPE_CONTROL_RENDER_TARGET_FLUSH |
7436                                      PIPE_CONTROL_DEPTH_CACHE_FLUSH |
7437                                      PIPE_CONTROL_DATA_CACHE_FLUSH)))) {
7438          /* Project: BDW / Arguments:
7439           *
7440           * - LRI Post Sync Operation   [23]
7441           * - Post Sync Op              [15:14]
7442           * - Notify En                 [8]
7443           * - Depth Stall               [13]
7444           * - Render Target Cache Flush [12]
7445           * - Depth Cache Flush         [0]
7446           * - DC Flush Enable           [5]
7447           *
7448           *    "Requires stall bit ([20] of DW) set for all GPGPU and Media
7449           *     Workloads."
7450           */
7451          flags |= PIPE_CONTROL_CS_STALL;
7452 
7453          /* Also, from the PIPE_CONTROL instruction table, bit 20:
7454           *
7455           *    "Project: BDW
7456           *     This bit must be always set when PIPE_CONTROL command is
7457           *     programmed by GPGPU and MEDIA workloads, except for the cases
7458           *     when only Read Only Cache Invalidation bits are set (State
7459           *     Cache Invalidation Enable, Instruction cache Invalidation
7460           *     Enable, Texture Cache Invalidation Enable, Constant Cache
7461           *     Invalidation Enable). This is to WA FFDOP CG issue, this WA
7462           *     need not implemented when FF_DOP_CG is disable via "Fixed
7463           *     Function DOP Clock Gate Disable" bit in RC_PSMI_CTRL register."
7464           *
7465           * It sounds like we could avoid CS stalls in some cases, but we
7466           * don't currently bother.  This list isn't exactly the list above,
7467           * either...
7468           */
7469       }
7470    }
7471 
7472    /* "Stall" workarounds ----------------------------------------------
7473     * These have to come after the earlier ones because we may have added
7474     * some additional CS stalls above.
7475     */
7476 
7477    if (GEN_GEN < 9 && (flags & PIPE_CONTROL_CS_STALL)) {
7478       /* Project: PRE-SKL, VLV, CHV
7479        *
7480        * "[All Stepping][All SKUs]:
7481        *
7482        *  One of the following must also be set:
7483        *
7484        *  - Render Target Cache Flush Enable ([12] of DW1)
7485        *  - Depth Cache Flush Enable ([0] of DW1)
7486        *  - Stall at Pixel Scoreboard ([1] of DW1)
7487        *  - Depth Stall ([13] of DW1)
7488        *  - Post-Sync Operation ([13] of DW1)
7489        *  - DC Flush Enable ([5] of DW1)"
7490        *
7491        * If we don't already have one of those bits set, we choose to add
7492        * "Stall at Pixel Scoreboard".  Some of the other bits require a
7493        * CS stall as a workaround (see above), which would send us into
7494        * an infinite recursion of PIPE_CONTROLs.  "Stall at Pixel Scoreboard"
7495        * appears to be safe, so we choose that.
7496        */
7497       const uint32_t wa_bits = PIPE_CONTROL_RENDER_TARGET_FLUSH |
7498                                PIPE_CONTROL_DEPTH_CACHE_FLUSH |
7499                                PIPE_CONTROL_WRITE_IMMEDIATE |
7500                                PIPE_CONTROL_WRITE_DEPTH_COUNT |
7501                                PIPE_CONTROL_WRITE_TIMESTAMP |
7502                                PIPE_CONTROL_STALL_AT_SCOREBOARD |
7503                                PIPE_CONTROL_DEPTH_STALL |
7504                                PIPE_CONTROL_DATA_CACHE_FLUSH;
7505       if (!(flags & wa_bits))
7506          flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
7507    }
7508 
7509    if (GEN_GEN >= 12 && (flags & PIPE_CONTROL_DEPTH_CACHE_FLUSH)) {
7510       /* GEN:BUG:1409600907:
7511        *
7512        * "PIPE_CONTROL with Depth Stall Enable bit must be set
7513        * with any PIPE_CONTROL with Depth Flush Enable bit set."
7514        */
7515       flags |= PIPE_CONTROL_DEPTH_STALL;
7516    }
7517 
7518    /* Emit --------------------------------------------------------------- */
7519 
7520    if (INTEL_DEBUG & DEBUG_PIPE_CONTROL) {
7521       fprintf(stderr,
7522               "  PC [%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%"PRIx64"]: %s\n",
7523               (flags & PIPE_CONTROL_FLUSH_ENABLE) ? "PipeCon " : "",
7524               (flags & PIPE_CONTROL_CS_STALL) ? "CS " : "",
7525               (flags & PIPE_CONTROL_STALL_AT_SCOREBOARD) ? "Scoreboard " : "",
7526               (flags & PIPE_CONTROL_VF_CACHE_INVALIDATE) ? "VF " : "",
7527               (flags & PIPE_CONTROL_RENDER_TARGET_FLUSH) ? "RT " : "",
7528               (flags & PIPE_CONTROL_CONST_CACHE_INVALIDATE) ? "Const " : "",
7529               (flags & PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE) ? "TC " : "",
7530               (flags & PIPE_CONTROL_DATA_CACHE_FLUSH) ? "DC " : "",
7531               (flags & PIPE_CONTROL_DEPTH_CACHE_FLUSH) ? "ZFlush " : "",
7532               (flags & PIPE_CONTROL_DEPTH_STALL) ? "ZStall " : "",
7533               (flags & PIPE_CONTROL_STATE_CACHE_INVALIDATE) ? "State " : "",
7534               (flags & PIPE_CONTROL_TLB_INVALIDATE) ? "TLB " : "",
7535               (flags & PIPE_CONTROL_INSTRUCTION_INVALIDATE) ? "Inst " : "",
7536               (flags & PIPE_CONTROL_MEDIA_STATE_CLEAR) ? "MediaClear " : "",
7537               (flags & PIPE_CONTROL_NOTIFY_ENABLE) ? "Notify " : "",
7538               (flags & PIPE_CONTROL_GLOBAL_SNAPSHOT_COUNT_RESET) ?
7539                  "SnapRes" : "",
7540               (flags & PIPE_CONTROL_INDIRECT_STATE_POINTERS_DISABLE) ?
7541                   "ISPDis" : "",
7542               (flags & PIPE_CONTROL_WRITE_IMMEDIATE) ? "WriteImm " : "",
7543               (flags & PIPE_CONTROL_WRITE_DEPTH_COUNT) ? "WriteZCount " : "",
7544               (flags & PIPE_CONTROL_WRITE_TIMESTAMP) ? "WriteTimestamp " : "",
7545               (flags & PIPE_CONTROL_FLUSH_HDC) ? "HDC " : "",
7546               imm, reason);
7547    }
7548 
7549    batch_mark_sync_for_pipe_control(batch, flags);
7550    iris_batch_sync_region_start(batch);
7551 
7552    iris_emit_cmd(batch, GENX(PIPE_CONTROL), pc) {
7553 #if GEN_GEN >= 12
7554       pc.TileCacheFlushEnable = flags & PIPE_CONTROL_TILE_CACHE_FLUSH;
7555 #endif
7556 #if GEN_GEN >= 11
7557       pc.HDCPipelineFlushEnable = flags & PIPE_CONTROL_FLUSH_HDC;
7558 #endif
7559       pc.LRIPostSyncOperation = NoLRIOperation;
7560       pc.PipeControlFlushEnable = flags & PIPE_CONTROL_FLUSH_ENABLE;
7561       pc.DCFlushEnable = flags & PIPE_CONTROL_DATA_CACHE_FLUSH;
7562       pc.StoreDataIndex = 0;
7563       pc.CommandStreamerStallEnable = flags & PIPE_CONTROL_CS_STALL;
7564       pc.GlobalSnapshotCountReset =
7565          flags & PIPE_CONTROL_GLOBAL_SNAPSHOT_COUNT_RESET;
7566       pc.TLBInvalidate = flags & PIPE_CONTROL_TLB_INVALIDATE;
7567       pc.GenericMediaStateClear = flags & PIPE_CONTROL_MEDIA_STATE_CLEAR;
7568       pc.StallAtPixelScoreboard = flags & PIPE_CONTROL_STALL_AT_SCOREBOARD;
7569       pc.RenderTargetCacheFlushEnable =
7570          flags & PIPE_CONTROL_RENDER_TARGET_FLUSH;
7571       pc.DepthCacheFlushEnable = flags & PIPE_CONTROL_DEPTH_CACHE_FLUSH;
7572       pc.StateCacheInvalidationEnable =
7573          flags & PIPE_CONTROL_STATE_CACHE_INVALIDATE;
7574       pc.VFCacheInvalidationEnable = flags & PIPE_CONTROL_VF_CACHE_INVALIDATE;
7575       pc.ConstantCacheInvalidationEnable =
7576          flags & PIPE_CONTROL_CONST_CACHE_INVALIDATE;
7577       pc.PostSyncOperation = flags_to_post_sync_op(flags);
7578       pc.DepthStallEnable = flags & PIPE_CONTROL_DEPTH_STALL;
7579       pc.InstructionCacheInvalidateEnable =
7580          flags & PIPE_CONTROL_INSTRUCTION_INVALIDATE;
7581       pc.NotifyEnable = flags & PIPE_CONTROL_NOTIFY_ENABLE;
7582       pc.IndirectStatePointersDisable =
7583          flags & PIPE_CONTROL_INDIRECT_STATE_POINTERS_DISABLE;
7584       pc.TextureCacheInvalidationEnable =
7585          flags & PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
7586       pc.Address = rw_bo(bo, offset, IRIS_DOMAIN_OTHER_WRITE);
7587       pc.ImmediateData = imm;
7588    }
7589 
7590    iris_batch_sync_region_end(batch);
7591 }
7592 
7593 #if GEN_GEN == 9
7594 /**
7595  * Preemption on Gen9 has to be enabled or disabled in various cases.
7596  *
7597  * See these workarounds for preemption:
7598  *  - WaDisableMidObjectPreemptionForGSLineStripAdj
7599  *  - WaDisableMidObjectPreemptionForTrifanOrPolygon
7600  *  - WaDisableMidObjectPreemptionForLineLoop
7601  *  - WA#0798
7602  *
7603  * We don't put this in the vtable because it's only used on Gen9.
7604  */
7605 void
7606 gen9_toggle_preemption(struct iris_context *ice,
7607                        struct iris_batch *batch,
7608                        const struct pipe_draw_info *draw)
7609 {
7610    struct iris_genx_state *genx = ice->state.genx;
7611    bool object_preemption = true;
7612 
7613    /* WaDisableMidObjectPreemptionForGSLineStripAdj
7614     *
7615     *    "WA: Disable mid-draw preemption when draw-call is a linestrip_adj
7616     *     and GS is enabled."
7617     */
7618    if (draw->mode == PIPE_PRIM_LINE_STRIP_ADJACENCY &&
7619        ice->shaders.prog[MESA_SHADER_GEOMETRY])
7620       object_preemption = false;
7621 
7622    /* WaDisableMidObjectPreemptionForTrifanOrPolygon
7623     *
7624     *    "TriFan miscompare in Execlist Preemption test. Cut index that is
7625     *     on a previous context. End the previous, then resume another context
7626     *     with a tri-fan or polygon, and the vertex count is corrupted. If we
7627     *     preempt again we will cause corruption.
7628     *
7629     *     WA: Disable mid-draw preemption when draw-call has a tri-fan."
7630     */
7631    if (draw->mode == PIPE_PRIM_TRIANGLE_FAN)
7632       object_preemption = false;
7633 
7634    /* WaDisableMidObjectPreemptionForLineLoop
7635     *
7636     *    "VF Stats Counters Missing a vertex when preemption enabled.
7637     *
7638     *     WA: Disable mid-draw preemption when the draw uses a lineloop
7639     *     topology."
7640     */
7641    if (draw->mode == PIPE_PRIM_LINE_LOOP)
7642       object_preemption = false;
7643 
7644    /* WA#0798
7645     *
7646     *    "VF is corrupting GAFS data when preempted on an instance boundary
7647     *     and replayed with instancing enabled.
7648     *
7649     *     WA: Disable preemption when using instancing."
7650     */
7651    if (draw->instance_count > 1)
7652       object_preemption = false;
7653 
7654    if (genx->object_preemption != object_preemption) {
7655       iris_enable_obj_preemption(batch, object_preemption);
7656       genx->object_preemption = object_preemption;
7657    }
7658 }
7659 #endif
7660 
7661 static void
7662 iris_lost_genx_state(struct iris_context *ice, struct iris_batch *batch)
7663 {
7664    struct iris_genx_state *genx = ice->state.genx;
7665 
7666    memset(genx->last_index_buffer, 0, sizeof(genx->last_index_buffer));
7667 }
7668 
7669 static void
7670 iris_emit_mi_report_perf_count(struct iris_batch *batch,
7671                                struct iris_bo *bo,
7672                                uint32_t offset_in_bytes,
7673                                uint32_t report_id)
7674 {
7675    iris_batch_sync_region_start(batch);
7676    iris_emit_cmd(batch, GENX(MI_REPORT_PERF_COUNT), mi_rpc) {
7677       mi_rpc.MemoryAddress = rw_bo(bo, offset_in_bytes,
7678                                    IRIS_DOMAIN_OTHER_WRITE);
7679       mi_rpc.ReportID = report_id;
7680    }
7681    iris_batch_sync_region_end(batch);
7682 }
7683 
7684 /**
7685  * Update the pixel hashing modes that determine the balancing of PS threads
7686  * across subslices and slices.
7687  *
7688  * \param width Width bound of the rendering area (already scaled down if \p
7689  *              scale is greater than 1).
7690  * \param height Height bound of the rendering area (already scaled down if \p
7691  *               scale is greater than 1).
7692  * \param scale The number of framebuffer samples that could potentially be
7693  *              affected by an individual channel of the PS thread.  This is
7694  *              typically one for single-sampled rendering, but for operations
7695  *              like CCS resolves and fast clears a single PS invocation may
7696  *              update a huge number of pixels, in which case a finer
7697  *              balancing is desirable in order to maximally utilize the
7698  *              bandwidth available.  UINT_MAX can be used as shorthand for
7699  *              "finest hashing mode available".
7700  */
7701 void
7702 genX(emit_hashing_mode)(struct iris_context *ice, struct iris_batch *batch,
7703                         unsigned width, unsigned height, unsigned scale)
7704 {
7705 #if GEN_GEN == 9
7706    const struct gen_device_info *devinfo = &batch->screen->devinfo;
7707    const unsigned slice_hashing[] = {
7708       /* Because all Gen9 platforms with more than one slice require
7709        * three-way subslice hashing, a single "normal" 16x16 slice hashing
7710        * block is guaranteed to suffer from substantial imbalance, with one
7711        * subslice receiving twice as much work as the other two in the
7712        * slice.
7713        *
7714        * The performance impact of that would be particularly severe when
7715        * three-way hashing is also in use for slice balancing (which is the
7716        * case for all Gen9 GT4 platforms), because one of the slices
7717        * receives one every three 16x16 blocks in either direction, which
7718        * is roughly the periodicity of the underlying subslice imbalance
7719        * pattern ("roughly" because in reality the hardware's
7720        * implementation of three-way hashing doesn't do exact modulo 3
7721        * arithmetic, which somewhat decreases the magnitude of this effect
7722        * in practice).  This leads to a systematic subslice imbalance
7723        * within that slice regardless of the size of the primitive.  The
7724        * 32x32 hashing mode guarantees that the subslice imbalance within a
7725        * single slice hashing block is minimal, largely eliminating this
7726        * effect.
7727        */
7728       _32x32,
7729       /* Finest slice hashing mode available. */
7730       NORMAL
7731    };
7732    const unsigned subslice_hashing[] = {
7733       /* 16x16 would provide a slight cache locality benefit especially
7734        * visible in the sampler L1 cache efficiency of low-bandwidth
7735        * non-LLC platforms, but it comes at the cost of greater subslice
7736        * imbalance for primitives of dimensions approximately intermediate
7737        * between 16x4 and 16x16.
7738        */
7739       _16x4,
7740       /* Finest subslice hashing mode available. */
7741       _8x4
7742    };
7743    /* Dimensions of the smallest hashing block of a given hashing mode.  If
7744     * the rendering area is smaller than this there can't possibly be any
7745     * benefit from switching to this mode, so we optimize out the
7746     * transition.
7747     */
7748    const unsigned min_size[][2] = {
7749       { 16, 4 },
7750       { 8, 4 }
7751    };
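   /* Pick the coarser modes for ordinary rendering (scale == 1), and the
    * finest available modes when a single PS invocation covers many samples
    * (fast clears, CCS resolves).
    */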
7752    const unsigned idx = scale > 1;
7753 
7754    if (width > min_size[idx][0] || height > min_size[idx][1]) {
7755       uint32_t gt_mode;
7756 
7757       iris_pack_state(GENX(GT_MODE), &gt_mode, reg) {
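         /* GT_MODE is a masked register; the *Mask fields select which bits
          * of the write take effect, so set them alongside the values being
          * programmed.
          */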
7758          reg.SliceHashing = (devinfo->num_slices > 1 ? slice_hashing[idx] : 0);
7759          reg.SliceHashingMask = (devinfo->num_slices > 1 ? -1 : 0);
7760          reg.SubsliceHashing = subslice_hashing[idx];
7761          reg.SubsliceHashingMask = -1;
7762       };
7763 
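      /* The workaround requires a full CS stall before reprogramming
       * GT_MODE via LRI, so that outstanding work completes with the old
       * hashing configuration.
       */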
7764       iris_emit_raw_pipe_control(batch,
7765                                  "workaround: CS stall before GT_MODE LRI",
7766                                  PIPE_CONTROL_STALL_AT_SCOREBOARD |
7767                                  PIPE_CONTROL_CS_STALL,
7768                                  NULL, 0, 0);
7769 
7770       iris_emit_lri(batch, GT_MODE, gt_mode);
7771 
7772       ice->state.current_hash_scale = scale;
7773    }
7774 #endif
7775 }
7776 
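/**
 * Gallium hook that lets the frontend turn the context into a no-op:
 * subsequent work in the affected batches is skipped.  Whenever a batch is
 * reset into (or out of) no-op mode, flag all state dirty so it gets fully
 * re-emitted once real rendering resumes.
 */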
7777 static void
7778 iris_set_frontend_noop(struct pipe_context *ctx, bool enable)
7779 {
7780    struct iris_context *ice = (struct iris_context *) ctx;
7781 
7782    if (iris_batch_prepare_noop(&ice->batches[IRIS_BATCH_RENDER], enable)) {
7783       ice->state.dirty |= IRIS_ALL_DIRTY_FOR_RENDER;
7784       ice->state.stage_dirty |= IRIS_ALL_STAGE_DIRTY_FOR_RENDER;
7785    }
7786 
7787    if (iris_batch_prepare_noop(&ice->batches[IRIS_BATCH_COMPUTE], enable)) {
7788       ice->state.dirty |= IRIS_ALL_DIRTY_FOR_COMPUTE;
7789       ice->state.stage_dirty |= IRIS_ALL_STAGE_DIRTY_FOR_COMPUTE;
7790    }
7791 }
7792 
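/**
 * Install this generation's implementations in the screen's function table
 * so that generation-independent driver code can reach them through
 * screen->vtbl.
 */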
7793 void
7794 genX(init_screen_state)(struct iris_screen *screen)
7795 {
7796    screen->vtbl.destroy_state = iris_destroy_state;
7797    screen->vtbl.init_render_context = iris_init_render_context;
7798    screen->vtbl.init_compute_context = iris_init_compute_context;
7799    screen->vtbl.upload_render_state = iris_upload_render_state;
7800    screen->vtbl.update_surface_base_address = iris_update_surface_base_address;
7801    screen->vtbl.upload_compute_state = iris_upload_compute_state;
7802    screen->vtbl.emit_raw_pipe_control = iris_emit_raw_pipe_control;
7803    screen->vtbl.emit_mi_report_perf_count = iris_emit_mi_report_perf_count;
7804    screen->vtbl.rebind_buffer = iris_rebind_buffer;
7805    screen->vtbl.load_register_reg32 = iris_load_register_reg32;
7806    screen->vtbl.load_register_reg64 = iris_load_register_reg64;
7807    screen->vtbl.load_register_imm32 = iris_load_register_imm32;
7808    screen->vtbl.load_register_imm64 = iris_load_register_imm64;
7809    screen->vtbl.load_register_mem32 = iris_load_register_mem32;
7810    screen->vtbl.load_register_mem64 = iris_load_register_mem64;
7811    screen->vtbl.store_register_mem32 = iris_store_register_mem32;
7812    screen->vtbl.store_register_mem64 = iris_store_register_mem64;
7813    screen->vtbl.store_data_imm32 = iris_store_data_imm32;
7814    screen->vtbl.store_data_imm64 = iris_store_data_imm64;
7815    screen->vtbl.copy_mem_mem = iris_copy_mem_mem;
7816    screen->vtbl.derived_program_state_size = iris_derived_program_state_size;
7817    screen->vtbl.store_derived_program_state = iris_store_derived_program_state;
7818    screen->vtbl.create_so_decl_list = iris_create_so_decl_list;
7819    screen->vtbl.populate_vs_key = iris_populate_vs_key;
7820    screen->vtbl.populate_tcs_key = iris_populate_tcs_key;
7821    screen->vtbl.populate_tes_key = iris_populate_tes_key;
7822    screen->vtbl.populate_gs_key = iris_populate_gs_key;
7823    screen->vtbl.populate_fs_key = iris_populate_fs_key;
7824    screen->vtbl.populate_cs_key = iris_populate_cs_key;
7825    screen->vtbl.lost_genx_state = iris_lost_genx_state;
7826 }
7827 
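/**
 * Hook up the pipe_context entry points and set up initial context state
 * for this generation.
 */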
7828 void
7829 genX(init_state)(struct iris_context *ice)
7830 {
7831    struct pipe_context *ctx = &ice->ctx;
7832    struct iris_screen *screen = (struct iris_screen *)ctx->screen;
7833 
7834    ctx->create_blend_state = iris_create_blend_state;
7835    ctx->create_depth_stencil_alpha_state = iris_create_zsa_state;
7836    ctx->create_rasterizer_state = iris_create_rasterizer_state;
7837    ctx->create_sampler_state = iris_create_sampler_state;
7838    ctx->create_sampler_view = iris_create_sampler_view;
7839    ctx->create_surface = iris_create_surface;
7840    ctx->create_vertex_elements_state = iris_create_vertex_elements;
7841    ctx->bind_blend_state = iris_bind_blend_state;
7842    ctx->bind_depth_stencil_alpha_state = iris_bind_zsa_state;
7843    ctx->bind_sampler_states = iris_bind_sampler_states;
7844    ctx->bind_rasterizer_state = iris_bind_rasterizer_state;
7845    ctx->bind_vertex_elements_state = iris_bind_vertex_elements_state;
7846    ctx->delete_blend_state = iris_delete_state;
7847    ctx->delete_depth_stencil_alpha_state = iris_delete_state;
7848    ctx->delete_rasterizer_state = iris_delete_state;
7849    ctx->delete_sampler_state = iris_delete_state;
7850    ctx->delete_vertex_elements_state = iris_delete_state;
7851    ctx->set_blend_color = iris_set_blend_color;
7852    ctx->set_clip_state = iris_set_clip_state;
7853    ctx->set_constant_buffer = iris_set_constant_buffer;
7854    ctx->set_shader_buffers = iris_set_shader_buffers;
7855    ctx->set_shader_images = iris_set_shader_images;
7856    ctx->set_sampler_views = iris_set_sampler_views;
7857    ctx->set_compute_resources = iris_set_compute_resources;
7858    ctx->set_global_binding = iris_set_global_binding;
7859    ctx->set_tess_state = iris_set_tess_state;
7860    ctx->set_framebuffer_state = iris_set_framebuffer_state;
7861    ctx->set_polygon_stipple = iris_set_polygon_stipple;
7862    ctx->set_sample_mask = iris_set_sample_mask;
7863    ctx->set_scissor_states = iris_set_scissor_states;
7864    ctx->set_stencil_ref = iris_set_stencil_ref;
7865    ctx->set_vertex_buffers = iris_set_vertex_buffers;
7866    ctx->set_viewport_states = iris_set_viewport_states;
7867    ctx->sampler_view_destroy = iris_sampler_view_destroy;
7868    ctx->surface_destroy = iris_surface_destroy;
7869    ctx->draw_vbo = iris_draw_vbo;
7870    ctx->launch_grid = iris_launch_grid;
7871    ctx->create_stream_output_target = iris_create_stream_output_target;
7872    ctx->stream_output_target_destroy = iris_stream_output_target_destroy;
7873    ctx->set_stream_output_targets = iris_set_stream_output_targets;
7874    ctx->set_frontend_noop = iris_set_frontend_noop;
7875 
7876    ice->state.dirty = ~0ull;
7877    ice->state.stage_dirty = ~0ull;
7878 
7879    ice->state.statistics_counters_enabled = true;
7880 
7881    ice->state.sample_mask = 0xffff;
7882    ice->state.num_viewports = 1;
7883    ice->state.prim_mode = PIPE_PRIM_MAX;
7884    ice->state.genx = calloc(1, sizeof(struct iris_genx_state));
7885    ice->draw.derived_params.drawid = -1;
7886 
7887    /* Make a 1x1x1 null surface for unbound textures */
7888    void *null_surf_map =
7889       upload_state(ice->state.surface_uploader, &ice->state.unbound_tex,
7890                    4 * GENX(RENDER_SURFACE_STATE_length), 64);
7891    isl_null_fill_state(&screen->isl_dev, null_surf_map, isl_extent3d(1, 1, 1));
7892    ice->state.unbound_tex.offset +=
7893       iris_bo_offset_from_base_address(iris_resource_bo(ice->state.unbound_tex.res));
7894 
7895    /* Default all scissor rectangles to be empty regions (min > max, so
7895     * they contain no pixels).
7895     */
7896    for (int i = 0; i < IRIS_MAX_VIEWPORTS; i++) {
7897       ice->state.scissors[i] = (struct pipe_scissor_state) {
7898          .minx = 1, .maxx = 0, .miny = 1, .maxy = 0,
7899       };
7900    }
7901 }
7902