/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

#ifndef SI_STATE_H
#define SI_STATE_H

#include "pipebuffer/pb_slab.h"
#include "si_pm4.h"
#include "util/u_blitter.h"

#define SI_NUM_GRAPHICS_SHADERS (PIPE_SHADER_TESS_EVAL + 1)
#define SI_NUM_SHADERS          (PIPE_SHADER_COMPUTE + 1)

#define SI_NUM_VERTEX_BUFFERS SI_MAX_ATTRIBS
#define SI_NUM_SAMPLERS       32 /* OpenGL texture units per shader */
#define SI_NUM_CONST_BUFFERS  16
#define SI_NUM_IMAGES         16
#define SI_NUM_IMAGE_SLOTS    (SI_NUM_IMAGES * 2) /* the second half are FMASK slots */
#define SI_NUM_SHADER_BUFFERS 32

struct si_screen;
struct si_shader;
struct si_shader_ctx_state;
struct si_shader_selector;
struct si_texture;
struct si_qbo_state;

struct si_state_blend {
   struct si_pm4_state pm4;
   uint32_t cb_target_mask;
   /* Set 0xf or 0x0 (4 bits) per render target if the following is
    * true. ANDed with spi_shader_col_format.
    */
   unsigned cb_target_enabled_4bit;
   unsigned blend_enable_4bit;
   unsigned need_src_alpha_4bit;
   unsigned commutative_4bit;
   unsigned dcc_msaa_corruption_4bit;
   bool alpha_to_coverage : 1;
   bool alpha_to_one : 1;
   bool dual_src_blend : 1;
   bool logicop_enable : 1;
};
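
/* Illustrative sketch (not part of the driver): how a 4-bit-per-render-target
 * mask such as cb_target_enabled_4bit or blend_enable_4bit is typically
 * built. "num_cbufs" and "enabled" are hypothetical inputs; each render
 * target contributes a full nibble (0xf) or nothing:
 *
 *    unsigned mask_4bit = 0;
 *    for (unsigned i = 0; i < num_cbufs; i++) {
 *       if (enabled[i])
 *          mask_4bit |= 0xfu << (i * 4);
 *    }
 *
 * Masks built this way can be ANDed with other 4-bit-per-RT values such as
 * spi_shader_col_format, as the cb_target_enabled_4bit comment describes.
 */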

struct si_state_rasterizer {
   struct si_pm4_state pm4;
   /* poly offset states for 16-bit, 24-bit, and 32-bit zbuffers */
   struct si_pm4_state *pm4_poly_offset;
   unsigned pa_sc_line_stipple;
   unsigned pa_cl_clip_cntl;
   float line_width;
   float max_point_size;
   unsigned sprite_coord_enable : 8;
   unsigned clip_plane_enable : 8;
   unsigned half_pixel_center : 1;
   unsigned flatshade : 1;
   unsigned flatshade_first : 1;
   unsigned two_side : 1;
   unsigned multisample_enable : 1;
   unsigned force_persample_interp : 1;
   unsigned line_stipple_enable : 1;
   unsigned poly_stipple_enable : 1;
   unsigned line_smooth : 1;
   unsigned poly_smooth : 1;
   unsigned uses_poly_offset : 1;
   unsigned clamp_fragment_color : 1;
   unsigned clamp_vertex_color : 1;
   unsigned rasterizer_discard : 1;
   unsigned scissor_enable : 1;
   unsigned clip_halfz : 1;
   unsigned cull_front : 1;
   unsigned cull_back : 1;
   unsigned depth_clamp_any : 1;
   unsigned provoking_vertex_first : 1;
   unsigned polygon_mode_enabled : 1;
   unsigned polygon_mode_is_lines : 1;
   unsigned polygon_mode_is_points : 1;
};

struct si_dsa_stencil_ref_part {
   uint8_t valuemask[2];
   uint8_t writemask[2];
};

struct si_dsa_order_invariance {
   /** Whether the final result in Z/S buffers is guaranteed to be
    * invariant under changes to the order in which fragments arrive. */
   bool zs : 1;

   /** Whether the set of fragments that pass the combined Z/S test is
    * guaranteed to be invariant under changes to the order in which
    * fragments arrive. */
   bool pass_set : 1;

   /** Whether the last fragment that passes the combined Z/S test at each
    * sample is guaranteed to be invariant under changes to the order in
    * which fragments arrive. */
   bool pass_last : 1;
};

struct si_state_dsa {
   struct si_pm4_state pm4;
   struct si_dsa_stencil_ref_part stencil_ref;

   /* 0 = without stencil buffer, 1 = when both Z and S buffers are present */
   struct si_dsa_order_invariance order_invariance[2];

   ubyte alpha_func : 3;
   bool depth_enabled : 1;
   bool depth_write_enabled : 1;
   bool stencil_enabled : 1;
   bool stencil_write_enabled : 1;
   bool db_can_write : 1;
};

struct si_stencil_ref {
   struct pipe_stencil_ref state;
   struct si_dsa_stencil_ref_part dsa_part;
};

struct si_vertex_elements {
   struct si_resource *instance_divisor_factor_buffer;
   uint32_t rsrc_word3[SI_MAX_ATTRIBS];
   uint16_t src_offset[SI_MAX_ATTRIBS];
   uint8_t fix_fetch[SI_MAX_ATTRIBS];
   uint8_t format_size[SI_MAX_ATTRIBS];
   uint8_t vertex_buffer_index[SI_MAX_ATTRIBS];

   /* Bitmask of elements that always need a fixup to be applied. */
   uint16_t fix_fetch_always;

   /* Bitmask of elements whose fetch should always be opencoded. */
   uint16_t fix_fetch_opencode;

   /* Bitmask of elements which need to be opencoded if the vertex buffer
    * is unaligned. */
   uint16_t fix_fetch_unaligned;

   /* For elements in fix_fetch_unaligned: whether the effective
    * element load size as seen by the hardware is a dword (as opposed
    * to a short).
    */
   uint16_t hw_load_is_dword;

   /* Bitmask of vertex buffers requiring alignment check */
   uint16_t vb_alignment_check_mask;

   uint8_t count;
   bool uses_instance_divisors;

   uint16_t first_vb_use_mask;
   /* Vertex buffer descriptor list size aligned for optimal prefetch. */
   uint16_t vb_desc_list_alloc_size;
   uint16_t instance_divisor_is_one;     /* bitmask of inputs */
   uint16_t instance_divisor_is_fetched; /* bitmask of inputs */
};

union si_state {
   struct si_state_named {
      struct si_state_blend *blend;
      struct si_state_rasterizer *rasterizer;
      struct si_state_dsa *dsa;
      struct si_pm4_state *poly_offset;
      struct si_pm4_state *ls;
      struct si_pm4_state *hs;
      struct si_pm4_state *es;
      struct si_pm4_state *gs;
      struct si_pm4_state *vgt_shader_config;
      struct si_pm4_state *vs;
      struct si_pm4_state *ps;
   } named;
   struct si_pm4_state *array[sizeof(struct si_state_named) / sizeof(struct si_pm4_state *)];
};

#define SI_STATE_IDX(name) (offsetof(union si_state, named.name) / sizeof(struct si_pm4_state *))
#define SI_STATE_BIT(name) (1 << SI_STATE_IDX(name))
#define SI_NUM_STATES      (sizeof(union si_state) / sizeof(struct si_pm4_state *))
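
/* Example (illustrative only): si_state_named is a struct of equally-sized
 * pointers aliased by "array", so a bound state can be reached either by name
 * or by its computed index:
 *
 *    union si_state queued;
 *    ...
 *    struct si_pm4_state *pm4 = queued.array[SI_STATE_IDX(rasterizer)];
 *    assert(pm4 == (struct si_pm4_state *)queued.named.rasterizer);
 *
 * SI_STATE_BIT(name) is the matching bit for dirty-state bitmasks, which is
 * how si_states_that_always_roll_context() below composes its mask.
 */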

static inline unsigned si_states_that_always_roll_context(void)
{
   return (SI_STATE_BIT(blend) | SI_STATE_BIT(rasterizer) | SI_STATE_BIT(dsa) |
           SI_STATE_BIT(poly_offset) | SI_STATE_BIT(vgt_shader_config));
}

union si_state_atoms {
   struct si_atoms_s {
      /* The order matters. */
      struct si_atom render_cond;
      struct si_atom streamout_begin;
      struct si_atom streamout_enable; /* must be after streamout_begin */
      struct si_atom framebuffer;
      struct si_atom msaa_sample_locs;
      struct si_atom db_render_state;
      struct si_atom dpbb_state;
      struct si_atom msaa_config;
      struct si_atom sample_mask;
      struct si_atom cb_render_state;
      struct si_atom blend_color;
      struct si_atom clip_regs;
      struct si_atom clip_state;
      struct si_atom shader_pointers;
      struct si_atom guardband;
      struct si_atom scissors;
      struct si_atom viewports;
      struct si_atom stencil_ref;
      struct si_atom spi_map;
      struct si_atom scratch_state;
      struct si_atom window_rectangles;
      struct si_atom shader_query;
      struct si_atom ngg_cull_state;
   } s;
   struct si_atom array[sizeof(struct si_atoms_s) / sizeof(struct si_atom)];
};

#define SI_ATOM_BIT(name) (1 << (offsetof(union si_state_atoms, s.name) / sizeof(struct si_atom)))
#define SI_NUM_ATOMS      (sizeof(union si_state_atoms) / sizeof(struct si_atom))

static inline unsigned si_atoms_that_always_roll_context(void)
{
   return (SI_ATOM_BIT(streamout_begin) | SI_ATOM_BIT(streamout_enable) | SI_ATOM_BIT(framebuffer) |
           SI_ATOM_BIT(msaa_sample_locs) | SI_ATOM_BIT(sample_mask) | SI_ATOM_BIT(blend_color) |
           SI_ATOM_BIT(clip_state) | SI_ATOM_BIT(scissors) | SI_ATOM_BIT(viewports) |
           SI_ATOM_BIT(stencil_ref) | SI_ATOM_BIT(scratch_state) | SI_ATOM_BIT(window_rectangles));
}
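
/* Example (illustrative only): atoms use the same union-aliasing pattern as
 * si_state, so an atom's array index and dirty bit follow from its offset:
 *
 *    unsigned idx = offsetof(union si_state_atoms, s.framebuffer) /
 *                   sizeof(struct si_atom);      // index into array[]
 *    unsigned bit = SI_ATOM_BIT(framebuffer);    // == 1u << idx
 *
 * A context can then keep a bitmask of dirty atoms and emit array[idx] for
 * each set bit; how that mask is stored is outside this header.
 */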

struct si_shader_data {
   uint32_t sh_base[SI_NUM_SHADERS];
};

#define SI_TRACKED_PA_CL_VS_OUT_CNTL__VS_MASK                                                      \
   (S_02881C_USE_VTX_POINT_SIZE(1) | S_02881C_USE_VTX_EDGE_FLAG(1) |                               \
    S_02881C_USE_VTX_RENDER_TARGET_INDX(1) | S_02881C_USE_VTX_VIEWPORT_INDX(1) |                   \
    S_02881C_VS_OUT_MISC_VEC_ENA(1) | S_02881C_VS_OUT_MISC_SIDE_BUS_ENA(1))

/* The list of registers whose emitted values are remembered by si_context. */
enum si_tracked_reg
{
   SI_TRACKED_DB_RENDER_CONTROL, /* 2 consecutive registers */
   SI_TRACKED_DB_COUNT_CONTROL,

   SI_TRACKED_DB_RENDER_OVERRIDE2,
   SI_TRACKED_DB_SHADER_CONTROL,

   SI_TRACKED_CB_TARGET_MASK,
   SI_TRACKED_CB_DCC_CONTROL,

   SI_TRACKED_SX_PS_DOWNCONVERT, /* 3 consecutive registers */
   SI_TRACKED_SX_BLEND_OPT_EPSILON,
   SI_TRACKED_SX_BLEND_OPT_CONTROL,

   SI_TRACKED_PA_SC_LINE_CNTL, /* 2 consecutive registers */
   SI_TRACKED_PA_SC_AA_CONFIG,

   SI_TRACKED_DB_EQAA,
   SI_TRACKED_PA_SC_MODE_CNTL_1,

   SI_TRACKED_PA_SU_PRIM_FILTER_CNTL,
   SI_TRACKED_PA_SU_SMALL_PRIM_FILTER_CNTL,

   SI_TRACKED_PA_CL_VS_OUT_CNTL__VS, /* set with SI_TRACKED_PA_CL_VS_OUT_CNTL__VS_MASK */
   SI_TRACKED_PA_CL_VS_OUT_CNTL__CL, /* set with ~SI_TRACKED_PA_CL_VS_OUT_CNTL__VS_MASK */
   SI_TRACKED_PA_CL_CLIP_CNTL,

   SI_TRACKED_PA_SC_BINNER_CNTL_0,
   SI_TRACKED_DB_DFSM_CONTROL,

   SI_TRACKED_PA_CL_GB_VERT_CLIP_ADJ, /* 4 consecutive registers */
   SI_TRACKED_PA_CL_GB_VERT_DISC_ADJ,
   SI_TRACKED_PA_CL_GB_HORZ_CLIP_ADJ,
   SI_TRACKED_PA_CL_GB_HORZ_DISC_ADJ,

   SI_TRACKED_PA_SU_HARDWARE_SCREEN_OFFSET,
   SI_TRACKED_PA_SU_VTX_CNTL,

   SI_TRACKED_PA_SC_CLIPRECT_RULE,

   SI_TRACKED_PA_SC_LINE_STIPPLE,

   SI_TRACKED_VGT_ESGS_RING_ITEMSIZE,

   SI_TRACKED_VGT_GSVS_RING_OFFSET_1, /* 3 consecutive registers */
   SI_TRACKED_VGT_GSVS_RING_OFFSET_2,
   SI_TRACKED_VGT_GSVS_RING_OFFSET_3,

   SI_TRACKED_VGT_GSVS_RING_ITEMSIZE,
   SI_TRACKED_VGT_GS_MAX_VERT_OUT,

   SI_TRACKED_VGT_GS_VERT_ITEMSIZE, /* 4 consecutive registers */
   SI_TRACKED_VGT_GS_VERT_ITEMSIZE_1,
   SI_TRACKED_VGT_GS_VERT_ITEMSIZE_2,
   SI_TRACKED_VGT_GS_VERT_ITEMSIZE_3,

   SI_TRACKED_VGT_GS_INSTANCE_CNT,
   SI_TRACKED_VGT_GS_ONCHIP_CNTL,
   SI_TRACKED_VGT_GS_MAX_PRIMS_PER_SUBGROUP,
   SI_TRACKED_VGT_GS_MODE,
   SI_TRACKED_VGT_PRIMITIVEID_EN,
   SI_TRACKED_VGT_REUSE_OFF,
   SI_TRACKED_SPI_VS_OUT_CONFIG,
   SI_TRACKED_PA_CL_VTE_CNTL,
   SI_TRACKED_PA_CL_NGG_CNTL,
   SI_TRACKED_GE_MAX_OUTPUT_PER_SUBGROUP,
   SI_TRACKED_GE_NGG_SUBGRP_CNTL,

   SI_TRACKED_SPI_SHADER_IDX_FORMAT, /* 2 consecutive registers */
   SI_TRACKED_SPI_SHADER_POS_FORMAT,

   SI_TRACKED_SPI_PS_INPUT_ENA, /* 2 consecutive registers */
   SI_TRACKED_SPI_PS_INPUT_ADDR,

   SI_TRACKED_SPI_BARYC_CNTL,
   SI_TRACKED_SPI_PS_IN_CONTROL,

   SI_TRACKED_SPI_SHADER_Z_FORMAT, /* 2 consecutive registers */
   SI_TRACKED_SPI_SHADER_COL_FORMAT,

   SI_TRACKED_CB_SHADER_MASK,
   SI_TRACKED_VGT_TF_PARAM,
   SI_TRACKED_VGT_VERTEX_REUSE_BLOCK_CNTL,

   SI_TRACKED_GE_PC_ALLOC,

   SI_NUM_TRACKED_REGS,
};

struct si_tracked_regs {
   uint64_t reg_saved;
   uint32_t reg_value[SI_NUM_TRACKED_REGS];
   uint32_t spi_ps_input_cntl[32];
};

/* Private read-write buffer slots. */
enum
{
   SI_ES_RING_ESGS,
   SI_GS_RING_ESGS,

   SI_RING_GSVS,

   SI_VS_STREAMOUT_BUF0,
   SI_VS_STREAMOUT_BUF1,
   SI_VS_STREAMOUT_BUF2,
   SI_VS_STREAMOUT_BUF3,

   SI_HS_CONST_DEFAULT_TESS_LEVELS,
   SI_VS_CONST_INSTANCE_DIVISORS,
   SI_VS_CONST_CLIP_PLANES,
   SI_PS_CONST_POLY_STIPPLE,
   SI_PS_CONST_SAMPLE_POSITIONS,

   /* Image descriptor of color buffer 0 for KHR_blend_equation_advanced. */
   SI_PS_IMAGE_COLORBUF0,
   SI_PS_IMAGE_COLORBUF0_HI,
   SI_PS_IMAGE_COLORBUF0_FMASK,
   SI_PS_IMAGE_COLORBUF0_FMASK_HI,

   GFX10_GS_QUERY_BUF,

   SI_NUM_RW_BUFFERS,
};

/* Indices into sctx->descriptors, laid out so that gfx and compute pipelines
 * are contiguous:
 *
 *  0 - rw buffers
 *  1 - vertex const and shader buffers
 *  2 - vertex samplers and images
 *  3 - fragment const and shader buffer
 *   ...
 *  11 - compute const and shader buffers
 *  12 - compute samplers and images
 */
enum
{
   SI_SHADER_DESCS_CONST_AND_SHADER_BUFFERS,
   SI_SHADER_DESCS_SAMPLERS_AND_IMAGES,
   SI_NUM_SHADER_DESCS,
};

#define SI_DESCS_RW_BUFFERS    0
#define SI_DESCS_FIRST_SHADER  1
#define SI_DESCS_FIRST_COMPUTE (SI_DESCS_FIRST_SHADER + PIPE_SHADER_COMPUTE * SI_NUM_SHADER_DESCS)
#define SI_NUM_DESCS           (SI_DESCS_FIRST_SHADER + SI_NUM_SHADERS * SI_NUM_SHADER_DESCS)

#define SI_DESCS_SHADER_MASK(name)                                                                 \
   u_bit_consecutive(SI_DESCS_FIRST_SHADER + PIPE_SHADER_##name * SI_NUM_SHADER_DESCS,             \
                     SI_NUM_SHADER_DESCS)

static inline unsigned si_const_and_shader_buffer_descriptors_idx(unsigned shader)
{
   return SI_DESCS_FIRST_SHADER + shader * SI_NUM_SHADER_DESCS +
          SI_SHADER_DESCS_CONST_AND_SHADER_BUFFERS;
}

static inline unsigned si_sampler_and_image_descriptors_idx(unsigned shader)
{
   return SI_DESCS_FIRST_SHADER + shader * SI_NUM_SHADER_DESCS +
          SI_SHADER_DESCS_SAMPLERS_AND_IMAGES;
}
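
/* Worked example of the index math above: with SI_DESCS_FIRST_SHADER = 1 and
 * SI_NUM_SHADER_DESCS = 2, the fragment shader (PIPE_SHADER_FRAGMENT = 1) uses
 *
 *    si_const_and_shader_buffer_descriptors_idx(1) = 1 + 1 * 2 + 0 = 3
 *    si_sampler_and_image_descriptors_idx(1)       = 1 + 1 * 2 + 1 = 4
 *
 * which matches the layout comment above (index 0 is the RW-buffer set).
 */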

/* This represents descriptors in memory, such as buffer resources,
 * image resources, and sampler states.
 */
struct si_descriptors {
   /* The list of descriptors in malloc'd memory. */
   uint32_t *list;
   /* The list in mapped GPU memory. */
   uint32_t *gpu_list;

   /* The buffer where the descriptors have been uploaded. */
   struct si_resource *buffer;
   uint64_t gpu_address;

   /* The maximum number of descriptors. */
   uint32_t num_elements;

   /* Slots that are used by currently-bound shaders.
    * These determine which slots are uploaded.
    */
   uint32_t first_active_slot;
   uint32_t num_active_slots;

   /* The SH register offset relative to USER_DATA*_0 where the pointer
    * to the descriptor array will be stored. */
   short shader_userdata_offset;
   /* The size of one descriptor. */
   ubyte element_dw_size;
   /* If there is only one slot enabled, bind it directly instead of
    * uploading descriptors. -1 if disabled. */
   signed char slot_index_to_bind_directly;
};

struct si_buffer_resources {
   struct pipe_resource **buffers; /* this has num_buffers elements */
   unsigned *offsets;              /* this has num_buffers elements */

   enum radeon_bo_priority priority : 6;
   enum radeon_bo_priority priority_constbuf : 6;

   /* The i-th bit is set if that element is enabled (non-NULL resource). */
   uint64_t enabled_mask;
   uint64_t writable_mask;
};

#define si_pm4_state_changed(sctx, member)                                                         \
   ((sctx)->queued.named.member != (sctx)->emitted.named.member)

#define si_pm4_state_enabled_and_changed(sctx, member)                                             \
   ((sctx)->queued.named.member && si_pm4_state_changed(sctx, member))

#define si_pm4_bind_state(sctx, member, value)                                                     \
   do {                                                                                            \
      (sctx)->queued.named.member = (value);                                                       \
      (sctx)->dirty_states |= SI_STATE_BIT(member);                                                \
   } while (0)

#define si_pm4_delete_state(sctx, member, value)                                                   \
   do {                                                                                            \
      if ((sctx)->queued.named.member == (value)) {                                                \
         (sctx)->queued.named.member = NULL;                                                       \
      }                                                                                            \
      si_pm4_free_state(sctx, (struct si_pm4_state *)(value), SI_STATE_IDX(member));               \
   } while (0)
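
/* Usage sketch (assumed calling pattern, not a definitive example): a CSO
 * bind callback would typically do
 *
 *    si_pm4_bind_state(sctx, blend, (struct si_state_blend *)state);
 *
 * which stores the state in sctx->queued.named.blend and sets
 * SI_STATE_BIT(blend) in sctx->dirty_states, while the matching delete
 * callback would use si_pm4_delete_state(sctx, blend, state) so the queued
 * pointer is cleared before the state is freed.
 */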

/* si_descriptors.c */
void si_set_mutable_tex_desc_fields(struct si_screen *sscreen, struct si_texture *tex,
                                    const struct legacy_surf_level *base_level_info,
                                    unsigned base_level, unsigned first_level, unsigned block_width,
                                    bool is_stencil, bool force_dcc_off, uint32_t *state);
void si_update_ps_colorbuf0_slot(struct si_context *sctx);
void si_get_pipe_constant_buffer(struct si_context *sctx, uint shader, uint slot,
                                 struct pipe_constant_buffer *cbuf);
void si_get_shader_buffers(struct si_context *sctx, enum pipe_shader_type shader, uint start_slot,
                           uint count, struct pipe_shader_buffer *sbuf);
void si_set_ring_buffer(struct si_context *sctx, uint slot, struct pipe_resource *buffer,
                        unsigned stride, unsigned num_records, bool add_tid, bool swizzle,
                        unsigned element_size, unsigned index_stride, uint64_t offset);
void si_init_all_descriptors(struct si_context *sctx);
bool si_upload_graphics_shader_descriptors(struct si_context *sctx);
bool si_upload_compute_shader_descriptors(struct si_context *sctx);
void si_release_all_descriptors(struct si_context *sctx);
void si_gfx_resources_add_all_to_bo_list(struct si_context *sctx);
void si_compute_resources_add_all_to_bo_list(struct si_context *sctx);
bool si_gfx_resources_check_encrypted(struct si_context *sctx);
bool si_compute_resources_check_encrypted(struct si_context *sctx);
void si_shader_pointers_mark_dirty(struct si_context *sctx);
void si_add_all_descriptors_to_bo_list(struct si_context *sctx);
void si_upload_const_buffer(struct si_context *sctx, struct si_resource **buf, const uint8_t *ptr,
                            unsigned size, uint32_t *const_offset);
void si_update_all_texture_descriptors(struct si_context *sctx);
void si_shader_change_notify(struct si_context *sctx);
void si_update_needs_color_decompress_masks(struct si_context *sctx);
void si_emit_graphics_shader_pointers(struct si_context *sctx);
void si_emit_compute_shader_pointers(struct si_context *sctx);
void si_set_rw_buffer(struct si_context *sctx, uint slot, const struct pipe_constant_buffer *input);
void si_set_rw_shader_buffer(struct si_context *sctx, uint slot,
                             const struct pipe_shader_buffer *sbuffer);
void si_set_active_descriptors(struct si_context *sctx, unsigned desc_idx,
                               uint64_t new_active_mask);
void si_set_active_descriptors_for_shader(struct si_context *sctx, struct si_shader_selector *sel);
bool si_bindless_descriptor_can_reclaim_slab(void *priv, struct pb_slab_entry *entry);
struct pb_slab *si_bindless_descriptor_slab_alloc(void *priv, unsigned heap, unsigned entry_size,
                                                  unsigned group_index);
void si_bindless_descriptor_slab_free(void *priv, struct pb_slab *pslab);
void si_rebind_buffer(struct si_context *sctx, struct pipe_resource *buf);

/* si_state.c */
void si_init_state_compute_functions(struct si_context *sctx);
void si_init_state_functions(struct si_context *sctx);
void si_init_screen_state_functions(struct si_screen *sscreen);
void si_init_cs_preamble_state(struct si_context *sctx, bool uses_reg_shadowing);
void si_make_buffer_descriptor(struct si_screen *screen, struct si_resource *buf,
                               enum pipe_format format, unsigned offset, unsigned size,
                               uint32_t *state);
struct pipe_sampler_view *si_create_sampler_view_custom(struct pipe_context *ctx,
                                                        struct pipe_resource *texture,
                                                        const struct pipe_sampler_view *state,
                                                        unsigned width0, unsigned height0,
                                                        unsigned force_level);
void si_update_fb_dirtiness_after_rendering(struct si_context *sctx);
void si_update_ps_iter_samples(struct si_context *sctx);
void si_save_qbo_state(struct si_context *sctx, struct si_qbo_state *st);
void si_restore_qbo_state(struct si_context *sctx, struct si_qbo_state *st);
void si_set_occlusion_query_state(struct si_context *sctx, bool old_perfect_enable);

struct si_fast_udiv_info32 {
   unsigned multiplier; /* the "magic number" multiplier */
   unsigned pre_shift;  /* shift for the dividend before multiplying */
   unsigned post_shift; /* shift for the dividend after multiplying */
   int increment;       /* 0 or 1; if set then increment the numerator, using one of
                           the two strategies */
};

struct si_fast_udiv_info32 si_compute_fast_udiv_info32(uint32_t D, unsigned num_bits);
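
/* Illustrative sketch only, derived from the field comments above (the real
 * evaluation happens elsewhere, e.g. in generated shader code): magic-number
 * division by a constant D generally looks like
 *
 *    struct si_fast_udiv_info32 info = si_compute_fast_udiv_info32(D, 32);
 *    uint32_t n = (num >> info.pre_shift) + info.increment;
 *    uint32_t q = (uint32_t)(((uint64_t)n * info.multiplier) >> 32) >> info.post_shift;
 *
 * i.e. pre-shift the dividend, optionally increment it, take the high half of
 * the 32x32->64 multiply, then post-shift. Treat this as a sketch of the
 * technique, not the driver's exact formula.
 */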

/* si_state_binning.c */
void si_emit_dpbb_state(struct si_context *sctx);

/* si_state_shaders.c */
void si_get_ir_cache_key(struct si_shader_selector *sel, bool ngg, bool es,
                         unsigned char ir_sha1_cache_key[20]);
bool si_shader_cache_load_shader(struct si_screen *sscreen, unsigned char ir_sha1_cache_key[20],
                                 struct si_shader *shader);
void si_shader_cache_insert_shader(struct si_screen *sscreen, unsigned char ir_sha1_cache_key[20],
                                   struct si_shader *shader, bool insert_into_disk_cache);
bool si_update_shaders(struct si_context *sctx);
void si_init_screen_live_shader_cache(struct si_screen *sscreen);
void si_init_shader_functions(struct si_context *sctx);
bool si_init_shader_cache(struct si_screen *sscreen);
void si_destroy_shader_cache(struct si_screen *sscreen);
void si_schedule_initial_compile(struct si_context *sctx, gl_shader_stage stage,
                                 struct util_queue_fence *ready_fence,
                                 struct si_compiler_ctx_state *compiler_ctx_state, void *job,
                                 util_queue_execute_func execute);
void si_get_active_slot_masks(const struct si_shader_info *info, uint64_t *const_and_shader_buffers,
                              uint64_t *samplers_and_images);
int si_shader_select_with_key(struct si_screen *sscreen, struct si_shader_ctx_state *state,
                              struct si_compiler_ctx_state *compiler_state,
                              struct si_shader_key *key, int thread_index, bool optimized_or_none);
void si_shader_selector_key_vs(struct si_context *sctx, struct si_shader_selector *vs,
                               struct si_shader_key *key, struct si_vs_prolog_bits *prolog_key);
unsigned si_get_input_prim(const struct si_shader_selector *gs);
bool si_update_ngg(struct si_context *sctx);

/* si_state_draw.c */
void si_emit_surface_sync(struct si_context *sctx, struct radeon_cmdbuf *cs,
                          unsigned cp_coher_cntl);
void si_prim_discard_signal_next_compute_ib_start(struct si_context *sctx);
void gfx10_emit_cache_flush(struct si_context *sctx);
void si_emit_cache_flush(struct si_context *sctx);
void si_trace_emit(struct si_context *sctx);
void si_init_draw_functions(struct si_context *sctx);

/* si_state_msaa.c */
void si_init_msaa_functions(struct si_context *sctx);
void si_emit_sample_locations(struct radeon_cmdbuf *cs, int nr_samples);

/* si_state_streamout.c */
void si_streamout_buffers_dirty(struct si_context *sctx);
void si_emit_streamout_end(struct si_context *sctx);
void si_update_prims_generated_query_state(struct si_context *sctx, unsigned type, int diff);
void si_init_streamout_functions(struct si_context *sctx);

static inline unsigned si_get_constbuf_slot(unsigned slot)
{
   /* Constant buffers are in slots [32..47], ascending */
   return SI_NUM_SHADER_BUFFERS + slot;
}

static inline unsigned si_get_shaderbuf_slot(unsigned slot)
{
   /* shader buffers are in slots [31..0], descending */
   return SI_NUM_SHADER_BUFFERS - 1 - slot;
}

static inline unsigned si_get_sampler_slot(unsigned slot)
{
   /* 32 samplers are in sampler slots [16..47], 16 dw per slot, ascending */
   /* those are equivalent to image slots [32..95], 8 dw per slot, ascending */
   return SI_NUM_IMAGE_SLOTS / 2 + slot;
}

static inline unsigned si_get_image_slot(unsigned slot)
{
   /* image slots are in [31..0] (sampler slots [15..0]), descending */
   /* images are in slots [31..16], while FMASKs are in slots [15..0] */
   return SI_NUM_IMAGE_SLOTS - 1 - slot;
}
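
/* Worked example of the combined slot numbering (derived from the helpers
 * above, with SI_NUM_SHADER_BUFFERS = 32 and SI_NUM_IMAGE_SLOTS = 32):
 *
 *    si_get_shaderbuf_slot(0) = 31   shader buffer 0, descending from 31
 *    si_get_constbuf_slot(0)  = 32   constant buffer 0, ascending from 32
 *    si_get_sampler_slot(0)   = 16   sampler 0, ascending from 16
 *    si_get_image_slot(0)     = 31   image 0, descending; FMASKs use [15..0]
 */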

#endif