/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * on the rights to use, copy, modify, merge, publish, distribute, sub
 * license, and/or sell copies of the Software, and to permit persons to whom
 * the Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *      Christian König <christian.koenig@amd.com>
 */

#include "util/u_memory.h"
#include "radeonsi_pipe.h"
#include "radeonsi_pm4.h"
#include "sid.h"
#include "r600_hw_context_priv.h"

#define NUMBER_OF_STATES (sizeof(union si_state) / sizeof(struct si_pm4_state *))

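/* Start a new PKT3 command: remember the opcode and reserve one dword for
 * the packet header, which si_pm4_cmd_end() fills in later. */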
void si_pm4_cmd_begin(struct si_pm4_state *state, unsigned opcode)
{
	state->last_opcode = opcode;
	state->last_pm4 = state->ndw++;
}

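/* Append one dword to the command currently being built. */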
void si_pm4_cmd_add(struct si_pm4_state *state, uint32_t dw)
{
	state->pm4[state->ndw++] = dw;
}

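/* Finish the current command: write the PKT3 header with the final dword
 * count and predicate flag into the slot reserved by si_pm4_cmd_begin(). */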
void si_pm4_cmd_end(struct si_pm4_state *state, bool predicate)
{
	unsigned count;
	count = state->ndw - state->last_pm4 - 2;
	state->pm4[state->last_pm4] = PKT3(state->last_opcode,
					   count, predicate);

	assert(state->ndw <= SI_PM4_MAX_DW);
}

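/* Emit a single register write using the SET_CONFIG/SH/CONTEXT_REG packet
 * that matches the register's address range. Writes to consecutive
 * registers of the same type are merged into the previous packet. */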
void si_pm4_set_reg(struct si_pm4_state *state, unsigned reg, uint32_t val)
{
	unsigned opcode;

	if (reg >= SI_CONFIG_REG_OFFSET && reg <= SI_CONFIG_REG_END) {
		opcode = PKT3_SET_CONFIG_REG;
		reg -= SI_CONFIG_REG_OFFSET;

	} else if (reg >= SI_SH_REG_OFFSET && reg <= SI_SH_REG_END) {
		opcode = PKT3_SET_SH_REG;
		reg -= SI_SH_REG_OFFSET;

	} else if (reg >= SI_CONTEXT_REG_OFFSET && reg <= SI_CONTEXT_REG_END) {
		opcode = PKT3_SET_CONTEXT_REG;
		reg -= SI_CONTEXT_REG_OFFSET;
	} else {
		R600_ERR("Invalid register offset %08x!\n", reg);
		return;
	}

	reg >>= 2;

	if (opcode != state->last_opcode || reg != (state->last_reg + 1)) {
		si_pm4_cmd_begin(state, opcode);
		si_pm4_cmd_add(state, reg);
	}

	state->last_reg = reg;
	si_pm4_cmd_add(state, val);
	si_pm4_cmd_end(state, false);
}

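/* Remember a buffer object used by this state, so it gets a relocation
 * when the state is emitted into the command stream. */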
void si_pm4_add_bo(struct si_pm4_state *state,
                   struct si_resource *bo,
                   enum radeon_bo_usage usage)
{
	unsigned idx = state->nbo++;
	assert(idx < SI_PM4_MAX_BO);

	si_resource_reference(&state->bo[idx], bo);
	state->bo_usage[idx] = usage;
}

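/* Begin an inline data block, carried in a NOP packet. Its location is
 * hooked up to a register by si_pm4_sh_data_end(). */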
void si_pm4_sh_data_begin(struct si_pm4_state *state)
{
	si_pm4_cmd_begin(state, PKT3_NOP);
}

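/* Append one dword to the inline data block. */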
void si_pm4_sh_data_add(struct si_pm4_state *state, uint32_t dw)
{
	si_pm4_cmd_add(state, dw);
}

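/* Close the inline data block and emit a SET_SH_REG_OFFSET packet pointing
 * "reg" at it. The data offset is recorded as a relocation so it can be
 * rebased to the final command stream position in si_pm4_emit(). */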
void si_pm4_sh_data_end(struct si_pm4_state *state, unsigned reg)
{
	unsigned offs = state->last_pm4 + 1;

	/* Bail if no data was added */
	if (state->ndw == offs) {
		state->ndw--;
		return;
	}

	si_pm4_cmd_end(state, false);

	si_pm4_cmd_begin(state, PKT3_SET_SH_REG_OFFSET);
	si_pm4_cmd_add(state, (reg - SI_SH_REG_OFFSET) >> 2);
	state->relocs[state->nrelocs++] = state->ndw;
	si_pm4_cmd_add(state, offs << 2);
	si_pm4_cmd_add(state, 0);
	si_pm4_cmd_end(state, false);
}

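/* Request invalidation of the shader instruction (ICACHE) and scalar
 * (KCACHE) caches. */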
void si_pm4_inval_shader_cache(struct si_pm4_state *state)
{
	state->cp_coher_cntl |= S_0085F0_SH_ICACHE_ACTION_ENA(1);
	state->cp_coher_cntl |= S_0085F0_SH_KCACHE_ACTION_ENA(1);
}

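/* Request invalidation of the texture cache. */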
void si_pm4_inval_texture_cache(struct si_pm4_state *state)
{
	state->cp_coher_cntl |= S_0085F0_TC_ACTION_ENA(1);
}

void si_pm4_inval_vertex_cache(struct si_pm4_state *state)
{
	/* Some GPUs don't have the vertex cache and must use the texture cache instead. */
	state->cp_coher_cntl |= S_0085F0_TC_ACTION_ENA(1);
}

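/* Request a flush of the color buffer caches for the first nr_cbufs
 * color buffers. */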
void si_pm4_inval_fb_cache(struct si_pm4_state *state, unsigned nr_cbufs)
{
	state->cp_coher_cntl |= S_0085F0_CB_ACTION_ENA(1);
	state->cp_coher_cntl |= ((1 << nr_cbufs) - 1) << S_0085F0_CB0_DEST_BASE_ENA_SHIFT;
}

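/* Request a flush of the depth/stencil buffer cache. */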
void si_pm4_inval_zsbuf_cache(struct si_pm4_state *state)
{
	state->cp_coher_cntl |= S_0085F0_DB_ACTION_ENA(1) | S_0085F0_DB_DEST_BASE_ENA(1);
}

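/* Drop all buffer references held by a state and free it. If the state is
 * still tracked as emitted in slot "idx", clear that slot first. */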
void si_pm4_free_state(struct r600_context *rctx,
		       struct si_pm4_state *state,
		       unsigned idx)
{
	if (state == NULL)
		return;

	if (idx != ~0 && rctx->emitted.array[idx] == state) {
		rctx->emitted.array[idx] = NULL;
	}

	for (int i = 0; i < state->nbo; ++i) {
		si_resource_reference(&state->bo[i], NULL);
	}
	FREE(state);
}

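/* OR together the CP_COHER_CNTL flags of all queued states that have not
 * been emitted yet. */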
uint32_t si_pm4_sync_flags(struct r600_context *rctx)
{
	uint32_t cp_coher_cntl = 0;

	for (int i = 0; i < NUMBER_OF_STATES; ++i) {
		struct si_pm4_state *state = rctx->queued.array[i];

		if (!state || rctx->emitted.array[i] == state)
			continue;

		cp_coher_cntl |= state->cp_coher_cntl;
	}
	return cp_coher_cntl;
}

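/* Count the dwords needed to emit all queued states that differ from the
 * currently emitted ones. */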
unsigned si_pm4_dirty_dw(struct r600_context *rctx)
{
	unsigned count = 0;

	for (int i = 0; i < NUMBER_OF_STATES; ++i) {
		struct si_pm4_state *state = rctx->queued.array[i];

		if (!state || rctx->emitted.array[i] == state)
			continue;

		count += state->ndw;
	}

	return count;
}

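/* Copy a state's packets into the command stream: add the buffer
 * relocations, memcpy the dwords, and rebase any recorded data offsets
 * relative to the current CS position. */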
void si_pm4_emit(struct r600_context *rctx, struct si_pm4_state *state)
{
	struct radeon_winsys_cs *cs = rctx->cs;
	for (int i = 0; i < state->nbo; ++i) {
		r600_context_bo_reloc(rctx, state->bo[i],
				      state->bo_usage[i]);
	}

	memcpy(&cs->buf[cs->cdw], state->pm4, state->ndw * 4);

	for (int i = 0; i < state->nrelocs; ++i) {
		cs->buf[cs->cdw + state->relocs[i]] += cs->cdw << 2;
	}

	cs->cdw += state->ndw;
}

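/* Emit every queued state that differs from the emitted one and mark it
 * as emitted. */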
void si_pm4_emit_dirty(struct r600_context *rctx)
{
	for (int i = 0; i < NUMBER_OF_STATES; ++i) {
		struct si_pm4_state *state = rctx->queued.array[i];

		if (!state || rctx->emitted.array[i] == state)
			continue;

		si_pm4_emit(rctx, state);
		rctx->emitted.array[i] = state;
	}
}

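/* Forget all emitted state, so that every queued state is re-emitted by
 * the next si_pm4_emit_dirty() (typically after a command stream flush). */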
void si_pm4_reset_emitted(struct r600_context *rctx)
{
	memset(&rctx->emitted, 0, sizeof(rctx->emitted));
}