/**************************************************************************
 *
 * Copyright (C) 2015 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
#include <stdint.h>
#include <string.h>

#include "util/u_memory.h"
#include "util/u_math.h"
#include "util/u_format.h"
#include "pipe/p_state.h"
#include "testvirgl_encode.h"
#include "virgl_protocol.h"
#include "tgsi/tgsi_dump.h"
#include "tgsi/tgsi_parse.h"

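/*
 * Every command starts with a header dword built by VIRGL_CMD0(); the payload
 * length in dwords lives in the upper 16 bits, which is why the helper below
 * extracts it with (dword >> 16).  If the payload would not fit in the current
 * command buffer, the buffer is flushed first so a command is never split
 * across submissions.
 */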
static int virgl_encoder_write_cmd_dword(struct virgl_context *ctx,
                                         uint32_t dword)
{
   int len = (dword >> 16);

   if ((ctx->cbuf->cdw + len + 1) > VIRGL_MAX_CMDBUF_DWORDS)
      ctx->flush(ctx);

   virgl_encoder_write_dword(ctx->cbuf, dword);
   return 0;
}

static void virgl_encoder_write_res(struct virgl_context *ctx,
                                    struct virgl_resource *res)
{
   if (res)
      virgl_encoder_write_dword(ctx->cbuf, res->handle);
   else
      virgl_encoder_write_dword(ctx->cbuf, 0);
}

int virgl_encode_bind_object(struct virgl_context *ctx,
                             uint32_t handle, uint32_t object)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BIND_OBJECT, object, 1));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   return 0;
}

int virgl_encode_delete_object(struct virgl_context *ctx,
                               uint32_t handle, uint32_t object)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_DESTROY_OBJECT, object, 1));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   return 0;
}

int virgl_encode_blend_state(struct virgl_context *ctx,
                             uint32_t handle,
                             const struct pipe_blend_state *blend_state)
{
   uint32_t tmp;
   int i;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_BLEND, VIRGL_OBJ_BLEND_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);

   tmp =
      VIRGL_OBJ_BLEND_S0_INDEPENDENT_BLEND_ENABLE(blend_state->independent_blend_enable) |
      VIRGL_OBJ_BLEND_S0_LOGICOP_ENABLE(blend_state->logicop_enable) |
      VIRGL_OBJ_BLEND_S0_DITHER(blend_state->dither) |
      VIRGL_OBJ_BLEND_S0_ALPHA_TO_COVERAGE(blend_state->alpha_to_coverage) |
      VIRGL_OBJ_BLEND_S0_ALPHA_TO_ONE(blend_state->alpha_to_one);

   virgl_encoder_write_dword(ctx->cbuf, tmp);

   tmp = VIRGL_OBJ_BLEND_S1_LOGICOP_FUNC(blend_state->logicop_func);
   virgl_encoder_write_dword(ctx->cbuf, tmp);

   for (i = 0; i < VIRGL_MAX_COLOR_BUFS; i++) {
      tmp =
         VIRGL_OBJ_BLEND_S2_RT_BLEND_ENABLE(blend_state->rt[i].blend_enable) |
         VIRGL_OBJ_BLEND_S2_RT_RGB_FUNC(blend_state->rt[i].rgb_func) |
         VIRGL_OBJ_BLEND_S2_RT_RGB_SRC_FACTOR(blend_state->rt[i].rgb_src_factor) |
         VIRGL_OBJ_BLEND_S2_RT_RGB_DST_FACTOR(blend_state->rt[i].rgb_dst_factor) |
         VIRGL_OBJ_BLEND_S2_RT_ALPHA_FUNC(blend_state->rt[i].alpha_func) |
         VIRGL_OBJ_BLEND_S2_RT_ALPHA_SRC_FACTOR(blend_state->rt[i].alpha_src_factor) |
         VIRGL_OBJ_BLEND_S2_RT_ALPHA_DST_FACTOR(blend_state->rt[i].alpha_dst_factor) |
         VIRGL_OBJ_BLEND_S2_RT_COLORMASK(blend_state->rt[i].colormask);
      virgl_encoder_write_dword(ctx->cbuf, tmp);
   }
   return 0;
}

int virgl_encode_dsa_state(struct virgl_context *ctx,
                           uint32_t handle,
                           const struct pipe_depth_stencil_alpha_state *dsa_state)
{
   uint32_t tmp;
   int i;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_DSA, VIRGL_OBJ_DSA_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);

   tmp = VIRGL_OBJ_DSA_S0_DEPTH_ENABLE(dsa_state->depth.enabled) |
         VIRGL_OBJ_DSA_S0_DEPTH_WRITEMASK(dsa_state->depth.writemask) |
         VIRGL_OBJ_DSA_S0_DEPTH_FUNC(dsa_state->depth.func) |
         VIRGL_OBJ_DSA_S0_ALPHA_ENABLED(dsa_state->alpha.enabled) |
         VIRGL_OBJ_DSA_S0_ALPHA_FUNC(dsa_state->alpha.func);
   virgl_encoder_write_dword(ctx->cbuf, tmp);

   for (i = 0; i < 2; i++) {
      tmp = VIRGL_OBJ_DSA_S1_STENCIL_ENABLED(dsa_state->stencil[i].enabled) |
            VIRGL_OBJ_DSA_S1_STENCIL_FUNC(dsa_state->stencil[i].func) |
            VIRGL_OBJ_DSA_S1_STENCIL_FAIL_OP(dsa_state->stencil[i].fail_op) |
            VIRGL_OBJ_DSA_S1_STENCIL_ZPASS_OP(dsa_state->stencil[i].zpass_op) |
            VIRGL_OBJ_DSA_S1_STENCIL_ZFAIL_OP(dsa_state->stencil[i].zfail_op) |
            VIRGL_OBJ_DSA_S1_STENCIL_VALUEMASK(dsa_state->stencil[i].valuemask) |
            VIRGL_OBJ_DSA_S1_STENCIL_WRITEMASK(dsa_state->stencil[i].writemask);
      virgl_encoder_write_dword(ctx->cbuf, tmp);
   }

   virgl_encoder_write_dword(ctx->cbuf, fui(dsa_state->alpha.ref_value));
   return 0;
}

int virgl_encode_rasterizer_state(struct virgl_context *ctx,
                                  uint32_t handle,
                                  const struct pipe_rasterizer_state *state)
{
   uint32_t tmp;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_RASTERIZER, VIRGL_OBJ_RS_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);

   tmp = VIRGL_OBJ_RS_S0_FLATSHADE(state->flatshade) |
         VIRGL_OBJ_RS_S0_DEPTH_CLIP(state->depth_clip) |
         VIRGL_OBJ_RS_S0_CLIP_HALFZ(state->clip_halfz) |
         VIRGL_OBJ_RS_S0_RASTERIZER_DISCARD(state->rasterizer_discard) |
         VIRGL_OBJ_RS_S0_FLATSHADE_FIRST(state->flatshade_first) |
         VIRGL_OBJ_RS_S0_LIGHT_TWOSIZE(state->light_twoside) |
         VIRGL_OBJ_RS_S0_SPRITE_COORD_MODE(state->sprite_coord_mode) |
         VIRGL_OBJ_RS_S0_POINT_QUAD_RASTERIZATION(state->point_quad_rasterization) |
         VIRGL_OBJ_RS_S0_CULL_FACE(state->cull_face) |
         VIRGL_OBJ_RS_S0_FILL_FRONT(state->fill_front) |
         VIRGL_OBJ_RS_S0_FILL_BACK(state->fill_back) |
         VIRGL_OBJ_RS_S0_SCISSOR(state->scissor) |
         VIRGL_OBJ_RS_S0_FRONT_CCW(state->front_ccw) |
         VIRGL_OBJ_RS_S0_CLAMP_VERTEX_COLOR(state->clamp_vertex_color) |
         VIRGL_OBJ_RS_S0_CLAMP_FRAGMENT_COLOR(state->clamp_fragment_color) |
         VIRGL_OBJ_RS_S0_OFFSET_LINE(state->offset_line) |
         VIRGL_OBJ_RS_S0_OFFSET_POINT(state->offset_point) |
         VIRGL_OBJ_RS_S0_OFFSET_TRI(state->offset_tri) |
         VIRGL_OBJ_RS_S0_POLY_SMOOTH(state->poly_smooth) |
         VIRGL_OBJ_RS_S0_POLY_STIPPLE_ENABLE(state->poly_stipple_enable) |
         VIRGL_OBJ_RS_S0_POINT_SMOOTH(state->point_smooth) |
         VIRGL_OBJ_RS_S0_POINT_SIZE_PER_VERTEX(state->point_size_per_vertex) |
         VIRGL_OBJ_RS_S0_MULTISAMPLE(state->multisample) |
         VIRGL_OBJ_RS_S0_LINE_SMOOTH(state->line_smooth) |
         VIRGL_OBJ_RS_S0_LINE_STIPPLE_ENABLE(state->line_stipple_enable) |
         VIRGL_OBJ_RS_S0_LINE_LAST_PIXEL(state->line_last_pixel) |
         VIRGL_OBJ_RS_S0_HALF_PIXEL_CENTER(state->half_pixel_center) |
         VIRGL_OBJ_RS_S0_BOTTOM_EDGE_RULE(state->bottom_edge_rule);

   virgl_encoder_write_dword(ctx->cbuf, tmp); /* S0 */
   virgl_encoder_write_dword(ctx->cbuf, fui(state->point_size)); /* S1 */
   virgl_encoder_write_dword(ctx->cbuf, state->sprite_coord_enable); /* S2 */
   tmp = VIRGL_OBJ_RS_S3_LINE_STIPPLE_PATTERN(state->line_stipple_pattern) |
         VIRGL_OBJ_RS_S3_LINE_STIPPLE_FACTOR(state->line_stipple_factor) |
         VIRGL_OBJ_RS_S3_CLIP_PLANE_ENABLE(state->clip_plane_enable);
   virgl_encoder_write_dword(ctx->cbuf, tmp); /* S3 */
   virgl_encoder_write_dword(ctx->cbuf, fui(state->line_width)); /* S4 */
   virgl_encoder_write_dword(ctx->cbuf, fui(state->offset_units)); /* S5 */
   virgl_encoder_write_dword(ctx->cbuf, fui(state->offset_scale)); /* S6 */
   virgl_encoder_write_dword(ctx->cbuf, fui(state->offset_clamp)); /* S7 */
   return 0;
}

static void virgl_emit_shader_header(struct virgl_context *ctx,
                                     uint32_t handle, uint32_t len,
                                     uint32_t type, uint32_t offlen,
                                     uint32_t num_tokens)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_SHADER, len));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   virgl_encoder_write_dword(ctx->cbuf, type);
   virgl_encoder_write_dword(ctx->cbuf, offlen);
   virgl_encoder_write_dword(ctx->cbuf, num_tokens);
}

static void virgl_emit_shader_streamout(struct virgl_context *ctx,
                                        const struct pipe_stream_output_info *so_info)
{
   int num_outputs = 0;
   uint i;
   uint32_t tmp;

   if (so_info)
      num_outputs = so_info->num_outputs;

   virgl_encoder_write_dword(ctx->cbuf, num_outputs);
   if (num_outputs) {
      for (i = 0; i < 4; i++)
         virgl_encoder_write_dword(ctx->cbuf, so_info->stride[i]);

      for (i = 0; i < so_info->num_outputs; i++) {
         tmp =
            VIRGL_OBJ_SHADER_SO_OUTPUT_REGISTER_INDEX(so_info->output[i].register_index) |
            VIRGL_OBJ_SHADER_SO_OUTPUT_START_COMPONENT(so_info->output[i].start_component) |
            VIRGL_OBJ_SHADER_SO_OUTPUT_NUM_COMPONENTS(so_info->output[i].num_components) |
            VIRGL_OBJ_SHADER_SO_OUTPUT_BUFFER(so_info->output[i].output_buffer) |
            VIRGL_OBJ_SHADER_SO_OUTPUT_DST_OFFSET(so_info->output[i].dst_offset);
         virgl_encoder_write_dword(ctx->cbuf, tmp);
         virgl_encoder_write_dword(ctx->cbuf, 0);
      }
   }
}

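/*
 * Shader creation: the TGSI text can be larger than a single command buffer,
 * so it is sent in chunks.  The first chunk carries the total string length
 * plus the stream-output descriptors; continuation chunks set
 * VIRGL_OBJ_SHADER_OFFSET_CONT and encode the byte offset into the string
 * reached so far.
 */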
int virgl_encode_shader_state(struct virgl_context *ctx,
                              uint32_t handle,
                              uint32_t type,
                              const struct pipe_shader_state *shader,
                              const char *shad_str)
{
   char *str, *sptr;
   uint32_t shader_len, len;
   int ret;
   int num_tokens;
   int str_total_size = 65536;
   uint32_t left_bytes, base_hdr_size, strm_hdr_size, thispass;
   bool first_pass;

   if (!shad_str) {
      num_tokens = tgsi_num_tokens(shader->tokens);
      str = CALLOC(1, str_total_size);
      if (!str)
         return -1;

      ret = tgsi_dump_str(shader->tokens, TGSI_DUMP_FLOAT_AS_HEX, str, str_total_size);
      if (ret == -1) {
         fprintf(stderr, "Failed to translate shader in available space\n");
         FREE(str);
         return -1;
      }
   } else {
      num_tokens = 300;
      str = (char *)shad_str;
   }

   shader_len = strlen(str) + 1;

   left_bytes = shader_len;

   base_hdr_size = 5;
   strm_hdr_size = shader->stream_output.num_outputs ? shader->stream_output.num_outputs * 2 + 4 : 0;
   first_pass = true;
   sptr = str;
   while (left_bytes) {
      uint32_t length, offlen;
      int hdr_len = base_hdr_size + (first_pass ? strm_hdr_size : 0);
      if (ctx->cbuf->cdw + hdr_len + 1 > VIRGL_MAX_CMDBUF_DWORDS)
         ctx->flush(ctx);

      thispass = (VIRGL_MAX_CMDBUF_DWORDS - ctx->cbuf->cdw - hdr_len - 1) * 4;

      length = MIN2(thispass, left_bytes);
      len = ((length + 3) / 4) + hdr_len;

      if (first_pass)
         offlen = VIRGL_OBJ_SHADER_OFFSET_VAL(shader_len);
      else
         offlen = VIRGL_OBJ_SHADER_OFFSET_VAL((uintptr_t)sptr - (uintptr_t)str) | VIRGL_OBJ_SHADER_OFFSET_CONT;

      virgl_emit_shader_header(ctx, handle, len, type, offlen, num_tokens);

      virgl_emit_shader_streamout(ctx, first_pass ? &shader->stream_output : NULL);

      virgl_encoder_write_block(ctx->cbuf, (uint8_t *)sptr, length);

      sptr += length;
      first_pass = false;
      left_bytes -= length;
   }

   if (str != shad_str)
      FREE(str);
   return 0;
}

int virgl_encode_clear(struct virgl_context *ctx,
                       unsigned buffers,
                       const union pipe_color_union *color,
                       double depth, unsigned stencil)
{
   int i;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CLEAR, 0, VIRGL_OBJ_CLEAR_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, buffers);
   for (i = 0; i < 4; i++)
      virgl_encoder_write_dword(ctx->cbuf, color->ui[i]);
   virgl_encoder_write_double(ctx->cbuf, depth);
   virgl_encoder_write_dword(ctx->cbuf, stencil);
   return 0;
}

int virgl_encoder_set_framebuffer_state(struct virgl_context *ctx,
                                        const struct pipe_framebuffer_state *state)
{
   struct virgl_surface *zsurf = (struct virgl_surface *)state->zsbuf;
   uint i;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_FRAMEBUFFER_STATE, 0, VIRGL_SET_FRAMEBUFFER_STATE_SIZE(state->nr_cbufs)));
   virgl_encoder_write_dword(ctx->cbuf, state->nr_cbufs);
   virgl_encoder_write_dword(ctx->cbuf, zsurf ? zsurf->handle : 0);
   for (i = 0; i < state->nr_cbufs; i++) {
      struct virgl_surface *surf = (struct virgl_surface *)state->cbufs[i];
      virgl_encoder_write_dword(ctx->cbuf, surf ? surf->handle : 0);
   }

   return 0;
}

int virgl_encoder_set_viewport_states(struct virgl_context *ctx,
                                      int start_slot,
                                      int num_viewports,
                                      const struct pipe_viewport_state *states)
{
   int i, v;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_VIEWPORT_STATE, 0, VIRGL_SET_VIEWPORT_STATE_SIZE(num_viewports)));
   virgl_encoder_write_dword(ctx->cbuf, start_slot);
   for (v = 0; v < num_viewports; v++) {
      for (i = 0; i < 3; i++)
         virgl_encoder_write_dword(ctx->cbuf, fui(states[v].scale[i]));
      for (i = 0; i < 3; i++)
         virgl_encoder_write_dword(ctx->cbuf, fui(states[v].translate[i]));
   }
   return 0;
}

int virgl_encoder_create_vertex_elements(struct virgl_context *ctx,
                                         uint32_t handle,
                                         unsigned num_elements,
                                         const struct pipe_vertex_element *element)
{
   uint i;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_VERTEX_ELEMENTS, VIRGL_OBJ_VERTEX_ELEMENTS_SIZE(num_elements)));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   for (i = 0; i < num_elements; i++) {
      virgl_encoder_write_dword(ctx->cbuf, element[i].src_offset);
      virgl_encoder_write_dword(ctx->cbuf, element[i].instance_divisor);
      virgl_encoder_write_dword(ctx->cbuf, element[i].vertex_buffer_index);
      virgl_encoder_write_dword(ctx->cbuf, element[i].src_format);
   }
   return 0;
}

int virgl_encoder_set_vertex_buffers(struct virgl_context *ctx,
                                     unsigned num_buffers,
                                     const struct pipe_vertex_buffer *buffers)
{
   uint i;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_VERTEX_BUFFERS, 0, VIRGL_SET_VERTEX_BUFFERS_SIZE(num_buffers)));
   for (i = 0; i < num_buffers; i++) {
      struct virgl_resource *res = (struct virgl_resource *)buffers[i].buffer;
      virgl_encoder_write_dword(ctx->cbuf, buffers[i].stride);
      virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_offset);
      virgl_encoder_write_res(ctx, res);
   }
   return 0;
}

int virgl_encoder_set_index_buffer(struct virgl_context *ctx,
                                   const struct pipe_index_buffer *ib)
{
   int length = VIRGL_SET_INDEX_BUFFER_SIZE(ib);
   struct virgl_resource *res = NULL;

   if (ib)
      res = (struct virgl_resource *)ib->buffer;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_INDEX_BUFFER, 0, length));
   virgl_encoder_write_res(ctx, res);
   if (ib) {
      virgl_encoder_write_dword(ctx->cbuf, ib->index_size);
      virgl_encoder_write_dword(ctx->cbuf, ib->offset);
   }
   return 0;
}

int virgl_encoder_draw_vbo(struct virgl_context *ctx,
                           const struct pipe_draw_info *info)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_DRAW_VBO, 0, VIRGL_DRAW_VBO_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, info->start);
   virgl_encoder_write_dword(ctx->cbuf, info->count);
   virgl_encoder_write_dword(ctx->cbuf, info->mode);
   virgl_encoder_write_dword(ctx->cbuf, info->indexed);
   virgl_encoder_write_dword(ctx->cbuf, info->instance_count);
   virgl_encoder_write_dword(ctx->cbuf, info->index_bias);
   virgl_encoder_write_dword(ctx->cbuf, info->start_instance);
   virgl_encoder_write_dword(ctx->cbuf, info->primitive_restart);
   virgl_encoder_write_dword(ctx->cbuf, info->restart_index);
   virgl_encoder_write_dword(ctx->cbuf, info->min_index);
   virgl_encoder_write_dword(ctx->cbuf, info->max_index);
   if (info->count_from_stream_output)
      virgl_encoder_write_dword(ctx->cbuf, info->count_from_stream_output->buffer_size);
   else
      virgl_encoder_write_dword(ctx->cbuf, 0);
   return 0;
}

int virgl_encoder_create_surface(struct virgl_context *ctx,
                                 uint32_t handle,
                                 struct virgl_resource *res,
                                 const struct pipe_surface *templat)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_SURFACE, VIRGL_OBJ_SURFACE_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   virgl_encoder_write_res(ctx, res);
   virgl_encoder_write_dword(ctx->cbuf, templat->format);
   if (templat->texture->target == PIPE_BUFFER) {
      virgl_encoder_write_dword(ctx->cbuf, templat->u.buf.first_element);
      virgl_encoder_write_dword(ctx->cbuf, templat->u.buf.last_element);
   } else {
      virgl_encoder_write_dword(ctx->cbuf, templat->u.tex.level);
      virgl_encoder_write_dword(ctx->cbuf, templat->u.tex.first_layer | (templat->u.tex.last_layer << 16));
   }
   return 0;
}

int virgl_encoder_create_so_target(struct virgl_context *ctx,
                                   uint32_t handle,
                                   struct virgl_resource *res,
                                   unsigned buffer_offset,
                                   unsigned buffer_size)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_STREAMOUT_TARGET, VIRGL_OBJ_STREAMOUT_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   virgl_encoder_write_res(ctx, res);
   virgl_encoder_write_dword(ctx->cbuf, buffer_offset);
   virgl_encoder_write_dword(ctx->cbuf, buffer_size);
   return 0;
}

static void virgl_encoder_iw_emit_header_1d(struct virgl_context *ctx,
                                            struct virgl_resource *res,
                                            unsigned level, unsigned usage,
                                            const struct pipe_box *box,
                                            unsigned stride, unsigned layer_stride)
{
   virgl_encoder_write_res(ctx, res);
   virgl_encoder_write_dword(ctx->cbuf, level);
   virgl_encoder_write_dword(ctx->cbuf, usage);
   virgl_encoder_write_dword(ctx->cbuf, stride);
   virgl_encoder_write_dword(ctx->cbuf, layer_stride);
   virgl_encoder_write_dword(ctx->cbuf, box->x);
   virgl_encoder_write_dword(ctx->cbuf, box->y);
   virgl_encoder_write_dword(ctx->cbuf, box->z);
   virgl_encoder_write_dword(ctx->cbuf, box->width);
   virgl_encoder_write_dword(ctx->cbuf, box->height);
   virgl_encoder_write_dword(ctx->cbuf, box->depth);
}

static void virgl_encoder_inline_send_box(struct virgl_context *ctx,
                                          struct virgl_resource *res,
                                          unsigned level, unsigned usage,
                                          const struct pipe_box *box,
                                          const void *data, unsigned stride,
                                          unsigned layer_stride, int length)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_RESOURCE_INLINE_WRITE, 0, ((length + 3) / 4) + 11));
   virgl_encoder_iw_emit_header_1d(ctx, res, level, usage, box, stride, layer_stride);
   virgl_encoder_write_block(ctx->cbuf, data, length);
}

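/*
 * Inline resource writes: if the whole box fits in one command buffer it is
 * sent as a single RESOURCE_INLINE_WRITE, otherwise the data is split into
 * per-layer and per-row boxes, and a row is itself split further when it does
 * not fit in the remaining command buffer space.
 */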
int virgl_encoder_inline_write(struct virgl_context *ctx,
                               struct virgl_resource *res,
                               unsigned level, unsigned usage,
                               const struct pipe_box *box,
                               const void *data, unsigned stride,
                               unsigned layer_stride)
{
   uint32_t length, thispass, left_bytes;
   struct pipe_box mybox = *box;
   unsigned elsize, size;
   unsigned layer_size;
   unsigned stride_internal = stride;
   unsigned layer_stride_internal = layer_stride;
   int layer, row;

   elsize = util_format_get_blocksize(res->base.format);

   /* total size of data to transfer */
   if (!stride)
      stride_internal = box->width * elsize;
   layer_size = box->height * stride_internal;
   if (layer_stride && layer_stride < layer_size)
      return -1;
   if (!layer_stride)
      layer_stride_internal = layer_size;
   size = layer_stride_internal * box->depth;

   length = 11 + (size + 3) / 4;

   /* can we send it all in one cmdbuf? */
   if (length < VIRGL_MAX_CMDBUF_DWORDS) {
      /* is there space in this cmdbuf? if not flush and use another one */
      if ((ctx->cbuf->cdw + length + 1) > VIRGL_MAX_CMDBUF_DWORDS) {
         ctx->flush(ctx);
      }
      /* send it all in one go. */
      virgl_encoder_inline_send_box(ctx, res, level, usage, &mybox, data, stride, layer_stride, size);
      return 0;
   }

   /* break things down into chunks we can send */
   /* send layers in separate chunks */
   for (layer = 0; layer < box->depth; layer++) {
      const void *layer_data = data;
      mybox.z = layer;
      mybox.depth = 1;

      /* send one line in separate chunks */
      for (row = 0; row < box->height; row++) {
         const void *row_data = layer_data;
         mybox.y = row;
         mybox.height = 1;
         mybox.x = 0;

         left_bytes = box->width * elsize;
         while (left_bytes) {
            if (ctx->cbuf->cdw + 12 > VIRGL_MAX_CMDBUF_DWORDS)
               ctx->flush(ctx);

            thispass = (VIRGL_MAX_CMDBUF_DWORDS - ctx->cbuf->cdw - 12) * 4;

            length = MIN2(thispass, left_bytes);

            mybox.width = length / elsize;

            virgl_encoder_inline_send_box(ctx, res, level, usage, &mybox, row_data, stride, layer_stride, length);
            left_bytes -= length;
            mybox.x += length / elsize;
            row_data += length;
         }
         layer_data += stride_internal;
      }
      data += layer_stride_internal;
   }
   return 0;
}

int virgl_encoder_flush_frontbuffer(UNUSED struct virgl_context *ctx,
                                    UNUSED struct virgl_resource *res)
{
//   virgl_encoder_write_dword(ctx->cbuf, VIRGL_CMD0(VIRGL_CCMD_FLUSH_FRONTUBFFER, 0, 1));
//   virgl_encoder_write_dword(ctx->cbuf, res_handle);
   return 0;
}

int virgl_encode_sampler_state(struct virgl_context *ctx,
                               uint32_t handle,
                               const struct pipe_sampler_state *state)
{
   uint32_t tmp;
   int i;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_SAMPLER_STATE, VIRGL_OBJ_SAMPLER_STATE_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);

   tmp = VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_S(state->wrap_s) |
         VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_T(state->wrap_t) |
         VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_R(state->wrap_r) |
         VIRGL_OBJ_SAMPLE_STATE_S0_MIN_IMG_FILTER(state->min_img_filter) |
         VIRGL_OBJ_SAMPLE_STATE_S0_MIN_MIP_FILTER(state->min_mip_filter) |
         VIRGL_OBJ_SAMPLE_STATE_S0_MAG_IMG_FILTER(state->mag_img_filter) |
         VIRGL_OBJ_SAMPLE_STATE_S0_COMPARE_MODE(state->compare_mode) |
         VIRGL_OBJ_SAMPLE_STATE_S0_COMPARE_FUNC(state->compare_func);

   virgl_encoder_write_dword(ctx->cbuf, tmp);
   virgl_encoder_write_dword(ctx->cbuf, fui(state->lod_bias));
   virgl_encoder_write_dword(ctx->cbuf, fui(state->min_lod));
   virgl_encoder_write_dword(ctx->cbuf, fui(state->max_lod));
   for (i = 0; i < 4; i++)
      virgl_encoder_write_dword(ctx->cbuf, state->border_color.ui[i]);
   return 0;
}

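/*
 * Note: the sampler view encoder below always takes the buffer-style element
 * range (see the "if (1)" with the TODO); the texture layer/level path is
 * currently unused in this test encoder.
 */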
int virgl_encode_sampler_view(struct virgl_context *ctx,
                              uint32_t handle,
                              struct virgl_resource *res,
                              const struct pipe_sampler_view *state)
{
   uint32_t tmp;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_SAMPLER_VIEW, VIRGL_OBJ_SAMPLER_VIEW_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   virgl_encoder_write_res(ctx, res);
   virgl_encoder_write_dword(ctx->cbuf, state->format);
   if (1) { /* TODO: res->u.b.target == PIPE_BUFFER */
      virgl_encoder_write_dword(ctx->cbuf, state->u.buf.first_element);
      virgl_encoder_write_dword(ctx->cbuf, state->u.buf.last_element);
   } else {
      virgl_encoder_write_dword(ctx->cbuf, state->u.tex.first_layer | state->u.tex.last_layer << 16);
      virgl_encoder_write_dword(ctx->cbuf, state->u.tex.first_level | state->u.tex.last_level << 8);
   }
   tmp = VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_R(state->swizzle_r) |
         VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_G(state->swizzle_g) |
         VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_B(state->swizzle_b) |
         VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_A(state->swizzle_a);
   virgl_encoder_write_dword(ctx->cbuf, tmp);
   return 0;
}

int virgl_encode_set_sampler_views(struct virgl_context *ctx,
                                   uint32_t shader_type,
                                   uint32_t start_slot,
                                   uint32_t num_views,
                                   struct virgl_sampler_view **views)
{
   uint i;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SAMPLER_VIEWS, 0, VIRGL_SET_SAMPLER_VIEWS_SIZE(num_views)));
   virgl_encoder_write_dword(ctx->cbuf, shader_type);
   virgl_encoder_write_dword(ctx->cbuf, start_slot);
   for (i = 0; i < num_views; i++) {
      uint32_t handle = views[i] ? views[i]->handle : 0;
      virgl_encoder_write_dword(ctx->cbuf, handle);
   }
   return 0;
}

int virgl_encode_bind_sampler_states(struct virgl_context *ctx,
                                     uint32_t shader_type,
                                     uint32_t start_slot,
                                     uint32_t num_handles,
                                     uint32_t *handles)
{
   uint i;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BIND_SAMPLER_STATES, 0, VIRGL_BIND_SAMPLER_STATES(num_handles)));
   virgl_encoder_write_dword(ctx->cbuf, shader_type);
   virgl_encoder_write_dword(ctx->cbuf, start_slot);
   for (i = 0; i < num_handles; i++)
      virgl_encoder_write_dword(ctx->cbuf, handles[i]);
   return 0;
}

int virgl_encoder_write_constant_buffer(struct virgl_context *ctx,
                                        uint32_t shader,
                                        uint32_t index,
                                        uint32_t size,
                                        const void *data)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_CONSTANT_BUFFER, 0, size + 2));
   virgl_encoder_write_dword(ctx->cbuf, shader);
   virgl_encoder_write_dword(ctx->cbuf, index);
   if (data)
      virgl_encoder_write_block(ctx->cbuf, data, size * 4);
   return 0;
}

int virgl_encoder_set_uniform_buffer(struct virgl_context *ctx,
                                     uint32_t shader,
                                     uint32_t index,
                                     uint32_t offset,
                                     uint32_t length,
                                     struct virgl_resource *res)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_UNIFORM_BUFFER, 0, VIRGL_SET_UNIFORM_BUFFER_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, shader);
   virgl_encoder_write_dword(ctx->cbuf, index);
   virgl_encoder_write_dword(ctx->cbuf, offset);
   virgl_encoder_write_dword(ctx->cbuf, length);
   virgl_encoder_write_res(ctx, res);
   return 0;
}

int virgl_encoder_set_stencil_ref(struct virgl_context *ctx,
                                  const struct pipe_stencil_ref *ref)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_STENCIL_REF, 0, VIRGL_SET_STENCIL_REF_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, VIRGL_STENCIL_REF_VAL(ref->ref_value[0], (ref->ref_value[1])));
   return 0;
}

int virgl_encoder_set_blend_color(struct virgl_context *ctx,
                                  const struct pipe_blend_color *color)
{
   int i;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_BLEND_COLOR, 0, VIRGL_SET_BLEND_COLOR_SIZE));
   for (i = 0; i < 4; i++)
      virgl_encoder_write_dword(ctx->cbuf, fui(color->color[i]));
   return 0;
}

int virgl_encoder_set_scissor_state(struct virgl_context *ctx,
                                    unsigned start_slot,
                                    int num_scissors,
                                    const struct pipe_scissor_state *ss)
{
   int i;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SCISSOR_STATE, 0, VIRGL_SET_SCISSOR_STATE_SIZE(num_scissors)));
   virgl_encoder_write_dword(ctx->cbuf, start_slot);
   for (i = 0; i < num_scissors; i++) {
      virgl_encoder_write_dword(ctx->cbuf, (ss[i].minx | ss[i].miny << 16));
      virgl_encoder_write_dword(ctx->cbuf, (ss[i].maxx | ss[i].maxy << 16));
   }
   return 0;
}

void virgl_encoder_set_polygon_stipple(struct virgl_context *ctx,
                                       const struct pipe_poly_stipple *ps)
{
   int i;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_POLYGON_STIPPLE, 0, VIRGL_POLYGON_STIPPLE_SIZE));
   for (i = 0; i < VIRGL_POLYGON_STIPPLE_SIZE; i++) {
      virgl_encoder_write_dword(ctx->cbuf, ps->stipple[i]);
   }
}

void virgl_encoder_set_sample_mask(struct virgl_context *ctx,
                                   unsigned sample_mask)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SAMPLE_MASK, 0, VIRGL_SET_SAMPLE_MASK_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, sample_mask);
}

void virgl_encoder_set_clip_state(struct virgl_context *ctx,
                                  const struct pipe_clip_state *clip)
{
   int i, j;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_CLIP_STATE, 0, VIRGL_SET_CLIP_STATE_SIZE));
   for (i = 0; i < VIRGL_MAX_CLIP_PLANES; i++) {
      for (j = 0; j < 4; j++) {
         virgl_encoder_write_dword(ctx->cbuf, fui(clip->ucp[i][j]));
      }
   }
}

int virgl_encode_resource_copy_region(struct virgl_context *ctx,
                                      struct virgl_resource *dst_res,
                                      unsigned dst_level,
                                      unsigned dstx, unsigned dsty, unsigned dstz,
                                      struct virgl_resource *src_res,
                                      unsigned src_level,
                                      const struct pipe_box *src_box)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_RESOURCE_COPY_REGION, 0, VIRGL_CMD_RESOURCE_COPY_REGION_SIZE));
   virgl_encoder_write_res(ctx, dst_res);
   virgl_encoder_write_dword(ctx->cbuf, dst_level);
   virgl_encoder_write_dword(ctx->cbuf, dstx);
   virgl_encoder_write_dword(ctx->cbuf, dsty);
   virgl_encoder_write_dword(ctx->cbuf, dstz);
   virgl_encoder_write_res(ctx, src_res);
   virgl_encoder_write_dword(ctx->cbuf, src_level);
   virgl_encoder_write_dword(ctx->cbuf, src_box->x);
   virgl_encoder_write_dword(ctx->cbuf, src_box->y);
   virgl_encoder_write_dword(ctx->cbuf, src_box->z);
   virgl_encoder_write_dword(ctx->cbuf, src_box->width);
   virgl_encoder_write_dword(ctx->cbuf, src_box->height);
   virgl_encoder_write_dword(ctx->cbuf, src_box->depth);
   return 0;
}

int virgl_encode_blit(struct virgl_context *ctx,
                      struct virgl_resource *dst_res,
                      struct virgl_resource *src_res,
                      const struct pipe_blit_info *blit)
{
   uint32_t tmp;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BLIT, 0, VIRGL_CMD_BLIT_SIZE));
   tmp = VIRGL_CMD_BLIT_S0_MASK(blit->mask) |
         VIRGL_CMD_BLIT_S0_FILTER(blit->filter) |
         VIRGL_CMD_BLIT_S0_SCISSOR_ENABLE(blit->scissor_enable);
   virgl_encoder_write_dword(ctx->cbuf, tmp);
   virgl_encoder_write_dword(ctx->cbuf, (blit->scissor.minx | blit->scissor.miny << 16));
   virgl_encoder_write_dword(ctx->cbuf, (blit->scissor.maxx | blit->scissor.maxy << 16));

   virgl_encoder_write_res(ctx, dst_res);
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.level);
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.format);
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.x);
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.y);
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.z);
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.width);
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.height);
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.depth);

   virgl_encoder_write_res(ctx, src_res);
   virgl_encoder_write_dword(ctx->cbuf, blit->src.level);
   virgl_encoder_write_dword(ctx->cbuf, blit->src.format);
   virgl_encoder_write_dword(ctx->cbuf, blit->src.box.x);
   virgl_encoder_write_dword(ctx->cbuf, blit->src.box.y);
   virgl_encoder_write_dword(ctx->cbuf, blit->src.box.z);
   virgl_encoder_write_dword(ctx->cbuf, blit->src.box.width);
   virgl_encoder_write_dword(ctx->cbuf, blit->src.box.height);
   virgl_encoder_write_dword(ctx->cbuf, blit->src.box.depth);
   return 0;
}

int virgl_encoder_create_query(struct virgl_context *ctx,
                               uint32_t handle,
                               uint query_type,
                               struct virgl_resource *res,
                               uint32_t offset)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_QUERY, VIRGL_OBJ_QUERY_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   virgl_encoder_write_dword(ctx->cbuf, query_type);
   virgl_encoder_write_dword(ctx->cbuf, offset);
   virgl_encoder_write_res(ctx, res);
   return 0;
}

int virgl_encoder_begin_query(struct virgl_context *ctx,
                              uint32_t handle)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BEGIN_QUERY, 0, 1));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   return 0;
}

int virgl_encoder_end_query(struct virgl_context *ctx,
                            uint32_t handle)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_END_QUERY, 0, 1));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   return 0;
}

int virgl_encoder_get_query_result(struct virgl_context *ctx,
                                   uint32_t handle, boolean wait)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_GET_QUERY_RESULT, 0, 2));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   virgl_encoder_write_dword(ctx->cbuf, wait ? 1 : 0);
   return 0;
}

int virgl_encoder_render_condition(struct virgl_context *ctx,
                                   uint32_t handle, boolean condition,
                                   uint mode)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_RENDER_CONDITION, 0, VIRGL_RENDER_CONDITION_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   virgl_encoder_write_dword(ctx->cbuf, condition);
   virgl_encoder_write_dword(ctx->cbuf, mode);
   return 0;
}

int virgl_encoder_set_so_targets(struct virgl_context *ctx,
                                 unsigned num_targets,
                                 struct pipe_stream_output_target **targets,
                                 unsigned append_bitmask)
{
   uint i;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_STREAMOUT_TARGETS, 0, num_targets + 1));
   virgl_encoder_write_dword(ctx->cbuf, append_bitmask);
   for (i = 0; i < num_targets; i++) {
      struct virgl_so_target *tg = (struct virgl_so_target *)targets[i];
      virgl_encoder_write_dword(ctx->cbuf, tg->handle);
   }
   return 0;
}

int virgl_encoder_set_sub_ctx(struct virgl_context *ctx, uint32_t sub_ctx_id)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SUB_CTX, 0, 1));
   virgl_encoder_write_dword(ctx->cbuf, sub_ctx_id);
   return 0;
}

int virgl_encoder_create_sub_ctx(struct virgl_context *ctx, uint32_t sub_ctx_id)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_SUB_CTX, 0, 1));
   virgl_encoder_write_dword(ctx->cbuf, sub_ctx_id);
   return 0;
}

int virgl_encoder_destroy_sub_ctx(struct virgl_context *ctx, uint32_t sub_ctx_id)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_DESTROY_SUB_CTX, 0, 1));
   virgl_encoder_write_dword(ctx->cbuf, sub_ctx_id);
   return 0;
}

int virgl_encode_bind_shader(struct virgl_context *ctx,
                             uint32_t handle, uint32_t type)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BIND_SHADER, 0, 2));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   virgl_encoder_write_dword(ctx->cbuf, type);
   return 0;
}