1 /*
2  * Copyright 2014, 2015 Red Hat.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * on the rights to use, copy, modify, merge, publish, distribute, sub
8  * license, and/or sell copies of the Software, and to permit persons to whom
9  * the Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice (including the next
12  * paragraph) shall be included in all copies or substantial portions of the
13  * Software.
14  *
15  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
18  * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
19  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
20  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
21  * USE OR OTHER DEALINGS IN THE SOFTWARE.
22  */
23 #include <stdint.h>
24 #include <assert.h>
25 #include <string.h>
26 
27 #include "util/format/u_format.h"
28 #include "util/u_memory.h"
29 #include "util/u_math.h"
30 #include "pipe/p_state.h"
31 #include "tgsi/tgsi_dump.h"
32 #include "tgsi/tgsi_parse.h"
33 
34 #include "virgl_context.h"
35 #include "virgl_encode.h"
36 #include "virtio-gpu/virgl_protocol.h"
37 #include "virgl_resource.h"
38 #include "virgl_screen.h"
39 
40 #define VIRGL_ENCODE_MAX_DWORDS MIN2(VIRGL_MAX_CMDBUF_DWORDS, VIRGL_CMD0_MAX_DWORDS)
41 
42 #define CONV_FORMAT(f) [PIPE_FORMAT_##f] = VIRGL_FORMAT_##f,
43 
/* Pipe-format -> virgl wire-format translation table.
 *
 * Built with designated initializers (see CONV_FORMAT above), so any
 * pipe format not listed here maps to 0 (VIRGL_FORMAT_NONE);
 * pipe_to_virgl_format() emits a debug warning for such holes.
 * The table only works because the PIPE_FORMAT_* and VIRGL_FORMAT_*
 * names listed below are spelled identically in both enums.
 */
static const enum virgl_formats virgl_formats_conv_table[PIPE_FORMAT_COUNT] = {
   CONV_FORMAT(B8G8R8A8_UNORM)
   CONV_FORMAT(B8G8R8X8_UNORM)
   CONV_FORMAT(A8R8G8B8_UNORM)
   CONV_FORMAT(X8R8G8B8_UNORM)
   CONV_FORMAT(B5G5R5A1_UNORM)
   CONV_FORMAT(B4G4R4A4_UNORM)
   CONV_FORMAT(B5G6R5_UNORM)
   CONV_FORMAT(R10G10B10A2_UNORM)
   CONV_FORMAT(L8_UNORM)
   CONV_FORMAT(A8_UNORM)
   CONV_FORMAT(L8A8_UNORM)
   CONV_FORMAT(L16_UNORM)
   CONV_FORMAT(Z16_UNORM)
   CONV_FORMAT(Z32_UNORM)
   CONV_FORMAT(Z32_FLOAT)
   CONV_FORMAT(Z24_UNORM_S8_UINT)
   CONV_FORMAT(S8_UINT_Z24_UNORM)
   CONV_FORMAT(Z24X8_UNORM)
   CONV_FORMAT(X8Z24_UNORM)
   CONV_FORMAT(S8_UINT)
   CONV_FORMAT(R64_FLOAT)
   CONV_FORMAT(R64G64_FLOAT)
   CONV_FORMAT(R64G64B64_FLOAT)
   CONV_FORMAT(R64G64B64A64_FLOAT)
   CONV_FORMAT(R32_FLOAT)
   CONV_FORMAT(R32G32_FLOAT)
   CONV_FORMAT(R32G32B32_FLOAT)
   CONV_FORMAT(R32G32B32A32_FLOAT)
   CONV_FORMAT(R32_UNORM)
   CONV_FORMAT(R32G32_UNORM)
   CONV_FORMAT(R32G32B32_UNORM)
   CONV_FORMAT(R32G32B32A32_UNORM)
   CONV_FORMAT(R32_USCALED)
   CONV_FORMAT(R32G32_USCALED)
   CONV_FORMAT(R32G32B32_USCALED)
   CONV_FORMAT(R32G32B32A32_USCALED)
   CONV_FORMAT(R32_SNORM)
   CONV_FORMAT(R32G32_SNORM)
   CONV_FORMAT(R32G32B32_SNORM)
   CONV_FORMAT(R32G32B32A32_SNORM)
   CONV_FORMAT(R32_SSCALED)
   CONV_FORMAT(R32G32_SSCALED)
   CONV_FORMAT(R32G32B32_SSCALED)
   CONV_FORMAT(R32G32B32A32_SSCALED)
   CONV_FORMAT(R16_UNORM)
   CONV_FORMAT(R16G16_UNORM)
   CONV_FORMAT(R16G16B16_UNORM)
   CONV_FORMAT(R16G16B16A16_UNORM)
   CONV_FORMAT(R16_USCALED)
   CONV_FORMAT(R16G16_USCALED)
   CONV_FORMAT(R16G16B16_USCALED)
   CONV_FORMAT(R16G16B16A16_USCALED)
   CONV_FORMAT(R16_SNORM)
   CONV_FORMAT(R16G16_SNORM)
   CONV_FORMAT(R16G16B16_SNORM)
   CONV_FORMAT(R16G16B16A16_SNORM)
   CONV_FORMAT(R16_SSCALED)
   CONV_FORMAT(R16G16_SSCALED)
   CONV_FORMAT(R16G16B16_SSCALED)
   CONV_FORMAT(R16G16B16A16_SSCALED)
   CONV_FORMAT(R8_UNORM)
   CONV_FORMAT(R8G8_UNORM)
   CONV_FORMAT(R8G8B8_UNORM)
   CONV_FORMAT(R8G8B8A8_UNORM)
   CONV_FORMAT(R8_USCALED)
   CONV_FORMAT(R8G8_USCALED)
   CONV_FORMAT(R8G8B8_USCALED)
   CONV_FORMAT(R8G8B8A8_USCALED)
   CONV_FORMAT(R8_SNORM)
   CONV_FORMAT(R8G8_SNORM)
   CONV_FORMAT(R8G8B8_SNORM)
   CONV_FORMAT(R8G8B8A8_SNORM)
   CONV_FORMAT(R8_SSCALED)
   CONV_FORMAT(R8G8_SSCALED)
   CONV_FORMAT(R8G8B8_SSCALED)
   CONV_FORMAT(R8G8B8A8_SSCALED)
   CONV_FORMAT(R16_FLOAT)
   CONV_FORMAT(R16G16_FLOAT)
   CONV_FORMAT(R16G16B16_FLOAT)
   CONV_FORMAT(R16G16B16A16_FLOAT)
   CONV_FORMAT(L8_SRGB)
   CONV_FORMAT(L8A8_SRGB)
   CONV_FORMAT(R8G8B8_SRGB)
   CONV_FORMAT(A8B8G8R8_SRGB)
   CONV_FORMAT(X8B8G8R8_SRGB)
   CONV_FORMAT(B8G8R8A8_SRGB)
   CONV_FORMAT(B8G8R8X8_SRGB)
   CONV_FORMAT(A8R8G8B8_SRGB)
   CONV_FORMAT(X8R8G8B8_SRGB)
   CONV_FORMAT(R8G8B8A8_SRGB)
   CONV_FORMAT(DXT1_RGB)
   CONV_FORMAT(DXT1_RGBA)
   CONV_FORMAT(DXT3_RGBA)
   CONV_FORMAT(DXT5_RGBA)
   CONV_FORMAT(DXT1_SRGB)
   CONV_FORMAT(DXT1_SRGBA)
   CONV_FORMAT(DXT3_SRGBA)
   CONV_FORMAT(DXT5_SRGBA)
   CONV_FORMAT(RGTC1_UNORM)
   CONV_FORMAT(RGTC1_SNORM)
   CONV_FORMAT(RGTC2_UNORM)
   CONV_FORMAT(RGTC2_SNORM)
   CONV_FORMAT(A8B8G8R8_UNORM)
   CONV_FORMAT(B5G5R5X1_UNORM)
   CONV_FORMAT(R10G10B10A2_USCALED)
   CONV_FORMAT(R11G11B10_FLOAT)
   CONV_FORMAT(R9G9B9E5_FLOAT)
   CONV_FORMAT(Z32_FLOAT_S8X24_UINT)
   CONV_FORMAT(B10G10R10A2_UNORM)
   CONV_FORMAT(R8G8B8X8_UNORM)
   CONV_FORMAT(B4G4R4X4_UNORM)
   CONV_FORMAT(X24S8_UINT)
   CONV_FORMAT(S8X24_UINT)
   CONV_FORMAT(X32_S8X24_UINT)
   CONV_FORMAT(B2G3R3_UNORM)
   CONV_FORMAT(L16A16_UNORM)
   CONV_FORMAT(A16_UNORM)
   CONV_FORMAT(I16_UNORM)
   CONV_FORMAT(LATC1_UNORM)
   CONV_FORMAT(LATC1_SNORM)
   CONV_FORMAT(LATC2_UNORM)
   CONV_FORMAT(LATC2_SNORM)
   CONV_FORMAT(A8_SNORM)
   CONV_FORMAT(L8_SNORM)
   CONV_FORMAT(L8A8_SNORM)
   CONV_FORMAT(A16_SNORM)
   CONV_FORMAT(L16_SNORM)
   CONV_FORMAT(L16A16_SNORM)
   CONV_FORMAT(A16_FLOAT)
   CONV_FORMAT(L16_FLOAT)
   CONV_FORMAT(L16A16_FLOAT)
   CONV_FORMAT(A32_FLOAT)
   CONV_FORMAT(L32_FLOAT)
   CONV_FORMAT(L32A32_FLOAT)
   CONV_FORMAT(YV12)
   CONV_FORMAT(YV16)
   CONV_FORMAT(IYUV)
   CONV_FORMAT(NV12)
   CONV_FORMAT(NV21)
   CONV_FORMAT(R8_UINT)
   CONV_FORMAT(R8G8_UINT)
   CONV_FORMAT(R8G8B8_UINT)
   CONV_FORMAT(R8G8B8A8_UINT)
   CONV_FORMAT(R8_SINT)
   CONV_FORMAT(R8G8_SINT)
   CONV_FORMAT(R8G8B8_SINT)
   CONV_FORMAT(R8G8B8A8_SINT)
   CONV_FORMAT(R16_UINT)
   CONV_FORMAT(R16G16_UINT)
   CONV_FORMAT(R16G16B16_UINT)
   CONV_FORMAT(R16G16B16A16_UINT)
   CONV_FORMAT(R16_SINT)
   CONV_FORMAT(R16G16_SINT)
   CONV_FORMAT(R16G16B16_SINT)
   CONV_FORMAT(R16G16B16A16_SINT)
   CONV_FORMAT(R32_UINT)
   CONV_FORMAT(R32G32_UINT)
   CONV_FORMAT(R32G32B32_UINT)
   CONV_FORMAT(R32G32B32A32_UINT)
   CONV_FORMAT(R32_SINT)
   CONV_FORMAT(R32G32_SINT)
   CONV_FORMAT(R32G32B32_SINT)
   CONV_FORMAT(R32G32B32A32_SINT)
   CONV_FORMAT(A8_UINT)
   CONV_FORMAT(L8_UINT)
   CONV_FORMAT(L8A8_UINT)
   CONV_FORMAT(A8_SINT)
   CONV_FORMAT(L8_SINT)
   CONV_FORMAT(L8A8_SINT)
   CONV_FORMAT(A16_UINT)
   CONV_FORMAT(L16_UINT)
   CONV_FORMAT(L16A16_UINT)
   CONV_FORMAT(A16_SINT)
   CONV_FORMAT(L16_SINT)
   CONV_FORMAT(L16A16_SINT)
   CONV_FORMAT(A32_UINT)
   CONV_FORMAT(L32_UINT)
   CONV_FORMAT(L32A32_UINT)
   CONV_FORMAT(A32_SINT)
   CONV_FORMAT(L32_SINT)
   CONV_FORMAT(L32A32_SINT)
   CONV_FORMAT(R10G10B10A2_SSCALED)
   CONV_FORMAT(R10G10B10A2_SNORM)
   CONV_FORMAT(B10G10R10A2_SNORM)
   CONV_FORMAT(B10G10R10A2_UINT)
   CONV_FORMAT(R8G8B8X8_SNORM)
   CONV_FORMAT(R8G8B8X8_SRGB)
   CONV_FORMAT(R8G8B8X8_UINT)
   CONV_FORMAT(R8G8B8X8_SINT)
   CONV_FORMAT(B10G10R10X2_UNORM)
   CONV_FORMAT(R16G16B16X16_UNORM)
   CONV_FORMAT(R16G16B16X16_SNORM)
   CONV_FORMAT(R16G16B16X16_FLOAT)
   CONV_FORMAT(R16G16B16X16_UINT)
   CONV_FORMAT(R16G16B16X16_SINT)
   CONV_FORMAT(R32G32B32X32_FLOAT)
   CONV_FORMAT(R32G32B32X32_UINT)
   CONV_FORMAT(R32G32B32X32_SINT)
   CONV_FORMAT(R10G10B10A2_UINT)
   CONV_FORMAT(BPTC_RGBA_UNORM)
   CONV_FORMAT(BPTC_SRGBA)
   CONV_FORMAT(BPTC_RGB_FLOAT)
   CONV_FORMAT(BPTC_RGB_UFLOAT)
   CONV_FORMAT(R10G10B10X2_UNORM)
   CONV_FORMAT(A4B4G4R4_UNORM)
   CONV_FORMAT(R8_SRGB)
   CONV_FORMAT(ETC2_RGB8)
   CONV_FORMAT(ETC2_SRGB8)
   CONV_FORMAT(ETC2_RGB8A1)
   CONV_FORMAT(ETC2_SRGB8A1)
   CONV_FORMAT(ETC2_RGBA8)
   CONV_FORMAT(ETC2_SRGBA8)
   CONV_FORMAT(ETC2_R11_UNORM)
   CONV_FORMAT(ETC2_R11_SNORM)
   CONV_FORMAT(ETC2_RG11_UNORM)
   CONV_FORMAT(ETC2_RG11_SNORM)
};
262 
pipe_to_virgl_format(enum pipe_format format)263 enum virgl_formats pipe_to_virgl_format(enum pipe_format format)
264 {
265    enum virgl_formats vformat = virgl_formats_conv_table[format];
266    if (format != PIPE_FORMAT_NONE && !vformat)
267       debug_printf("VIRGL: pipe format %s not in the format table\n", util_format_name(format));
268    return vformat;
269 }
270 
virgl_encoder_write_cmd_dword(struct virgl_context * ctx,uint32_t dword)271 static int virgl_encoder_write_cmd_dword(struct virgl_context *ctx,
272                                         uint32_t dword)
273 {
274    int len = (dword >> 16);
275 
276    if ((ctx->cbuf->cdw + len + 1) > VIRGL_MAX_CMDBUF_DWORDS)
277       ctx->base.flush(&ctx->base, NULL, 0);
278 
279    virgl_encoder_write_dword(ctx->cbuf, dword);
280    return 0;
281 }
282 
virgl_encoder_emit_resource(struct virgl_screen * vs,struct virgl_cmd_buf * buf,struct virgl_resource * res)283 static void virgl_encoder_emit_resource(struct virgl_screen *vs,
284                                         struct virgl_cmd_buf *buf,
285                                         struct virgl_resource *res)
286 {
287    struct virgl_winsys *vws = vs->vws;
288    if (res && res->hw_res)
289       vws->emit_res(vws, buf, res->hw_res, TRUE);
290    else {
291       virgl_encoder_write_dword(buf, 0);
292    }
293 }
294 
virgl_encoder_write_res(struct virgl_context * ctx,struct virgl_resource * res)295 static void virgl_encoder_write_res(struct virgl_context *ctx,
296                                     struct virgl_resource *res)
297 {
298    struct virgl_screen *vs = virgl_screen(ctx->base.screen);
299    virgl_encoder_emit_resource(vs, ctx->cbuf, res);
300 }
301 
virgl_encode_bind_object(struct virgl_context * ctx,uint32_t handle,uint32_t object)302 int virgl_encode_bind_object(struct virgl_context *ctx,
303                             uint32_t handle, uint32_t object)
304 {
305    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BIND_OBJECT, object, 1));
306    virgl_encoder_write_dword(ctx->cbuf, handle);
307    return 0;
308 }
309 
virgl_encode_delete_object(struct virgl_context * ctx,uint32_t handle,uint32_t object)310 int virgl_encode_delete_object(struct virgl_context *ctx,
311                               uint32_t handle, uint32_t object)
312 {
313    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_DESTROY_OBJECT, object, 1));
314    virgl_encoder_write_dword(ctx->cbuf, handle);
315    return 0;
316 }
317 
virgl_encode_blend_state(struct virgl_context * ctx,uint32_t handle,const struct pipe_blend_state * blend_state)318 int virgl_encode_blend_state(struct virgl_context *ctx,
319                             uint32_t handle,
320                             const struct pipe_blend_state *blend_state)
321 {
322    uint32_t tmp;
323    int i;
324 
325    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_BLEND, VIRGL_OBJ_BLEND_SIZE));
326    virgl_encoder_write_dword(ctx->cbuf, handle);
327 
328    tmp =
329       VIRGL_OBJ_BLEND_S0_INDEPENDENT_BLEND_ENABLE(blend_state->independent_blend_enable) |
330       VIRGL_OBJ_BLEND_S0_LOGICOP_ENABLE(blend_state->logicop_enable) |
331       VIRGL_OBJ_BLEND_S0_DITHER(blend_state->dither) |
332       VIRGL_OBJ_BLEND_S0_ALPHA_TO_COVERAGE(blend_state->alpha_to_coverage) |
333       VIRGL_OBJ_BLEND_S0_ALPHA_TO_ONE(blend_state->alpha_to_one);
334 
335    virgl_encoder_write_dword(ctx->cbuf, tmp);
336 
337    tmp = VIRGL_OBJ_BLEND_S1_LOGICOP_FUNC(blend_state->logicop_func);
338    virgl_encoder_write_dword(ctx->cbuf, tmp);
339 
340    for (i = 0; i < VIRGL_MAX_COLOR_BUFS; i++) {
341       /* We use alpha src factor to pass the advanced blend equation value
342        * to the host. By doing so, we don't have to change the protocol.
343        */
344       uint32_t alpha = (i == 0 && blend_state->advanced_blend_func)
345                         ? blend_state->advanced_blend_func
346                         : blend_state->rt[i].alpha_src_factor;
347       tmp =
348          VIRGL_OBJ_BLEND_S2_RT_BLEND_ENABLE(blend_state->rt[i].blend_enable) |
349          VIRGL_OBJ_BLEND_S2_RT_RGB_FUNC(blend_state->rt[i].rgb_func) |
350          VIRGL_OBJ_BLEND_S2_RT_RGB_SRC_FACTOR(blend_state->rt[i].rgb_src_factor) |
351          VIRGL_OBJ_BLEND_S2_RT_RGB_DST_FACTOR(blend_state->rt[i].rgb_dst_factor)|
352          VIRGL_OBJ_BLEND_S2_RT_ALPHA_FUNC(blend_state->rt[i].alpha_func) |
353          VIRGL_OBJ_BLEND_S2_RT_ALPHA_SRC_FACTOR(alpha) |
354          VIRGL_OBJ_BLEND_S2_RT_ALPHA_DST_FACTOR(blend_state->rt[i].alpha_dst_factor) |
355          VIRGL_OBJ_BLEND_S2_RT_COLORMASK(blend_state->rt[i].colormask);
356       virgl_encoder_write_dword(ctx->cbuf, tmp);
357    }
358    return 0;
359 }
360 
virgl_encode_dsa_state(struct virgl_context * ctx,uint32_t handle,const struct pipe_depth_stencil_alpha_state * dsa_state)361 int virgl_encode_dsa_state(struct virgl_context *ctx,
362                           uint32_t handle,
363                           const struct pipe_depth_stencil_alpha_state *dsa_state)
364 {
365    uint32_t tmp;
366    int i;
367    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_DSA, VIRGL_OBJ_DSA_SIZE));
368    virgl_encoder_write_dword(ctx->cbuf, handle);
369 
370    tmp = VIRGL_OBJ_DSA_S0_DEPTH_ENABLE(dsa_state->depth.enabled) |
371       VIRGL_OBJ_DSA_S0_DEPTH_WRITEMASK(dsa_state->depth.writemask) |
372       VIRGL_OBJ_DSA_S0_DEPTH_FUNC(dsa_state->depth.func) |
373       VIRGL_OBJ_DSA_S0_ALPHA_ENABLED(dsa_state->alpha.enabled) |
374       VIRGL_OBJ_DSA_S0_ALPHA_FUNC(dsa_state->alpha.func);
375    virgl_encoder_write_dword(ctx->cbuf, tmp);
376 
377    for (i = 0; i < 2; i++) {
378       tmp = VIRGL_OBJ_DSA_S1_STENCIL_ENABLED(dsa_state->stencil[i].enabled) |
379          VIRGL_OBJ_DSA_S1_STENCIL_FUNC(dsa_state->stencil[i].func) |
380          VIRGL_OBJ_DSA_S1_STENCIL_FAIL_OP(dsa_state->stencil[i].fail_op) |
381          VIRGL_OBJ_DSA_S1_STENCIL_ZPASS_OP(dsa_state->stencil[i].zpass_op) |
382          VIRGL_OBJ_DSA_S1_STENCIL_ZFAIL_OP(dsa_state->stencil[i].zfail_op) |
383          VIRGL_OBJ_DSA_S1_STENCIL_VALUEMASK(dsa_state->stencil[i].valuemask) |
384          VIRGL_OBJ_DSA_S1_STENCIL_WRITEMASK(dsa_state->stencil[i].writemask);
385       virgl_encoder_write_dword(ctx->cbuf, tmp);
386    }
387 
388    virgl_encoder_write_dword(ctx->cbuf, fui(dsa_state->alpha.ref_value));
389    return 0;
390 }
virgl_encode_rasterizer_state(struct virgl_context * ctx,uint32_t handle,const struct pipe_rasterizer_state * state)391 int virgl_encode_rasterizer_state(struct virgl_context *ctx,
392                                   uint32_t handle,
393                                   const struct pipe_rasterizer_state *state)
394 {
395    uint32_t tmp;
396 
397    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_RASTERIZER, VIRGL_OBJ_RS_SIZE));
398    virgl_encoder_write_dword(ctx->cbuf, handle);
399 
400    tmp = VIRGL_OBJ_RS_S0_FLATSHADE(state->flatshade) |
401       VIRGL_OBJ_RS_S0_DEPTH_CLIP(state->depth_clip_near) |
402       VIRGL_OBJ_RS_S0_CLIP_HALFZ(state->clip_halfz) |
403       VIRGL_OBJ_RS_S0_RASTERIZER_DISCARD(state->rasterizer_discard) |
404       VIRGL_OBJ_RS_S0_FLATSHADE_FIRST(state->flatshade_first) |
405       VIRGL_OBJ_RS_S0_LIGHT_TWOSIZE(state->light_twoside) |
406       VIRGL_OBJ_RS_S0_SPRITE_COORD_MODE(state->sprite_coord_mode) |
407       VIRGL_OBJ_RS_S0_POINT_QUAD_RASTERIZATION(state->point_quad_rasterization) |
408       VIRGL_OBJ_RS_S0_CULL_FACE(state->cull_face) |
409       VIRGL_OBJ_RS_S0_FILL_FRONT(state->fill_front) |
410       VIRGL_OBJ_RS_S0_FILL_BACK(state->fill_back) |
411       VIRGL_OBJ_RS_S0_SCISSOR(state->scissor) |
412       VIRGL_OBJ_RS_S0_FRONT_CCW(state->front_ccw) |
413       VIRGL_OBJ_RS_S0_CLAMP_VERTEX_COLOR(state->clamp_vertex_color) |
414       VIRGL_OBJ_RS_S0_CLAMP_FRAGMENT_COLOR(state->clamp_fragment_color) |
415       VIRGL_OBJ_RS_S0_OFFSET_LINE(state->offset_line) |
416       VIRGL_OBJ_RS_S0_OFFSET_POINT(state->offset_point) |
417       VIRGL_OBJ_RS_S0_OFFSET_TRI(state->offset_tri) |
418       VIRGL_OBJ_RS_S0_POLY_SMOOTH(state->poly_smooth) |
419       VIRGL_OBJ_RS_S0_POLY_STIPPLE_ENABLE(state->poly_stipple_enable) |
420       VIRGL_OBJ_RS_S0_POINT_SMOOTH(state->point_smooth) |
421       VIRGL_OBJ_RS_S0_POINT_SIZE_PER_VERTEX(state->point_size_per_vertex) |
422       VIRGL_OBJ_RS_S0_MULTISAMPLE(state->multisample) |
423       VIRGL_OBJ_RS_S0_LINE_SMOOTH(state->line_smooth) |
424       VIRGL_OBJ_RS_S0_LINE_STIPPLE_ENABLE(state->line_stipple_enable) |
425       VIRGL_OBJ_RS_S0_LINE_LAST_PIXEL(state->line_last_pixel) |
426       VIRGL_OBJ_RS_S0_HALF_PIXEL_CENTER(state->half_pixel_center) |
427       VIRGL_OBJ_RS_S0_BOTTOM_EDGE_RULE(state->bottom_edge_rule) |
428       VIRGL_OBJ_RS_S0_FORCE_PERSAMPLE_INTERP(state->force_persample_interp);
429 
430    virgl_encoder_write_dword(ctx->cbuf, tmp); /* S0 */
431    virgl_encoder_write_dword(ctx->cbuf, fui(state->point_size)); /* S1 */
432    virgl_encoder_write_dword(ctx->cbuf, state->sprite_coord_enable); /* S2 */
433    tmp = VIRGL_OBJ_RS_S3_LINE_STIPPLE_PATTERN(state->line_stipple_pattern) |
434       VIRGL_OBJ_RS_S3_LINE_STIPPLE_FACTOR(state->line_stipple_factor) |
435       VIRGL_OBJ_RS_S3_CLIP_PLANE_ENABLE(state->clip_plane_enable);
436    virgl_encoder_write_dword(ctx->cbuf, tmp); /* S3 */
437    virgl_encoder_write_dword(ctx->cbuf, fui(state->line_width)); /* S4 */
438    virgl_encoder_write_dword(ctx->cbuf, fui(state->offset_units)); /* S5 */
439    virgl_encoder_write_dword(ctx->cbuf, fui(state->offset_scale)); /* S6 */
440    virgl_encoder_write_dword(ctx->cbuf, fui(state->offset_clamp)); /* S7 */
441    return 0;
442 }
443 
virgl_emit_shader_header(struct virgl_context * ctx,uint32_t handle,uint32_t len,uint32_t type,uint32_t offlen,uint32_t num_tokens)444 static void virgl_emit_shader_header(struct virgl_context *ctx,
445                                      uint32_t handle, uint32_t len,
446                                      uint32_t type, uint32_t offlen,
447                                      uint32_t num_tokens)
448 {
449    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_SHADER, len));
450    virgl_encoder_write_dword(ctx->cbuf, handle);
451    virgl_encoder_write_dword(ctx->cbuf, type);
452    virgl_encoder_write_dword(ctx->cbuf, offlen);
453    virgl_encoder_write_dword(ctx->cbuf, num_tokens);
454 }
455 
virgl_emit_shader_streamout(struct virgl_context * ctx,const struct pipe_stream_output_info * so_info)456 static void virgl_emit_shader_streamout(struct virgl_context *ctx,
457                                         const struct pipe_stream_output_info *so_info)
458 {
459    int num_outputs = 0;
460    int i;
461    uint32_t tmp;
462 
463    if (so_info)
464       num_outputs = so_info->num_outputs;
465 
466    virgl_encoder_write_dword(ctx->cbuf, num_outputs);
467    if (num_outputs) {
468       for (i = 0; i < 4; i++)
469          virgl_encoder_write_dword(ctx->cbuf, so_info->stride[i]);
470 
471       for (i = 0; i < so_info->num_outputs; i++) {
472          tmp =
473            VIRGL_OBJ_SHADER_SO_OUTPUT_REGISTER_INDEX(so_info->output[i].register_index) |
474            VIRGL_OBJ_SHADER_SO_OUTPUT_START_COMPONENT(so_info->output[i].start_component) |
475            VIRGL_OBJ_SHADER_SO_OUTPUT_NUM_COMPONENTS(so_info->output[i].num_components) |
476            VIRGL_OBJ_SHADER_SO_OUTPUT_BUFFER(so_info->output[i].output_buffer) |
477            VIRGL_OBJ_SHADER_SO_OUTPUT_DST_OFFSET(so_info->output[i].dst_offset);
478          virgl_encoder_write_dword(ctx->cbuf, tmp);
479          virgl_encoder_write_dword(ctx->cbuf, so_info->output[i].stream);
480       }
481    }
482 }
483 
virgl_encode_shader_state(struct virgl_context * ctx,uint32_t handle,uint32_t type,const struct pipe_stream_output_info * so_info,uint32_t cs_req_local_mem,const struct tgsi_token * tokens)484 int virgl_encode_shader_state(struct virgl_context *ctx,
485                               uint32_t handle,
486                               uint32_t type,
487                               const struct pipe_stream_output_info *so_info,
488                               uint32_t cs_req_local_mem,
489                               const struct tgsi_token *tokens)
490 {
491    char *str, *sptr;
492    uint32_t shader_len, len;
493    bool bret;
494    int num_tokens = tgsi_num_tokens(tokens);
495    int str_total_size = 65536;
496    int retry_size = 1;
497    uint32_t left_bytes, base_hdr_size, strm_hdr_size, thispass;
498    bool first_pass;
499    str = CALLOC(1, str_total_size);
500    if (!str)
501       return -1;
502 
503    do {
504       int old_size;
505 
506       bret = tgsi_dump_str(tokens, TGSI_DUMP_FLOAT_AS_HEX, str, str_total_size);
507       if (bret == false) {
508          if (virgl_debug & VIRGL_DEBUG_VERBOSE)
509             debug_printf("Failed to translate shader in available space - trying again\n");
510          old_size = str_total_size;
511          str_total_size = 65536 * retry_size;
512          retry_size *= 2;
513          str = REALLOC(str, old_size, str_total_size);
514          if (!str)
515             return -1;
516       }
517    } while (bret == false && retry_size < 1024);
518 
519    if (bret == false)
520       return -1;
521 
522    if (virgl_debug & VIRGL_DEBUG_TGSI)
523       debug_printf("TGSI:\n---8<---\n%s\n---8<---\n", str);
524 
525    shader_len = strlen(str) + 1;
526 
527    left_bytes = shader_len;
528 
529    base_hdr_size = 5;
530    strm_hdr_size = so_info->num_outputs ? so_info->num_outputs * 2 + 4 : 0;
531    first_pass = true;
532    sptr = str;
533    while (left_bytes) {
534       uint32_t length, offlen;
535       int hdr_len = base_hdr_size + (first_pass ? strm_hdr_size : 0);
536       if (ctx->cbuf->cdw + hdr_len + 1 >= VIRGL_ENCODE_MAX_DWORDS)
537          ctx->base.flush(&ctx->base, NULL, 0);
538 
539       thispass = (VIRGL_ENCODE_MAX_DWORDS - ctx->cbuf->cdw - hdr_len - 1) * 4;
540 
541       length = MIN2(thispass, left_bytes);
542       len = ((length + 3) / 4) + hdr_len;
543 
544       if (first_pass)
545          offlen = VIRGL_OBJ_SHADER_OFFSET_VAL(shader_len);
546       else
547          offlen = VIRGL_OBJ_SHADER_OFFSET_VAL((uintptr_t)sptr - (uintptr_t)str) | VIRGL_OBJ_SHADER_OFFSET_CONT;
548 
549       virgl_emit_shader_header(ctx, handle, len, type, offlen, num_tokens);
550 
551       if (type == PIPE_SHADER_COMPUTE)
552          virgl_encoder_write_dword(ctx->cbuf, cs_req_local_mem);
553       else
554          virgl_emit_shader_streamout(ctx, first_pass ? so_info : NULL);
555 
556       virgl_encoder_write_block(ctx->cbuf, (uint8_t *)sptr, length);
557 
558       sptr += length;
559       first_pass = false;
560       left_bytes -= length;
561    }
562 
563    FREE(str);
564    return 0;
565 }
566 
567 
virgl_encode_clear(struct virgl_context * ctx,unsigned buffers,const union pipe_color_union * color,double depth,unsigned stencil)568 int virgl_encode_clear(struct virgl_context *ctx,
569                       unsigned buffers,
570                       const union pipe_color_union *color,
571                       double depth, unsigned stencil)
572 {
573    int i;
574    uint64_t qword;
575 
576    STATIC_ASSERT(sizeof(qword) == sizeof(depth));
577    memcpy(&qword, &depth, sizeof(qword));
578 
579    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CLEAR, 0, VIRGL_OBJ_CLEAR_SIZE));
580    virgl_encoder_write_dword(ctx->cbuf, buffers);
581    for (i = 0; i < 4; i++)
582       virgl_encoder_write_dword(ctx->cbuf, color->ui[i]);
583    virgl_encoder_write_qword(ctx->cbuf, qword);
584    virgl_encoder_write_dword(ctx->cbuf, stencil);
585    return 0;
586 }
587 
virgl_encode_clear_texture(struct virgl_context * ctx,struct virgl_resource * res,unsigned int level,const struct pipe_box * box,const void * data)588 int virgl_encode_clear_texture(struct virgl_context *ctx,
589                                struct virgl_resource *res,
590                                unsigned int level,
591                                const struct pipe_box *box,
592                                const void *data)
593 {
594    const struct util_format_description *desc = util_format_description(res->u.b.format);
595    unsigned block_bits = desc->block.bits;
596    uint32_t arr[4] = {0};
597    /* The spec describe <data> as a pointer to an array of between one
598     * and four components of texel data that will be used as the source
599     * for the constant fill value.
600     * Here, we are just copying the memory into <arr>. We do not try to
601     * re-create the data array. The host part will take care of interpreting
602     * the memory and applying the correct format to the clear call.
603     */
604    memcpy(&arr, data, block_bits / 8);
605 
606    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CLEAR_TEXTURE, 0, VIRGL_CLEAR_TEXTURE_SIZE));
607    virgl_encoder_write_res(ctx, res);
608    virgl_encoder_write_dword(ctx->cbuf, level);
609    virgl_encoder_write_dword(ctx->cbuf, box->x);
610    virgl_encoder_write_dword(ctx->cbuf, box->y);
611    virgl_encoder_write_dword(ctx->cbuf, box->z);
612    virgl_encoder_write_dword(ctx->cbuf, box->width);
613    virgl_encoder_write_dword(ctx->cbuf, box->height);
614    virgl_encoder_write_dword(ctx->cbuf, box->depth);
615    for (unsigned i = 0; i < 4; i++)
616       virgl_encoder_write_dword(ctx->cbuf, arr[i]);
617    return 0;
618 }
619 
virgl_encoder_set_framebuffer_state(struct virgl_context * ctx,const struct pipe_framebuffer_state * state)620 int virgl_encoder_set_framebuffer_state(struct virgl_context *ctx,
621                                        const struct pipe_framebuffer_state *state)
622 {
623    struct virgl_surface *zsurf = virgl_surface(state->zsbuf);
624    int i;
625 
626    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_FRAMEBUFFER_STATE, 0, VIRGL_SET_FRAMEBUFFER_STATE_SIZE(state->nr_cbufs)));
627    virgl_encoder_write_dword(ctx->cbuf, state->nr_cbufs);
628    virgl_encoder_write_dword(ctx->cbuf, zsurf ? zsurf->handle : 0);
629    for (i = 0; i < state->nr_cbufs; i++) {
630       struct virgl_surface *surf = virgl_surface(state->cbufs[i]);
631       virgl_encoder_write_dword(ctx->cbuf, surf ? surf->handle : 0);
632    }
633 
634    struct virgl_screen *rs = virgl_screen(ctx->base.screen);
635    if (rs->caps.caps.v2.capability_bits & VIRGL_CAP_FB_NO_ATTACH) {
636       virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_FRAMEBUFFER_STATE_NO_ATTACH, 0, VIRGL_SET_FRAMEBUFFER_STATE_NO_ATTACH_SIZE));
637       virgl_encoder_write_dword(ctx->cbuf, state->width | (state->height << 16));
638       virgl_encoder_write_dword(ctx->cbuf, state->layers | (state->samples << 16));
639    }
640    return 0;
641 }
642 
virgl_encoder_set_viewport_states(struct virgl_context * ctx,int start_slot,int num_viewports,const struct pipe_viewport_state * states)643 int virgl_encoder_set_viewport_states(struct virgl_context *ctx,
644                                       int start_slot,
645                                       int num_viewports,
646                                       const struct pipe_viewport_state *states)
647 {
648    int i,v;
649    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_VIEWPORT_STATE, 0, VIRGL_SET_VIEWPORT_STATE_SIZE(num_viewports)));
650    virgl_encoder_write_dword(ctx->cbuf, start_slot);
651    for (v = 0; v < num_viewports; v++) {
652       for (i = 0; i < 3; i++)
653          virgl_encoder_write_dword(ctx->cbuf, fui(states[v].scale[i]));
654       for (i = 0; i < 3; i++)
655          virgl_encoder_write_dword(ctx->cbuf, fui(states[v].translate[i]));
656    }
657    return 0;
658 }
659 
virgl_encoder_create_vertex_elements(struct virgl_context * ctx,uint32_t handle,unsigned num_elements,const struct pipe_vertex_element * element)660 int virgl_encoder_create_vertex_elements(struct virgl_context *ctx,
661                                         uint32_t handle,
662                                         unsigned num_elements,
663                                         const struct pipe_vertex_element *element)
664 {
665    int i;
666    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_VERTEX_ELEMENTS, VIRGL_OBJ_VERTEX_ELEMENTS_SIZE(num_elements)));
667    virgl_encoder_write_dword(ctx->cbuf, handle);
668    for (i = 0; i < num_elements; i++) {
669       virgl_encoder_write_dword(ctx->cbuf, element[i].src_offset);
670       virgl_encoder_write_dword(ctx->cbuf, element[i].instance_divisor);
671       virgl_encoder_write_dword(ctx->cbuf, element[i].vertex_buffer_index);
672       virgl_encoder_write_dword(ctx->cbuf, pipe_to_virgl_format(element[i].src_format));
673    }
674    return 0;
675 }
676 
virgl_encoder_set_vertex_buffers(struct virgl_context * ctx,unsigned num_buffers,const struct pipe_vertex_buffer * buffers)677 int virgl_encoder_set_vertex_buffers(struct virgl_context *ctx,
678                                     unsigned num_buffers,
679                                     const struct pipe_vertex_buffer *buffers)
680 {
681    int i;
682    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_VERTEX_BUFFERS, 0, VIRGL_SET_VERTEX_BUFFERS_SIZE(num_buffers)));
683    for (i = 0; i < num_buffers; i++) {
684       struct virgl_resource *res = virgl_resource(buffers[i].buffer.resource);
685       virgl_encoder_write_dword(ctx->cbuf, buffers[i].stride);
686       virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_offset);
687       virgl_encoder_write_res(ctx, res);
688    }
689    return 0;
690 }
691 
virgl_encoder_set_index_buffer(struct virgl_context * ctx,const struct virgl_indexbuf * ib)692 int virgl_encoder_set_index_buffer(struct virgl_context *ctx,
693                                   const struct virgl_indexbuf *ib)
694 {
695    int length = VIRGL_SET_INDEX_BUFFER_SIZE(ib);
696    struct virgl_resource *res = NULL;
697    if (ib)
698       res = virgl_resource(ib->buffer);
699 
700    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_INDEX_BUFFER, 0, length));
701    virgl_encoder_write_res(ctx, res);
702    if (ib) {
703       virgl_encoder_write_dword(ctx->cbuf, ib->index_size);
704       virgl_encoder_write_dword(ctx->cbuf, ib->offset);
705    }
706    return 0;
707 }
708 
virgl_encoder_draw_vbo(struct virgl_context * ctx,const struct pipe_draw_info * info)709 int virgl_encoder_draw_vbo(struct virgl_context *ctx,
710                           const struct pipe_draw_info *info)
711 {
712    uint32_t length = VIRGL_DRAW_VBO_SIZE;
713    if (info->mode == PIPE_PRIM_PATCHES)
714       length = VIRGL_DRAW_VBO_SIZE_TESS;
715    if (info->indirect)
716       length = VIRGL_DRAW_VBO_SIZE_INDIRECT;
717    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_DRAW_VBO, 0, length));
718    virgl_encoder_write_dword(ctx->cbuf, info->start);
719    virgl_encoder_write_dword(ctx->cbuf, info->count);
720    virgl_encoder_write_dword(ctx->cbuf, info->mode);
721    virgl_encoder_write_dword(ctx->cbuf, !!info->index_size);
722    virgl_encoder_write_dword(ctx->cbuf, info->instance_count);
723    virgl_encoder_write_dword(ctx->cbuf, info->index_bias);
724    virgl_encoder_write_dword(ctx->cbuf, info->start_instance);
725    virgl_encoder_write_dword(ctx->cbuf, info->primitive_restart);
726    virgl_encoder_write_dword(ctx->cbuf, info->restart_index);
727    virgl_encoder_write_dword(ctx->cbuf, info->min_index);
728    virgl_encoder_write_dword(ctx->cbuf, info->max_index);
729    if (info->count_from_stream_output)
730       virgl_encoder_write_dword(ctx->cbuf, info->count_from_stream_output->buffer_size);
731    else
732       virgl_encoder_write_dword(ctx->cbuf, 0);
733    if (length >= VIRGL_DRAW_VBO_SIZE_TESS) {
734       virgl_encoder_write_dword(ctx->cbuf, info->vertices_per_patch); /* vertices per patch */
735       virgl_encoder_write_dword(ctx->cbuf, info->drawid); /* drawid */
736    }
737    if (length == VIRGL_DRAW_VBO_SIZE_INDIRECT) {
738       virgl_encoder_write_res(ctx, virgl_resource(info->indirect->buffer));
739       virgl_encoder_write_dword(ctx->cbuf, info->indirect->offset);
740       virgl_encoder_write_dword(ctx->cbuf, info->indirect->stride); /* indirect stride */
741       virgl_encoder_write_dword(ctx->cbuf, info->indirect->draw_count); /* indirect draw count */
742       virgl_encoder_write_dword(ctx->cbuf, info->indirect->indirect_draw_count_offset); /* indirect draw count offset */
743       if (info->indirect->indirect_draw_count)
744          virgl_encoder_write_res(ctx, virgl_resource(info->indirect->indirect_draw_count));
745       else
746          virgl_encoder_write_dword(ctx->cbuf, 0); /* indirect draw count handle */
747    }
748    return 0;
749 }
750 
/* Encode a CREATE_OBJECT(SURFACE) command for a texture surface:
 * handle, resource, format, mip level and the first/last layer range
 * packed into one dword.
 */
int virgl_encoder_create_surface(struct virgl_context *ctx,
                                uint32_t handle,
                                struct virgl_resource *res,
                                const struct pipe_surface *templat)
{
   struct virgl_cmd_buf *buf = ctx->cbuf;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_SURFACE, VIRGL_OBJ_SURFACE_SIZE));
   virgl_encoder_write_dword(buf, handle);
   virgl_encoder_write_res(ctx, res);
   virgl_encoder_write_dword(buf, pipe_to_virgl_format(templat->format));

   /* This path only handles texture surfaces, never buffers. */
   assert(templat->texture->target != PIPE_BUFFER);
   virgl_encoder_write_dword(buf, templat->u.tex.level);
   virgl_encoder_write_dword(buf, templat->u.tex.first_layer | (templat->u.tex.last_layer << 16));

   return 0;
}
767 
/* Encode a CREATE_OBJECT(STREAMOUT_TARGET) command: handle, backing
 * resource, then the byte offset and size of the target range.
 */
int virgl_encoder_create_so_target(struct virgl_context *ctx,
                                  uint32_t handle,
                                  struct virgl_resource *res,
                                  unsigned buffer_offset,
                                  unsigned buffer_size)
{
   struct virgl_cmd_buf *buf = ctx->cbuf;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_STREAMOUT_TARGET, VIRGL_OBJ_STREAMOUT_SIZE));
   virgl_encoder_write_dword(buf, handle);
   virgl_encoder_write_res(ctx, res);
   virgl_encoder_write_dword(buf, buffer_offset);
   virgl_encoder_write_dword(buf, buffer_size);
   return 0;
}
781 
/* Selects how strides are written into a transfer3d command payload. */
enum virgl_transfer3d_encode_stride {
   /* The stride and layer_stride are explicitly specified in the command. */
   virgl_transfer3d_explicit_stride,
   /* The stride and layer_stride are inferred by the host. In this case, the
    * host will use the image stride and layer_stride for the specified level.
    */
   virgl_transfer3d_host_inferred_stride,
};
790 
virgl_encoder_transfer3d_common(struct virgl_screen * vs,struct virgl_cmd_buf * buf,struct virgl_transfer * xfer,enum virgl_transfer3d_encode_stride encode_stride)791 static void virgl_encoder_transfer3d_common(struct virgl_screen *vs,
792                                             struct virgl_cmd_buf *buf,
793                                             struct virgl_transfer *xfer,
794                                             enum virgl_transfer3d_encode_stride encode_stride)
795 
796 {
797    struct pipe_transfer *transfer = &xfer->base;
798    unsigned stride;
799    unsigned layer_stride;
800 
801    if (encode_stride == virgl_transfer3d_explicit_stride) {
802       stride = transfer->stride;
803       layer_stride = transfer->layer_stride;
804    } else if (encode_stride == virgl_transfer3d_host_inferred_stride) {
805       stride = 0;
806       layer_stride = 0;
807    } else {
808       assert(!"Invalid virgl_transfer3d_encode_stride value");
809    }
810 
811    /* We cannot use virgl_encoder_emit_resource with transfer->resource here
812     * because transfer->resource might have a different virgl_hw_res than what
813     * this transfer targets, which is saved in xfer->hw_res.
814     */
815    vs->vws->emit_res(vs->vws, buf, xfer->hw_res, TRUE);
816    virgl_encoder_write_dword(buf, transfer->level);
817    virgl_encoder_write_dword(buf, transfer->usage);
818    virgl_encoder_write_dword(buf, stride);
819    virgl_encoder_write_dword(buf, layer_stride);
820    virgl_encoder_write_dword(buf, transfer->box.x);
821    virgl_encoder_write_dword(buf, transfer->box.y);
822    virgl_encoder_write_dword(buf, transfer->box.z);
823    virgl_encoder_write_dword(buf, transfer->box.width);
824    virgl_encoder_write_dword(buf, transfer->box.height);
825    virgl_encoder_write_dword(buf, transfer->box.depth);
826 }
827 
virgl_encoder_inline_write(struct virgl_context * ctx,struct virgl_resource * res,unsigned level,unsigned usage,const struct pipe_box * box,const void * data,unsigned stride,unsigned layer_stride)828 int virgl_encoder_inline_write(struct virgl_context *ctx,
829                               struct virgl_resource *res,
830                               unsigned level, unsigned usage,
831                               const struct pipe_box *box,
832                               const void *data, unsigned stride,
833                               unsigned layer_stride)
834 {
835    uint32_t size = (stride ? stride : box->width) * box->height;
836    uint32_t length, thispass, left_bytes;
837    struct virgl_transfer transfer;
838    struct virgl_screen *vs = virgl_screen(ctx->base.screen);
839 
840    transfer.base.resource = &res->u.b;
841    transfer.hw_res = res->hw_res;
842    transfer.base.level = level;
843    transfer.base.usage = usage;
844    transfer.base.box = *box;
845 
846    length = 11 + (size + 3) / 4;
847    if ((ctx->cbuf->cdw + length + 1) > VIRGL_ENCODE_MAX_DWORDS) {
848       if (box->height > 1 || box->depth > 1) {
849          debug_printf("inline transfer failed due to multi dimensions and too large\n");
850          assert(0);
851       }
852    }
853 
854    left_bytes = size;
855    while (left_bytes) {
856       if (ctx->cbuf->cdw + 12 >= VIRGL_ENCODE_MAX_DWORDS)
857          ctx->base.flush(&ctx->base, NULL, 0);
858 
859       thispass = (VIRGL_ENCODE_MAX_DWORDS - ctx->cbuf->cdw - 12) * 4;
860 
861       length = MIN2(thispass, left_bytes);
862 
863       transfer.base.box.width = length;
864       virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_RESOURCE_INLINE_WRITE, 0, ((length + 3) / 4) + 11));
865       virgl_encoder_transfer3d_common(vs, ctx->cbuf, &transfer,
866                                       virgl_transfer3d_host_inferred_stride);
867       virgl_encoder_write_block(ctx->cbuf, data, length);
868       left_bytes -= length;
869       transfer.base.box.x += length;
870       data += length;
871    }
872    return 0;
873 }
874 
/* Stub: front-buffer flushing is not encoded as a command; this is kept
 * for interface symmetry and always succeeds.
 */
int virgl_encoder_flush_frontbuffer(struct virgl_context *ctx,
                                   struct virgl_resource *res)
{
//   virgl_encoder_write_dword(ctx->cbuf, VIRGL_CMD0(VIRGL_CCMD_FLUSH_FRONTUBFFER, 0, 1));
//   virgl_encoder_write_dword(ctx->cbuf, res_handle);
   return 0;
}
882 
/* Encode a CREATE_OBJECT(SAMPLER_STATE) command: the wrap/filter/compare
 * settings packed into one dword, the three LOD floats, then the four
 * border-color dwords.
 */
int virgl_encode_sampler_state(struct virgl_context *ctx,
                              uint32_t handle,
                              const struct pipe_sampler_state *state)
{
   struct virgl_cmd_buf *buf = ctx->cbuf;
   uint32_t s0;
   int comp;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_SAMPLER_STATE, VIRGL_OBJ_SAMPLER_STATE_SIZE));
   virgl_encoder_write_dword(buf, handle);

   s0 = VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_S(state->wrap_s) |
      VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_T(state->wrap_t) |
      VIRGL_OBJ_SAMPLE_STATE_S0_WRAP_R(state->wrap_r) |
      VIRGL_OBJ_SAMPLE_STATE_S0_MIN_IMG_FILTER(state->min_img_filter) |
      VIRGL_OBJ_SAMPLE_STATE_S0_MIN_MIP_FILTER(state->min_mip_filter) |
      VIRGL_OBJ_SAMPLE_STATE_S0_MAG_IMG_FILTER(state->mag_img_filter) |
      VIRGL_OBJ_SAMPLE_STATE_S0_COMPARE_MODE(state->compare_mode) |
      VIRGL_OBJ_SAMPLE_STATE_S0_COMPARE_FUNC(state->compare_func) |
      VIRGL_OBJ_SAMPLE_STATE_S0_SEAMLESS_CUBE_MAP(state->seamless_cube_map);

   virgl_encoder_write_dword(buf, s0);
   virgl_encoder_write_dword(buf, fui(state->lod_bias));
   virgl_encoder_write_dword(buf, fui(state->min_lod));
   virgl_encoder_write_dword(buf, fui(state->max_lod));
   for (comp = 0; comp < 4; comp++)
      virgl_encoder_write_dword(buf, state->border_color.ui[comp]);
   return 0;
}
910 
911 
/* Encode a CREATE_OBJECT(SAMPLER_VIEW) command.  Buffer resources
 * encode a first/last element range; textures encode layer and level
 * ranges (or a plane index for multi-planar resources), followed by the
 * packed swizzle dword.
 */
int virgl_encode_sampler_view(struct virgl_context *ctx,
                             uint32_t handle,
                             struct virgl_resource *res,
                             const struct pipe_sampler_view *state)
{
   unsigned elem_size = util_format_get_blocksize(state->format);
   struct virgl_screen *rs = virgl_screen(ctx->base.screen);
   uint32_t tmp;
   uint32_t dword_fmt_target = pipe_to_virgl_format(state->format);
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_SAMPLER_VIEW, VIRGL_OBJ_SAMPLER_VIEW_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   virgl_encoder_write_res(ctx, res);
   /* Hosts with texture-view support take the pipe texture target in
    * the top byte of the format dword. */
   if (rs->caps.caps.v2.capability_bits & VIRGL_CAP_TEXTURE_VIEW)
     dword_fmt_target |= (state->target << 24);
   virgl_encoder_write_dword(ctx->cbuf, dword_fmt_target);
   if (res->u.b.target == PIPE_BUFFER) {
      /* Buffer views: first and last element, in format-sized units. */
      virgl_encoder_write_dword(ctx->cbuf, state->u.buf.offset / elem_size);
      virgl_encoder_write_dword(ctx->cbuf, (state->u.buf.offset + state->u.buf.size) / elem_size - 1);
   } else {
      if (res->metadata.plane) {
         /* Multi-planar resources reuse the layer dword for the plane
          * index; layers must be 0 in that case. */
         debug_assert(state->u.tex.first_layer == 0 && state->u.tex.last_layer == 0);
         virgl_encoder_write_dword(ctx->cbuf, res->metadata.plane);
      } else {
         virgl_encoder_write_dword(ctx->cbuf, state->u.tex.first_layer | state->u.tex.last_layer << 16);
      }
      virgl_encoder_write_dword(ctx->cbuf, state->u.tex.first_level | state->u.tex.last_level << 8);
   }
   tmp = VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_R(state->swizzle_r) |
      VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_G(state->swizzle_g) |
      VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_B(state->swizzle_b) |
      VIRGL_OBJ_SAMPLER_VIEW_SWIZZLE_A(state->swizzle_a);
   virgl_encoder_write_dword(ctx->cbuf, tmp);
   return 0;
}
946 
virgl_encode_set_sampler_views(struct virgl_context * ctx,uint32_t shader_type,uint32_t start_slot,uint32_t num_views,struct virgl_sampler_view ** views)947 int virgl_encode_set_sampler_views(struct virgl_context *ctx,
948                                   uint32_t shader_type,
949                                   uint32_t start_slot,
950                                   uint32_t num_views,
951                                   struct virgl_sampler_view **views)
952 {
953    int i;
954    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SAMPLER_VIEWS, 0, VIRGL_SET_SAMPLER_VIEWS_SIZE(num_views)));
955    virgl_encoder_write_dword(ctx->cbuf, shader_type);
956    virgl_encoder_write_dword(ctx->cbuf, start_slot);
957    for (i = 0; i < num_views; i++) {
958       uint32_t handle = views[i] ? views[i]->handle : 0;
959       virgl_encoder_write_dword(ctx->cbuf, handle);
960    }
961    return 0;
962 }
963 
virgl_encode_bind_sampler_states(struct virgl_context * ctx,uint32_t shader_type,uint32_t start_slot,uint32_t num_handles,uint32_t * handles)964 int virgl_encode_bind_sampler_states(struct virgl_context *ctx,
965                                     uint32_t shader_type,
966                                     uint32_t start_slot,
967                                     uint32_t num_handles,
968                                     uint32_t *handles)
969 {
970    int i;
971    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BIND_SAMPLER_STATES, 0, VIRGL_BIND_SAMPLER_STATES(num_handles)));
972    virgl_encoder_write_dword(ctx->cbuf, shader_type);
973    virgl_encoder_write_dword(ctx->cbuf, start_slot);
974    for (i = 0; i < num_handles; i++)
975       virgl_encoder_write_dword(ctx->cbuf, handles[i]);
976    return 0;
977 }
978 
/* Encode a SET_CONSTANT_BUFFER command that carries the constants
 * inline.  `size` is in dwords; the payload is shader stage, index,
 * then size dwords of data.
 *
 * NOTE(review): the header declares size + 2 dwords even when data is
 * NULL, in which case only 2 are written — presumably callers never
 * pass NULL with a non-zero size; verify against callers.
 */
int virgl_encoder_write_constant_buffer(struct virgl_context *ctx,
                                       uint32_t shader,
                                       uint32_t index,
                                       uint32_t size,
                                       const void *data)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_CONSTANT_BUFFER, 0, size + 2));
   virgl_encoder_write_dword(ctx->cbuf, shader);
   virgl_encoder_write_dword(ctx->cbuf, index);
   if (data)
      virgl_encoder_write_block(ctx->cbuf, data, size * 4);
   return 0;
}
992 
/* Encode a SET_UNIFORM_BUFFER command binding a range of `res` as a
 * uniform buffer: shader stage, slot index, byte offset and length,
 * then the resource handle.
 */
int virgl_encoder_set_uniform_buffer(struct virgl_context *ctx,
                                     uint32_t shader,
                                     uint32_t index,
                                     uint32_t offset,
                                     uint32_t length,
                                     struct virgl_resource *res)
{
   struct virgl_cmd_buf *buf = ctx->cbuf;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_UNIFORM_BUFFER, 0, VIRGL_SET_UNIFORM_BUFFER_SIZE));
   virgl_encoder_write_dword(buf, shader);
   virgl_encoder_write_dword(buf, index);
   virgl_encoder_write_dword(buf, offset);
   virgl_encoder_write_dword(buf, length);
   virgl_encoder_write_res(ctx, res);
   return 0;
}
1008 
1009 
/* Encode a SET_STENCIL_REF command; both front and back reference
 * values are packed into a single dword.
 */
int virgl_encoder_set_stencil_ref(struct virgl_context *ctx,
                                 const struct pipe_stencil_ref *ref)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_STENCIL_REF, 0, VIRGL_SET_STENCIL_REF_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, VIRGL_STENCIL_REF_VAL(ref->ref_value[0] , (ref->ref_value[1])));
   return 0;
}
1017 
virgl_encoder_set_blend_color(struct virgl_context * ctx,const struct pipe_blend_color * color)1018 int virgl_encoder_set_blend_color(struct virgl_context *ctx,
1019                                  const struct pipe_blend_color *color)
1020 {
1021    int i;
1022    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_BLEND_COLOR, 0, VIRGL_SET_BLEND_COLOR_SIZE));
1023    for (i = 0; i < 4; i++)
1024       virgl_encoder_write_dword(ctx->cbuf, fui(color->color[i]));
1025    return 0;
1026 }
1027 
/* Encode a SET_SCISSOR_STATE command: the start slot followed by each
 * scissor's min/max corners, two packed dwords per rectangle.
 */
int virgl_encoder_set_scissor_state(struct virgl_context *ctx,
                                    unsigned start_slot,
                                    int num_scissors,
                                    const struct pipe_scissor_state *ss)
{
   int i;
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SCISSOR_STATE, 0, VIRGL_SET_SCISSOR_STATE_SIZE(num_scissors)));
   virgl_encoder_write_dword(ctx->cbuf, start_slot);
   for (i = 0; i < num_scissors; i++) {
      /* x in the low 16 bits, y in the high 16 bits */
      virgl_encoder_write_dword(ctx->cbuf, (ss[i].minx | ss[i].miny << 16));
      virgl_encoder_write_dword(ctx->cbuf, (ss[i].maxx | ss[i].maxy << 16));
   }
   return 0;
}
1042 
virgl_encoder_set_polygon_stipple(struct virgl_context * ctx,const struct pipe_poly_stipple * ps)1043 void virgl_encoder_set_polygon_stipple(struct virgl_context *ctx,
1044                                       const struct pipe_poly_stipple *ps)
1045 {
1046    int i;
1047    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_POLYGON_STIPPLE, 0, VIRGL_POLYGON_STIPPLE_SIZE));
1048    for (i = 0; i < VIRGL_POLYGON_STIPPLE_SIZE; i++) {
1049       virgl_encoder_write_dword(ctx->cbuf, ps->stipple[i]);
1050    }
1051 }
1052 
virgl_encoder_set_sample_mask(struct virgl_context * ctx,unsigned sample_mask)1053 void virgl_encoder_set_sample_mask(struct virgl_context *ctx,
1054                                   unsigned sample_mask)
1055 {
1056    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SAMPLE_MASK, 0, VIRGL_SET_SAMPLE_MASK_SIZE));
1057    virgl_encoder_write_dword(ctx->cbuf, sample_mask);
1058 }
1059 
virgl_encoder_set_min_samples(struct virgl_context * ctx,unsigned min_samples)1060 void virgl_encoder_set_min_samples(struct virgl_context *ctx,
1061                                   unsigned min_samples)
1062 {
1063    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_MIN_SAMPLES, 0, VIRGL_SET_MIN_SAMPLES_SIZE));
1064    virgl_encoder_write_dword(ctx->cbuf, min_samples);
1065 }
1066 
virgl_encoder_set_clip_state(struct virgl_context * ctx,const struct pipe_clip_state * clip)1067 void virgl_encoder_set_clip_state(struct virgl_context *ctx,
1068                                  const struct pipe_clip_state *clip)
1069 {
1070    int i, j;
1071    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_CLIP_STATE, 0, VIRGL_SET_CLIP_STATE_SIZE));
1072    for (i = 0; i < VIRGL_MAX_CLIP_PLANES; i++) {
1073       for (j = 0; j < 4; j++) {
1074          virgl_encoder_write_dword(ctx->cbuf, fui(clip->ucp[i][j]));
1075       }
1076    }
1077 }
1078 
/* Encode a RESOURCE_COPY_REGION command: destination (resource, level,
 * x/y/z) followed by source (resource, level, box).
 */
int virgl_encode_resource_copy_region(struct virgl_context *ctx,
                                     struct virgl_resource *dst_res,
                                     unsigned dst_level,
                                     unsigned dstx, unsigned dsty, unsigned dstz,
                                     struct virgl_resource *src_res,
                                     unsigned src_level,
                                     const struct pipe_box *src_box)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_RESOURCE_COPY_REGION, 0, VIRGL_CMD_RESOURCE_COPY_REGION_SIZE));
   virgl_encoder_write_res(ctx, dst_res);
   virgl_encoder_write_dword(ctx->cbuf, dst_level);
   virgl_encoder_write_dword(ctx->cbuf, dstx);
   virgl_encoder_write_dword(ctx->cbuf, dsty);
   virgl_encoder_write_dword(ctx->cbuf, dstz);
   virgl_encoder_write_res(ctx, src_res);
   virgl_encoder_write_dword(ctx->cbuf, src_level);
   virgl_encoder_write_dword(ctx->cbuf, src_box->x);
   virgl_encoder_write_dword(ctx->cbuf, src_box->y);
   virgl_encoder_write_dword(ctx->cbuf, src_box->z);
   virgl_encoder_write_dword(ctx->cbuf, src_box->width);
   virgl_encoder_write_dword(ctx->cbuf, src_box->height);
   virgl_encoder_write_dword(ctx->cbuf, src_box->depth);
   return 0;
}
1103 
/* Encode a BLIT command: a packed flags dword (mask, filter, scissor
 * enable, render-condition enable, alpha blend), the scissor rectangle,
 * then the destination and source (resource, level, format, box).
 */
int virgl_encode_blit(struct virgl_context *ctx,
                     struct virgl_resource *dst_res,
                     struct virgl_resource *src_res,
                     const struct pipe_blit_info *blit)
{
   uint32_t tmp;
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BLIT, 0, VIRGL_CMD_BLIT_SIZE));
   tmp = VIRGL_CMD_BLIT_S0_MASK(blit->mask) |
      VIRGL_CMD_BLIT_S0_FILTER(blit->filter) |
      VIRGL_CMD_BLIT_S0_SCISSOR_ENABLE(blit->scissor_enable) |
      VIRGL_CMD_BLIT_S0_RENDER_CONDITION_ENABLE(blit->render_condition_enable) |
      VIRGL_CMD_BLIT_S0_ALPHA_BLEND(blit->alpha_blend);
   virgl_encoder_write_dword(ctx->cbuf, tmp);
   /* scissor corners: x in low 16 bits, y in high 16 bits */
   virgl_encoder_write_dword(ctx->cbuf, (blit->scissor.minx | blit->scissor.miny << 16));
   virgl_encoder_write_dword(ctx->cbuf, (blit->scissor.maxx | blit->scissor.maxy << 16));

   virgl_encoder_write_res(ctx, dst_res);
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.level);
   virgl_encoder_write_dword(ctx->cbuf, pipe_to_virgl_format(blit->dst.format));
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.x);
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.y);
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.z);
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.width);
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.height);
   virgl_encoder_write_dword(ctx->cbuf, blit->dst.box.depth);

   virgl_encoder_write_res(ctx, src_res);
   virgl_encoder_write_dword(ctx->cbuf, blit->src.level);
   virgl_encoder_write_dword(ctx->cbuf, pipe_to_virgl_format(blit->src.format));
   virgl_encoder_write_dword(ctx->cbuf, blit->src.box.x);
   virgl_encoder_write_dword(ctx->cbuf, blit->src.box.y);
   virgl_encoder_write_dword(ctx->cbuf, blit->src.box.z);
   virgl_encoder_write_dword(ctx->cbuf, blit->src.box.width);
   virgl_encoder_write_dword(ctx->cbuf, blit->src.box.height);
   virgl_encoder_write_dword(ctx->cbuf, blit->src.box.depth);
   return 0;
}
1141 
/* Encode a CREATE_OBJECT(QUERY) command: handle, query type and index
 * packed into one dword (type in the low 16 bits), the result offset,
 * then the resource that receives the result.
 */
int virgl_encoder_create_query(struct virgl_context *ctx,
                              uint32_t handle,
                              uint query_type,
                              uint query_index,
                              struct virgl_resource *res,
                              uint32_t offset)
{
   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_OBJECT, VIRGL_OBJECT_QUERY, VIRGL_OBJ_QUERY_SIZE));
   virgl_encoder_write_dword(ctx->cbuf, handle);
   virgl_encoder_write_dword(ctx->cbuf, ((query_type & 0xffff) | (query_index << 16)));
   virgl_encoder_write_dword(ctx->cbuf, offset);
   virgl_encoder_write_res(ctx, res);
   return 0;
}
1156 
/* Encode a BEGIN_QUERY command for the query object `handle`. */
int virgl_encoder_begin_query(struct virgl_context *ctx,
                             uint32_t handle)
{
   struct virgl_cmd_buf *buf = ctx->cbuf;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BEGIN_QUERY, 0, 1));
   virgl_encoder_write_dword(buf, handle);
   return 0;
}
1164 
/* Encode an END_QUERY command for the query object `handle`. */
int virgl_encoder_end_query(struct virgl_context *ctx,
                           uint32_t handle)
{
   struct virgl_cmd_buf *buf = ctx->cbuf;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_END_QUERY, 0, 1));
   virgl_encoder_write_dword(buf, handle);
   return 0;
}
1172 
/* Encode a GET_QUERY_RESULT command; `wait` is normalized to 0/1. */
int virgl_encoder_get_query_result(struct virgl_context *ctx,
                                  uint32_t handle, boolean wait)
{
   struct virgl_cmd_buf *buf = ctx->cbuf;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_GET_QUERY_RESULT, 0, 2));
   virgl_encoder_write_dword(buf, handle);
   virgl_encoder_write_dword(buf, wait ? 1 : 0);
   return 0;
}
1181 
/* Encode a SET_RENDER_CONDITION command: query handle, the condition
 * flag and the render-condition mode.
 */
int virgl_encoder_render_condition(struct virgl_context *ctx,
                                  uint32_t handle, boolean condition,
                                  enum pipe_render_cond_flag mode)
{
   struct virgl_cmd_buf *buf = ctx->cbuf;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_RENDER_CONDITION, 0, VIRGL_RENDER_CONDITION_SIZE));
   virgl_encoder_write_dword(buf, handle);
   virgl_encoder_write_dword(buf, condition);
   virgl_encoder_write_dword(buf, mode);
   return 0;
}
1192 
virgl_encoder_set_so_targets(struct virgl_context * ctx,unsigned num_targets,struct pipe_stream_output_target ** targets,unsigned append_bitmask)1193 int virgl_encoder_set_so_targets(struct virgl_context *ctx,
1194                                 unsigned num_targets,
1195                                 struct pipe_stream_output_target **targets,
1196                                 unsigned append_bitmask)
1197 {
1198    int i;
1199 
1200    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_STREAMOUT_TARGETS, 0, num_targets + 1));
1201    virgl_encoder_write_dword(ctx->cbuf, append_bitmask);
1202    for (i = 0; i < num_targets; i++) {
1203       struct virgl_so_target *tg = virgl_so_target(targets[i]);
1204       virgl_encoder_write_dword(ctx->cbuf, tg ? tg->handle : 0);
1205    }
1206    return 0;
1207 }
1208 
1209 
/* Encode a SET_SUB_CTX command switching the host to `sub_ctx_id`. */
int virgl_encoder_set_sub_ctx(struct virgl_context *ctx, uint32_t sub_ctx_id)
{
   struct virgl_cmd_buf *buf = ctx->cbuf;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SUB_CTX, 0, 1));
   virgl_encoder_write_dword(buf, sub_ctx_id);
   return 0;
}
1216 
/* Encode a CREATE_SUB_CTX command for `sub_ctx_id`. */
int virgl_encoder_create_sub_ctx(struct virgl_context *ctx, uint32_t sub_ctx_id)
{
   struct virgl_cmd_buf *buf = ctx->cbuf;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_CREATE_SUB_CTX, 0, 1));
   virgl_encoder_write_dword(buf, sub_ctx_id);
   return 0;
}
1223 
/* Encode a DESTROY_SUB_CTX command for `sub_ctx_id`. */
int virgl_encoder_destroy_sub_ctx(struct virgl_context *ctx, uint32_t sub_ctx_id)
{
   struct virgl_cmd_buf *buf = ctx->cbuf;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_DESTROY_SUB_CTX, 0, 1));
   virgl_encoder_write_dword(buf, sub_ctx_id);
   return 0;
}
1230 
/* Encode a BIND_SHADER command: shader object handle and stage type. */
int virgl_encode_bind_shader(struct virgl_context *ctx,
                             uint32_t handle, uint32_t type)
{
   struct virgl_cmd_buf *buf = ctx->cbuf;

   virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_BIND_SHADER, 0, 2));
   virgl_encoder_write_dword(buf, handle);
   virgl_encoder_write_dword(buf, type);
   return 0;
}
1239 
virgl_encode_set_tess_state(struct virgl_context * ctx,const float outer[4],const float inner[2])1240 int virgl_encode_set_tess_state(struct virgl_context *ctx,
1241                                 const float outer[4],
1242                                 const float inner[2])
1243 {
1244    int i;
1245    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_TESS_STATE, 0, 6));
1246    for (i = 0; i < 4; i++)
1247       virgl_encoder_write_dword(ctx->cbuf, fui(outer[i]));
1248    for (i = 0; i < 2; i++)
1249       virgl_encoder_write_dword(ctx->cbuf, fui(inner[i]));
1250    return 0;
1251 }
1252 
virgl_encode_set_shader_buffers(struct virgl_context * ctx,enum pipe_shader_type shader,unsigned start_slot,unsigned count,const struct pipe_shader_buffer * buffers)1253 int virgl_encode_set_shader_buffers(struct virgl_context *ctx,
1254                                     enum pipe_shader_type shader,
1255                                     unsigned start_slot, unsigned count,
1256                                     const struct pipe_shader_buffer *buffers)
1257 {
1258    int i;
1259    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SHADER_BUFFERS, 0, VIRGL_SET_SHADER_BUFFER_SIZE(count)));
1260 
1261    virgl_encoder_write_dword(ctx->cbuf, shader);
1262    virgl_encoder_write_dword(ctx->cbuf, start_slot);
1263    for (i = 0; i < count; i++) {
1264       if (buffers && buffers[i].buffer) {
1265          struct virgl_resource *res = virgl_resource(buffers[i].buffer);
1266          virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_offset);
1267          virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_size);
1268          virgl_encoder_write_res(ctx, res);
1269 
1270          util_range_add(&res->u.b, &res->valid_buffer_range, buffers[i].buffer_offset,
1271                buffers[i].buffer_offset + buffers[i].buffer_size);
1272          virgl_resource_dirty(res, 0);
1273       } else {
1274          virgl_encoder_write_dword(ctx->cbuf, 0);
1275          virgl_encoder_write_dword(ctx->cbuf, 0);
1276          virgl_encoder_write_dword(ctx->cbuf, 0);
1277       }
1278    }
1279    return 0;
1280 }
1281 
virgl_encode_set_hw_atomic_buffers(struct virgl_context * ctx,unsigned start_slot,unsigned count,const struct pipe_shader_buffer * buffers)1282 int virgl_encode_set_hw_atomic_buffers(struct virgl_context *ctx,
1283                                        unsigned start_slot, unsigned count,
1284                                        const struct pipe_shader_buffer *buffers)
1285 {
1286    int i;
1287    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_ATOMIC_BUFFERS, 0, VIRGL_SET_ATOMIC_BUFFER_SIZE(count)));
1288 
1289    virgl_encoder_write_dword(ctx->cbuf, start_slot);
1290    for (i = 0; i < count; i++) {
1291       if (buffers && buffers[i].buffer) {
1292          struct virgl_resource *res = virgl_resource(buffers[i].buffer);
1293          virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_offset);
1294          virgl_encoder_write_dword(ctx->cbuf, buffers[i].buffer_size);
1295          virgl_encoder_write_res(ctx, res);
1296 
1297          util_range_add(&res->u.b, &res->valid_buffer_range, buffers[i].buffer_offset,
1298                buffers[i].buffer_offset + buffers[i].buffer_size);
1299          virgl_resource_dirty(res, 0);
1300       } else {
1301          virgl_encoder_write_dword(ctx->cbuf, 0);
1302          virgl_encoder_write_dword(ctx->cbuf, 0);
1303          virgl_encoder_write_dword(ctx->cbuf, 0);
1304       }
1305    }
1306    return 0;
1307 }
1308 
virgl_encode_set_shader_images(struct virgl_context * ctx,enum pipe_shader_type shader,unsigned start_slot,unsigned count,const struct pipe_image_view * images)1309 int virgl_encode_set_shader_images(struct virgl_context *ctx,
1310                                    enum pipe_shader_type shader,
1311                                    unsigned start_slot, unsigned count,
1312                                    const struct pipe_image_view *images)
1313 {
1314    int i;
1315    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_SHADER_IMAGES, 0, VIRGL_SET_SHADER_IMAGE_SIZE(count)));
1316 
1317    virgl_encoder_write_dword(ctx->cbuf, shader);
1318    virgl_encoder_write_dword(ctx->cbuf, start_slot);
1319    for (i = 0; i < count; i++) {
1320       if (images && images[i].resource) {
1321          struct virgl_resource *res = virgl_resource(images[i].resource);
1322          virgl_encoder_write_dword(ctx->cbuf, pipe_to_virgl_format(images[i].format));
1323          virgl_encoder_write_dword(ctx->cbuf, images[i].access);
1324          virgl_encoder_write_dword(ctx->cbuf, images[i].u.buf.offset);
1325          virgl_encoder_write_dword(ctx->cbuf, images[i].u.buf.size);
1326          virgl_encoder_write_res(ctx, res);
1327 
1328          if (res->u.b.target == PIPE_BUFFER) {
1329             util_range_add(&res->u.b, &res->valid_buffer_range, images[i].u.buf.offset,
1330                   images[i].u.buf.offset + images[i].u.buf.size);
1331          }
1332          virgl_resource_dirty(res, images[i].u.tex.level);
1333       } else {
1334          virgl_encoder_write_dword(ctx->cbuf, 0);
1335          virgl_encoder_write_dword(ctx->cbuf, 0);
1336          virgl_encoder_write_dword(ctx->cbuf, 0);
1337          virgl_encoder_write_dword(ctx->cbuf, 0);
1338          virgl_encoder_write_dword(ctx->cbuf, 0);
1339       }
1340    }
1341    return 0;
1342 }
1343 
virgl_encode_memory_barrier(struct virgl_context * ctx,unsigned flags)1344 int virgl_encode_memory_barrier(struct virgl_context *ctx,
1345                                 unsigned flags)
1346 {
1347    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_MEMORY_BARRIER, 0, 1));
1348    virgl_encoder_write_dword(ctx->cbuf, flags);
1349    return 0;
1350 }
1351 
virgl_encode_launch_grid(struct virgl_context * ctx,const struct pipe_grid_info * grid_info)1352 int virgl_encode_launch_grid(struct virgl_context *ctx,
1353                              const struct pipe_grid_info *grid_info)
1354 {
1355    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_LAUNCH_GRID, 0, VIRGL_LAUNCH_GRID_SIZE));
1356    virgl_encoder_write_dword(ctx->cbuf, grid_info->block[0]);
1357    virgl_encoder_write_dword(ctx->cbuf, grid_info->block[1]);
1358    virgl_encoder_write_dword(ctx->cbuf, grid_info->block[2]);
1359    virgl_encoder_write_dword(ctx->cbuf, grid_info->grid[0]);
1360    virgl_encoder_write_dword(ctx->cbuf, grid_info->grid[1]);
1361    virgl_encoder_write_dword(ctx->cbuf, grid_info->grid[2]);
1362    if (grid_info->indirect) {
1363       struct virgl_resource *res = virgl_resource(grid_info->indirect);
1364       virgl_encoder_write_res(ctx, res);
1365    } else
1366       virgl_encoder_write_dword(ctx->cbuf, 0);
1367    virgl_encoder_write_dword(ctx->cbuf, grid_info->indirect_offset);
1368    return 0;
1369 }
1370 
virgl_encode_texture_barrier(struct virgl_context * ctx,unsigned flags)1371 int virgl_encode_texture_barrier(struct virgl_context *ctx,
1372                                  unsigned flags)
1373 {
1374    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_TEXTURE_BARRIER, 0, 1));
1375    virgl_encoder_write_dword(ctx->cbuf, flags);
1376    return 0;
1377 }
1378 
virgl_encode_host_debug_flagstring(struct virgl_context * ctx,const char * flagstring)1379 int virgl_encode_host_debug_flagstring(struct virgl_context *ctx,
1380                                        const char *flagstring)
1381 {
1382    unsigned long slen = strlen(flagstring) + 1;
1383    uint32_t sslen;
1384    uint32_t string_length;
1385 
1386    if (!slen)
1387       return 0;
1388 
1389    if (slen > 4 * 0xffff) {
1390       debug_printf("VIRGL: host debug flag string too long, will be truncated\n");
1391       slen = 4 * 0xffff;
1392    }
1393 
1394    sslen = (uint32_t )(slen + 3) / 4;
1395    string_length = (uint32_t)MIN2(sslen * 4, slen);
1396 
1397    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_DEBUG_FLAGS, 0, sslen));
1398    virgl_encoder_write_block(ctx->cbuf, (const uint8_t *)flagstring, string_length);
1399    return 0;
1400 }
1401 
virgl_encode_tweak(struct virgl_context * ctx,enum vrend_tweak_type tweak,uint32_t value)1402 int virgl_encode_tweak(struct virgl_context *ctx, enum vrend_tweak_type tweak, uint32_t value)
1403 {
1404    virgl_encoder_write_cmd_dword(ctx, VIRGL_CMD0(VIRGL_CCMD_SET_TWEAKS, 0, VIRGL_SET_TWEAKS_SIZE));
1405    virgl_encoder_write_dword(ctx->cbuf, tweak);
1406    virgl_encoder_write_dword(ctx->cbuf, value);
1407    return 0;
1408 }
1409 
1410 
/* Encode a GET_QUERY_RESULT_QBO command asking the host to write the result
 * of query 'handle' into buffer 'res' at 'offset' (result layout selected by
 * 'result_type'/'index'); 'wait' requests a blocking readback.  Returns 0. */
int virgl_encode_get_query_result_qbo(struct virgl_context *ctx,
                                      uint32_t handle,
                                      struct virgl_resource *res, boolean wait,
                                      uint32_t result_type,
                                      uint32_t offset,
                                      uint32_t index)
{
   const uint32_t header =
      VIRGL_CMD0(VIRGL_CCMD_GET_QUERY_RESULT_QBO, 0, VIRGL_QUERY_RESULT_QBO_SIZE);

   virgl_encoder_write_cmd_dword(ctx, header);
   virgl_encoder_write_dword(ctx->cbuf, handle);
   virgl_encoder_write_res(ctx, res);
   virgl_encoder_write_dword(ctx->cbuf, wait ? 1u : 0u);
   virgl_encoder_write_dword(ctx->cbuf, result_type);
   virgl_encoder_write_dword(ctx->cbuf, offset);
   virgl_encoder_write_dword(ctx->cbuf, index);
   return 0;
}
1427 
virgl_encode_transfer(struct virgl_screen * vs,struct virgl_cmd_buf * buf,struct virgl_transfer * trans,uint32_t direction)1428 void virgl_encode_transfer(struct virgl_screen *vs, struct virgl_cmd_buf *buf,
1429                            struct virgl_transfer *trans, uint32_t direction)
1430 {
1431    uint32_t command;
1432    struct virgl_resource *vres = virgl_resource(trans->base.resource);
1433    enum virgl_transfer3d_encode_stride stride_type =
1434         virgl_transfer3d_host_inferred_stride;
1435 
1436    if (trans->base.box.depth == 1 && trans->base.level == 0 &&
1437        trans->base.resource->target == PIPE_TEXTURE_2D &&
1438        vres->blob_mem == VIRGL_BLOB_MEM_HOST3D_GUEST)
1439       stride_type = virgl_transfer3d_explicit_stride;
1440 
1441    command = VIRGL_CMD0(VIRGL_CCMD_TRANSFER3D, 0, VIRGL_TRANSFER3D_SIZE);
1442    virgl_encoder_write_dword(buf, command);
1443    virgl_encoder_transfer3d_common(vs, buf, trans, stride_type);
1444    virgl_encoder_write_dword(buf, trans->offset);
1445    virgl_encoder_write_dword(buf, direction);
1446 }
1447 
virgl_encode_copy_transfer(struct virgl_context * ctx,struct virgl_transfer * trans)1448 void virgl_encode_copy_transfer(struct virgl_context *ctx,
1449                                 struct virgl_transfer *trans)
1450 {
1451    uint32_t command;
1452    struct virgl_screen *vs = virgl_screen(ctx->base.screen);
1453 
1454    assert(trans->copy_src_hw_res);
1455 
1456    command = VIRGL_CMD0(VIRGL_CCMD_COPY_TRANSFER3D, 0, VIRGL_COPY_TRANSFER3D_SIZE);
1457    virgl_encoder_write_cmd_dword(ctx, command);
1458    /* Copy transfers need to explicitly specify the stride, since it may differ
1459     * from the image stride.
1460     */
1461    virgl_encoder_transfer3d_common(vs, ctx->cbuf, trans, virgl_transfer3d_explicit_stride);
1462    vs->vws->emit_res(vs->vws, ctx->cbuf, trans->copy_src_hw_res, TRUE);
1463    virgl_encoder_write_dword(ctx->cbuf, trans->copy_src_offset);
1464    /* At the moment all copy transfers are synchronized. */
1465    virgl_encoder_write_dword(ctx->cbuf, 1);
1466 }
1467 
virgl_encode_end_transfers(struct virgl_cmd_buf * buf)1468 void virgl_encode_end_transfers(struct virgl_cmd_buf *buf)
1469 {
1470    uint32_t command, diff;
1471    diff = VIRGL_MAX_TBUF_DWORDS - buf->cdw;
1472    if (diff) {
1473       command = VIRGL_CMD0(VIRGL_CCMD_END_TRANSFERS, 0, diff - 1);
1474       virgl_encoder_write_dword(buf, command);
1475    }
1476 }
1477