1 /**************************************************************************
2  *
3  * Copyright 2015 Advanced Micro Devices, Inc.
4  * Copyright 2008 VMware, Inc.
5  * All Rights Reserved.
6  *
7  * Permission is hereby granted, free of charge, to any person obtaining a
8  * copy of this software and associated documentation files (the "Software"),
9  * to deal in the Software without restriction, including without limitation
10  * on the rights to use, copy, modify, merge, publish, distribute, sub
11  * license, and/or sell copies of the Software, and to permit persons to whom
12  * the Software is furnished to do so, subject to the following conditions:
13  *
14  * The above copyright notice and this permission notice (including the next
15  * paragraph) shall be included in all copies or substantial portions of the
16  * Software.
17  *
18  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20  * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21  * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
22  * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23  * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24  * USE OR OTHER DEALINGS IN THE SOFTWARE.
25  *
26  **************************************************************************/
27 
28 #include "dd_pipe.h"
29 
30 #include "util/u_dump.h"
31 #include "util/u_format.h"
32 #include "util/u_framebuffer.h"
33 #include "util/u_helpers.h"
34 #include "util/u_inlines.h"
35 #include "util/u_memory.h"
36 #include "tgsi/tgsi_parse.h"
37 #include "tgsi/tgsi_scan.h"
38 #include "os/os_time.h"
39 #include <inttypes.h>
40 
41 
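/*
 * Draw-call wrappers for the ddebug pipe driver: every draw, compute, clear
 * and blit call is intercepted so that the captured state can be dumped to a
 * log file and GPU hangs can be detected by fencing each call.
 */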
42 static FILE *
43 dd_get_file_stream(struct dd_screen *dscreen, unsigned apitrace_call_number)
44 {
45    struct pipe_screen *screen = dscreen->screen;
46    char cmd_line[4096];
47 
48    FILE *f = dd_get_debug_file(dscreen->verbose);
49    if (!f)
50       return NULL;
51 
52    if (os_get_command_line(cmd_line, sizeof(cmd_line)))
53       fprintf(f, "Command: %s\n", cmd_line);
54    fprintf(f, "Driver vendor: %s\n", screen->get_vendor(screen));
55    fprintf(f, "Device vendor: %s\n", screen->get_device_vendor(screen));
56    fprintf(f, "Device name: %s\n\n", screen->get_name(screen));
57 
58    if (apitrace_call_number)
59       fprintf(f, "Last apitrace call: %u\n\n",
60               apitrace_call_number);
61    return f;
62 }
63 
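/* Append the tail of the kernel log (dmesg) to the report. */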
64 static void
65 dd_dump_dmesg(FILE *f)
66 {
67    char line[2000];
68    FILE *p = popen("dmesg | tail -n60", "r");
69 
70    if (!p)
71       return;
72 
73    fprintf(f, "\nLast 60 lines of dmesg:\n\n");
74    while (fgets(line, sizeof(line), p))
75       fputs(line, f);
76 
77    pclose(p);
78 }
79 
80 static void
81 dd_close_file_stream(FILE *f)
82 {
83    fclose(f);
84 }
85 
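/*
 * Return how many viewports the current shaders can address: if the last
 * pre-rasterization shader (GS, then TES, then VS) writes the viewport index,
 * all PIPE_MAX_VIEWPORTS slots are considered active, otherwise only
 * viewport 0 is.
 */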
86 static unsigned
87 dd_num_active_viewports(struct dd_draw_state *dstate)
88 {
89    struct tgsi_shader_info info;
90    const struct tgsi_token *tokens;
91 
92    if (dstate->shaders[PIPE_SHADER_GEOMETRY])
93       tokens = dstate->shaders[PIPE_SHADER_GEOMETRY]->state.shader.tokens;
94    else if (dstate->shaders[PIPE_SHADER_TESS_EVAL])
95       tokens = dstate->shaders[PIPE_SHADER_TESS_EVAL]->state.shader.tokens;
96    else if (dstate->shaders[PIPE_SHADER_VERTEX])
97       tokens = dstate->shaders[PIPE_SHADER_VERTEX]->state.shader.tokens;
98    else
99       return 1;
100 
101    tgsi_scan_shader(tokens, &info);
102    return info.writes_viewport_index ? PIPE_MAX_VIEWPORTS : 1;
103 }
104 
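/*
 * Pretty-printing helpers: the DUMP* macros wrap the util_dump_* functions
 * and print a colored state name (DUMP), an indexed entry (DUMP_I), or a
 * struct member by value or by address (DUMP_M, DUMP_M_ADDR).
 */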
105 #define COLOR_RESET	"\033[0m"
106 #define COLOR_SHADER	"\033[1;32m"
107 #define COLOR_STATE	"\033[1;33m"
108 
109 #define DUMP(name, var) do { \
110    fprintf(f, COLOR_STATE #name ": " COLOR_RESET); \
111    util_dump_##name(f, var); \
112    fprintf(f, "\n"); \
113 } while(0)
114 
115 #define DUMP_I(name, var, i) do { \
116    fprintf(f, COLOR_STATE #name " %i: " COLOR_RESET, i); \
117    util_dump_##name(f, var); \
118    fprintf(f, "\n"); \
119 } while(0)
120 
121 #define DUMP_M(name, var, member) do { \
122    fprintf(f, "  " #member ": "); \
123    util_dump_##name(f, (var)->member); \
124    fprintf(f, "\n"); \
125 } while(0)
126 
127 #define DUMP_M_ADDR(name, var, member) do { \
128    fprintf(f, "  " #member ": "); \
129    util_dump_##name(f, &(var)->member); \
130    fprintf(f, "\n"); \
131 } while(0)
132 
133 static void
134 print_named_value(FILE *f, const char *name, int value)
135 {
136    fprintf(f, COLOR_STATE "%s" COLOR_RESET " = %i\n", name, value);
137 }
138 
139 static void
140 print_named_xvalue(FILE *f, const char *name, int value)
141 {
142    fprintf(f, COLOR_STATE "%s" COLOR_RESET " = 0x%08x\n", name, value);
143 }
144 
145 static void
146 util_dump_uint(FILE *f, unsigned i)
147 {
148    fprintf(f, "%u", i);
149 }
150 
151 static void
152 util_dump_hex(FILE *f, unsigned i)
153 {
154    fprintf(f, "0x%x", i);
155 }
156 
157 static void
158 util_dump_double(FILE *f, double d)
159 {
160    fprintf(f, "%f", d);
161 }
162 
163 static void
164 util_dump_format(FILE *f, enum pipe_format format)
165 {
166    fprintf(f, "%s", util_format_name(format));
167 }
168 
169 static void
170 util_dump_color_union(FILE *f, const union pipe_color_union *color)
171 {
172    fprintf(f, "{f = {%f, %f, %f, %f}, ui = {%u, %u, %u, %u}}",
173            color->f[0], color->f[1], color->f[2], color->f[3],
174            color->ui[0], color->ui[1], color->ui[2], color->ui[3]);
175 }
176 
177 static void
178 util_dump_query(FILE *f, struct dd_query *query)
179 {
180    if (query->type >= PIPE_QUERY_DRIVER_SPECIFIC)
181       fprintf(f, "PIPE_QUERY_DRIVER_SPECIFIC + %i",
182               query->type - PIPE_QUERY_DRIVER_SPECIFIC);
183    else
184       fprintf(f, "%s", util_dump_query_type(query->type, false));
185 }
186 
187 static void
188 dd_dump_render_condition(struct dd_draw_state *dstate, FILE *f)
189 {
190    if (dstate->render_cond.query) {
191       fprintf(f, "render condition:\n");
192       DUMP_M(query, &dstate->render_cond, query);
193       DUMP_M(uint, &dstate->render_cond, condition);
194       DUMP_M(uint, &dstate->render_cond, mode);
195       fprintf(f, "\n");
196    }
197 }
198 
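/*
 * Dump all state relevant to a draw call: index/vertex buffers, vertex
 * elements, stream-output targets, every bound shader with its constant
 * buffers, samplers, views, images and buffers, plus rasterizer, DSA, blend
 * and framebuffer state.
 */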
199 static void
200 dd_dump_draw_vbo(struct dd_draw_state *dstate, struct pipe_draw_info *info, FILE *f)
201 {
202    int sh, i;
203    const char *shader_str[PIPE_SHADER_TYPES];
204 
205    shader_str[PIPE_SHADER_VERTEX] = "VERTEX";
206    shader_str[PIPE_SHADER_TESS_CTRL] = "TESS_CTRL";
207    shader_str[PIPE_SHADER_TESS_EVAL] = "TESS_EVAL";
208    shader_str[PIPE_SHADER_GEOMETRY] = "GEOMETRY";
209    shader_str[PIPE_SHADER_FRAGMENT] = "FRAGMENT";
210    shader_str[PIPE_SHADER_COMPUTE] = "COMPUTE";
211 
212    DUMP(draw_info, info);
213    if (info->indexed) {
214       DUMP(index_buffer, &dstate->index_buffer);
215       if (dstate->index_buffer.buffer)
216          DUMP_M(resource, &dstate->index_buffer, buffer);
217    }
218    if (info->count_from_stream_output)
219       DUMP_M(stream_output_target, info,
220              count_from_stream_output);
221    if (info->indirect)
222       DUMP_M(resource, info, indirect);
223    fprintf(f, "\n");
224 
225    /* TODO: dump active queries */
226 
227    dd_dump_render_condition(dstate, f);
228 
229    for (i = 0; i < PIPE_MAX_ATTRIBS; i++)
230       if (dstate->vertex_buffers[i].buffer ||
231           dstate->vertex_buffers[i].user_buffer) {
232          DUMP_I(vertex_buffer, &dstate->vertex_buffers[i], i);
233          if (dstate->vertex_buffers[i].buffer)
234             DUMP_M(resource, &dstate->vertex_buffers[i], buffer);
235       }
236 
237    if (dstate->velems) {
238       print_named_value(f, "num vertex elements",
239                         dstate->velems->state.velems.count);
240       for (i = 0; i < dstate->velems->state.velems.count; i++) {
241          fprintf(f, "  ");
242          DUMP_I(vertex_element, &dstate->velems->state.velems.velems[i], i);
243       }
244    }
245 
246    print_named_value(f, "num stream output targets", dstate->num_so_targets);
247    for (i = 0; i < dstate->num_so_targets; i++)
248       if (dstate->so_targets[i]) {
249          DUMP_I(stream_output_target, dstate->so_targets[i], i);
250          DUMP_M(resource, dstate->so_targets[i], buffer);
251          fprintf(f, "  offset = %i\n", dstate->so_offsets[i]);
252       }
253 
254    fprintf(f, "\n");
255    for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) {
256       if (sh == PIPE_SHADER_COMPUTE)
257          continue;
258 
259       if (sh == PIPE_SHADER_TESS_CTRL &&
260           !dstate->shaders[PIPE_SHADER_TESS_CTRL] &&
261           dstate->shaders[PIPE_SHADER_TESS_EVAL])
262          fprintf(f, "tess_state: {default_outer_level = {%f, %f, %f, %f}, "
263                  "default_inner_level = {%f, %f}}\n",
264                  dstate->tess_default_levels[0],
265                  dstate->tess_default_levels[1],
266                  dstate->tess_default_levels[2],
267                  dstate->tess_default_levels[3],
268                  dstate->tess_default_levels[4],
269                  dstate->tess_default_levels[5]);
270 
271       if (sh == PIPE_SHADER_FRAGMENT)
272          if (dstate->rs) {
273             unsigned num_viewports = dd_num_active_viewports(dstate);
274 
275             if (dstate->rs->state.rs.clip_plane_enable)
276                DUMP(clip_state, &dstate->clip_state);
277 
278             for (i = 0; i < num_viewports; i++)
279                DUMP_I(viewport_state, &dstate->viewports[i], i);
280 
281             if (dstate->rs->state.rs.scissor)
282                for (i = 0; i < num_viewports; i++)
283                   DUMP_I(scissor_state, &dstate->scissors[i], i);
284 
285             DUMP(rasterizer_state, &dstate->rs->state.rs);
286 
287             if (dstate->rs->state.rs.poly_stipple_enable)
288                DUMP(poly_stipple, &dstate->polygon_stipple);
289             fprintf(f, "\n");
290          }
291 
292       if (!dstate->shaders[sh])
293          continue;
294 
295       fprintf(f, COLOR_SHADER "begin shader: %s" COLOR_RESET "\n", shader_str[sh]);
296       DUMP(shader_state, &dstate->shaders[sh]->state.shader);
297 
298       for (i = 0; i < PIPE_MAX_CONSTANT_BUFFERS; i++)
299          if (dstate->constant_buffers[sh][i].buffer ||
300              dstate->constant_buffers[sh][i].user_buffer) {
301             DUMP_I(constant_buffer, &dstate->constant_buffers[sh][i], i);
302             if (dstate->constant_buffers[sh][i].buffer)
303                DUMP_M(resource, &dstate->constant_buffers[sh][i], buffer);
304          }
305 
306       for (i = 0; i < PIPE_MAX_SAMPLERS; i++)
307          if (dstate->sampler_states[sh][i])
308             DUMP_I(sampler_state, &dstate->sampler_states[sh][i]->state.sampler, i);
309 
310       for (i = 0; i < PIPE_MAX_SAMPLERS; i++)
311          if (dstate->sampler_views[sh][i]) {
312             DUMP_I(sampler_view, dstate->sampler_views[sh][i], i);
313             DUMP_M(resource, dstate->sampler_views[sh][i], texture);
314          }
315 
316       for (i = 0; i < PIPE_MAX_SHADER_IMAGES; i++)
317          if (dstate->shader_images[sh][i].resource) {
318             DUMP_I(image_view, &dstate->shader_images[sh][i], i);
319             if (dstate->shader_images[sh][i].resource)
320                DUMP_M(resource, &dstate->shader_images[sh][i], resource);
321          }
322 
323       for (i = 0; i < PIPE_MAX_SHADER_BUFFERS; i++)
324          if (dstate->shader_buffers[sh][i].buffer) {
325             DUMP_I(shader_buffer, &dstate->shader_buffers[sh][i], i);
326             if (dstate->shader_buffers[sh][i].buffer)
327                DUMP_M(resource, &dstate->shader_buffers[sh][i], buffer);
328          }
329 
330       fprintf(f, COLOR_SHADER "end shader: %s" COLOR_RESET "\n\n", shader_str[sh]);
331    }
332 
333    if (dstate->dsa)
334       DUMP(depth_stencil_alpha_state, &dstate->dsa->state.dsa);
335    DUMP(stencil_ref, &dstate->stencil_ref);
336 
337    if (dstate->blend)
338       DUMP(blend_state, &dstate->blend->state.blend);
339    DUMP(blend_color, &dstate->blend_color);
340 
341    print_named_value(f, "min_samples", dstate->min_samples);
342    print_named_xvalue(f, "sample_mask", dstate->sample_mask);
343    fprintf(f, "\n");
344 
345    DUMP(framebuffer_state, &dstate->framebuffer_state);
346    for (i = 0; i < dstate->framebuffer_state.nr_cbufs; i++)
347       if (dstate->framebuffer_state.cbufs[i]) {
348          fprintf(f, "  " COLOR_STATE "cbufs[%i]:" COLOR_RESET "\n    ", i);
349          DUMP(surface, dstate->framebuffer_state.cbufs[i]);
350          fprintf(f, "    ");
351          DUMP(resource, dstate->framebuffer_state.cbufs[i]->texture);
352       }
353    if (dstate->framebuffer_state.zsbuf) {
354       fprintf(f, "  " COLOR_STATE "zsbuf:" COLOR_RESET "\n    ");
355       DUMP(surface, dstate->framebuffer_state.zsbuf);
356       fprintf(f, "    ");
357       DUMP(resource, dstate->framebuffer_state.zsbuf->texture);
358    }
359    fprintf(f, "\n");
360 }
361 
362 static void
363 dd_dump_launch_grid(struct dd_draw_state *dstate, struct pipe_grid_info *info, FILE *f)
364 {
365    fprintf(f, "%s:\n", __func__+8);
366    /* TODO */
367 }
368 
369 static void
370 dd_dump_resource_copy_region(struct dd_draw_state *dstate,
371                              struct call_resource_copy_region *info,
372                              FILE *f)
373 {
374    fprintf(f, "%s:\n", __func__+8);
375    DUMP_M(resource, info, dst);
376    DUMP_M(uint, info, dst_level);
377    DUMP_M(uint, info, dstx);
378    DUMP_M(uint, info, dsty);
379    DUMP_M(uint, info, dstz);
380    DUMP_M(resource, info, src);
381    DUMP_M(uint, info, src_level);
382    DUMP_M_ADDR(box, info, src_box);
383 }
384 
385 static void
386 dd_dump_blit(struct dd_draw_state *dstate, struct pipe_blit_info *info, FILE *f)
387 {
388    fprintf(f, "%s:\n", __func__+8);
389    DUMP_M(resource, info, dst.resource);
390    DUMP_M(uint, info, dst.level);
391    DUMP_M_ADDR(box, info, dst.box);
392    DUMP_M(format, info, dst.format);
393 
394    DUMP_M(resource, info, src.resource);
395    DUMP_M(uint, info, src.level);
396    DUMP_M_ADDR(box, info, src.box);
397    DUMP_M(format, info, src.format);
398 
399    DUMP_M(hex, info, mask);
400    DUMP_M(uint, info, filter);
401    DUMP_M(uint, info, scissor_enable);
402    DUMP_M_ADDR(scissor_state, info, scissor);
403    DUMP_M(uint, info, render_condition_enable);
404 
405    if (info->render_condition_enable)
406       dd_dump_render_condition(dstate, f);
407 }
408 
409 static void
410 dd_dump_generate_mipmap(struct dd_draw_state *dstate, FILE *f)
411 {
412    fprintf(f, "%s:\n", __func__+8);
413    /* TODO */
414 }
415 
416 static void
417 dd_dump_flush_resource(struct dd_draw_state *dstate, struct pipe_resource *res,
418                        FILE *f)
419 {
420    fprintf(f, "%s:\n", __func__+8);
421    DUMP(resource, res);
422 }
423 
424 static void
425 dd_dump_clear(struct dd_draw_state *dstate, struct call_clear *info, FILE *f)
426 {
427    fprintf(f, "%s:\n", __func__+8);
428    DUMP_M(uint, info, buffers);
429    DUMP_M_ADDR(color_union, info, color);
430    DUMP_M(double, info, depth);
431    DUMP_M(hex, info, stencil);
432 }
433 
434 static void
435 dd_dump_clear_buffer(struct dd_draw_state *dstate, struct call_clear_buffer *info,
436                      FILE *f)
437 {
438    int i;
439    const char *value = (const char*)info->clear_value;
440 
441    fprintf(f, "%s:\n", __func__+8);
442    DUMP_M(resource, info, res);
443    DUMP_M(uint, info, offset);
444    DUMP_M(uint, info, size);
445    DUMP_M(uint, info, clear_value_size);
446 
447    fprintf(f, "  clear_value:");
448    for (i = 0; i < info->clear_value_size; i++)
449       fprintf(f, " %02x", value[i]);
450    fprintf(f, "\n");
451 }
452 
453 static void
454 dd_dump_clear_render_target(struct dd_draw_state *dstate, FILE *f)
455 {
456    fprintf(f, "%s:\n", __func__+8);
457    /* TODO */
458 }
459 
460 static void
461 dd_dump_clear_depth_stencil(struct dd_draw_state *dstate, FILE *f)
462 {
463    fprintf(f, "%s:\n", __func__+8);
464    /* TODO */
465 }
466 
467 static void
468 dd_dump_driver_state(struct dd_context *dctx, FILE *f, unsigned flags)
469 {
470    if (dctx->pipe->dump_debug_state) {
471       fprintf(f, "\n\n**************************************************"
472                  "***************************\n");
473       fprintf(f, "Driver-specific state:\n\n");
474       dctx->pipe->dump_debug_state(dctx->pipe, f, flags);
475    }
476 }
477 
478 static void
479 dd_dump_call(FILE *f, struct dd_draw_state *state, struct dd_call *call)
480 {
481    switch (call->type) {
482    case CALL_DRAW_VBO:
483       dd_dump_draw_vbo(state, &call->info.draw_vbo, f);
484       break;
485    case CALL_LAUNCH_GRID:
486       dd_dump_launch_grid(state, &call->info.launch_grid, f);
487       break;
488    case CALL_RESOURCE_COPY_REGION:
489       dd_dump_resource_copy_region(state,
490                                    &call->info.resource_copy_region, f);
491       break;
492    case CALL_BLIT:
493       dd_dump_blit(state, &call->info.blit, f);
494       break;
495    case CALL_FLUSH_RESOURCE:
496       dd_dump_flush_resource(state, call->info.flush_resource, f);
497       break;
498    case CALL_CLEAR:
499       dd_dump_clear(state, &call->info.clear, f);
500       break;
501    case CALL_CLEAR_BUFFER:
502       dd_dump_clear_buffer(state, &call->info.clear_buffer, f);
503       break;
504    case CALL_CLEAR_RENDER_TARGET:
505       dd_dump_clear_render_target(state, f);
506       break;
507    case CALL_CLEAR_DEPTH_STENCIL:
508       dd_dump_clear_depth_stencil(state, f);
509       break;
510    case CALL_GENERATE_MIPMAP:
511       dd_dump_generate_mipmap(state, f);
512       break;
513    }
514 }
515 
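/* Write a full report for one call: the call itself, the driver-specific
 * state, and optionally the kernel log. */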
516 static void
517 dd_write_report(struct dd_context *dctx, struct dd_call *call, unsigned flags,
518                 bool dump_dmesg)
519 {
520    FILE *f = dd_get_file_stream(dd_screen(dctx->base.screen),
521                                 dctx->draw_state.apitrace_call_number);
522 
523    if (!f)
524       return;
525 
526    dd_dump_call(f, &dctx->draw_state, call);
527    dd_dump_driver_state(dctx, f, flags);
528    if (dump_dmesg)
529       dd_dump_dmesg(f);
530    dd_close_file_stream(f);
531 }
532 
533 static void
534 dd_kill_process(void)
535 {
536    sync();
537    fprintf(stderr, "dd: Aborting the process...\n");
538    fflush(stdout);
539    fflush(stderr);
540    exit(1);
541 }
542 
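/*
 * Flush the wrapped context and wait on the resulting fence for at most
 * timeout_ms. Returns true if the fence did not signal in time, which is
 * treated as a GPU hang.
 */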
543 static bool
544 dd_flush_and_check_hang(struct dd_context *dctx,
545                         struct pipe_fence_handle **flush_fence,
546                         unsigned flush_flags)
547 {
548    struct pipe_fence_handle *fence = NULL;
549    struct pipe_context *pipe = dctx->pipe;
550    struct pipe_screen *screen = pipe->screen;
551    uint64_t timeout_ms = dd_screen(dctx->base.screen)->timeout_ms;
552    bool idle;
553 
554    assert(timeout_ms > 0);
555 
556    pipe->flush(pipe, &fence, flush_flags);
557    if (flush_fence)
558       screen->fence_reference(screen, flush_fence, fence);
559    if (!fence)
560       return false;
561 
562    idle = screen->fence_finish(screen, pipe, fence, timeout_ms * 1000000);
563    screen->fence_reference(screen, &fence, NULL);
564    if (!idle)
565       fprintf(stderr, "dd: GPU hang detected!\n");
566    return !idle;
567 }
568 
569 static void
570 dd_flush_and_handle_hang(struct dd_context *dctx,
571                          struct pipe_fence_handle **fence, unsigned flags,
572                          const char *cause)
573 {
574    if (dd_flush_and_check_hang(dctx, fence, flags)) {
575       FILE *f = dd_get_file_stream(dd_screen(dctx->base.screen),
576                                    dctx->draw_state.apitrace_call_number);
577 
578       if (f) {
579          fprintf(f, "dd: %s.\n", cause);
580          dd_dump_driver_state(dctx, f,
581                               PIPE_DUMP_DEVICE_STATUS_REGISTERS |
582                               PIPE_DUMP_CURRENT_STATES |
583                               PIPE_DUMP_CURRENT_SHADERS |
584                               PIPE_DUMP_LAST_COMMAND_BUFFER);
585          dd_dump_dmesg(f);
586          dd_close_file_stream(f);
587       }
588 
589       /* Terminate the process to prevent future hangs. */
590       dd_kill_process();
591    }
592 }
593 
594 static void
595 dd_unreference_copy_of_call(struct dd_call *dst)
596 {
597    switch (dst->type) {
598    case CALL_DRAW_VBO:
599       pipe_so_target_reference(&dst->info.draw_vbo.count_from_stream_output, NULL);
600       pipe_resource_reference(&dst->info.draw_vbo.indirect, NULL);
601       pipe_resource_reference(&dst->info.draw_vbo.indirect_params, NULL);
602       break;
603    case CALL_LAUNCH_GRID:
604       pipe_resource_reference(&dst->info.launch_grid.indirect, NULL);
605       break;
606    case CALL_RESOURCE_COPY_REGION:
607       pipe_resource_reference(&dst->info.resource_copy_region.dst, NULL);
608       pipe_resource_reference(&dst->info.resource_copy_region.src, NULL);
609       break;
610    case CALL_BLIT:
611       pipe_resource_reference(&dst->info.blit.dst.resource, NULL);
612       pipe_resource_reference(&dst->info.blit.src.resource, NULL);
613       break;
614    case CALL_FLUSH_RESOURCE:
615       pipe_resource_reference(&dst->info.flush_resource, NULL);
616       break;
617    case CALL_CLEAR:
618       break;
619    case CALL_CLEAR_BUFFER:
620       pipe_resource_reference(&dst->info.clear_buffer.res, NULL);
621       break;
622    case CALL_CLEAR_RENDER_TARGET:
623       break;
624    case CALL_CLEAR_DEPTH_STENCIL:
625       break;
626    case CALL_GENERATE_MIPMAP:
627       pipe_resource_reference(&dst->info.generate_mipmap.res, NULL);
628       break;
629    }
630 }
631 
632 static void
633 dd_copy_call(struct dd_call *dst, struct dd_call *src)
634 {
635    dst->type = src->type;
636 
637    switch (src->type) {
638    case CALL_DRAW_VBO:
639       pipe_so_target_reference(&dst->info.draw_vbo.count_from_stream_output,
640                                src->info.draw_vbo.count_from_stream_output);
641       pipe_resource_reference(&dst->info.draw_vbo.indirect,
642                               src->info.draw_vbo.indirect);
643       pipe_resource_reference(&dst->info.draw_vbo.indirect_params,
644                               src->info.draw_vbo.indirect_params);
645       dst->info.draw_vbo = src->info.draw_vbo;
646       break;
647    case CALL_LAUNCH_GRID:
648       pipe_resource_reference(&dst->info.launch_grid.indirect,
649                               src->info.launch_grid.indirect);
650       dst->info.launch_grid = src->info.launch_grid;
651       break;
652    case CALL_RESOURCE_COPY_REGION:
653       pipe_resource_reference(&dst->info.resource_copy_region.dst,
654                               src->info.resource_copy_region.dst);
655       pipe_resource_reference(&dst->info.resource_copy_region.src,
656                               src->info.resource_copy_region.src);
657       dst->info.resource_copy_region = src->info.resource_copy_region;
658       break;
659    case CALL_BLIT:
660       pipe_resource_reference(&dst->info.blit.dst.resource,
661                               src->info.blit.dst.resource);
662       pipe_resource_reference(&dst->info.blit.src.resource,
663                               src->info.blit.src.resource);
664       dst->info.blit = src->info.blit;
665       break;
666    case CALL_FLUSH_RESOURCE:
667       pipe_resource_reference(&dst->info.flush_resource,
668                               src->info.flush_resource);
669       break;
670    case CALL_CLEAR:
671       dst->info.clear = src->info.clear;
672       break;
673    case CALL_CLEAR_BUFFER:
674       pipe_resource_reference(&dst->info.clear_buffer.res,
675                               src->info.clear_buffer.res);
676       dst->info.clear_buffer = src->info.clear_buffer;
677       break;
678    case CALL_CLEAR_RENDER_TARGET:
679       break;
680    case CALL_CLEAR_DEPTH_STENCIL:
681       break;
682    case CALL_GENERATE_MIPMAP:
683       pipe_resource_reference(&dst->info.generate_mipmap.res,
684                               src->info.generate_mipmap.res);
685       dst->info.generate_mipmap = src->info.generate_mipmap;
686       break;
687    }
688 }
689 
690 static void
691 dd_init_copy_of_draw_state(struct dd_draw_state_copy *state)
692 {
693    unsigned i,j;
694 
695    /* Just clear pointers to gallium objects. Don't clear the whole structure,
696     * because it would kill performance with its size of 130 KB.
697     */
698    memset(&state->base.index_buffer, 0,
699           sizeof(state->base.index_buffer));
700    memset(state->base.vertex_buffers, 0,
701           sizeof(state->base.vertex_buffers));
702    memset(state->base.so_targets, 0,
703           sizeof(state->base.so_targets));
704    memset(state->base.constant_buffers, 0,
705           sizeof(state->base.constant_buffers));
706    memset(state->base.sampler_views, 0,
707           sizeof(state->base.sampler_views));
708    memset(state->base.shader_images, 0,
709           sizeof(state->base.shader_images));
710    memset(state->base.shader_buffers, 0,
711           sizeof(state->base.shader_buffers));
712    memset(&state->base.framebuffer_state, 0,
713           sizeof(state->base.framebuffer_state));
714 
715    memset(state->shaders, 0, sizeof(state->shaders));
716 
717    state->base.render_cond.query = &state->render_cond;
718 
719    for (i = 0; i < PIPE_SHADER_TYPES; i++) {
720       state->base.shaders[i] = &state->shaders[i];
721       for (j = 0; j < PIPE_MAX_SAMPLERS; j++)
722          state->base.sampler_states[i][j] = &state->sampler_states[i][j];
723    }
724 
725    state->base.velems = &state->velems;
726    state->base.rs = &state->rs;
727    state->base.dsa = &state->dsa;
728    state->base.blend = &state->blend;
729 }
730 
731 static void
732 dd_unreference_copy_of_draw_state(struct dd_draw_state_copy *state)
733 {
734    struct dd_draw_state *dst = &state->base;
735    unsigned i,j;
736 
737    util_set_index_buffer(&dst->index_buffer, NULL);
738 
739    for (i = 0; i < ARRAY_SIZE(dst->vertex_buffers); i++)
740       pipe_resource_reference(&dst->vertex_buffers[i].buffer, NULL);
741    for (i = 0; i < ARRAY_SIZE(dst->so_targets); i++)
742       pipe_so_target_reference(&dst->so_targets[i], NULL);
743 
744    for (i = 0; i < PIPE_SHADER_TYPES; i++) {
745       if (dst->shaders[i])
746          tgsi_free_tokens(dst->shaders[i]->state.shader.tokens);
747 
748       for (j = 0; j < PIPE_MAX_CONSTANT_BUFFERS; j++)
749          pipe_resource_reference(&dst->constant_buffers[i][j].buffer, NULL);
750       for (j = 0; j < PIPE_MAX_SAMPLERS; j++)
751          pipe_sampler_view_reference(&dst->sampler_views[i][j], NULL);
752       for (j = 0; j < PIPE_MAX_SHADER_IMAGES; j++)
753          pipe_resource_reference(&dst->shader_images[i][j].resource, NULL);
754       for (j = 0; j < PIPE_MAX_SHADER_BUFFERS; j++)
755          pipe_resource_reference(&dst->shader_buffers[i][j].buffer, NULL);
756    }
757 
758    util_unreference_framebuffer_state(&dst->framebuffer_state);
759 }
760 
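/*
 * Deep-copy the current draw state into a record: reference all resources
 * and duplicate shader tokens so the snapshot stays valid even after the
 * application destroys the originals.
 */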
761 static void
762 dd_copy_draw_state(struct dd_draw_state *dst, struct dd_draw_state *src)
763 {
764    unsigned i,j;
765 
766    if (src->render_cond.query) {
767       *dst->render_cond.query = *src->render_cond.query;
768       dst->render_cond.condition = src->render_cond.condition;
769       dst->render_cond.mode = src->render_cond.mode;
770    } else {
771       dst->render_cond.query = NULL;
772    }
773 
774    util_set_index_buffer(&dst->index_buffer, &src->index_buffer);
775 
776    for (i = 0; i < ARRAY_SIZE(src->vertex_buffers); i++) {
777       pipe_resource_reference(&dst->vertex_buffers[i].buffer,
778                               src->vertex_buffers[i].buffer);
779       memcpy(&dst->vertex_buffers[i], &src->vertex_buffers[i],
780              sizeof(src->vertex_buffers[i]));
781    }
782 
783    dst->num_so_targets = src->num_so_targets;
784    for (i = 0; i < ARRAY_SIZE(src->so_targets); i++)
785       pipe_so_target_reference(&dst->so_targets[i], src->so_targets[i]);
786    memcpy(dst->so_offsets, src->so_offsets, sizeof(src->so_offsets));
787 
788    for (i = 0; i < PIPE_SHADER_TYPES; i++) {
789       if (!src->shaders[i]) {
790          dst->shaders[i] = NULL;
791          continue;
792       }
793 
794       if (src->shaders[i]) {
795          dst->shaders[i]->state.shader = src->shaders[i]->state.shader;
796          dst->shaders[i]->state.shader.tokens =
797             tgsi_dup_tokens(src->shaders[i]->state.shader.tokens);
798       } else {
799          dst->shaders[i] = NULL;
800       }
801 
802       for (j = 0; j < PIPE_MAX_CONSTANT_BUFFERS; j++) {
803          pipe_resource_reference(&dst->constant_buffers[i][j].buffer,
804                                  src->constant_buffers[i][j].buffer);
805          memcpy(&dst->constant_buffers[i][j], &src->constant_buffers[i][j],
806                 sizeof(src->constant_buffers[i][j]));
807       }
808 
809       for (j = 0; j < PIPE_MAX_SAMPLERS; j++) {
810          pipe_sampler_view_reference(&dst->sampler_views[i][j],
811                                      src->sampler_views[i][j]);
812          if (src->sampler_states[i][j])
813             dst->sampler_states[i][j]->state.sampler =
814                src->sampler_states[i][j]->state.sampler;
815          else
816             dst->sampler_states[i][j] = NULL;
817       }
818 
819       for (j = 0; j < PIPE_MAX_SHADER_IMAGES; j++) {
820          pipe_resource_reference(&dst->shader_images[i][j].resource,
821                                  src->shader_images[i][j].resource);
822          memcpy(&dst->shader_images[i][j], &src->shader_images[i][j],
823                 sizeof(src->shader_images[i][j]));
824       }
825 
826       for (j = 0; j < PIPE_MAX_SHADER_BUFFERS; j++) {
827          pipe_resource_reference(&dst->shader_buffers[i][j].buffer,
828                                  src->shader_buffers[i][j].buffer);
829          memcpy(&dst->shader_buffers[i][j], &src->shader_buffers[i][j],
830                 sizeof(src->shader_buffers[i][j]));
831       }
832    }
833 
834    if (src->velems)
835       dst->velems->state.velems = src->velems->state.velems;
836    else
837       dst->velems = NULL;
838 
839    if (src->rs)
840       dst->rs->state.rs = src->rs->state.rs;
841    else
842       dst->rs = NULL;
843 
844    if (src->dsa)
845       dst->dsa->state.dsa = src->dsa->state.dsa;
846    else
847       dst->dsa = NULL;
848 
849    if (src->blend)
850       dst->blend->state.blend = src->blend->state.blend;
851    else
852       dst->blend = NULL;
853 
854    dst->blend_color = src->blend_color;
855    dst->stencil_ref = src->stencil_ref;
856    dst->sample_mask = src->sample_mask;
857    dst->min_samples = src->min_samples;
858    dst->clip_state = src->clip_state;
859    util_copy_framebuffer_state(&dst->framebuffer_state, &src->framebuffer_state);
860    memcpy(dst->scissors, src->scissors, sizeof(src->scissors));
861    memcpy(dst->viewports, src->viewports, sizeof(src->viewports));
862    memcpy(dst->tess_default_levels, src->tess_default_levels,
863           sizeof(src->tess_default_levels));
864    dst->apitrace_call_number = src->apitrace_call_number;
865 }
866 
867 static void
868 dd_free_record(struct dd_draw_record **record)
869 {
870    struct dd_draw_record *next = (*record)->next;
871 
872    dd_unreference_copy_of_call(&(*record)->call);
873    dd_unreference_copy_of_draw_state(&(*record)->draw_state);
874    FREE((*record)->driver_state_log);
875    FREE(*record);
876    *record = next;
877 }
878 
879 static void
880 dd_dump_record(struct dd_context *dctx, struct dd_draw_record *record,
881                uint32_t hw_sequence_no, int64_t now)
882 {
883    FILE *f = dd_get_file_stream(dd_screen(dctx->base.screen),
884                                 record->draw_state.base.apitrace_call_number);
885    if (!f)
886       return;
887 
888    fprintf(f, "Draw call sequence # = %u\n", record->sequence_no);
889    fprintf(f, "HW reached sequence # = %u\n", hw_sequence_no);
890    fprintf(f, "Elapsed time = %"PRIi64" ms\n\n",
891            (now - record->timestamp) / 1000);
892 
893    dd_dump_call(f, &record->draw_state.base, &record->call);
894    fprintf(f, "%s\n", record->driver_state_log);
895 
896    dctx->pipe->dump_debug_state(dctx->pipe, f,
897                                 PIPE_DUMP_DEVICE_STATUS_REGISTERS);
898    dd_dump_dmesg(f);
899    fclose(f);
900 }
901 
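/*
 * Hang-detection thread used by DD_DETECT_HANGS_PIPELINED: it polls the
 * CPU-mapped fence buffer, frees records whose sequence number the GPU has
 * already reached, and once the timeout expires it dumps the oldest
 * unsignalled record and kills the process.
 */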
902 PIPE_THREAD_ROUTINE(dd_thread_pipelined_hang_detect, input)
903 {
904    struct dd_context *dctx = (struct dd_context *)input;
905    struct dd_screen *dscreen = dd_screen(dctx->base.screen);
906 
907    pipe_mutex_lock(dctx->mutex);
908 
909    while (!dctx->kill_thread) {
910       struct dd_draw_record **record = &dctx->records;
911 
912       /* Loop over all records. */
913       while (*record) {
914          int64_t now;
915 
916          /* If the fence has been signalled, release the record and all older
917           * records.
918           */
919          if (*dctx->mapped_fence >= (*record)->sequence_no) {
920             while (*record)
921                dd_free_record(record);
922             break;
923          }
924 
925          /* The fence hasn't been signalled. Check the timeout. */
926          now = os_time_get();
927          if (os_time_timeout((*record)->timestamp,
928                              (*record)->timestamp + dscreen->timeout_ms * 1000,
929                              now)) {
930             fprintf(stderr, "GPU hang detected.\n");
931 
932             /* Get the oldest unsignalled draw call. */
933             while ((*record)->next &&
934                    *dctx->mapped_fence < (*record)->next->sequence_no)
935                record = &(*record)->next;
936 
937             dd_dump_record(dctx, *record, *dctx->mapped_fence, now);
938             dd_kill_process();
939          }
940 
941          record = &(*record)->next;
942       }
943 
944       /* Unlock and sleep before starting all over again. */
945       pipe_mutex_unlock(dctx->mutex);
946       os_time_sleep(10000); /* 10 ms */
947       pipe_mutex_lock(dctx->mutex);
948    }
949 
950    /* Thread termination. */
951    while (dctx->records)
952       dd_free_record(&dctx->records);
953 
954    pipe_mutex_unlock(dctx->mutex);
955    return 0;
956 }
957 
958 static char *
959 dd_get_driver_shader_log(struct dd_context *dctx)
960 {
961 #if defined(PIPE_OS_LINUX)
962    FILE *f;
963    char *buf;
964    int written_bytes;
965 
966    if (!dctx->max_log_buffer_size)
967       dctx->max_log_buffer_size = 16 * 1024;
968 
969    /* Keep increasing the buffer size until there is enough space.
970     *
971     * open_memstream can resize automatically, but it's VERY SLOW.
972     * fmemopen is much faster.
973     */
974    while (1) {
975       buf = malloc(dctx->max_log_buffer_size);
976       buf[0] = 0;
977 
978       f = fmemopen(buf, dctx->max_log_buffer_size, "a");
979       if (!f) {
980          free(buf);
981          return NULL;
982       }
983 
984       dd_dump_driver_state(dctx, f, PIPE_DUMP_CURRENT_SHADERS);
985       written_bytes = ftell(f);
986       fclose(f);
987 
988       /* Return if the backing buffer is large enough. */
989       if (written_bytes < dctx->max_log_buffer_size - 1)
990          break;
991 
992       /* Try again. */
993       free(buf);
994       dctx->max_log_buffer_size *= 2;
995    }
996 
997    return buf;
998 #else
999    /* Return an empty string. */
1000    return (char*)calloc(1, 4);
1001 #endif
1002 }
1003 
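/*
 * Record one call for pipelined hang detection: snapshot the shader log and
 * draw state, then have the GPU write the new sequence number into the fence
 * buffer so the detection thread can see how far execution has progressed.
 */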
1004 static void
1005 dd_pipelined_process_draw(struct dd_context *dctx, struct dd_call *call)
1006 {
1007    struct pipe_context *pipe = dctx->pipe;
1008    struct dd_draw_record *record;
1009    char *log;
1010 
1011    /* Make a record of the draw call. */
1012    record = MALLOC_STRUCT(dd_draw_record);
1013    if (!record)
1014       return;
1015 
1016    /* Create the log. */
1017    log = dd_get_driver_shader_log(dctx);
1018    if (!log) {
1019       FREE(record);
1020       return;
1021    }
1022 
1023    /* Update the fence with the GPU.
1024     *
1025     * radeonsi/clear_buffer waits in the command processor until shaders are
1026     * idle before writing to memory. That's a necessary condition for isolating
1027     * draw calls.
1028     */
1029    dctx->sequence_no++;
1030    pipe->clear_buffer(pipe, dctx->fence, 0, 4, &dctx->sequence_no, 4);
1031 
1032    /* Initialize the record. */
1033    record->timestamp = os_time_get();
1034    record->sequence_no = dctx->sequence_no;
1035    record->driver_state_log = log;
1036 
1037    memset(&record->call, 0, sizeof(record->call));
1038    dd_copy_call(&record->call, call);
1039 
1040    dd_init_copy_of_draw_state(&record->draw_state);
1041    dd_copy_draw_state(&record->draw_state.base, &dctx->draw_state);
1042 
1043    /* Add the record to the list. */
1044    pipe_mutex_lock(dctx->mutex);
1045    record->next = dctx->records;
1046    dctx->records = record;
1047    pipe_mutex_unlock(dctx->mutex);
1048 }
1049 
1050 static void
1051 dd_context_flush(struct pipe_context *_pipe,
1052                  struct pipe_fence_handle **fence, unsigned flags)
1053 {
1054    struct dd_context *dctx = dd_context(_pipe);
1055    struct pipe_context *pipe = dctx->pipe;
1056 
1057    switch (dd_screen(dctx->base.screen)->mode) {
1058    case DD_DETECT_HANGS:
1059       dd_flush_and_handle_hang(dctx, fence, flags,
1060                                "GPU hang detected in pipe->flush()");
1061       break;
1062    case DD_DETECT_HANGS_PIPELINED: /* nothing to do here */
1063    case DD_DUMP_ALL_CALLS:
1064    case DD_DUMP_APITRACE_CALL:
1065       pipe->flush(pipe, fence, flags);
1066       break;
1067    default:
1068       assert(0);
1069    }
1070 }
1071 
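/*
 * dd_before_draw/dd_after_draw bracket every wrapped call. Depending on the
 * screen's mode they flush and check for hangs, record the call for the
 * pipelined detector, or write a report for every call or for one specific
 * apitrace call number.
 */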
1072 static void
1073 dd_before_draw(struct dd_context *dctx)
1074 {
1075    struct dd_screen *dscreen = dd_screen(dctx->base.screen);
1076 
1077    if (dscreen->mode == DD_DETECT_HANGS &&
1078        !dscreen->no_flush &&
1079        dctx->num_draw_calls >= dscreen->skip_count)
1080       dd_flush_and_handle_hang(dctx, NULL, 0,
1081                                "GPU hang most likely caused by internal "
1082                                "driver commands");
1083 }
1084 
1085 static void
1086 dd_after_draw(struct dd_context *dctx, struct dd_call *call)
1087 {
1088    struct dd_screen *dscreen = dd_screen(dctx->base.screen);
1089    struct pipe_context *pipe = dctx->pipe;
1090 
1091    if (dctx->num_draw_calls >= dscreen->skip_count) {
1092       switch (dscreen->mode) {
1093       case DD_DETECT_HANGS:
1094          if (!dscreen->no_flush &&
1095             dd_flush_and_check_hang(dctx, NULL, 0)) {
1096             dd_write_report(dctx, call,
1097                          PIPE_DUMP_DEVICE_STATUS_REGISTERS |
1098                          PIPE_DUMP_CURRENT_STATES |
1099                          PIPE_DUMP_CURRENT_SHADERS |
1100                          PIPE_DUMP_LAST_COMMAND_BUFFER,
1101                          true);
1102 
1103             /* Terminate the process to prevent future hangs. */
1104             dd_kill_process();
1105          }
1106          break;
1107       case DD_DETECT_HANGS_PIPELINED:
1108          dd_pipelined_process_draw(dctx, call);
1109          break;
1110       case DD_DUMP_ALL_CALLS:
1111          if (!dscreen->no_flush)
1112             pipe->flush(pipe, NULL, 0);
1113          dd_write_report(dctx, call,
1114                          PIPE_DUMP_CURRENT_STATES |
1115                          PIPE_DUMP_CURRENT_SHADERS |
1116                          PIPE_DUMP_LAST_COMMAND_BUFFER,
1117                          false);
1118          break;
1119       case DD_DUMP_APITRACE_CALL:
1120          if (dscreen->apitrace_dump_call ==
1121              dctx->draw_state.apitrace_call_number) {
1122             dd_write_report(dctx, call,
1123                             PIPE_DUMP_CURRENT_STATES |
1124                             PIPE_DUMP_CURRENT_SHADERS,
1125                             false);
1126             /* No need to continue. */
1127             exit(0);
1128          }
1129          break;
1130       default:
1131          assert(0);
1132       }
1133    }
1134 
1135    ++dctx->num_draw_calls;
1136    if (dscreen->skip_count && dctx->num_draw_calls % 10000 == 0)
1137       fprintf(stderr, "Gallium debugger reached %u draw calls.\n",
1138               dctx->num_draw_calls);
1139 }
1140 
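/*
 * The context wrappers below all follow the same pattern: fill in a dd_call
 * describing the request, call dd_before_draw, forward to the real driver,
 * and finish with dd_after_draw.
 */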
1141 static void
1142 dd_context_draw_vbo(struct pipe_context *_pipe,
1143                     const struct pipe_draw_info *info)
1144 {
1145    struct dd_context *dctx = dd_context(_pipe);
1146    struct pipe_context *pipe = dctx->pipe;
1147    struct dd_call call;
1148 
1149    call.type = CALL_DRAW_VBO;
1150    call.info.draw_vbo = *info;
1151 
1152    dd_before_draw(dctx);
1153    pipe->draw_vbo(pipe, info);
1154    dd_after_draw(dctx, &call);
1155 }
1156 
1157 static void
1158 dd_context_launch_grid(struct pipe_context *_pipe,
1159                        const struct pipe_grid_info *info)
1160 {
1161    struct dd_context *dctx = dd_context(_pipe);
1162    struct pipe_context *pipe = dctx->pipe;
1163    struct dd_call call;
1164 
1165    call.type = CALL_LAUNCH_GRID;
1166    call.info.launch_grid = *info;
1167 
1168    dd_before_draw(dctx);
1169    pipe->launch_grid(pipe, info);
1170    dd_after_draw(dctx, &call);
1171 }
1172 
1173 static void
1174 dd_context_resource_copy_region(struct pipe_context *_pipe,
1175                                 struct pipe_resource *dst, unsigned dst_level,
1176                                 unsigned dstx, unsigned dsty, unsigned dstz,
1177                                 struct pipe_resource *src, unsigned src_level,
1178                                 const struct pipe_box *src_box)
1179 {
1180    struct dd_context *dctx = dd_context(_pipe);
1181    struct pipe_context *pipe = dctx->pipe;
1182    struct dd_call call;
1183 
1184    call.type = CALL_RESOURCE_COPY_REGION;
1185    call.info.resource_copy_region.dst = dst;
1186    call.info.resource_copy_region.dst_level = dst_level;
1187    call.info.resource_copy_region.dstx = dstx;
1188    call.info.resource_copy_region.dsty = dsty;
1189    call.info.resource_copy_region.dstz = dstz;
1190    call.info.resource_copy_region.src = src;
1191    call.info.resource_copy_region.src_level = src_level;
1192    call.info.resource_copy_region.src_box = *src_box;
1193 
1194    dd_before_draw(dctx);
1195    pipe->resource_copy_region(pipe,
1196                               dst, dst_level, dstx, dsty, dstz,
1197                               src, src_level, src_box);
1198    dd_after_draw(dctx, &call);
1199 }
1200 
1201 static void
1202 dd_context_blit(struct pipe_context *_pipe, const struct pipe_blit_info *info)
1203 {
1204    struct dd_context *dctx = dd_context(_pipe);
1205    struct pipe_context *pipe = dctx->pipe;
1206    struct dd_call call;
1207 
1208    call.type = CALL_BLIT;
1209    call.info.blit = *info;
1210 
1211    dd_before_draw(dctx);
1212    pipe->blit(pipe, info);
1213    dd_after_draw(dctx, &call);
1214 }
1215 
1216 static boolean
1217 dd_context_generate_mipmap(struct pipe_context *_pipe,
1218                            struct pipe_resource *res,
1219                            enum pipe_format format,
1220                            unsigned base_level,
1221                            unsigned last_level,
1222                            unsigned first_layer,
1223                            unsigned last_layer)
1224 {
1225    struct dd_context *dctx = dd_context(_pipe);
1226    struct pipe_context *pipe = dctx->pipe;
1227    struct dd_call call;
1228    boolean result;
1229 
1230    call.type = CALL_GENERATE_MIPMAP;
1231    call.info.generate_mipmap.res = res;
1232    call.info.generate_mipmap.format = format;
1233    call.info.generate_mipmap.base_level = base_level;
1234    call.info.generate_mipmap.last_level = last_level;
1235    call.info.generate_mipmap.first_layer = first_layer;
1236    call.info.generate_mipmap.last_layer = last_layer;
1237 
1238    dd_before_draw(dctx);
1239    result = pipe->generate_mipmap(pipe, res, format, base_level, last_level,
1240                                   first_layer, last_layer);
1241    dd_after_draw(dctx, &call);
1242    return result;
1243 }
1244 
1245 static void
1246 dd_context_flush_resource(struct pipe_context *_pipe,
1247                           struct pipe_resource *resource)
1248 {
1249    struct dd_context *dctx = dd_context(_pipe);
1250    struct pipe_context *pipe = dctx->pipe;
1251    struct dd_call call;
1252 
1253    call.type = CALL_FLUSH_RESOURCE;
1254    call.info.flush_resource = resource;
1255 
1256    dd_before_draw(dctx);
1257    pipe->flush_resource(pipe, resource);
1258    dd_after_draw(dctx, &call);
1259 }
1260 
1261 static void
1262 dd_context_clear(struct pipe_context *_pipe, unsigned buffers,
1263                  const union pipe_color_union *color, double depth,
1264                  unsigned stencil)
1265 {
1266    struct dd_context *dctx = dd_context(_pipe);
1267    struct pipe_context *pipe = dctx->pipe;
1268    struct dd_call call;
1269 
1270    call.type = CALL_CLEAR;
1271    call.info.clear.buffers = buffers;
1272    call.info.clear.color = *color;
1273    call.info.clear.depth = depth;
1274    call.info.clear.stencil = stencil;
1275 
1276    dd_before_draw(dctx);
1277    pipe->clear(pipe, buffers, color, depth, stencil);
1278    dd_after_draw(dctx, &call);
1279 }
1280 
1281 static void
1282 dd_context_clear_render_target(struct pipe_context *_pipe,
1283                                struct pipe_surface *dst,
1284                                const union pipe_color_union *color,
1285                                unsigned dstx, unsigned dsty,
1286                                unsigned width, unsigned height,
1287                                bool render_condition_enabled)
1288 {
1289    struct dd_context *dctx = dd_context(_pipe);
1290    struct pipe_context *pipe = dctx->pipe;
1291    struct dd_call call;
1292 
1293    call.type = CALL_CLEAR_RENDER_TARGET;
1294 
1295    dd_before_draw(dctx);
1296    pipe->clear_render_target(pipe, dst, color, dstx, dsty, width, height,
1297                              render_condition_enabled);
1298    dd_after_draw(dctx, &call);
1299 }
1300 
1301 static void
1302 dd_context_clear_depth_stencil(struct pipe_context *_pipe,
1303                                struct pipe_surface *dst, unsigned clear_flags,
1304                                double depth, unsigned stencil, unsigned dstx,
1305                                unsigned dsty, unsigned width, unsigned height,
1306                                bool render_condition_enabled)
1307 {
1308    struct dd_context *dctx = dd_context(_pipe);
1309    struct pipe_context *pipe = dctx->pipe;
1310    struct dd_call call;
1311 
1312    call.type = CALL_CLEAR_DEPTH_STENCIL;
1313 
1314    dd_before_draw(dctx);
1315    pipe->clear_depth_stencil(pipe, dst, clear_flags, depth, stencil,
1316                              dstx, dsty, width, height,
1317                              render_condition_enabled);
1318    dd_after_draw(dctx, &call);
1319 }
1320 
1321 static void
1322 dd_context_clear_buffer(struct pipe_context *_pipe, struct pipe_resource *res,
1323                         unsigned offset, unsigned size,
1324                         const void *clear_value, int clear_value_size)
1325 {
1326    struct dd_context *dctx = dd_context(_pipe);
1327    struct pipe_context *pipe = dctx->pipe;
1328    struct dd_call call;
1329 
1330    call.type = CALL_CLEAR_BUFFER;
1331    call.info.clear_buffer.res = res;
1332    call.info.clear_buffer.offset = offset;
1333    call.info.clear_buffer.size = size;
1334    call.info.clear_buffer.clear_value = clear_value;
1335    call.info.clear_buffer.clear_value_size = clear_value_size;
1336 
1337    dd_before_draw(dctx);
1338    pipe->clear_buffer(pipe, res, offset, size, clear_value, clear_value_size);
1339    dd_after_draw(dctx, &call);
1340 }
1341 
1342 void
1343 dd_init_draw_functions(struct dd_context *dctx)
1344 {
1345    CTX_INIT(flush);
1346    CTX_INIT(draw_vbo);
1347    CTX_INIT(launch_grid);
1348    CTX_INIT(resource_copy_region);
1349    CTX_INIT(blit);
1350    CTX_INIT(clear);
1351    CTX_INIT(clear_render_target);
1352    CTX_INIT(clear_depth_stencil);
1353    CTX_INIT(clear_buffer);
1354    CTX_INIT(flush_resource);
1355    CTX_INIT(generate_mipmap);
1356 }
1357