1 /**************************************************************************
2 *
3 * Copyright 2015 Advanced Micro Devices, Inc.
4 * Copyright 2008 VMware, Inc.
5 * All Rights Reserved.
6 *
7 * Permission is hereby granted, free of charge, to any person obtaining a
8 * copy of this software and associated documentation files (the "Software"),
9 * to deal in the Software without restriction, including without limitation
10 * on the rights to use, copy, modify, merge, publish, distribute, sub
11 * license, and/or sell copies of the Software, and to permit persons to whom
12 * the Software is furnished to do so, subject to the following conditions:
13 *
14 * The above copyright notice and this permission notice (including the next
15 * paragraph) shall be included in all copies or substantial portions of the
16 * Software.
17 *
18 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
19 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
20 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
21 * THE AUTHOR(S) AND/OR THEIR SUPPLIERS BE LIABLE FOR ANY CLAIM,
22 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
23 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
24 * USE OR OTHER DEALINGS IN THE SOFTWARE.
25 *
26 **************************************************************************/
27
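/* Call recording, state dumping, and GPU hang detection for the dd (debug)
 * wrapper context.
 */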
28 #include "dd_pipe.h"
29
30 #include "util/u_dump.h"
31 #include "util/format/u_format.h"
32 #include "util/u_framebuffer.h"
33 #include "util/u_helpers.h"
34 #include "util/u_inlines.h"
35 #include "util/u_memory.h"
36 #include "util/u_process.h"
37 #include "tgsi/tgsi_parse.h"
38 #include "tgsi/tgsi_scan.h"
39 #include "util/os_time.h"
40 #include <inttypes.h>
41 #include "util/detect.h"
42
43 void
44 dd_get_debug_filename_and_mkdir(char *buf, size_t buflen, bool verbose)
45 {
46 static unsigned index;
47 char dir[256];
48 const char *proc_name = util_get_process_name();
49
50 if (!proc_name) {
51 fprintf(stderr, "dd: can't get the process name\n");
52 proc_name = "unknown";
53 }
54
55 snprintf(dir, sizeof(dir), "%s/"DD_DIR, debug_get_option("HOME", "."));
56
57 if (mkdir(dir, 0774) && errno != EEXIST)
58 fprintf(stderr, "dd: can't create a directory (%i)\n", errno);
59
60 snprintf(buf, buflen, "%s/%s_%u_%08u", dir, proc_name, (unsigned int)getpid(),
61 (unsigned int)p_atomic_inc_return(&index) - 1);
62
63 if (verbose)
64 fprintf(stderr, "dd: dumping to file %s\n", buf);
65 }
66
67 FILE *
68 dd_get_debug_file(bool verbose)
69 {
70 char name[512];
71 FILE *f;
72
73 dd_get_debug_filename_and_mkdir(name, sizeof(name), verbose);
74 f = fopen(name, "w");
75 if (!f) {
76 fprintf(stderr, "dd: can't open file %s\n", name);
77 return NULL;
78 }
79
80 return f;
81 }
82
83 void
84 dd_parse_apitrace_marker(const char *string, int len, unsigned *call_number)
85 {
86 unsigned num;
87 char *s;
88
89 if (len <= 0)
90 return;
91
92 /* Make it zero-terminated. */
93 s = alloca(len + 1);
94 memcpy(s, string, len);
95 s[len] = 0;
96
97 /* Parse the number. */
98 errno = 0;
99 num = strtol(s, NULL, 10);
100 if (errno)
101 return;
102
103 *call_number = num;
104 }
105
106 void
107 dd_write_header(FILE *f, struct pipe_screen *screen, unsigned apitrace_call_number)
108 {
109 char cmd_line[4096];
110 if (util_get_command_line(cmd_line, sizeof(cmd_line)))
111 fprintf(f, "Command: %s\n", cmd_line);
112 fprintf(f, "Driver vendor: %s\n", screen->get_vendor(screen));
113 fprintf(f, "Device vendor: %s\n", screen->get_device_vendor(screen));
114 fprintf(f, "Device name: %s\n\n", screen->get_name(screen));
115
116 if (apitrace_call_number)
117 fprintf(f, "Last apitrace call: %u\n\n", apitrace_call_number);
118 }
119
120 FILE *
121 dd_get_file_stream(struct dd_screen *dscreen, unsigned apitrace_call_number)
122 {
123 struct pipe_screen *screen = dscreen->screen;
124
125 FILE *f = dd_get_debug_file(dscreen->verbose);
126 if (!f)
127 return NULL;
128
129 dd_write_header(f, screen, apitrace_call_number);
130 return f;
131 }
132
133 static void
134 dd_dump_dmesg(FILE *f)
135 {
136 #if DETECT_OS_LINUX
137 char line[2000];
138 FILE *p = popen("dmesg | tail -n60", "r");
139
140 if (!p)
141 return;
142
143 fprintf(f, "\nLast 60 lines of dmesg:\n\n");
144 while (fgets(line, sizeof(line), p))
145 fputs(line, f);
146
147 pclose(p);
148 #endif
149 }
150
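/* Return the number of viewports worth dumping: all of them if the last
 * shader stage before rasterization writes the viewport index, otherwise
 * just the first one.
 */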
151 static unsigned
152 dd_num_active_viewports(struct dd_draw_state *dstate)
153 {
154 struct tgsi_shader_info info;
155 const struct tgsi_token *tokens;
156
157 if (dstate->shaders[PIPE_SHADER_GEOMETRY])
158 tokens = dstate->shaders[PIPE_SHADER_GEOMETRY]->state.shader.tokens;
159 else if (dstate->shaders[PIPE_SHADER_TESS_EVAL])
160 tokens = dstate->shaders[PIPE_SHADER_TESS_EVAL]->state.shader.tokens;
161 else if (dstate->shaders[PIPE_SHADER_VERTEX])
162 tokens = dstate->shaders[PIPE_SHADER_VERTEX]->state.shader.tokens;
163 else
164 return 1;
165
166 if (tokens) {
167 tgsi_scan_shader(tokens, &info);
168 if (info.writes_viewport_index)
169 return PIPE_MAX_VIEWPORTS;
170 }
171
172 return 1;
173 }
174
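/* ANSI color codes and helper macros that pretty-print pieces of state
 * through the util_dump_* functions.
 */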
175 #define COLOR_RESET "\033[0m"
176 #define COLOR_SHADER "\033[1;32m"
177 #define COLOR_STATE "\033[1;33m"
178
179 #define DUMP(name, var) do { \
180 fprintf(f, COLOR_STATE #name ": " COLOR_RESET); \
181 util_dump_##name(f, var); \
182 fprintf(f, "\n"); \
183 } while(0)
184
185 #define DUMP_I(name, var, i) do { \
186 fprintf(f, COLOR_STATE #name " %i: " COLOR_RESET, i); \
187 util_dump_##name(f, var); \
188 fprintf(f, "\n"); \
189 } while(0)
190
191 #define DUMP_M(name, var, member) do { \
192 fprintf(f, " " #member ": "); \
193 util_dump_##name(f, (var)->member); \
194 fprintf(f, "\n"); \
195 } while(0)
196
197 #define DUMP_M_ADDR(name, var, member) do { \
198 fprintf(f, " " #member ": "); \
199 util_dump_##name(f, &(var)->member); \
200 fprintf(f, "\n"); \
201 } while(0)
202
203 #define PRINT_NAMED(type, name, value) \
204 do { \
205 fprintf(f, COLOR_STATE "%s" COLOR_RESET " = ", name); \
206 util_dump_##type(f, value); \
207 fprintf(f, "\n"); \
208 } while (0)
209
210 static void
211 util_dump_uint(FILE *f, unsigned i)
212 {
213 fprintf(f, "%u", i);
214 }
215
216 static void
217 util_dump_int(FILE *f, int i)
218 {
219 fprintf(f, "%d", i);
220 }
221
222 static void
223 util_dump_hex(FILE *f, unsigned i)
224 {
225 fprintf(f, "0x%x", i);
226 }
227
228 static void
229 util_dump_double(FILE *f, double d)
230 {
231 fprintf(f, "%f", d);
232 }
233
234 static void
235 util_dump_format(FILE *f, enum pipe_format format)
236 {
237 fprintf(f, "%s", util_format_name(format));
238 }
239
240 static void
241 util_dump_color_union(FILE *f, const union pipe_color_union *color)
242 {
243 fprintf(f, "{f = {%f, %f, %f, %f}, ui = {%u, %u, %u, %u}",
244 color->f[0], color->f[1], color->f[2], color->f[3],
245 color->ui[0], color->ui[1], color->ui[2], color->ui[3]);
246 }
247
248 static void
249 dd_dump_render_condition(struct dd_draw_state *dstate, FILE *f)
250 {
251 if (dstate->render_cond.query) {
252 fprintf(f, "render condition:\n");
253 DUMP_M(query_type, &dstate->render_cond, query->type);
254 DUMP_M(uint, &dstate->render_cond, condition);
255 DUMP_M(uint, &dstate->render_cond, mode);
256 fprintf(f, "\n");
257 }
258 }
259
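/* Dump one shader stage: rasterizer/viewport/scissor state for the fragment
 * stage, the shader itself, and its constant buffers, samplers, sampler
 * views, images, and shader buffers.
 */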
260 static void
261 dd_dump_shader(struct dd_draw_state *dstate, enum pipe_shader_type sh, FILE *f)
262 {
263 int i;
264 const char *shader_str[PIPE_SHADER_TYPES];
265
266 shader_str[PIPE_SHADER_VERTEX] = "VERTEX";
267 shader_str[PIPE_SHADER_TESS_CTRL] = "TESS_CTRL";
268 shader_str[PIPE_SHADER_TESS_EVAL] = "TESS_EVAL";
269 shader_str[PIPE_SHADER_GEOMETRY] = "GEOMETRY";
270 shader_str[PIPE_SHADER_FRAGMENT] = "FRAGMENT";
271 shader_str[PIPE_SHADER_COMPUTE] = "COMPUTE";
272
273 if (sh == PIPE_SHADER_TESS_CTRL &&
274 !dstate->shaders[PIPE_SHADER_TESS_CTRL] &&
275 dstate->shaders[PIPE_SHADER_TESS_EVAL])
276 fprintf(f, "tess_state: {default_outer_level = {%f, %f, %f, %f}, "
277 "default_inner_level = {%f, %f}}\n",
278 dstate->tess_default_levels[0],
279 dstate->tess_default_levels[1],
280 dstate->tess_default_levels[2],
281 dstate->tess_default_levels[3],
282 dstate->tess_default_levels[4],
283 dstate->tess_default_levels[5]);
284
285 if (sh == PIPE_SHADER_FRAGMENT)
286 if (dstate->rs) {
287 unsigned num_viewports = dd_num_active_viewports(dstate);
288
289 if (dstate->rs->state.rs.clip_plane_enable)
290 DUMP(clip_state, &dstate->clip_state);
291
292 for (i = 0; i < num_viewports; i++)
293 DUMP_I(viewport_state, &dstate->viewports[i], i);
294
295 if (dstate->rs->state.rs.scissor)
296 for (i = 0; i < num_viewports; i++)
297 DUMP_I(scissor_state, &dstate->scissors[i], i);
298
299 DUMP(rasterizer_state, &dstate->rs->state.rs);
300
301 if (dstate->rs->state.rs.poly_stipple_enable)
302 DUMP(poly_stipple, &dstate->polygon_stipple);
303 fprintf(f, "\n");
304 }
305
306 if (!dstate->shaders[sh])
307 return;
308
309 fprintf(f, COLOR_SHADER "begin shader: %s" COLOR_RESET "\n", shader_str[sh]);
310 DUMP(shader_state, &dstate->shaders[sh]->state.shader);
311
312 for (i = 0; i < PIPE_MAX_CONSTANT_BUFFERS; i++)
313 if (dstate->constant_buffers[sh][i].buffer ||
314 dstate->constant_buffers[sh][i].user_buffer) {
315 DUMP_I(constant_buffer, &dstate->constant_buffers[sh][i], i);
316 if (dstate->constant_buffers[sh][i].buffer)
317 DUMP_M(resource, &dstate->constant_buffers[sh][i], buffer);
318 }
319
320 for (i = 0; i < PIPE_MAX_SAMPLERS; i++)
321 if (dstate->sampler_states[sh][i])
322 DUMP_I(sampler_state, &dstate->sampler_states[sh][i]->state.sampler, i);
323
324 for (i = 0; i < PIPE_MAX_SAMPLERS; i++)
325 if (dstate->sampler_views[sh][i]) {
326 DUMP_I(sampler_view, dstate->sampler_views[sh][i], i);
327 DUMP_M(resource, dstate->sampler_views[sh][i], texture);
328 }
329
330 for (i = 0; i < PIPE_MAX_SHADER_IMAGES; i++)
331 if (dstate->shader_images[sh][i].resource) {
332 DUMP_I(image_view, &dstate->shader_images[sh][i], i);
333 if (dstate->shader_images[sh][i].resource)
334 DUMP_M(resource, &dstate->shader_images[sh][i], resource);
335 }
336
337 for (i = 0; i < PIPE_MAX_SHADER_BUFFERS; i++)
338 if (dstate->shader_buffers[sh][i].buffer) {
339 DUMP_I(shader_buffer, &dstate->shader_buffers[sh][i], i);
340 if (dstate->shader_buffers[sh][i].buffer)
341 DUMP_M(resource, &dstate->shader_buffers[sh][i], buffer);
342 }
343
344 fprintf(f, COLOR_SHADER "end shader: %s" COLOR_RESET "\n\n", shader_str[sh]);
345 }
346
347 static void
348 dd_dump_flush(struct dd_draw_state *dstate, struct call_flush *info, FILE *f)
349 {
350 fprintf(f, "%s:\n", __func__+8);
351 DUMP_M(hex, info, flags);
352 }
353
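/* Dump a draw call together with all state it depends on: vertex buffers and
 * elements, stream output, every graphics shader stage, depth/stencil/alpha,
 * blend, and framebuffer state.
 */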
354 static void
355 dd_dump_draw_vbo(struct dd_draw_state *dstate, struct pipe_draw_info *info,
356 unsigned drawid_offset,
357 const struct pipe_draw_indirect_info *indirect,
358 const struct pipe_draw_start_count_bias *draw, FILE *f)
359 {
360 int sh, i;
361
362 DUMP(draw_info, info);
363 PRINT_NAMED(int, "drawid offset", drawid_offset);
364 DUMP(draw_start_count_bias, draw);
365 if (indirect) {
366 if (indirect->buffer)
367 DUMP_M(resource, indirect, buffer);
368 if (indirect->indirect_draw_count)
369 DUMP_M(resource, indirect, indirect_draw_count);
370 if (indirect->count_from_stream_output)
371 DUMP_M(stream_output_target, indirect, count_from_stream_output);
372 }
373
374 fprintf(f, "\n");
375
376 /* TODO: dump active queries */
377
378 dd_dump_render_condition(dstate, f);
379
380 for (i = 0; i < PIPE_MAX_ATTRIBS; i++)
381 if (dstate->vertex_buffers[i].buffer.resource) {
382 DUMP_I(vertex_buffer, &dstate->vertex_buffers[i], i);
383 if (!dstate->vertex_buffers[i].is_user_buffer)
384 DUMP_M(resource, &dstate->vertex_buffers[i], buffer.resource);
385 }
386
387 if (dstate->velems) {
388 PRINT_NAMED(uint, "num vertex elements",
389 dstate->velems->state.velems.count);
390 for (i = 0; i < dstate->velems->state.velems.count; i++) {
391 fprintf(f, " ");
392 DUMP_I(vertex_element, &dstate->velems->state.velems.velems[i], i);
393 }
394 }
395
396 PRINT_NAMED(uint, "num stream output targets", dstate->num_so_targets);
397 for (i = 0; i < dstate->num_so_targets; i++)
398 if (dstate->so_targets[i]) {
399 DUMP_I(stream_output_target, dstate->so_targets[i], i);
400 DUMP_M(resource, dstate->so_targets[i], buffer);
401 fprintf(f, " offset = %i\n", dstate->so_offsets[i]);
402 }
403
404 fprintf(f, "\n");
405 for (sh = 0; sh < PIPE_SHADER_TYPES; sh++) {
406 if (sh == PIPE_SHADER_COMPUTE)
407 continue;
408
409 dd_dump_shader(dstate, sh, f);
410 }
411
412 if (dstate->dsa)
413 DUMP(depth_stencil_alpha_state, &dstate->dsa->state.dsa);
414 DUMP(stencil_ref, &dstate->stencil_ref);
415
416 if (dstate->blend)
417 DUMP(blend_state, &dstate->blend->state.blend);
418 DUMP(blend_color, &dstate->blend_color);
419
420 PRINT_NAMED(uint, "min_samples", dstate->min_samples);
421 PRINT_NAMED(hex, "sample_mask", dstate->sample_mask);
422 fprintf(f, "\n");
423
424 DUMP(framebuffer_state, &dstate->framebuffer_state);
425 for (i = 0; i < dstate->framebuffer_state.nr_cbufs; i++)
426 if (dstate->framebuffer_state.cbufs[i]) {
427 fprintf(f, " " COLOR_STATE "cbufs[%i]:" COLOR_RESET "\n ", i);
428 DUMP(surface, dstate->framebuffer_state.cbufs[i]);
429 fprintf(f, " ");
430 DUMP(resource, dstate->framebuffer_state.cbufs[i]->texture);
431 }
432 if (dstate->framebuffer_state.zsbuf) {
433 fprintf(f, " " COLOR_STATE "zsbuf:" COLOR_RESET "\n ");
434 DUMP(surface, dstate->framebuffer_state.zsbuf);
435 fprintf(f, " ");
436 DUMP(resource, dstate->framebuffer_state.zsbuf->texture);
437 }
438 fprintf(f, "\n");
439 }
440
441 static void
442 dd_dump_launch_grid(struct dd_draw_state *dstate, struct pipe_grid_info *info, FILE *f)
443 {
444 fprintf(f, "%s:\n", __func__+8);
445 DUMP(grid_info, info);
446 fprintf(f, "\n");
447
448 dd_dump_shader(dstate, PIPE_SHADER_COMPUTE, f);
449 fprintf(f, "\n");
450 }
451
452 static void
453 dd_dump_resource_copy_region(struct dd_draw_state *dstate,
454 struct call_resource_copy_region *info,
455 FILE *f)
456 {
457 fprintf(f, "%s:\n", __func__+8);
458 DUMP_M(resource, info, dst);
459 DUMP_M(uint, info, dst_level);
460 DUMP_M(uint, info, dstx);
461 DUMP_M(uint, info, dsty);
462 DUMP_M(uint, info, dstz);
463 DUMP_M(resource, info, src);
464 DUMP_M(uint, info, src_level);
465 DUMP_M_ADDR(box, info, src_box);
466 }
467
468 static void
469 dd_dump_blit(struct dd_draw_state *dstate, struct pipe_blit_info *info, FILE *f)
470 {
471 fprintf(f, "%s:\n", __func__+8);
472 DUMP_M(resource, info, dst.resource);
473 DUMP_M(uint, info, dst.level);
474 DUMP_M_ADDR(box, info, dst.box);
475 DUMP_M(format, info, dst.format);
476
477 DUMP_M(resource, info, src.resource);
478 DUMP_M(uint, info, src.level);
479 DUMP_M_ADDR(box, info, src.box);
480 DUMP_M(format, info, src.format);
481
482 DUMP_M(hex, info, mask);
483 DUMP_M(uint, info, filter);
484 DUMP_M(uint, info, scissor_enable);
485 DUMP_M_ADDR(scissor_state, info, scissor);
486 DUMP_M(uint, info, render_condition_enable);
487
488 if (info->render_condition_enable)
489 dd_dump_render_condition(dstate, f);
490 }
491
492 static void
493 dd_dump_generate_mipmap(struct dd_draw_state *dstate, FILE *f)
494 {
495 fprintf(f, "%s:\n", __func__+8);
496 /* TODO */
497 }
498
499 static void
500 dd_dump_get_query_result_resource(struct call_get_query_result_resource *info, FILE *f)
501 {
502 fprintf(f, "%s:\n", __func__ + 8);
503 DUMP_M(query_type, info, query_type);
504 DUMP_M(query_flags, info, flags);
505 DUMP_M(query_value_type, info, result_type);
506 DUMP_M(int, info, index);
507 DUMP_M(resource, info, resource);
508 DUMP_M(uint, info, offset);
509 }
510
511 static void
512 dd_dump_flush_resource(struct dd_draw_state *dstate, struct pipe_resource *res,
513 FILE *f)
514 {
515 fprintf(f, "%s:\n", __func__+8);
516 DUMP(resource, res);
517 }
518
519 static void
520 dd_dump_clear(struct dd_draw_state *dstate, struct call_clear *info, FILE *f)
521 {
522 fprintf(f, "%s:\n", __func__+8);
523 DUMP_M(uint, info, buffers);
524 fprintf(f, " scissor_state: %d,%d %d,%d\n",
525 info->scissor_state.minx, info->scissor_state.miny,
526 info->scissor_state.maxx, info->scissor_state.maxy);
527 DUMP_M_ADDR(color_union, info, color);
528 DUMP_M(double, info, depth);
529 DUMP_M(hex, info, stencil);
530 }
531
532 static void
533 dd_dump_clear_buffer(struct dd_draw_state *dstate, struct call_clear_buffer *info,
534 FILE *f)
535 {
536 int i;
537 const char *value = (const char*)info->clear_value;
538
539 fprintf(f, "%s:\n", __func__+8);
540 DUMP_M(resource, info, res);
541 DUMP_M(uint, info, offset);
542 DUMP_M(uint, info, size);
543 DUMP_M(uint, info, clear_value_size);
544
545 fprintf(f, " clear_value:");
546 for (i = 0; i < info->clear_value_size; i++)
547 fprintf(f, " %02x", value[i]);
548 fprintf(f, "\n");
549 }
550
551 static void
552 dd_dump_transfer_map(struct call_transfer_map *info, FILE *f)
553 {
554 fprintf(f, "%s:\n", __func__+8);
555 DUMP_M_ADDR(transfer, info, transfer);
556 DUMP_M(ptr, info, transfer_ptr);
557 DUMP_M(ptr, info, ptr);
558 }
559
560 static void
561 dd_dump_transfer_flush_region(struct call_transfer_flush_region *info, FILE *f)
562 {
563 fprintf(f, "%s:\n", __func__+8);
564 DUMP_M_ADDR(transfer, info, transfer);
565 DUMP_M(ptr, info, transfer_ptr);
566 DUMP_M_ADDR(box, info, box);
567 }
568
569 static void
570 dd_dump_transfer_unmap(struct call_transfer_unmap *info, FILE *f)
571 {
572 fprintf(f, "%s:\n", __func__+8);
573 DUMP_M_ADDR(transfer, info, transfer);
574 DUMP_M(ptr, info, transfer_ptr);
575 }
576
577 static void
578 dd_dump_buffer_subdata(struct call_buffer_subdata *info, FILE *f)
579 {
580 fprintf(f, "%s:\n", __func__+8);
581 DUMP_M(resource, info, resource);
582 DUMP_M(transfer_usage, info, usage);
583 DUMP_M(uint, info, offset);
584 DUMP_M(uint, info, size);
585 DUMP_M(ptr, info, data);
586 }
587
588 static void
589 dd_dump_texture_subdata(struct call_texture_subdata *info, FILE *f)
590 {
591 fprintf(f, "%s:\n", __func__+8);
592 DUMP_M(resource, info, resource);
593 DUMP_M(uint, info, level);
594 DUMP_M(transfer_usage, info, usage);
595 DUMP_M_ADDR(box, info, box);
596 DUMP_M(ptr, info, data);
597 DUMP_M(uint, info, stride);
598 DUMP_M(uint, info, layer_stride);
599 }
600
601 static void
602 dd_dump_clear_texture(struct dd_draw_state *dstate, FILE *f)
603 {
604 fprintf(f, "%s:\n", __func__+8);
605 /* TODO */
606 }
607
608 static void
609 dd_dump_clear_render_target(struct dd_draw_state *dstate, FILE *f)
610 {
611 fprintf(f, "%s:\n", __func__+8);
612 /* TODO */
613 }
614
615 static void
616 dd_dump_clear_depth_stencil(struct dd_draw_state *dstate, FILE *f)
617 {
618 fprintf(f, "%s:\n", __func__+8);
619 /* TODO */
620 }
621
622 static void
623 dd_dump_driver_state(struct dd_context *dctx, FILE *f, unsigned flags)
624 {
625 if (dctx->pipe->dump_debug_state) {
626 fprintf(f,"\n\n**************************************************"
627 "***************************\n");
628 fprintf(f, "Driver-specific state:\n\n");
629 dctx->pipe->dump_debug_state(dctx->pipe, f, flags);
630 }
631 }
632
633 static void
634 dd_dump_call(FILE *f, struct dd_draw_state *state, struct dd_call *call)
635 {
636 switch (call->type) {
637 case CALL_FLUSH:
638 dd_dump_flush(state, &call->info.flush, f);
639 break;
640 case CALL_DRAW_VBO:
641 dd_dump_draw_vbo(state, &call->info.draw_vbo.info,
642 call->info.draw_vbo.drawid_offset,
643 &call->info.draw_vbo.indirect,
644 &call->info.draw_vbo.draw, f);
645 break;
646 case CALL_LAUNCH_GRID:
647 dd_dump_launch_grid(state, &call->info.launch_grid, f);
648 break;
649 case CALL_RESOURCE_COPY_REGION:
650 dd_dump_resource_copy_region(state,
651 &call->info.resource_copy_region, f);
652 break;
653 case CALL_BLIT:
654 dd_dump_blit(state, &call->info.blit, f);
655 break;
656 case CALL_FLUSH_RESOURCE:
657 dd_dump_flush_resource(state, call->info.flush_resource, f);
658 break;
659 case CALL_CLEAR:
660 dd_dump_clear(state, &call->info.clear, f);
661 break;
662 case CALL_CLEAR_BUFFER:
663 dd_dump_clear_buffer(state, &call->info.clear_buffer, f);
664 break;
665 case CALL_CLEAR_TEXTURE:
666 dd_dump_clear_texture(state, f);
667 break;
668 case CALL_CLEAR_RENDER_TARGET:
669 dd_dump_clear_render_target(state, f);
670 break;
671 case CALL_CLEAR_DEPTH_STENCIL:
672 dd_dump_clear_depth_stencil(state, f);
673 break;
674 case CALL_GENERATE_MIPMAP:
675 dd_dump_generate_mipmap(state, f);
676 break;
677 case CALL_GET_QUERY_RESULT_RESOURCE:
678 dd_dump_get_query_result_resource(&call->info.get_query_result_resource, f);
679 break;
680 case CALL_TRANSFER_MAP:
681 dd_dump_transfer_map(&call->info.transfer_map, f);
682 break;
683 case CALL_TRANSFER_FLUSH_REGION:
684 dd_dump_transfer_flush_region(&call->info.transfer_flush_region, f);
685 break;
686 case CALL_TRANSFER_UNMAP:
687 dd_dump_transfer_unmap(&call->info.transfer_unmap, f);
688 break;
689 case CALL_BUFFER_SUBDATA:
690 dd_dump_buffer_subdata(&call->info.buffer_subdata, f);
691 break;
692 case CALL_TEXTURE_SUBDATA:
693 dd_dump_texture_subdata(&call->info.texture_subdata, f);
694 break;
695 }
696 }
697
698 static void
699 dd_kill_process(void)
700 {
701 #if DETECT_OS_POSIX
702 sync();
703 #endif
704 fprintf(stderr, "dd: Aborting the process...\n");
705 fflush(stdout);
706 fflush(stderr);
707 exit(1);
708 }
709
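/* Drop the resource and stream-output-target references held by a copied
 * call.
 */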
710 static void
711 dd_unreference_copy_of_call(struct dd_call *dst)
712 {
713 switch (dst->type) {
714 case CALL_FLUSH:
715 break;
716 case CALL_DRAW_VBO:
717 pipe_so_target_reference(&dst->info.draw_vbo.indirect.count_from_stream_output, NULL);
718 pipe_resource_reference(&dst->info.draw_vbo.indirect.buffer, NULL);
719 pipe_resource_reference(&dst->info.draw_vbo.indirect.indirect_draw_count, NULL);
720 if (dst->info.draw_vbo.info.index_size &&
721 !dst->info.draw_vbo.info.has_user_indices)
722 pipe_resource_reference(&dst->info.draw_vbo.info.index.resource, NULL);
723 else
724 dst->info.draw_vbo.info.index.user = NULL;
725 break;
726 case CALL_LAUNCH_GRID:
727 pipe_resource_reference(&dst->info.launch_grid.indirect, NULL);
728 break;
729 case CALL_RESOURCE_COPY_REGION:
730 pipe_resource_reference(&dst->info.resource_copy_region.dst, NULL);
731 pipe_resource_reference(&dst->info.resource_copy_region.src, NULL);
732 break;
733 case CALL_BLIT:
734 pipe_resource_reference(&dst->info.blit.dst.resource, NULL);
735 pipe_resource_reference(&dst->info.blit.src.resource, NULL);
736 break;
737 case CALL_FLUSH_RESOURCE:
738 pipe_resource_reference(&dst->info.flush_resource, NULL);
739 break;
740 case CALL_CLEAR:
741 break;
742 case CALL_CLEAR_BUFFER:
743 pipe_resource_reference(&dst->info.clear_buffer.res, NULL);
744 break;
745 case CALL_CLEAR_TEXTURE:
746 break;
747 case CALL_CLEAR_RENDER_TARGET:
748 break;
749 case CALL_CLEAR_DEPTH_STENCIL:
750 break;
751 case CALL_GENERATE_MIPMAP:
752 pipe_resource_reference(&dst->info.generate_mipmap.res, NULL);
753 break;
754 case CALL_GET_QUERY_RESULT_RESOURCE:
755 pipe_resource_reference(&dst->info.get_query_result_resource.resource, NULL);
756 break;
757 case CALL_TRANSFER_MAP:
758 pipe_resource_reference(&dst->info.transfer_map.transfer.resource, NULL);
759 break;
760 case CALL_TRANSFER_FLUSH_REGION:
761 pipe_resource_reference(&dst->info.transfer_flush_region.transfer.resource, NULL);
762 break;
763 case CALL_TRANSFER_UNMAP:
764 pipe_resource_reference(&dst->info.transfer_unmap.transfer.resource, NULL);
765 break;
766 case CALL_BUFFER_SUBDATA:
767 pipe_resource_reference(&dst->info.buffer_subdata.resource, NULL);
768 break;
769 case CALL_TEXTURE_SUBDATA:
770 pipe_resource_reference(&dst->info.texture_subdata.resource, NULL);
771 break;
772 }
773 }
774
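/* Point the copy's shader and CSO pointers at the record's own storage, so
 * dd_copy_draw_state can fill it in without referencing live context objects.
 */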
775 static void
776 dd_init_copy_of_draw_state(struct dd_draw_state_copy *state)
777 {
778 unsigned i,j;
779
780 /* Just clear pointers to gallium objects. Don't clear the whole structure,
781 * because it would kill performance with its size of 130 KB.
782 */
783 memset(state->base.vertex_buffers, 0,
784 sizeof(state->base.vertex_buffers));
785 memset(state->base.so_targets, 0,
786 sizeof(state->base.so_targets));
787 memset(state->base.constant_buffers, 0,
788 sizeof(state->base.constant_buffers));
789 memset(state->base.sampler_views, 0,
790 sizeof(state->base.sampler_views));
791 memset(state->base.shader_images, 0,
792 sizeof(state->base.shader_images));
793 memset(state->base.shader_buffers, 0,
794 sizeof(state->base.shader_buffers));
795 memset(&state->base.framebuffer_state, 0,
796 sizeof(state->base.framebuffer_state));
797
798 memset(state->shaders, 0, sizeof(state->shaders));
799
800 state->base.render_cond.query = &state->render_cond;
801
802 for (i = 0; i < PIPE_SHADER_TYPES; i++) {
803 state->base.shaders[i] = &state->shaders[i];
804 for (j = 0; j < PIPE_MAX_SAMPLERS; j++)
805 state->base.sampler_states[i][j] = &state->sampler_states[i][j];
806 }
807
808 state->base.velems = &state->velems;
809 state->base.rs = &state->rs;
810 state->base.dsa = &state->dsa;
811 state->base.blend = &state->blend;
812 }
813
814 static void
815 dd_unreference_copy_of_draw_state(struct dd_draw_state_copy *state)
816 {
817 struct dd_draw_state *dst = &state->base;
818 unsigned i,j;
819
820 for (i = 0; i < ARRAY_SIZE(dst->vertex_buffers); i++)
821 pipe_vertex_buffer_unreference(&dst->vertex_buffers[i]);
822 for (i = 0; i < ARRAY_SIZE(dst->so_targets); i++)
823 pipe_so_target_reference(&dst->so_targets[i], NULL);
824
825 for (i = 0; i < PIPE_SHADER_TYPES; i++) {
826 if (dst->shaders[i])
827 tgsi_free_tokens(dst->shaders[i]->state.shader.tokens);
828
829 for (j = 0; j < PIPE_MAX_CONSTANT_BUFFERS; j++)
830 pipe_resource_reference(&dst->constant_buffers[i][j].buffer, NULL);
831 for (j = 0; j < PIPE_MAX_SAMPLERS; j++)
832 pipe_sampler_view_reference(&dst->sampler_views[i][j], NULL);
833 for (j = 0; j < PIPE_MAX_SHADER_IMAGES; j++)
834 pipe_resource_reference(&dst->shader_images[i][j].resource, NULL);
835 for (j = 0; j < PIPE_MAX_SHADER_BUFFERS; j++)
836 pipe_resource_reference(&dst->shader_buffers[i][j].buffer, NULL);
837 }
838
839 util_unreference_framebuffer_state(&dst->framebuffer_state);
840 }
841
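/* Snapshot the current draw state into a record: add references to all
 * resources and duplicate shader tokens so the copy remains valid even after
 * the application destroys the originals.
 */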
842 static void
843 dd_copy_draw_state(struct dd_draw_state *dst, struct dd_draw_state *src)
844 {
845 unsigned i,j;
846
847 if (src->render_cond.query) {
848 *dst->render_cond.query = *src->render_cond.query;
849 dst->render_cond.condition = src->render_cond.condition;
850 dst->render_cond.mode = src->render_cond.mode;
851 } else {
852 dst->render_cond.query = NULL;
853 }
854
855 for (i = 0; i < ARRAY_SIZE(src->vertex_buffers); i++) {
856 pipe_vertex_buffer_reference(&dst->vertex_buffers[i],
857 &src->vertex_buffers[i]);
858 }
859
860 dst->num_so_targets = src->num_so_targets;
861 for (i = 0; i < src->num_so_targets; i++)
862 pipe_so_target_reference(&dst->so_targets[i], src->so_targets[i]);
863 memcpy(dst->so_offsets, src->so_offsets, sizeof(src->so_offsets));
864
865 for (i = 0; i < PIPE_SHADER_TYPES; i++) {
866 if (!src->shaders[i]) {
867 dst->shaders[i] = NULL;
868 continue;
869 }
870
871 if (src->shaders[i]) {
872 dst->shaders[i]->state.shader = src->shaders[i]->state.shader;
873 if (src->shaders[i]->state.shader.tokens) {
874 dst->shaders[i]->state.shader.tokens =
875 tgsi_dup_tokens(src->shaders[i]->state.shader.tokens);
876 } else {
877 dst->shaders[i]->state.shader.ir.nir = NULL;
878 }
879 } else {
880 dst->shaders[i] = NULL;
881 }
882
883 for (j = 0; j < PIPE_MAX_CONSTANT_BUFFERS; j++) {
884 pipe_resource_reference(&dst->constant_buffers[i][j].buffer,
885 src->constant_buffers[i][j].buffer);
886 memcpy(&dst->constant_buffers[i][j], &src->constant_buffers[i][j],
887 sizeof(src->constant_buffers[i][j]));
888 }
889
890 for (j = 0; j < PIPE_MAX_SAMPLERS; j++) {
891 pipe_sampler_view_reference(&dst->sampler_views[i][j],
892 src->sampler_views[i][j]);
893 if (src->sampler_states[i][j])
894 dst->sampler_states[i][j]->state.sampler =
895 src->sampler_states[i][j]->state.sampler;
896 else
897 dst->sampler_states[i][j] = NULL;
898 }
899
900 for (j = 0; j < PIPE_MAX_SHADER_IMAGES; j++) {
901 pipe_resource_reference(&dst->shader_images[i][j].resource,
902 src->shader_images[i][j].resource);
903 memcpy(&dst->shader_images[i][j], &src->shader_images[i][j],
904 sizeof(src->shader_images[i][j]));
905 }
906
907 for (j = 0; j < PIPE_MAX_SHADER_BUFFERS; j++) {
908 pipe_resource_reference(&dst->shader_buffers[i][j].buffer,
909 src->shader_buffers[i][j].buffer);
910 memcpy(&dst->shader_buffers[i][j], &src->shader_buffers[i][j],
911 sizeof(src->shader_buffers[i][j]));
912 }
913 }
914
915 if (src->velems)
916 dst->velems->state.velems = src->velems->state.velems;
917 else
918 dst->velems = NULL;
919
920 if (src->rs)
921 dst->rs->state.rs = src->rs->state.rs;
922 else
923 dst->rs = NULL;
924
925 if (src->dsa)
926 dst->dsa->state.dsa = src->dsa->state.dsa;
927 else
928 dst->dsa = NULL;
929
930 if (src->blend)
931 dst->blend->state.blend = src->blend->state.blend;
932 else
933 dst->blend = NULL;
934
935 dst->blend_color = src->blend_color;
936 dst->stencil_ref = src->stencil_ref;
937 dst->sample_mask = src->sample_mask;
938 dst->min_samples = src->min_samples;
939 dst->clip_state = src->clip_state;
940 util_copy_framebuffer_state(&dst->framebuffer_state, &src->framebuffer_state);
941 memcpy(dst->scissors, src->scissors, sizeof(src->scissors));
942 memcpy(dst->viewports, src->viewports, sizeof(src->viewports));
943 memcpy(dst->tess_default_levels, src->tess_default_levels,
944 sizeof(src->tess_default_levels));
945 dst->apitrace_call_number = src->apitrace_call_number;
946 }
947
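/* Release everything a record owns: its log page, the copied call and draw
 * state, the fences, and finally the record itself.
 */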
948 static void
949 dd_free_record(struct pipe_screen *screen, struct dd_draw_record *record)
950 {
951 u_log_page_destroy(record->log_page);
952 dd_unreference_copy_of_call(&record->call);
953 dd_unreference_copy_of_draw_state(&record->draw_state);
954 screen->fence_reference(screen, &record->prev_bottom_of_pipe, NULL);
955 screen->fence_reference(screen, &record->top_of_pipe, NULL);
956 screen->fence_reference(screen, &record->bottom_of_pipe, NULL);
957 util_queue_fence_destroy(&record->driver_finished);
958 FREE(record);
959 }
960
961 static void
962 dd_write_record(FILE *f, struct dd_draw_record *record)
963 {
964 PRINT_NAMED(ptr, "pipe", record->dctx->pipe);
965 PRINT_NAMED(ns, "time before (API call)", record->time_before);
966 PRINT_NAMED(ns, "time after (driver done)", record->time_after);
967 fprintf(f, "\n");
968
969 dd_dump_call(f, &record->draw_state.base, &record->call);
970
971 if (record->log_page) {
972 fprintf(f,"\n\n**************************************************"
973 "***************************\n");
974 fprintf(f, "Context Log:\n\n");
975 u_log_page_print(record->log_page, f);
976 }
977 }
978
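/* Write a dump file for a record, unless the dump mode limits dumping to
 * hangs only or to a different apitrace call.
 */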
979 static void
980 dd_maybe_dump_record(struct dd_screen *dscreen, struct dd_draw_record *record)
981 {
982 if (dscreen->dump_mode == DD_DUMP_ONLY_HANGS ||
983 (dscreen->dump_mode == DD_DUMP_APITRACE_CALL &&
984 dscreen->apitrace_dump_call != record->draw_state.base.apitrace_call_number))
985 return;
986
987 char name[512];
988 dd_get_debug_filename_and_mkdir(name, sizeof(name), dscreen->verbose);
989 FILE *f = fopen(name, "w");
990 if (!f) {
991 fprintf(stderr, "dd: failed to open %s\n", name);
992 return;
993 }
994
995 dd_write_header(f, dscreen->screen, record->draw_state.base.apitrace_call_number);
996 dd_write_record(f, record);
997
998 fclose(f);
999 }
1000
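/* Return a printable fence status: "YES" if the fence has signalled, "NO " if
 * it has not, and "---" if there is no fence. *not_reached is set when an
 * existing fence has not signalled.
 */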
1001 static const char *
1002 dd_fence_state(struct pipe_screen *screen, struct pipe_fence_handle *fence,
1003 bool *not_reached)
1004 {
1005 if (!fence)
1006 return "---";
1007
1008 bool ok = screen->fence_finish(screen, NULL, fence, 0);
1009
1010 if (not_reached && !ok)
1011 *not_reached = true;
1012
1013 return ok ? "YES" : "NO ";
1014 }
1015
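/* Called when waiting for a draw timed out: print a fence-state table for the
 * pending draws, write a dump file for each of them up to the first draw
 * whose top-of-pipe fence was never reached, then dump driver state and
 * dmesg and kill the process.
 */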
1016 static void
1017 dd_report_hang(struct dd_context *dctx)
1018 {
1019 struct dd_screen *dscreen = dd_screen(dctx->base.screen);
1020 struct pipe_screen *screen = dscreen->screen;
1021 bool encountered_hang = false;
1022 bool stop_output = false;
1023 unsigned num_later = 0;
1024
1025 fprintf(stderr, "GPU hang detected, collecting information...\n\n");
1026
1027 fprintf(stderr, "Draw # driver prev BOP TOP BOP dump file\n"
1028 "-------------------------------------------------------------\n");
1029
1030 list_for_each_entry(struct dd_draw_record, record, &dctx->records, list) {
1031 if (!encountered_hang &&
1032 screen->fence_finish(screen, NULL, record->bottom_of_pipe, 0)) {
1033 dd_maybe_dump_record(dscreen, record);
1034 continue;
1035 }
1036
1037 if (stop_output) {
1038 dd_maybe_dump_record(dscreen, record);
1039 num_later++;
1040 continue;
1041 }
1042
1043 bool driver = util_queue_fence_is_signalled(&record->driver_finished);
1044 bool top_not_reached = false;
1045 const char *prev_bop = dd_fence_state(screen, record->prev_bottom_of_pipe, NULL);
1046 const char *top = dd_fence_state(screen, record->top_of_pipe, &top_not_reached);
1047 const char *bop = dd_fence_state(screen, record->bottom_of_pipe, NULL);
1048
1049 fprintf(stderr, "%-9u %s %s %s %s ",
1050 record->draw_call, driver ? "YES" : "NO ", prev_bop, top, bop);
1051
1052 char name[512];
1053 dd_get_debug_filename_and_mkdir(name, sizeof(name), false);
1054
1055 FILE *f = fopen(name, "w");
1056 if (!f) {
1057 fprintf(stderr, "fopen failed\n");
1058 } else {
1059 fprintf(stderr, "%s\n", name);
1060
1061 dd_write_header(f, dscreen->screen, record->draw_state.base.apitrace_call_number);
1062 dd_write_record(f, record);
1063
1064 fclose(f);
1065 }
1066
1067 if (top_not_reached)
1068 stop_output = true;
1069 encountered_hang = true;
1070 }
1071
1072 if (num_later)
1073 fprintf(stderr, "... and %u additional draws.\n", num_later);
1074
1075 char name[512];
1076 dd_get_debug_filename_and_mkdir(name, sizeof(name), false);
1077 FILE *f = fopen(name, "w");
1078 if (!f) {
1079 fprintf(stderr, "fopen failed\n");
1080 } else {
1081 dd_write_header(f, dscreen->screen, 0);
1082 dd_dump_driver_state(dctx, f, PIPE_DUMP_DEVICE_STATUS_REGISTERS);
1083 dd_dump_dmesg(f);
1084 fclose(f);
1085 }
1086
1087 fprintf(stderr, "\nDone.\n");
1088 dd_kill_process();
1089 }
1090
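/* Background thread: takes ownership of the queued records, waits for the
 * youngest draw to finish (with a timeout when hang detection is enabled),
 * reports a hang on timeout, and otherwise dumps and frees the records.
 */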
1091 int
1092 dd_thread_main(void *input)
1093 {
1094 struct dd_context *dctx = (struct dd_context *)input;
1095 struct dd_screen *dscreen = dd_screen(dctx->base.screen);
1096 struct pipe_screen *screen = dscreen->screen;
1097
1098 const char *process_name = util_get_process_name();
1099 if (process_name) {
1100 char threadname[16];
1101 snprintf(threadname, sizeof(threadname), "%.*s:ddbg",
1102 (int)MIN2(strlen(process_name), sizeof(threadname) - 6),
1103 process_name);
1104 u_thread_setname(threadname);
1105 }
1106
1107 mtx_lock(&dctx->mutex);
1108
1109 for (;;) {
1110 struct list_head records;
1111 list_replace(&dctx->records, &records);
1112 list_inithead(&dctx->records);
1113 dctx->num_records = 0;
1114
1115 if (dctx->api_stalled)
1116 cnd_signal(&dctx->cond);
1117
1118 if (list_is_empty(&records)) {
1119 if (dctx->kill_thread)
1120 break;
1121
1122 cnd_wait(&dctx->cond, &dctx->mutex);
1123 continue;
1124 }
1125
1126 mtx_unlock(&dctx->mutex);
1127
1128 /* Wait for the youngest draw. This means hangs can take a bit longer
1129 * to detect, but it's more efficient this way. */
1130 struct dd_draw_record *youngest =
1131 list_last_entry(&records, struct dd_draw_record, list);
1132
1133 if (dscreen->timeout_ms > 0) {
1134 uint64_t abs_timeout = os_time_get_absolute_timeout(
1135 (uint64_t)dscreen->timeout_ms * 1000*1000);
1136
1137 if (!util_queue_fence_wait_timeout(&youngest->driver_finished, abs_timeout) ||
1138 !screen->fence_finish(screen, NULL, youngest->bottom_of_pipe,
1139 (uint64_t)dscreen->timeout_ms * 1000*1000)) {
1140 mtx_lock(&dctx->mutex);
1141 list_splice(&records, &dctx->records);
1142 dd_report_hang(dctx);
1143 /* we won't actually get here */
1144 mtx_unlock(&dctx->mutex);
1145 }
1146 } else {
1147 util_queue_fence_wait(&youngest->driver_finished);
1148 }
1149
1150 list_for_each_entry_safe(struct dd_draw_record, record, &records, list) {
1151 dd_maybe_dump_record(dscreen, record);
1152 list_del(&record->list);
1153 dd_free_record(screen, record);
1154 }
1155
1156 mtx_lock(&dctx->mutex);
1157 }
1158 mtx_unlock(&dctx->mutex);
1159 return 0;
1160 }
1161
1162 static struct dd_draw_record *
1163 dd_create_record(struct dd_context *dctx)
1164 {
1165 struct dd_draw_record *record;
1166
1167 record = MALLOC_STRUCT(dd_draw_record);
1168 if (!record)
1169 return NULL;
1170
1171 record->dctx = dctx;
1172 record->draw_call = dctx->num_draw_calls;
1173
1174 record->prev_bottom_of_pipe = NULL;
1175 record->top_of_pipe = NULL;
1176 record->bottom_of_pipe = NULL;
1177 record->log_page = NULL;
1178 util_queue_fence_init(&record->driver_finished);
1179 util_queue_fence_reset(&record->driver_finished);
1180
1181 dd_init_copy_of_draw_state(&record->draw_state);
1182 dd_copy_draw_state(&record->draw_state.base, &dctx->draw_state);
1183
1184 return record;
1185 }
1186
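/* Queue a record for the worker thread: stall the API thread briefly if too
 * many records are pending and wake the thread if the list was empty.
 */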
1187 static void
1188 dd_add_record(struct dd_context *dctx, struct dd_draw_record *record)
1189 {
1190 mtx_lock(&dctx->mutex);
1191 if (unlikely(dctx->num_records > 10000)) {
1192 dctx->api_stalled = true;
1193 /* Since this is only a heuristic to prevent the API thread from getting
1194 * too far ahead, we don't need a loop here. */
1195 cnd_wait(&dctx->cond, &dctx->mutex);
1196 dctx->api_stalled = false;
1197 }
1198
1199 if (list_is_empty(&dctx->records))
1200 cnd_signal(&dctx->cond);
1201
1202 list_addtail(&record->list, &dctx->records);
1203 dctx->num_records++;
1204 mtx_unlock(&dctx->mutex);
1205 }
1206
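/* Record the start time and, when hang detection is enabled, create the
 * previous-bottom-of-pipe and top-of-pipe fences that dd_report_hang uses to
 * tell how far this call progressed, then queue the record for the worker
 * thread.
 */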
1207 static void
1208 dd_before_draw(struct dd_context *dctx, struct dd_draw_record *record)
1209 {
1210 struct dd_screen *dscreen = dd_screen(dctx->base.screen);
1211 struct pipe_context *pipe = dctx->pipe;
1212 struct pipe_screen *screen = dscreen->screen;
1213
1214 record->time_before = os_time_get_nano();
1215
1216 if (dscreen->timeout_ms > 0) {
1217 if (dscreen->flush_always && dctx->num_draw_calls >= dscreen->skip_count) {
1218 pipe->flush(pipe, &record->prev_bottom_of_pipe, 0);
1219 screen->fence_reference(screen, &record->top_of_pipe, record->prev_bottom_of_pipe);
1220 } else {
1221 pipe->flush(pipe, &record->prev_bottom_of_pipe,
1222 PIPE_FLUSH_DEFERRED | PIPE_FLUSH_BOTTOM_OF_PIPE);
1223 pipe->flush(pipe, &record->top_of_pipe,
1224 PIPE_FLUSH_DEFERRED | PIPE_FLUSH_TOP_OF_PIPE);
1225 }
1226 } else if (dscreen->flush_always && dctx->num_draw_calls >= dscreen->skip_count) {
1227 pipe->flush(pipe, NULL, 0);
1228 }
1229
1230 dd_add_record(dctx, record);
1231 }
1232
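/* Finish bookkeeping for a record: capture the context log page and the
 * completion time and signal driver_finished. Invoked directly, or deferred
 * through pipe->callback when the driver provides one.
 */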
1233 static void
1234 dd_after_draw_async(void *data)
1235 {
1236 struct dd_draw_record *record = (struct dd_draw_record *)data;
1237 struct dd_context *dctx = record->dctx;
1238 struct dd_screen *dscreen = dd_screen(dctx->base.screen);
1239
1240 record->log_page = u_log_new_page(&dctx->log);
1241 record->time_after = os_time_get_nano();
1242
1243 util_queue_fence_signal(&record->driver_finished);
1244
1245 if (dscreen->dump_mode == DD_DUMP_APITRACE_CALL &&
1246 dscreen->apitrace_dump_call > dctx->draw_state.apitrace_call_number) {
1247 dd_thread_join(dctx);
1248 /* No need to continue. */
1249 exit(0);
1250 }
1251 }
1252
1253 static void
1254 dd_after_draw(struct dd_context *dctx, struct dd_draw_record *record)
1255 {
1256 struct dd_screen *dscreen = dd_screen(dctx->base.screen);
1257 struct pipe_context *pipe = dctx->pipe;
1258
1259 if (dscreen->timeout_ms > 0) {
1260 unsigned flush_flags;
1261 if (dscreen->flush_always && dctx->num_draw_calls >= dscreen->skip_count)
1262 flush_flags = 0;
1263 else
1264 flush_flags = PIPE_FLUSH_DEFERRED | PIPE_FLUSH_BOTTOM_OF_PIPE;
1265 pipe->flush(pipe, &record->bottom_of_pipe, flush_flags);
1266 }
1267
1268 if (pipe->callback) {
1269 pipe->callback(pipe, dd_after_draw_async, record, true);
1270 } else {
1271 dd_after_draw_async(record);
1272 }
1273
1274 ++dctx->num_draw_calls;
1275 if (dscreen->skip_count && dctx->num_draw_calls % 10000 == 0)
1276 fprintf(stderr, "Gallium debugger reached %u draw calls.\n",
1277 dctx->num_draw_calls);
1278 }
1279
1280 static void
1281 dd_context_flush(struct pipe_context *_pipe,
1282 struct pipe_fence_handle **fence, unsigned flags)
1283 {
1284 struct dd_context *dctx = dd_context(_pipe);
1285 struct pipe_context *pipe = dctx->pipe;
1286 struct pipe_screen *screen = pipe->screen;
1287 struct dd_draw_record *record = dd_create_record(dctx);
1288
1289 record->call.type = CALL_FLUSH;
1290 record->call.info.flush.flags = flags;
1291
1292 record->time_before = os_time_get_nano();
1293
1294 dd_add_record(dctx, record);
1295
1296 pipe->flush(pipe, &record->bottom_of_pipe, flags);
1297 if (fence)
1298 screen->fence_reference(screen, fence, record->bottom_of_pipe);
1299
1300 if (pipe->callback) {
1301 pipe->callback(pipe, dd_after_draw_async, record, true);
1302 } else {
1303 dd_after_draw_async(record);
1304 }
1305 }
1306
1307 static void
1308 dd_context_draw_vbo(struct pipe_context *_pipe,
1309 const struct pipe_draw_info *info,
1310 unsigned drawid_offset,
1311 const struct pipe_draw_indirect_info *indirect,
1312 const struct pipe_draw_start_count_bias *draws,
1313 unsigned num_draws)
1314 {
1315 struct dd_context *dctx = dd_context(_pipe);
1316 struct pipe_context *pipe = dctx->pipe;
1317 struct dd_draw_record *record = dd_create_record(dctx);
1318
1319 record->call.type = CALL_DRAW_VBO;
1320 record->call.info.draw_vbo.info = *info;
1321 record->call.info.draw_vbo.drawid_offset = drawid_offset;
1322 record->call.info.draw_vbo.draw = draws[0];
1323 if (info->index_size && !info->has_user_indices) {
1324 record->call.info.draw_vbo.info.index.resource = NULL;
1325 pipe_resource_reference(&record->call.info.draw_vbo.info.index.resource,
1326 info->index.resource);
1327 }
1328
1329 if (indirect) {
1330 record->call.info.draw_vbo.indirect = *indirect;
1331 record->call.info.draw_vbo.indirect.buffer = NULL;
1332 pipe_resource_reference(&record->call.info.draw_vbo.indirect.buffer,
1333 indirect->buffer);
1334 record->call.info.draw_vbo.indirect.indirect_draw_count = NULL;
1335 pipe_resource_reference(&record->call.info.draw_vbo.indirect.indirect_draw_count,
1336 indirect->indirect_draw_count);
1337 record->call.info.draw_vbo.indirect.count_from_stream_output = NULL;
1338 pipe_so_target_reference(&record->call.info.draw_vbo.indirect.count_from_stream_output,
1339 indirect->count_from_stream_output);
1340 } else {
1341 memset(&record->call.info.draw_vbo.indirect, 0, sizeof(*indirect));
1342 }
1343
1344 dd_before_draw(dctx, record);
1345 pipe->draw_vbo(pipe, info, drawid_offset, indirect, draws, num_draws);
1346 dd_after_draw(dctx, record);
1347 }
1348
1349 static void
1350 dd_context_draw_vertex_state(struct pipe_context *_pipe,
1351 struct pipe_vertex_state *state,
1352 uint32_t partial_velem_mask,
1353 struct pipe_draw_vertex_state_info info,
1354 const struct pipe_draw_start_count_bias *draws,
1355 unsigned num_draws)
1356 {
1357 struct dd_context *dctx = dd_context(_pipe);
1358 struct pipe_context *pipe = dctx->pipe;
1359 struct dd_draw_record *record = dd_create_record(dctx);
1360
1361 record->call.type = CALL_DRAW_VBO;
1362 memset(&record->call.info.draw_vbo.info, 0,
1363 sizeof(record->call.info.draw_vbo.info));
1364 record->call.info.draw_vbo.info.mode = info.mode;
1365 record->call.info.draw_vbo.info.index_size = 4;
1366 record->call.info.draw_vbo.info.instance_count = 1;
1367 record->call.info.draw_vbo.drawid_offset = 0;
1368 record->call.info.draw_vbo.draw = draws[0];
1369 record->call.info.draw_vbo.info.index.resource = NULL;
1370 pipe_resource_reference(&record->call.info.draw_vbo.info.index.resource,
1371 state->input.indexbuf);
1372 memset(&record->call.info.draw_vbo.indirect, 0,
1373 sizeof(record->call.info.draw_vbo.indirect));
1374
1375 dd_before_draw(dctx, record);
1376 pipe->draw_vertex_state(pipe, state, partial_velem_mask, info, draws, num_draws);
1377 dd_after_draw(dctx, record);
1378 }
1379
1380 static void
1381 dd_context_launch_grid(struct pipe_context *_pipe,
1382 const struct pipe_grid_info *info)
1383 {
1384 struct dd_context *dctx = dd_context(_pipe);
1385 struct pipe_context *pipe = dctx->pipe;
1386 struct dd_draw_record *record = dd_create_record(dctx);
1387
1388 record->call.type = CALL_LAUNCH_GRID;
1389 record->call.info.launch_grid = *info;
1390 record->call.info.launch_grid.indirect = NULL;
1391 pipe_resource_reference(&record->call.info.launch_grid.indirect, info->indirect);
1392
1393 dd_before_draw(dctx, record);
1394 pipe->launch_grid(pipe, info);
1395 dd_after_draw(dctx, record);
1396 }
1397
1398 static void
1399 dd_context_resource_copy_region(struct pipe_context *_pipe,
1400 struct pipe_resource *dst, unsigned dst_level,
1401 unsigned dstx, unsigned dsty, unsigned dstz,
1402 struct pipe_resource *src, unsigned src_level,
1403 const struct pipe_box *src_box)
1404 {
1405 struct dd_context *dctx = dd_context(_pipe);
1406 struct pipe_context *pipe = dctx->pipe;
1407 struct dd_draw_record *record = dd_create_record(dctx);
1408
1409 record->call.type = CALL_RESOURCE_COPY_REGION;
1410 record->call.info.resource_copy_region.dst = NULL;
1411 pipe_resource_reference(&record->call.info.resource_copy_region.dst, dst);
1412 record->call.info.resource_copy_region.dst_level = dst_level;
1413 record->call.info.resource_copy_region.dstx = dstx;
1414 record->call.info.resource_copy_region.dsty = dsty;
1415 record->call.info.resource_copy_region.dstz = dstz;
1416 record->call.info.resource_copy_region.src = NULL;
1417 pipe_resource_reference(&record->call.info.resource_copy_region.src, src);
1418 record->call.info.resource_copy_region.src_level = src_level;
1419 record->call.info.resource_copy_region.src_box = *src_box;
1420
1421 dd_before_draw(dctx, record);
1422 pipe->resource_copy_region(pipe,
1423 dst, dst_level, dstx, dsty, dstz,
1424 src, src_level, src_box);
1425 dd_after_draw(dctx, record);
1426 }
1427
1428 static void
1429 dd_context_blit(struct pipe_context *_pipe, const struct pipe_blit_info *info)
1430 {
1431 struct dd_context *dctx = dd_context(_pipe);
1432 struct pipe_context *pipe = dctx->pipe;
1433 struct dd_draw_record *record = dd_create_record(dctx);
1434
1435 record->call.type = CALL_BLIT;
1436 record->call.info.blit = *info;
1437 record->call.info.blit.dst.resource = NULL;
1438 pipe_resource_reference(&record->call.info.blit.dst.resource, info->dst.resource);
1439 record->call.info.blit.src.resource = NULL;
1440 pipe_resource_reference(&record->call.info.blit.src.resource, info->src.resource);
1441
1442 dd_before_draw(dctx, record);
1443 pipe->blit(pipe, info);
1444 dd_after_draw(dctx, record);
1445 }
1446
1447 static bool
1448 dd_context_generate_mipmap(struct pipe_context *_pipe,
1449 struct pipe_resource *res,
1450 enum pipe_format format,
1451 unsigned base_level,
1452 unsigned last_level,
1453 unsigned first_layer,
1454 unsigned last_layer)
1455 {
1456 struct dd_context *dctx = dd_context(_pipe);
1457 struct pipe_context *pipe = dctx->pipe;
1458 struct dd_draw_record *record = dd_create_record(dctx);
1459 bool result;
1460
1461 record->call.type = CALL_GENERATE_MIPMAP;
1462 record->call.info.generate_mipmap.res = NULL;
1463 pipe_resource_reference(&record->call.info.generate_mipmap.res, res);
1464 record->call.info.generate_mipmap.format = format;
1465 record->call.info.generate_mipmap.base_level = base_level;
1466 record->call.info.generate_mipmap.last_level = last_level;
1467 record->call.info.generate_mipmap.first_layer = first_layer;
1468 record->call.info.generate_mipmap.last_layer = last_layer;
1469
1470 dd_before_draw(dctx, record);
1471 result = pipe->generate_mipmap(pipe, res, format, base_level, last_level,
1472 first_layer, last_layer);
1473 dd_after_draw(dctx, record);
1474 return result;
1475 }
1476
1477 static void
1478 dd_context_get_query_result_resource(struct pipe_context *_pipe,
1479 struct pipe_query *query,
1480 enum pipe_query_flags flags,
1481 enum pipe_query_value_type result_type,
1482 int index,
1483 struct pipe_resource *resource,
1484 unsigned offset)
1485 {
1486 struct dd_context *dctx = dd_context(_pipe);
1487 struct dd_query *dquery = dd_query(query);
1488 struct pipe_context *pipe = dctx->pipe;
1489 struct dd_draw_record *record = dd_create_record(dctx);
1490
1491 record->call.type = CALL_GET_QUERY_RESULT_RESOURCE;
1492 record->call.info.get_query_result_resource.query = query;
1493 record->call.info.get_query_result_resource.flags = flags;
1494 record->call.info.get_query_result_resource.result_type = result_type;
1495 record->call.info.get_query_result_resource.index = index;
1496 record->call.info.get_query_result_resource.resource = NULL;
1497 pipe_resource_reference(&record->call.info.get_query_result_resource.resource,
1498 resource);
1499 record->call.info.get_query_result_resource.offset = offset;
1500
1501 /* The query may be deleted by the time we need to print it. */
1502 record->call.info.get_query_result_resource.query_type = dquery->type;
1503
1504 dd_before_draw(dctx, record);
1505 pipe->get_query_result_resource(pipe, dquery->query, flags,
1506 result_type, index, resource, offset);
1507 dd_after_draw(dctx, record);
1508 }
1509
1510 static void
1511 dd_context_flush_resource(struct pipe_context *_pipe,
1512 struct pipe_resource *resource)
1513 {
1514 struct dd_context *dctx = dd_context(_pipe);
1515 struct pipe_context *pipe = dctx->pipe;
1516 struct dd_draw_record *record = dd_create_record(dctx);
1517
1518 record->call.type = CALL_FLUSH_RESOURCE;
1519 record->call.info.flush_resource = NULL;
1520 pipe_resource_reference(&record->call.info.flush_resource, resource);
1521
1522 dd_before_draw(dctx, record);
1523 pipe->flush_resource(pipe, resource);
1524 dd_after_draw(dctx, record);
1525 }
1526
1527 static void
1528 dd_context_clear(struct pipe_context *_pipe, unsigned buffers, const struct pipe_scissor_state *scissor_state,
1529 const union pipe_color_union *color, double depth,
1530 unsigned stencil)
1531 {
1532 struct dd_context *dctx = dd_context(_pipe);
1533 struct pipe_context *pipe = dctx->pipe;
1534 struct dd_draw_record *record = dd_create_record(dctx);
1535
1536 record->call.type = CALL_CLEAR;
1537 record->call.info.clear.buffers = buffers;
1538 if (scissor_state)
1539 record->call.info.clear.scissor_state = *scissor_state;
1540 record->call.info.clear.color = *color;
1541 record->call.info.clear.depth = depth;
1542 record->call.info.clear.stencil = stencil;
1543
1544 dd_before_draw(dctx, record);
1545 pipe->clear(pipe, buffers, scissor_state, color, depth, stencil);
1546 dd_after_draw(dctx, record);
1547 }
1548
1549 static void
1550 dd_context_clear_render_target(struct pipe_context *_pipe,
1551 struct pipe_surface *dst,
1552 const union pipe_color_union *color,
1553 unsigned dstx, unsigned dsty,
1554 unsigned width, unsigned height,
1555 bool render_condition_enabled)
1556 {
1557 struct dd_context *dctx = dd_context(_pipe);
1558 struct pipe_context *pipe = dctx->pipe;
1559 struct dd_draw_record *record = dd_create_record(dctx);
1560
1561 record->call.type = CALL_CLEAR_RENDER_TARGET;
1562
1563 dd_before_draw(dctx, record);
1564 pipe->clear_render_target(pipe, dst, color, dstx, dsty, width, height,
1565 render_condition_enabled);
1566 dd_after_draw(dctx, record);
1567 }
1568
1569 static void
1570 dd_context_clear_depth_stencil(struct pipe_context *_pipe,
1571 struct pipe_surface *dst, unsigned clear_flags,
1572 double depth, unsigned stencil, unsigned dstx,
1573 unsigned dsty, unsigned width, unsigned height,
1574 bool render_condition_enabled)
1575 {
1576 struct dd_context *dctx = dd_context(_pipe);
1577 struct pipe_context *pipe = dctx->pipe;
1578 struct dd_draw_record *record = dd_create_record(dctx);
1579
1580 record->call.type = CALL_CLEAR_DEPTH_STENCIL;
1581
1582 dd_before_draw(dctx, record);
1583 pipe->clear_depth_stencil(pipe, dst, clear_flags, depth, stencil,
1584 dstx, dsty, width, height,
1585 render_condition_enabled);
1586 dd_after_draw(dctx, record);
1587 }
1588
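/* clear_buffer pins the destination resource and stores the remaining
 * parameters, including the caller's clear_value pointer (the value itself
 * is not copied).  clear_texture below, like the surface clears above,
 * records only the call type.
 */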
1589 static void
1590 dd_context_clear_buffer(struct pipe_context *_pipe, struct pipe_resource *res,
1591 unsigned offset, unsigned size,
1592 const void *clear_value, int clear_value_size)
1593 {
1594 struct dd_context *dctx = dd_context(_pipe);
1595 struct pipe_context *pipe = dctx->pipe;
1596 struct dd_draw_record *record = dd_create_record(dctx);
1597
1598 record->call.type = CALL_CLEAR_BUFFER;
1599 record->call.info.clear_buffer.res = NULL;
1600 pipe_resource_reference(&record->call.info.clear_buffer.res, res);
1601 record->call.info.clear_buffer.offset = offset;
1602 record->call.info.clear_buffer.size = size;
1603 record->call.info.clear_buffer.clear_value = clear_value;
1604 record->call.info.clear_buffer.clear_value_size = clear_value_size;
1605
1606 dd_before_draw(dctx, record);
1607 pipe->clear_buffer(pipe, res, offset, size, clear_value, clear_value_size);
1608 dd_after_draw(dctx, record);
1609 }
1610
1611 static void
1612 dd_context_clear_texture(struct pipe_context *_pipe,
1613 struct pipe_resource *res,
1614 unsigned level,
1615 const struct pipe_box *box,
1616 const void *data)
1617 {
1618 struct dd_context *dctx = dd_context(_pipe);
1619 struct pipe_context *pipe = dctx->pipe;
1620 struct dd_draw_record *record = dd_create_record(dctx);
1621
1622 record->call.type = CALL_CLEAR_TEXTURE;
1623
1624 dd_before_draw(dctx, record);
1625 pipe->clear_texture(pipe, res, level, box, data);
1626 dd_after_draw(dctx, record);
1627 }
1628
1629 /********************************************************************
1630 * transfer
1631 */
1632
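/* Transfer calls are recorded only when the ddebug screen was configured to
 * log them (the dd_screen "transfers" flag, presumably driven by the
 * GALLIUM_DDEBUG options); otherwise they are forwarded straight to the
 * driver.  When recording, each map stores the returned pointer plus a copy
 * of the pipe_transfer with its resource pinned, so the information remains
 * usable after the transfer is unmapped.
 */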
1633 static void *
1634 dd_context_buffer_map(struct pipe_context *_pipe,
1635 struct pipe_resource *resource, unsigned level,
1636 unsigned usage, const struct pipe_box *box,
1637 struct pipe_transfer **transfer)
1638 {
1639 struct dd_context *dctx = dd_context(_pipe);
1640 struct pipe_context *pipe = dctx->pipe;
1641 struct dd_draw_record *record =
1642 dd_screen(dctx->base.screen)->transfers ? dd_create_record(dctx) : NULL;
1643
1644 if (record) {
1645 record->call.type = CALL_TRANSFER_MAP;
1646
1647 dd_before_draw(dctx, record);
1648 }
1649 void *ptr = pipe->buffer_map(pipe, resource, level, usage, box, transfer);
1650 if (record) {
1651 record->call.info.transfer_map.transfer_ptr = *transfer;
1652 record->call.info.transfer_map.ptr = ptr;
1653 if (*transfer) {
1654 record->call.info.transfer_map.transfer = **transfer;
1655 record->call.info.transfer_map.transfer.resource = NULL;
1656 pipe_resource_reference(&record->call.info.transfer_map.transfer.resource,
1657 (*transfer)->resource);
1658 } else {
1659 memset(&record->call.info.transfer_map.transfer, 0, sizeof(struct pipe_transfer));
1660 }
1661
1662 dd_after_draw(dctx, record);
1663 }
1664 return ptr;
1665 }
1666
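/* texture_map mirrors buffer_map above, differing only in the driver entry
 * point it forwards to.
 */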
1667 static void *
1668 dd_context_texture_map(struct pipe_context *_pipe,
1669 struct pipe_resource *resource, unsigned level,
1670 unsigned usage, const struct pipe_box *box,
1671 struct pipe_transfer **transfer)
1672 {
1673 struct dd_context *dctx = dd_context(_pipe);
1674 struct pipe_context *pipe = dctx->pipe;
1675 struct dd_draw_record *record =
1676 dd_screen(dctx->base.screen)->transfers ? dd_create_record(dctx) : NULL;
1677
1678 if (record) {
1679 record->call.type = CALL_TRANSFER_MAP;
1680
1681 dd_before_draw(dctx, record);
1682 }
1683 void *ptr = pipe->texture_map(pipe, resource, level, usage, box, transfer);
1684 if (record) {
1685 record->call.info.transfer_map.transfer_ptr = *transfer;
1686 record->call.info.transfer_map.ptr = ptr;
1687 if (*transfer) {
1688 record->call.info.transfer_map.transfer = **transfer;
1689 record->call.info.transfer_map.transfer.resource = NULL;
1690 pipe_resource_reference(&record->call.info.transfer_map.transfer.resource,
1691 (*transfer)->resource);
1692 } else {
1693 memset(&record->call.info.transfer_map.transfer, 0, sizeof(struct pipe_transfer));
1694 }
1695
1696 dd_after_draw(dctx, record);
1697 }
1698 return ptr;
1699 }
1700
1701 static void
1702 dd_context_transfer_flush_region(struct pipe_context *_pipe,
1703 struct pipe_transfer *transfer,
1704 const struct pipe_box *box)
1705 {
1706 struct dd_context *dctx = dd_context(_pipe);
1707 struct pipe_context *pipe = dctx->pipe;
1708 struct dd_draw_record *record =
1709 dd_screen(dctx->base.screen)->transfers ? dd_create_record(dctx) : NULL;
1710
1711 if (record) {
1712 record->call.type = CALL_TRANSFER_FLUSH_REGION;
1713 record->call.info.transfer_flush_region.transfer_ptr = transfer;
1714 record->call.info.transfer_flush_region.box = *box;
1715 record->call.info.transfer_flush_region.transfer = *transfer;
1716 record->call.info.transfer_flush_region.transfer.resource = NULL;
1717 pipe_resource_reference(
1718 &record->call.info.transfer_flush_region.transfer.resource,
1719 transfer->resource);
1720
1721 dd_before_draw(dctx, record);
1722 }
1723 pipe->transfer_flush_region(pipe, transfer, box);
1724 if (record)
1725 dd_after_draw(dctx, record);
1726 }
1727
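/* The unmap wrappers snapshot the pipe_transfer (and pin its resource)
 * before handing it to the driver, since the transfer object is typically
 * freed by the driver during unmap.
 */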
1728 static void
1729 dd_context_buffer_unmap(struct pipe_context *_pipe,
1730 struct pipe_transfer *transfer)
1731 {
1732 struct dd_context *dctx = dd_context(_pipe);
1733 struct pipe_context *pipe = dctx->pipe;
1734 struct dd_draw_record *record =
1735 dd_screen(dctx->base.screen)->transfers ? dd_create_record(dctx) : NULL;
1736
1737 if (record) {
1738 record->call.type = CALL_TRANSFER_UNMAP;
1739 record->call.info.transfer_unmap.transfer_ptr = transfer;
1740 record->call.info.transfer_unmap.transfer = *transfer;
1741 record->call.info.transfer_unmap.transfer.resource = NULL;
1742 pipe_resource_reference(
1743 &record->call.info.transfer_unmap.transfer.resource,
1744 transfer->resource);
1745
1746 dd_before_draw(dctx, record);
1747 }
1748 pipe->buffer_unmap(pipe, transfer);
1749 if (record)
1750 dd_after_draw(dctx, record);
1751 }
1752
1753 static void
1754 dd_context_texture_unmap(struct pipe_context *_pipe,
1755 struct pipe_transfer *transfer)
1756 {
1757 struct dd_context *dctx = dd_context(_pipe);
1758 struct pipe_context *pipe = dctx->pipe;
1759 struct dd_draw_record *record =
1760 dd_screen(dctx->base.screen)->transfers ? dd_create_record(dctx) : NULL;
1761
1762 if (record) {
1763 record->call.type = CALL_TRANSFER_UNMAP;
1764 record->call.info.transfer_unmap.transfer_ptr = transfer;
1765 record->call.info.transfer_unmap.transfer = *transfer;
1766 record->call.info.transfer_unmap.transfer.resource = NULL;
1767 pipe_resource_reference(
1768 &record->call.info.transfer_unmap.transfer.resource,
1769 transfer->resource);
1770
1771 dd_before_draw(dctx, record);
1772 }
1773 pipe->texture_unmap(pipe, transfer);
1774 if (record)
1775 dd_after_draw(dctx, record);
1776 }
1777
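/* buffer_subdata and texture_subdata record their parameters and pin the
 * destination resource; the user data is stored only as a pointer, not
 * copied.
 */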
1778 static void
1779 dd_context_buffer_subdata(struct pipe_context *_pipe,
1780 struct pipe_resource *resource,
1781 unsigned usage, unsigned offset,
1782 unsigned size, const void *data)
1783 {
1784 struct dd_context *dctx = dd_context(_pipe);
1785 struct pipe_context *pipe = dctx->pipe;
1786 struct dd_draw_record *record =
1787 dd_screen(dctx->base.screen)->transfers ? dd_create_record(dctx) : NULL;
1788
1789 if (record) {
1790 record->call.type = CALL_BUFFER_SUBDATA;
1791 record->call.info.buffer_subdata.resource = NULL;
1792 pipe_resource_reference(&record->call.info.buffer_subdata.resource, resource);
1793 record->call.info.buffer_subdata.usage = usage;
1794 record->call.info.buffer_subdata.offset = offset;
1795 record->call.info.buffer_subdata.size = size;
1796 record->call.info.buffer_subdata.data = data;
1797
1798 dd_before_draw(dctx, record);
1799 }
1800 pipe->buffer_subdata(pipe, resource, usage, offset, size, data);
1801 if (record)
1802 dd_after_draw(dctx, record);
1803 }
1804
1805 static void
1806 dd_context_texture_subdata(struct pipe_context *_pipe,
1807 struct pipe_resource *resource,
1808 unsigned level, unsigned usage,
1809 const struct pipe_box *box,
1810 const void *data, unsigned stride,
1811 uintptr_t layer_stride)
1812 {
1813 struct dd_context *dctx = dd_context(_pipe);
1814 struct pipe_context *pipe = dctx->pipe;
1815 struct dd_draw_record *record =
1816 dd_screen(dctx->base.screen)->transfers ? dd_create_record(dctx) : NULL;
1817
1818 if (record) {
1819 record->call.type = CALL_TEXTURE_SUBDATA;
1820 record->call.info.texture_subdata.resource = NULL;
1821 pipe_resource_reference(&record->call.info.texture_subdata.resource, resource);
1822 record->call.info.texture_subdata.level = level;
1823 record->call.info.texture_subdata.usage = usage;
1824 record->call.info.texture_subdata.box = *box;
1825 record->call.info.texture_subdata.data = data;
1826 record->call.info.texture_subdata.stride = stride;
1827 record->call.info.texture_subdata.layer_stride = layer_stride;
1828
1829 dd_before_draw(dctx, record);
1830 }
1831 pipe->texture_subdata(pipe, resource, level, usage, box, data,
1832 stride, layer_stride);
1833 if (record)
1834 dd_after_draw(dctx, record);
1835 }
1836
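/* Install the wrappers above.  CTX_INIT (defined elsewhere in the ddebug
 * sources, likely dd_pipe.h) is assumed to point dctx->base at the
 * corresponding dd_context_* function, typically only when the wrapped
 * driver implements that entry point.
 */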
1837 void
1838 dd_init_draw_functions(struct dd_context *dctx)
1839 {
1840 CTX_INIT(flush);
1841 CTX_INIT(draw_vbo);
1842 CTX_INIT(launch_grid);
1843 CTX_INIT(resource_copy_region);
1844 CTX_INIT(blit);
1845 CTX_INIT(clear);
1846 CTX_INIT(clear_render_target);
1847 CTX_INIT(clear_depth_stencil);
1848 CTX_INIT(clear_buffer);
1849 CTX_INIT(clear_texture);
1850 CTX_INIT(flush_resource);
1851 CTX_INIT(generate_mipmap);
1852 CTX_INIT(get_query_result_resource);
1853 CTX_INIT(buffer_map);
1854 CTX_INIT(texture_map);
1855 CTX_INIT(transfer_flush_region);
1856 CTX_INIT(buffer_unmap);
1857 CTX_INIT(texture_unmap);
1858 CTX_INIT(buffer_subdata);
1859 CTX_INIT(texture_subdata);
1860 CTX_INIT(draw_vertex_state);
1861 }
1862