xref: /aosp_15_r20/external/mesa3d/src/panfrost/lib/genxml/decode_jm.c (revision 6104692788411f58d303aa86923a9ff6ecaded22)
/*
 * Copyright (C) 2017-2019 Alyssa Rosenzweig
 * Copyright (C) 2017-2019 Connor Abbott
 * Copyright (C) 2019 Collabora, Ltd.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "genxml/gen_macros.h"
#include "util/set.h"
#include "decode.h"

#if PAN_ARCH <= 9

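/* Decode and sanity-check a PRIMITIVE section. Note: the index-size
 * computation below appears to rely on the index_type enum values encoding
 * the index size in bytes for the 8- and 16-bit cases, so only the 32-bit
 * case needs an explicit sizeof(uint32_t).
 */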
static void
pandecode_primitive(struct pandecode_context *ctx, const void *p)
{
   pan_unpack(p, PRIMITIVE, primitive);
   DUMP_UNPACKED(ctx, PRIMITIVE, primitive, "Primitive:\n");

#if PAN_ARCH <= 7
   /* Validate an index buffer is present if we need one. TODO: verify
    * relationship between invocation_count and index_count */

   if (primitive.indices) {
      /* Grab the size */
      unsigned size = (primitive.index_type == MALI_INDEX_TYPE_UINT32)
                         ? sizeof(uint32_t)
                         : primitive.index_type;

      /* Ensure we got a size, and if so, validate the index buffer
       * is large enough to hold a full set of indices of the given
       * size */

      if (!size)
         pandecode_log(ctx, "// XXX: index size missing\n");
      else
         pandecode_validate_buffer(ctx, primitive.indices,
                                   primitive.index_count * size);
   } else if (primitive.index_type)
      pandecode_log(ctx, "// XXX: unexpected index size\n");
#endif
}

#if PAN_ARCH <= 7
static void
pandecode_attributes(struct pandecode_context *ctx, mali_ptr addr, int count,
                     bool varying, enum mali_job_type job_type)
{
   const char *prefix = varying ? "Varying" : "Attribute";
   assert(addr);

   if (!count) {
      pandecode_log(ctx, "// warn: No %s records\n", prefix);
      return;
   }

   MAP_ADDR(ctx, ATTRIBUTE_BUFFER, addr, cl);

   for (int i = 0; i < count; ++i) {
      pan_unpack(cl + i * pan_size(ATTRIBUTE_BUFFER), ATTRIBUTE_BUFFER, temp);
      DUMP_UNPACKED(ctx, ATTRIBUTE_BUFFER, temp, "%s:\n", prefix);

      switch (temp.type) {
      case MALI_ATTRIBUTE_TYPE_1D_NPOT_DIVISOR_WRITE_REDUCTION:
      case MALI_ATTRIBUTE_TYPE_1D_NPOT_DIVISOR: {
         /* The NPOT divisor lives in a continuation record occupying the
          * next descriptor slot, so decode it and skip that slot. */
         pan_unpack(cl + (i + 1) * pan_size(ATTRIBUTE_BUFFER),
                    ATTRIBUTE_BUFFER_CONTINUATION_NPOT, temp2);
         pan_print(ctx->dump_stream, ATTRIBUTE_BUFFER_CONTINUATION_NPOT, temp2,
                   (ctx->indent + 1) * 2);
         i++;
         break;
      }
      case MALI_ATTRIBUTE_TYPE_3D_LINEAR:
      case MALI_ATTRIBUTE_TYPE_3D_INTERLEAVED: {
         /* Likewise, 3D attribute buffers carry their extra dimensions in a
          * continuation record in the next slot. */
         pan_unpack(cl + (i + 1) * pan_size(ATTRIBUTE_BUFFER_CONTINUATION_3D),
                    ATTRIBUTE_BUFFER_CONTINUATION_3D, temp2);
         pan_print(ctx->dump_stream, ATTRIBUTE_BUFFER_CONTINUATION_3D, temp2,
                   (ctx->indent + 1) * 2);
         i++;
         break;
      }
      default:
         break;
      }
   }
   pandecode_log(ctx, "\n");
}

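/* Dump the ATTRIBUTE records and return how many attribute buffers they can
 * reference: the highest buffer_index seen plus one, clamped to 256 as a
 * bound on how much we subsequently dump.
 */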
static unsigned
pandecode_attribute_meta(struct pandecode_context *ctx, int count,
                         mali_ptr attribute, bool varying)
{
   unsigned max = 0;

   for (int i = 0; i < count; ++i, attribute += pan_size(ATTRIBUTE)) {
      MAP_ADDR(ctx, ATTRIBUTE, attribute, cl);
      pan_unpack(cl, ATTRIBUTE, a);
      DUMP_UNPACKED(ctx, ATTRIBUTE, a, "%s:\n",
                    varying ? "Varying" : "Attribute");
      max = MAX2(max, a.buffer_index);
   }

   pandecode_log(ctx, "\n");
   return MIN2(max + 1, 256);
}

/* return bits [lo, hi) of word */
static u32
bits(u32 word, u32 lo, u32 hi)
{
   if (hi - lo >= 32)
      return word; // avoid undefined behavior with the shift

   if (lo >= 32)
      return 0;

   return (word >> lo) & ((1u << (hi - lo)) - 1);
}

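/* The invocations word packs the six local/global size fields at
 * hardware-supplied bit offsets. As an illustrative (hypothetical) example,
 * with size_y_shift = 5 and size_z_shift = 8, bits [0, 5) would hold
 * size_x - 1 and bits [5, 8) would hold size_y - 1; each field stores its
 * value minus one, so a zero field decodes to a count of 1.
 */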
static void
pandecode_invocation(struct pandecode_context *ctx, const void *i)
{
   /* Decode invocation_count. See the comment before the definition of
    * invocation_count for an explanation.
    */
   pan_unpack(i, INVOCATION, invocation);

   unsigned size_x =
      bits(invocation.invocations, 0, invocation.size_y_shift) + 1;
   unsigned size_y = bits(invocation.invocations, invocation.size_y_shift,
                          invocation.size_z_shift) +
                     1;
   unsigned size_z = bits(invocation.invocations, invocation.size_z_shift,
                          invocation.workgroups_x_shift) +
                     1;

   unsigned groups_x =
      bits(invocation.invocations, invocation.workgroups_x_shift,
           invocation.workgroups_y_shift) +
      1;
   unsigned groups_y =
      bits(invocation.invocations, invocation.workgroups_y_shift,
           invocation.workgroups_z_shift) +
      1;
   unsigned groups_z =
      bits(invocation.invocations, invocation.workgroups_z_shift, 32) + 1;

   pandecode_log(ctx, "Invocation (%d, %d, %d) x (%d, %d, %d)\n", size_x,
                 size_y, size_z, groups_x, groups_y, groups_z);

   DUMP_UNPACKED(ctx, INVOCATION, invocation, "Invocation:\n");
}

static void
pandecode_textures(struct pandecode_context *ctx, mali_ptr textures,
                   unsigned texture_count)
{
   if (!textures)
      return;

   pandecode_log(ctx, "Textures %" PRIx64 ":\n", textures);
   ctx->indent++;

#if PAN_ARCH >= 6
   const void *cl =
      pandecode_fetch_gpu_mem(ctx, textures, pan_size(TEXTURE) * texture_count);

   for (unsigned tex = 0; tex < texture_count; ++tex)
      GENX(pandecode_texture)(ctx, cl + pan_size(TEXTURE) * tex, tex);
#else
   mali_ptr *PANDECODE_PTR_VAR(ctx, u, textures);

   /* On v5 and older, textures are an array of pointers to descriptors
    * rather than an array of descriptors, so dump the pointer table first */
   for (unsigned tex = 0; tex < texture_count; ++tex) {
      mali_ptr *PANDECODE_PTR_VAR(ctx, u, textures + tex * sizeof(mali_ptr));
      char *a = pointer_as_memory_reference(ctx, *u);
      pandecode_log(ctx, "%s,\n", a);
      free(a);
   }

   /* Now, finally, descend down into the texture descriptor */
   for (unsigned tex = 0; tex < texture_count; ++tex) {
      mali_ptr *PANDECODE_PTR_VAR(ctx, u, textures + tex * sizeof(mali_ptr));
      GENX(pandecode_texture)(ctx, *u, tex);
   }
#endif
   ctx->indent--;
   pandecode_log(ctx, "\n");
}

static void
pandecode_samplers(struct pandecode_context *ctx, mali_ptr samplers,
                   unsigned sampler_count)
{
   pandecode_log(ctx, "Samplers %" PRIx64 ":\n", samplers);
   ctx->indent++;

   for (unsigned i = 0; i < sampler_count; ++i)
      DUMP_ADDR(ctx, SAMPLER, samplers + (pan_size(SAMPLER) * i),
                "Sampler %u:\n", i);

   ctx->indent--;
   pandecode_log(ctx, "\n");
}

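/* Uniform buffer descriptors are packed 64-bit words: judging from the
 * decode below, the low 10 bits store the buffer size in 16-byte units,
 * minus one, and the remaining bits store the address with its low two bits
 * dropped (so the buffer must be 4-byte aligned).
 */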
static void
pandecode_uniform_buffers(struct pandecode_context *ctx, mali_ptr pubufs,
                          int ubufs_count)
{
   uint64_t *PANDECODE_PTR_VAR(ctx, ubufs, pubufs);

   for (int i = 0; i < ubufs_count; i++) {
      mali_ptr addr = (ubufs[i] >> 10) << 2;
      unsigned size = addr ? (((ubufs[i] & ((1 << 10) - 1)) + 1) * 16) : 0;

      pandecode_validate_buffer(ctx, addr, size);

      char *ptr = pointer_as_memory_reference(ctx, addr);
      pandecode_log(ctx, "ubuf_%d[%u] = %s;\n", i, size, ptr);
      free(ptr);
   }

   pandecode_log(ctx, "\n");
}

static void
pandecode_uniforms(struct pandecode_context *ctx, mali_ptr uniforms,
                   unsigned uniform_count)
{
   pandecode_validate_buffer(ctx, uniforms, uniform_count * 16);

   char *ptr = pointer_as_memory_reference(ctx, uniforms);
   pandecode_log(ctx, "vec4 uniforms[%u] = %s;\n", uniform_count, ptr);
   free(ptr);
   pandecode_log(ctx, "\n");
}

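/* Decode a Draw Call Descriptor (DCD) and everything it references: renderer
 * state (including shader disassembly), viewport, attribute and varying
 * buffers, uniforms, UBOs, textures, and samplers. Shared between vertex,
 * tiler, and indexed-vertex jobs.
 */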
void
GENX(pandecode_dcd)(struct pandecode_context *ctx, const struct MALI_DRAW *p,
                    enum mali_job_type job_type, unsigned gpu_id)
{
#if PAN_ARCH >= 5
   struct pandecode_fbd fbd_info = {.rt_count = 1};
#endif

   if (PAN_ARCH >= 6 || (PAN_ARCH == 5 && job_type != MALI_JOB_TYPE_TILER)) {
#if PAN_ARCH >= 5
      DUMP_ADDR(ctx, LOCAL_STORAGE, p->thread_storage & ~1, "Local Storage:\n");
#endif
   } else {
#if PAN_ARCH == 5
      /* On v5 only, the actual framebuffer pointer is tagged with extra
       * metadata that we validate but do not print.
       */
      pan_unpack(&p->fbd, FRAMEBUFFER_POINTER, ptr);

      if (!ptr.type || ptr.zs_crc_extension_present ||
          ptr.render_target_count != 1) {

         fprintf(ctx->dump_stream, "Unexpected framebuffer pointer settings\n");
      }

      GENX(pandecode_fbd)(ctx, ptr.pointer, false, gpu_id);
#elif PAN_ARCH == 4
      GENX(pandecode_fbd)(ctx, p->fbd, false, gpu_id);
#endif
   }

   int varying_count = 0, attribute_count = 0, uniform_count = 0,
       uniform_buffer_count = 0;
   int texture_count = 0, sampler_count = 0;

   if (p->state) {
      uint32_t *cl =
         pandecode_fetch_gpu_mem(ctx, p->state, pan_size(RENDERER_STATE));

      pan_unpack(cl, RENDERER_STATE, state);

      /* The shader pointer is tagged in its low four bits; mask off the tag
       * before disassembling. */
      if (state.shader.shader & ~0xF)
         pandecode_shader_disassemble(ctx, state.shader.shader & ~0xF, gpu_id);

#if PAN_ARCH >= 6
      bool idvs = (job_type == MALI_JOB_TYPE_INDEXED_VERTEX);

      if (idvs && state.secondary_shader)
         pandecode_shader_disassemble(ctx, state.secondary_shader, gpu_id);
#endif
      DUMP_UNPACKED(ctx, RENDERER_STATE, state, "State:\n");
      ctx->indent++;

      /* Save for dumps */
      attribute_count = state.shader.attribute_count;
      varying_count = state.shader.varying_count;
      texture_count = state.shader.texture_count;
      sampler_count = state.shader.sampler_count;
      uniform_buffer_count = state.properties.uniform_buffer_count;

#if PAN_ARCH >= 6
      uniform_count = state.preload.uniform_count;
#else
      uniform_count = state.properties.uniform_count;
#endif

#if PAN_ARCH == 4
      mali_ptr shader = state.blend_shader & ~0xF;
      if (state.multisample_misc.blend_shader && shader)
         pandecode_shader_disassemble(ctx, shader, gpu_id);
#endif
      ctx->indent--;
      pandecode_log(ctx, "\n");

      /* MRT blend fields are used on v5+. Technically, they are optional on v5
       * for backwards compatibility but we don't care about that.
       */
#if PAN_ARCH >= 5
      if (job_type == MALI_JOB_TYPE_TILER ||
          job_type == MALI_JOB_TYPE_FRAGMENT) {
         void *blend_base = ((void *)cl) + pan_size(RENDERER_STATE);

         for (unsigned i = 0; i < fbd_info.rt_count; i++) {
            mali_ptr shader =
               GENX(pandecode_blend)(ctx, blend_base, i, state.shader.shader);
            if (shader & ~0xF)
               pandecode_shader_disassemble(ctx, shader, gpu_id);
         }
      }
#endif
   } else
      pandecode_log(ctx, "// XXX: missing shader descriptor\n");

   if (p->viewport) {
      DUMP_ADDR(ctx, VIEWPORT, p->viewport, "Viewport:\n");
      pandecode_log(ctx, "\n");
   }

   unsigned max_attr_index = 0;

   if (p->attributes)
      max_attr_index =
         pandecode_attribute_meta(ctx, attribute_count, p->attributes, false);

   if (p->attribute_buffers)
      pandecode_attributes(ctx, p->attribute_buffers, max_attr_index, false,
                           job_type);

   if (p->varyings) {
      varying_count =
         pandecode_attribute_meta(ctx, varying_count, p->varyings, true);
   }

   if (p->varying_buffers)
      pandecode_attributes(ctx, p->varying_buffers, varying_count, true,
                           job_type);

   if (p->uniform_buffers) {
      if (uniform_buffer_count)
         pandecode_uniform_buffers(ctx, p->uniform_buffers,
                                   uniform_buffer_count);
      else
         pandecode_log(ctx, "// warn: UBOs specified but not referenced\n");
   } else if (uniform_buffer_count)
      pandecode_log(ctx, "// XXX: UBOs referenced but not specified\n");

   /* We don't want to actually dump uniforms, but we do need to validate
    * that the counts we were given are sane */

   if (p->push_uniforms) {
      if (uniform_count)
         pandecode_uniforms(ctx, p->push_uniforms, uniform_count);
      else
         pandecode_log(ctx, "// warn: Uniforms specified but not referenced\n");
   } else if (uniform_count)
      pandecode_log(ctx, "// XXX: Uniforms referenced but not specified\n");

   if (p->textures)
      pandecode_textures(ctx, p->textures, texture_count);

   if (p->samplers)
      pandecode_samplers(ctx, p->samplers, sampler_count);
}

static void
pandecode_vertex_compute_geometry_job(struct pandecode_context *ctx,
                                      const struct MALI_JOB_HEADER *h,
                                      mali_ptr job, unsigned gpu_id)
{
   struct mali_compute_job_packed *PANDECODE_PTR_VAR(ctx, p, job);
   pan_section_unpack(p, COMPUTE_JOB, DRAW, draw);
   GENX(pandecode_dcd)(ctx, &draw, h->type, gpu_id);

   pandecode_log(ctx, "Vertex Job Payload:\n");
   ctx->indent++;
   pandecode_invocation(ctx, pan_section_ptr(p, COMPUTE_JOB, INVOCATION));
   DUMP_SECTION(ctx, COMPUTE_JOB, PARAMETERS, p, "Vertex Job Parameters:\n");
   DUMP_UNPACKED(ctx, DRAW, draw, "Draw:\n");
   ctx->indent--;
   pandecode_log(ctx, "\n");
}
#endif

static void
pandecode_write_value_job(struct pandecode_context *ctx, mali_ptr job)
{
   struct mali_write_value_job_packed *PANDECODE_PTR_VAR(ctx, p, job);
   pan_section_unpack(p, WRITE_VALUE_JOB, PAYLOAD, u);
   DUMP_SECTION(ctx, WRITE_VALUE_JOB, PAYLOAD, p, "Write Value Payload:\n");
   pandecode_log(ctx, "\n");
}

static void
pandecode_cache_flush_job(struct pandecode_context *ctx, mali_ptr job)
{
   struct mali_cache_flush_job_packed *PANDECODE_PTR_VAR(ctx, p, job);
   pan_section_unpack(p, CACHE_FLUSH_JOB, PAYLOAD, u);
   DUMP_SECTION(ctx, CACHE_FLUSH_JOB, PAYLOAD, p, "Cache Flush Payload:\n");
   pandecode_log(ctx, "\n");
}

static void
pandecode_tiler_job(struct pandecode_context *ctx,
                    const struct MALI_JOB_HEADER *h, mali_ptr job,
                    unsigned gpu_id)
{
   struct mali_tiler_job_packed *PANDECODE_PTR_VAR(ctx, p, job);
   pan_section_unpack(p, TILER_JOB, DRAW, draw);
   GENX(pandecode_dcd)(ctx, &draw, h->type, gpu_id);
   pandecode_log(ctx, "Tiler Job Payload:\n");
   ctx->indent++;

#if PAN_ARCH <= 7
   pandecode_invocation(ctx, pan_section_ptr(p, TILER_JOB, INVOCATION));
#endif

   pandecode_primitive(ctx, pan_section_ptr(p, TILER_JOB, PRIMITIVE));
   DUMP_UNPACKED(ctx, DRAW, draw, "Draw:\n");

   DUMP_SECTION(ctx, TILER_JOB, PRIMITIVE_SIZE, p, "Primitive Size:\n");

#if PAN_ARCH >= 6
   pan_section_unpack(p, TILER_JOB, TILER, tiler_ptr);
   GENX(pandecode_tiler)(ctx, tiler_ptr.address, gpu_id);

#if PAN_ARCH >= 9
   DUMP_SECTION(ctx, TILER_JOB, INSTANCE_COUNT, p, "Instance count:\n");
   DUMP_SECTION(ctx, TILER_JOB, VERTEX_COUNT, p, "Vertex count:\n");
   DUMP_SECTION(ctx, TILER_JOB, SCISSOR, p, "Scissor:\n");
   DUMP_SECTION(ctx, TILER_JOB, INDICES, p, "Indices:\n");
#else
   pan_section_unpack(p, TILER_JOB, PADDING, padding);
#endif

#endif
   ctx->indent--;
   pandecode_log(ctx, "\n");
}

static void
pandecode_fragment_job(struct pandecode_context *ctx, mali_ptr job,
                       unsigned gpu_id)
{
   struct mali_fragment_job_packed *PANDECODE_PTR_VAR(ctx, p, job);
   pan_section_unpack(p, FRAGMENT_JOB, PAYLOAD, s);

   uint64_t fbd_pointer;

#if PAN_ARCH >= 5
   /* On v5 and newer, the actual framebuffer pointer is tagged with extra
    * metadata that we need to disregard.
    */
   pan_unpack(&s.framebuffer, FRAMEBUFFER_POINTER, ptr);
   fbd_pointer = ptr.pointer;
#else
   /* On v4, the framebuffer pointer is untagged. */
   fbd_pointer = s.framebuffer;
#endif

   UNUSED struct pandecode_fbd info =
      GENX(pandecode_fbd)(ctx, fbd_pointer, true, gpu_id);

#if PAN_ARCH >= 5
   /* Cross-check the pointer tag against what the FBD itself claims. */
   if (!ptr.type || ptr.zs_crc_extension_present != info.has_extra ||
       ptr.render_target_count != info.rt_count) {
      pandecode_log(ctx, "invalid FBD tag\n");
   }
#endif

   DUMP_UNPACKED(ctx, FRAGMENT_JOB_PAYLOAD, s, "Fragment Job Payload:\n");

   pandecode_log(ctx, "\n");
}

#if PAN_ARCH == 6 || PAN_ARCH == 7
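/* Indexed vertex jobs combine the vertex and tiler stages into one job
 * (index-driven vertex shading, the "idvs" case above); decode the
 * vertex-side and fragment-side DRAW descriptors separately, then the
 * shared tiler context.
 */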
static void
pandecode_indexed_vertex_job(struct pandecode_context *ctx,
                             const struct MALI_JOB_HEADER *h, mali_ptr job,
                             unsigned gpu_id)
{
   struct mali_indexed_vertex_job_packed *PANDECODE_PTR_VAR(ctx, p, job);

   pandecode_log(ctx, "Vertex:\n");
   pan_section_unpack(p, INDEXED_VERTEX_JOB, VERTEX_DRAW, vert_draw);
   GENX(pandecode_dcd)(ctx, &vert_draw, h->type, gpu_id);
   DUMP_UNPACKED(ctx, DRAW, vert_draw, "Vertex Draw:\n");

   pandecode_log(ctx, "Fragment:\n");
   pan_section_unpack(p, INDEXED_VERTEX_JOB, FRAGMENT_DRAW, frag_draw);
   GENX(pandecode_dcd)(ctx, &frag_draw, MALI_JOB_TYPE_FRAGMENT, gpu_id);
   DUMP_UNPACKED(ctx, DRAW, frag_draw, "Fragment Draw:\n");

   pan_section_unpack(p, INDEXED_VERTEX_JOB, TILER, tiler_ptr);
   pandecode_log(ctx, "Tiler Job Payload:\n");
   ctx->indent++;
   GENX(pandecode_tiler)(ctx, tiler_ptr.address, gpu_id);
   ctx->indent--;

   pandecode_invocation(ctx,
                        pan_section_ptr(p, INDEXED_VERTEX_JOB, INVOCATION));
   pandecode_primitive(ctx, pan_section_ptr(p, INDEXED_VERTEX_JOB, PRIMITIVE));

   DUMP_SECTION(ctx, INDEXED_VERTEX_JOB, PRIMITIVE_SIZE, p,
                "Primitive Size:\n");

   pan_section_unpack(p, INDEXED_VERTEX_JOB, PADDING, padding);
}
#endif

#if PAN_ARCH == 9
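/* Malloc-vertex jobs appear to be the v9 (Valhall) analogue of indexed
 * vertex jobs: vertex shading and tiling fused into one job, with memory
 * allocated on demand (hence the name) rather than up front, and the tiler
 * context optionally omitted.
 */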
static void
pandecode_malloc_vertex_job(struct pandecode_context *ctx, mali_ptr job,
                            unsigned gpu_id)
{
   struct mali_malloc_vertex_job_packed *PANDECODE_PTR_VAR(ctx, p, job);

   DUMP_SECTION(ctx, MALLOC_VERTEX_JOB, PRIMITIVE, p, "Primitive:\n");
   DUMP_SECTION(ctx, MALLOC_VERTEX_JOB, INSTANCE_COUNT, p, "Instance count:\n");
   DUMP_SECTION(ctx, MALLOC_VERTEX_JOB, ALLOCATION, p, "Allocation:\n");
   DUMP_SECTION(ctx, MALLOC_VERTEX_JOB, TILER, p, "Tiler:\n");
   DUMP_SECTION(ctx, MALLOC_VERTEX_JOB, SCISSOR, p, "Scissor:\n");
   DUMP_SECTION(ctx, MALLOC_VERTEX_JOB, PRIMITIVE_SIZE, p, "Primitive Size:\n");
   DUMP_SECTION(ctx, MALLOC_VERTEX_JOB, INDICES, p, "Indices:\n");

   pan_section_unpack(p, MALLOC_VERTEX_JOB, DRAW, dcd);

   pan_section_unpack(p, MALLOC_VERTEX_JOB, TILER, tiler_ptr);
   pandecode_log(ctx, "Tiler Job Payload:\n");
   ctx->indent++;
   if (tiler_ptr.address)
      GENX(pandecode_tiler)(ctx, tiler_ptr.address, gpu_id);
   else
      pandecode_log(ctx, "<omitted>\n");
   ctx->indent--;

   GENX(pandecode_dcd)(ctx, &dcd, 0, gpu_id);

   pan_section_unpack(p, MALLOC_VERTEX_JOB, POSITION, position);
   pan_section_unpack(p, MALLOC_VERTEX_JOB, VARYING, varying);
   GENX(pandecode_shader_environment)(ctx, &position, gpu_id);
   GENX(pandecode_shader_environment)(ctx, &varying, gpu_id);
}

static void
pandecode_compute_job(struct pandecode_context *ctx, mali_ptr job,
                      unsigned gpu_id)
{
   struct mali_compute_job_packed *PANDECODE_PTR_VAR(ctx, p, job);
   pan_section_unpack(p, COMPUTE_JOB, PAYLOAD, payload);

   GENX(pandecode_shader_environment)(ctx, &payload.compute, gpu_id);
   DUMP_SECTION(ctx, COMPUTE_JOB, PAYLOAD, p, "Compute:\n");
}
#endif

/*
 * Trace a job chain at a particular GPU address, interpreted for a particular
 * GPU using the job manager.
 */
void
GENX(pandecode_jc)(struct pandecode_context *ctx, mali_ptr jc_gpu_va,
                   unsigned gpu_id)
{
   pandecode_dump_file_open(ctx);

   struct set *va_set = _mesa_pointer_set_create(NULL);
   struct set_entry *entry = NULL;

   mali_ptr next_job = 0;

   do {
      struct mali_job_header_packed *hdr =
         PANDECODE_PTR(ctx, jc_gpu_va, struct mali_job_header_packed);

      entry = _mesa_set_search(va_set, hdr);
      if (entry != NULL) {
         fprintf(stdout, "Job list has a cycle\n");
         break;
      }

      pan_unpack(hdr, JOB_HEADER, h);
      next_job = h.next;

      DUMP_UNPACKED(ctx, JOB_HEADER, h, "Job Header (%" PRIx64 "):\n",
                    jc_gpu_va);
      pandecode_log(ctx, "\n");

      switch (h.type) {
      case MALI_JOB_TYPE_WRITE_VALUE:
         pandecode_write_value_job(ctx, jc_gpu_va);
         break;

      case MALI_JOB_TYPE_CACHE_FLUSH:
         pandecode_cache_flush_job(ctx, jc_gpu_va);
         break;

      case MALI_JOB_TYPE_TILER:
         pandecode_tiler_job(ctx, &h, jc_gpu_va, gpu_id);
         break;

#if PAN_ARCH <= 7
      case MALI_JOB_TYPE_VERTEX:
      case MALI_JOB_TYPE_COMPUTE:
         pandecode_vertex_compute_geometry_job(ctx, &h, jc_gpu_va, gpu_id);
         break;

#if PAN_ARCH >= 6
      case MALI_JOB_TYPE_INDEXED_VERTEX:
         pandecode_indexed_vertex_job(ctx, &h, jc_gpu_va, gpu_id);
         break;
#endif
#else
      case MALI_JOB_TYPE_COMPUTE:
         pandecode_compute_job(ctx, jc_gpu_va, gpu_id);
         break;

      case MALI_JOB_TYPE_MALLOC_VERTEX:
         pandecode_malloc_vertex_job(ctx, jc_gpu_va, gpu_id);
         break;
#endif

      case MALI_JOB_TYPE_FRAGMENT:
         pandecode_fragment_job(ctx, jc_gpu_va, gpu_id);
         break;

      default:
         break;
      }

      /* Track the latest visited job CPU VA to detect cycles */
      _mesa_set_add(va_set, hdr);
   } while ((jc_gpu_va = next_job));

   _mesa_set_destroy(va_set, NULL);

   fflush(ctx->dump_stream);
   pandecode_map_read_write(ctx);
}

void
GENX(pandecode_abort_on_fault)(struct pandecode_context *ctx,
                               mali_ptr jc_gpu_va)
{
   mali_ptr next_job = 0;

   do {
      pan_unpack(PANDECODE_PTR(ctx, jc_gpu_va, struct mali_job_header_packed),
                 JOB_HEADER, h);
      next_job = h.next;

      /* Ensure the job is marked COMPLETE */
      if (h.exception_status != 0x1) {
         fprintf(stderr, "Incomplete job or timeout\n");
         fflush(NULL);
         abort();
      }
   } while ((jc_gpu_va = next_job));

   pandecode_map_read_write(ctx);
}

#endif