/*
 * Copyright © 2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "aub_write.h"

#include <inttypes.h>
#include <signal.h>
#include <stdarg.h>
#include <stdlib.h>
#include <string.h>

#include "intel_aub.h"
#include "intel_context.h"

#include "util/u_math.h"

#define MI_BATCH_NON_SECURE_I965 (1 << 8)

#define min(a, b) ({                            \
         __typeof(a) _a = (a);                  \
         __typeof(b) _b = (b);                  \
         _a < _b ? _a : _b;                     \
      })

#define max(a, b) ({                            \
         __typeof(a) _a = (a);                  \
         __typeof(b) _b = (b);                  \
         _a > _b ? _a : _b;                     \
      })

static struct aub_context *aub_context_new(struct aub_file *aub, uint32_t new_id);
static void mem_trace_memory_write_header_out(struct aub_file *aub, uint64_t addr,
                                              uint32_t len, uint32_t addr_space,
                                              const char *desc);

#define fail_if(cond, ...) _fail_if(cond, NULL, __VA_ARGS__)

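/* Recursively free a PPGTT table. Level 1 entries hold raw physical page
 * addresses rather than allocated subtable structures (see
 * populate_ppgtt_table() below), so the recursion stops above them.
 */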
static void
aub_ppgtt_table_finish(struct aub_ppgtt_table *table, int level)
{
   if (level == 1)
      return;

   for (unsigned i = 0; i < ARRAY_SIZE(table->subtables); i++) {
      if (table->subtables[i]) {
         aub_ppgtt_table_finish(table->subtables[i], level - 1);
         free(table->subtables[i]);
      }
   }
}

static void
data_out(struct aub_file *aub, const void *data, size_t size)
{
   if (size == 0)
      return;

   fail_if(fwrite(data, 1, size, aub->file) == 0,
           "Writing to output failed\n");
}

static void
dword_out(struct aub_file *aub, uint32_t data)
{
   data_out(aub, &data, sizeof(data));
}

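/* Write the memtrace-style file header used when execlists are available: a
 * CMD_MEM_TRACE_VERSION packet whose payload is the "PCI-ID=... <app name>"
 * string, padded to a dword boundary.
 */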
static void
write_execlists_header(struct aub_file *aub, const char *name)
{
   char app_name[8 * 4];
   int app_name_len, dwords;

   app_name_len =
      snprintf(app_name, sizeof(app_name), "PCI-ID=0x%X %s",
               aub->pci_id, name);
   app_name_len = ALIGN(app_name_len, sizeof(uint32_t));

   dwords = 5 + app_name_len / sizeof(uint32_t);
   dword_out(aub, CMD_MEM_TRACE_VERSION | (dwords - 1));
   dword_out(aub, AUB_MEM_TRACE_VERSION_FILE_VERSION);
   dword_out(aub, aub->devinfo.simulator_id << AUB_MEM_TRACE_VERSION_DEVICE_SHIFT);
   dword_out(aub, 0);      /* version */
   dword_out(aub, 0);      /* version */
   data_out(aub, app_name, app_name_len);
}

static void
write_legacy_header(struct aub_file *aub, const char *name)
{
   char app_name[8 * 4];
   char comment[16];
   int comment_len, comment_dwords, dwords;

   comment_len = snprintf(comment, sizeof(comment), "PCI-ID=0x%x", aub->pci_id);
   comment_dwords = ((comment_len + 3) / 4);

   /* Start with a (required) version packet. */
   dwords = 13 + comment_dwords;
   dword_out(aub, CMD_AUB_HEADER | (dwords - 2));
   dword_out(aub, (4 << AUB_HEADER_MAJOR_SHIFT) |
                  (0 << AUB_HEADER_MINOR_SHIFT));

   /* Next comes a 32-byte application name. */
   strncpy(app_name, name, sizeof(app_name));
   app_name[sizeof(app_name) - 1] = 0;
   data_out(aub, app_name, sizeof(app_name));

   dword_out(aub, 0); /* timestamp */
   dword_out(aub, 0); /* timestamp */
   dword_out(aub, comment_len);
   data_out(aub, comment, comment_dwords * 4);
}


static void
aub_write_header(struct aub_file *aub, const char *app_name)
{
   if (aub_use_execlists(aub))
      write_execlists_header(aub, app_name);
   else
      write_legacy_header(aub, app_name);
}

void
aub_file_init(struct aub_file *aub, FILE *file, FILE *debug, uint16_t pci_id, const char *app_name)
{
   memset(aub, 0, sizeof(*aub));

   aub->verbose_log_file = debug;
   aub->file = file;
   aub->pci_id = pci_id;
   fail_if(!intel_get_device_info_from_pci_id(pci_id, &aub->devinfo),
           "failed to identify chipset=0x%x\n", pci_id);
   aub->addr_bits = aub->devinfo.ver >= 8 ? 48 : 32;

   aub_write_header(aub, app_name);

   aub->phys_addrs_allocator = 0;
   aub->ggtt_addrs_allocator = 0;
   aub->pml4.phys_addr = aub->phys_addrs_allocator++ << 12;

   mem_trace_memory_write_header_out(aub, aub->ggtt_addrs_allocator++,
                                     GFX8_PTE_SIZE,
                                     AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT_ENTRY,
                                     "GGTT PT");
   dword_out(aub, 1);
   dword_out(aub, 0);

   aub->next_context_handle = 1;
   aub_context_new(aub, 0); /* Default context */
}

void
aub_file_finish(struct aub_file *aub)
{
   aub_ppgtt_table_finish(&aub->pml4, 4);
   fclose(aub->file);
}

uint32_t
aub_gtt_size(struct aub_file *aub)
{
   return NUM_PT_ENTRIES * (aub->addr_bits > 32 ? GFX8_PTE_SIZE : PTE_SIZE);
}

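/* Emit the header of a CMD_MEM_TRACE_MEMORY_WRITE packet. The caller is
 * expected to follow up with `len` bytes of payload, padded to a dword
 * boundary.
 */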
static void
mem_trace_memory_write_header_out(struct aub_file *aub, uint64_t addr,
                                  uint32_t len, uint32_t addr_space,
                                  const char *desc)
{
   uint32_t dwords = ALIGN(len, sizeof(uint32_t)) / sizeof(uint32_t);

   if (aub->verbose_log_file) {
      fprintf(aub->verbose_log_file,
              "  MEM WRITE (0x%016" PRIx64 "-0x%016" PRIx64 ") %s\n",
              addr, addr + len, desc);
   }

   dword_out(aub, CMD_MEM_TRACE_MEMORY_WRITE | (5 + dwords - 1));
   dword_out(aub, addr & 0xFFFFFFFF);   /* addr lo */
   dword_out(aub, addr >> 32);   /* addr hi */
   dword_out(aub, addr_space);   /* gtt */
   dword_out(aub, len);
}

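/* Emit a CMD_MEM_TRACE_REGISTER_WRITE packet writing a single dword to an
 * MMIO register.
 */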
static void
register_write_out(struct aub_file *aub, uint32_t addr, uint32_t value)
{
   uint32_t dwords = 1;

   if (aub->verbose_log_file) {
      fprintf(aub->verbose_log_file,
              "  MMIO WRITE (0x%08x = 0x%08x)\n", addr, value);
   }

   dword_out(aub, CMD_MEM_TRACE_REGISTER_WRITE | (5 + dwords - 1));
   dword_out(aub, addr);
   dword_out(aub, AUB_MEM_TRACE_REGISTER_SIZE_DWORD |
                  AUB_MEM_TRACE_REGISTER_SPACE_MMIO);
   dword_out(aub, 0xFFFFFFFF);   /* mask lo */
   dword_out(aub, 0x00000000);   /* mask hi */
   dword_out(aub, value);
}

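/* Fill entries [start, end] of one page-table level, allocating any missing
 * subtables (at level 1 the entries are just freshly allocated physical page
 * addresses), and emit a physical memory write covering only the dirty range
 * of the table.
 */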
static void
populate_ppgtt_table(struct aub_file *aub, struct aub_ppgtt_table *table,
                     int start, int end, int level)
{
   uint64_t entries[512] = {0};
   int dirty_start = 512, dirty_end = 0;

   if (aub->verbose_log_file) {
      fprintf(aub->verbose_log_file,
              "  PPGTT (0x%016" PRIx64 "), lvl %d, start: %x, end: %x\n",
              table->phys_addr, level, start, end);
   }

   for (int i = start; i <= end; i++) {
      if (!table->subtables[i]) {
         dirty_start = min(dirty_start, i);
         dirty_end = max(dirty_end, i);
         if (level == 1) {
            table->subtables[i] =
               (void *)(uintptr_t)(aub->phys_addrs_allocator++ << 12);
            if (aub->verbose_log_file) {
               fprintf(aub->verbose_log_file,
                       "   Adding entry: %x, phys_addr: 0x%016" PRIx64 "\n",
                       i, (uint64_t)(uintptr_t)table->subtables[i]);
            }
         } else {
            table->subtables[i] =
               calloc(1, sizeof(struct aub_ppgtt_table));
            table->subtables[i]->phys_addr =
               aub->phys_addrs_allocator++ << 12;
            if (aub->verbose_log_file) {
               fprintf(aub->verbose_log_file,
                       "   Adding entry: %x, phys_addr: 0x%016" PRIx64 "\n",
                       i, table->subtables[i]->phys_addr);
            }
         }
      }
      entries[i] = 3 /* read/write | present */ |
         (level == 1 ? (uint64_t)(uintptr_t)table->subtables[i] :
          table->subtables[i]->phys_addr);
   }

   if (dirty_start <= dirty_end) {
      uint64_t write_addr = table->phys_addr + dirty_start *
         sizeof(uint64_t);
      uint64_t write_size = (dirty_end - dirty_start + 1) *
         sizeof(uint64_t);
      mem_trace_memory_write_header_out(aub, write_addr, write_size,
                                        AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_PHYSICAL,
                                        "PPGTT update");
      data_out(aub, entries + dirty_start, write_size);
   }
}

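/* Make sure the 4-level PPGTT covering [start, start + size) is fully
 * populated, walking all four levels and emitting the page-table updates as
 * physical memory writes.
 */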
void
aub_map_ppgtt(struct aub_file *aub, uint64_t start, uint64_t size)
{
   uint64_t l4_start = start & 0xff8000000000;
   uint64_t l4_end = ((start + size - 1) | 0x007fffffffff) & 0xffffffffffff;

#define L4_index(addr) (((addr) >> 39) & 0x1ff)
#define L3_index(addr) (((addr) >> 30) & 0x1ff)
#define L2_index(addr) (((addr) >> 21) & 0x1ff)
#define L1_index(addr) (((addr) >> 12) & 0x1ff)

#define L3_table(addr) (aub->pml4.subtables[L4_index(addr)])
#define L2_table(addr) (L3_table(addr)->subtables[L3_index(addr)])
#define L1_table(addr) (L2_table(addr)->subtables[L2_index(addr)])

   if (aub->verbose_log_file) {
      fprintf(aub->verbose_log_file,
              " Mapping PPGTT address: 0x%" PRIx64 ", size: %" PRIu64"\n",
              start, size);
   }

   populate_ppgtt_table(aub, &aub->pml4, L4_index(l4_start), L4_index(l4_end), 4);

   for (uint64_t l4 = l4_start; l4 < l4_end; l4 += (1ULL << 39)) {
      uint64_t l3_start = max(l4, start & 0xffffc0000000);
      uint64_t l3_end = min(l4 + (1ULL << 39) - 1,
                            ((start + size - 1) | 0x00003fffffff) & 0xffffffffffff);
      uint64_t l3_start_idx = L3_index(l3_start);
      uint64_t l3_end_idx = L3_index(l3_end);

      populate_ppgtt_table(aub, L3_table(l4), l3_start_idx, l3_end_idx, 3);

      for (uint64_t l3 = l3_start; l3 < l3_end; l3 += (1ULL << 30)) {
         uint64_t l2_start = max(l3, start & 0xffffffe00000);
         uint64_t l2_end = min(l3 + (1ULL << 30) - 1,
                               ((start + size - 1) | 0x0000001fffff) & 0xffffffffffff);
         uint64_t l2_start_idx = L2_index(l2_start);
         uint64_t l2_end_idx = L2_index(l2_end);

         populate_ppgtt_table(aub, L2_table(l3), l2_start_idx, l2_end_idx, 2);

         for (uint64_t l2 = l2_start; l2 < l2_end; l2 += (1ULL << 21)) {
            uint64_t l1_start = max(l2, start & 0xfffffffff000);
            uint64_t l1_end = min(l2 + (1ULL << 21) - 1,
                                  ((start + size - 1) | 0x000000000fff) & 0xffffffffffff);
            uint64_t l1_start_idx = L1_index(l1_start);
            uint64_t l1_end_idx = L1_index(l1_end);

            populate_ppgtt_table(aub, L1_table(l2), l1_start_idx, l1_end_idx, 1);
         }
      }
   }
}

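/* Translate a PPGTT virtual address into the physical address recorded in
 * the level 1 table. Only valid for addresses previously mapped with
 * aub_map_ppgtt().
 */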
static uint64_t
ppgtt_lookup(struct aub_file *aub, uint64_t ppgtt_addr)
{
   return (uint64_t)(uintptr_t)L1_table(ppgtt_addr)->subtables[L1_index(ppgtt_addr)];
}

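/* Per engine-class execlist submission registers (submit port, submit queue,
 * status and control) used when driving the simulator in execlist mode.
 */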
static const struct engine {
   const char *name;
   enum intel_engine_class engine_class;
   uint32_t hw_class;
   uint32_t elsp_reg;
   uint32_t elsq_reg;
   uint32_t status_reg;
   uint32_t control_reg;
} engines[] = {
   [INTEL_ENGINE_CLASS_RENDER] = {
      .name = "RENDER",
      .engine_class = INTEL_ENGINE_CLASS_RENDER,
      .hw_class = 1,
      .elsp_reg = RCSUNIT(EXECLIST_SUBMITPORT),
      .elsq_reg = RCSUNIT(EXECLIST_SQ_CONTENTS),
      .status_reg = RCSUNIT(EXECLIST_STATUS),
      .control_reg = RCSUNIT(EXECLIST_CONTROL),
   },
   [INTEL_ENGINE_CLASS_VIDEO] = {
      .name = "VIDEO",
      .engine_class = INTEL_ENGINE_CLASS_VIDEO,
      .hw_class = 3,
      .elsp_reg = VCSUNIT0(EXECLIST_SUBMITPORT),
      .elsq_reg = VCSUNIT0(EXECLIST_SQ_CONTENTS),
      .status_reg = VCSUNIT0(EXECLIST_STATUS),
      .control_reg = VCSUNIT0(EXECLIST_CONTROL),
   },
   [INTEL_ENGINE_CLASS_COPY] = {
      .name = "BLITTER",
      .engine_class = INTEL_ENGINE_CLASS_COPY,
      .hw_class = 2,
      .elsp_reg = BCSUNIT0(EXECLIST_SUBMITPORT),
      .elsq_reg = BCSUNIT0(EXECLIST_SQ_CONTENTS),
      .status_reg = BCSUNIT0(EXECLIST_STATUS),
      .control_reg = BCSUNIT0(EXECLIST_CONTROL),
   },
};

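/* Allocate backing physical pages for [virt_addr, virt_addr + size) and
 * write the corresponding GGTT PTEs (present bit set, 4KB pages).
 */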
static void
aub_map_ggtt(struct aub_file *aub, uint64_t virt_addr, uint64_t size)
{
   /* Makes the code below a bit simpler. In practice all of the writes we
    * receive from error2aub are page aligned.
    */
   assert(virt_addr % 4096 == 0);
   assert((aub->phys_addrs_allocator + size) < (1ULL << 32));

   /* GGTT PT */
   uint32_t ggtt_ptes = DIV_ROUND_UP(size, 4096);
   uint64_t phys_addr = aub->phys_addrs_allocator << 12;
   aub->phys_addrs_allocator += ggtt_ptes;

   if (aub->verbose_log_file) {
      fprintf(aub->verbose_log_file,
              " Mapping GGTT address: 0x%" PRIx64 ", size: %" PRIu64" phys_addr=0x%" PRIx64 " entries=%u\n",
              virt_addr, size, phys_addr, ggtt_ptes);
   }

   mem_trace_memory_write_header_out(aub,
                                     (virt_addr >> 12) * GFX8_PTE_SIZE,
                                     ggtt_ptes * GFX8_PTE_SIZE,
                                     AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT_ENTRY,
                                     "GGTT PT");
   for (uint32_t i = 0; i < ggtt_ptes; i++) {
      dword_out(aub, 1 + phys_addr + i * 4096);
      dword_out(aub, 0);
   }
}

void
aub_write_ggtt(struct aub_file *aub, uint64_t virt_addr, uint64_t size, const void *data)
{
   /* Default setup assumes a 1 to 1 mapping between physical and virtual GGTT
    * addresses. This is somewhat incompatible with the aub_write_ggtt()
    * function. In practice it doesn't matter as the GGTT writes are used to
    * replace the default setup and we've taken care to set up the PML4 as
    * the top of the GGTT.
    */
   assert(!aub->has_default_setup);

   aub_map_ggtt(aub, virt_addr, size);

   /* We write the GGTT buffer through the GGTT aub command rather than the
    * PHYSICAL aub command. This is because the Gfx9 simulator seems to have
    * two different sets of memory pools for GGTT and physical (probably
    * someone didn't really understand the concept?).
    */
   static const char null_block[8 * 4096];
   for (uint64_t offset = 0; offset < size; offset += 4096) {
      uint32_t block_size = min(4096, size - offset);

      mem_trace_memory_write_header_out(aub, virt_addr + offset, block_size,
                                        AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT,
                                        "GGTT buffer");
      data_out(aub, (char *) data + offset, block_size);

      /* Pad to a multiple of 4 bytes. */
      data_out(aub, null_block, -block_size & 3);
   }
}

static const struct engine *
engine_from_engine_class(enum intel_engine_class engine_class)
{
   switch (engine_class) {
   case INTEL_ENGINE_CLASS_RENDER:
   case INTEL_ENGINE_CLASS_COPY:
   case INTEL_ENGINE_CLASS_VIDEO:
      return &engines[engine_class];
   default:
      unreachable("unknown ring");
   }
}

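/* Pick the per-engine context image initializer for this device generation.
 * As used below, calling it with params == NULL and data == NULL only
 * reports the required context image size through *size.
 */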
static void
get_context_init(const struct intel_device_info *devinfo,
                 const struct intel_context_parameters *params,
                 enum intel_engine_class engine_class,
                 uint32_t *data,
                 uint32_t *size)
{
   static const intel_context_init_t gfx8_contexts[] = {
      [INTEL_ENGINE_CLASS_RENDER] = gfx8_render_context_init,
      [INTEL_ENGINE_CLASS_COPY] = gfx8_blitter_context_init,
      [INTEL_ENGINE_CLASS_VIDEO] = gfx8_video_context_init,
   };
   static const intel_context_init_t gfx10_contexts[] = {
      [INTEL_ENGINE_CLASS_RENDER] = gfx10_render_context_init,
      [INTEL_ENGINE_CLASS_COPY] = gfx10_blitter_context_init,
      [INTEL_ENGINE_CLASS_VIDEO] = gfx10_video_context_init,
   };

   assert(devinfo->ver >= 8);

   if (devinfo->ver <= 10)
      gfx8_contexts[engine_class](params, data, size);
   else
      gfx10_contexts[engine_class](params, data, size);
}

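/* Reserve `size` bytes (rounded up to whole pages) from the GGTT address
 * allocator and map them to freshly allocated physical pages.
 */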
static uint64_t
alloc_ggtt_address(struct aub_file *aub, uint64_t size)
{
   uint32_t ggtt_ptes = DIV_ROUND_UP(size, 4096);
   uint64_t addr = aub->ggtt_addrs_allocator << 12;

   aub->ggtt_addrs_allocator += ggtt_ptes;
   aub_map_ggtt(aub, addr, size);

   return addr;
}

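/* Point the engine's HWS_PGA register at the hardware status page allocated
 * in aub_write_ensure_hwsp().
 */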
static void
write_hwsp(struct aub_file *aub,
           enum intel_engine_class engine_class)
{
   uint32_t reg = 0;
   switch (engine_class) {
   case INTEL_ENGINE_CLASS_RENDER:  reg = RCSUNIT (HWS_PGA); break;
   case INTEL_ENGINE_CLASS_COPY:    reg = BCSUNIT0(HWS_PGA); break;
   case INTEL_ENGINE_CLASS_VIDEO:   reg = VCSUNIT0(HWS_PGA); break;
   default:
      unreachable("unknown ring");
   }

   register_write_out(aub, reg, aub->engine_setup[engine_class].hwsp_addr);
}

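/* Lay out and write the per-engine execlist state for a context: GGTT space
 * for the ring buffer, the PPHWSP and the context image, with the context
 * image initialized by get_context_init() to point at the ring and the PML4.
 */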
static uint32_t
write_engine_execlist_setup(struct aub_file *aub,
                            uint32_t ctx_id,
                            struct aub_hw_context *hw_ctx,
                            enum intel_engine_class engine_class)
{
   const struct engine *cs = engine_from_engine_class(engine_class);
   uint32_t context_size;

   get_context_init(&aub->devinfo, NULL, engine_class, NULL, &context_size);

   /* GGTT PT */
   uint32_t total_size = RING_SIZE + PPHWSP_SIZE + context_size;
   char name[80];
   uint64_t ggtt_addr = alloc_ggtt_address(aub, total_size);

   snprintf(name, sizeof(name), "%s (ctx id: %d) GGTT PT", cs->name, ctx_id);

   /* RING */
   hw_ctx->ring_addr = ggtt_addr;
   snprintf(name, sizeof(name), "%s RING", cs->name);
   mem_trace_memory_write_header_out(aub, ggtt_addr, RING_SIZE,
                                     AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT,
                                     name);
   for (uint32_t i = 0; i < RING_SIZE; i += sizeof(uint32_t))
      dword_out(aub, 0);
   ggtt_addr += RING_SIZE;

   /* PPHWSP */
   hw_ctx->pphwsp_addr = ggtt_addr;
   snprintf(name, sizeof(name), "%s PPHWSP", cs->name);
   mem_trace_memory_write_header_out(aub, ggtt_addr,
                                     PPHWSP_SIZE + context_size,
                                     AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT,
                                     name);
   for (uint32_t i = 0; i < PPHWSP_SIZE; i += sizeof(uint32_t))
      dword_out(aub, 0);

   /* CONTEXT */
   struct intel_context_parameters params = {
      .ring_addr = hw_ctx->ring_addr,
      .ring_size = RING_SIZE,
      .pml4_addr = aub->pml4.phys_addr,
   };
   uint32_t *context_data = calloc(1, context_size);
   get_context_init(&aub->devinfo, &params, engine_class, context_data, &context_size);
   data_out(aub, context_data, context_size);
   free(context_data);

   hw_ctx->initialized = true;

   return total_size;
}

static void
write_execlists_default_setup(struct aub_file *aub)
{
   register_write_out(aub, RCSUNIT(GFX_MODE), 0x80008000 /* execlist enable */);
   register_write_out(aub, VCSUNIT0(GFX_MODE), 0x80008000 /* execlist enable */);
   register_write_out(aub, BCSUNIT0(GFX_MODE), 0x80008000 /* execlist enable */);
}

static void write_legacy_default_setup(struct aub_file *aub)
{
   uint32_t entry = 0x200003;

   /* Set up the GTT. The max we can handle is 64M */
   dword_out(aub, CMD_AUB_TRACE_HEADER_BLOCK |
                  ((aub->addr_bits > 32 ? 6 : 5) - 2));
   dword_out(aub, AUB_TRACE_MEMTYPE_GTT_ENTRY |
                  AUB_TRACE_TYPE_NOTYPE | AUB_TRACE_OP_DATA_WRITE);
   dword_out(aub, 0); /* subtype */
   dword_out(aub, 0); /* offset */
   dword_out(aub, aub_gtt_size(aub)); /* size */
   if (aub->addr_bits > 32)
      dword_out(aub, 0);
   for (uint32_t i = 0; i < NUM_PT_ENTRIES; i++) {
      dword_out(aub, entry + 0x1000 * i);
      if (aub->addr_bits > 32)
         dword_out(aub, 0);
   }
}

/**
 * Sets up a default GGTT/PPGTT address space and execlists context (when
 * supported).
 */
void
aub_write_default_setup(struct aub_file *aub)
{
   if (aub_use_execlists(aub))
      write_execlists_default_setup(aub);
   else
      write_legacy_default_setup(aub);

   aub->has_default_setup = true;
}

static struct aub_context *
aub_context_new(struct aub_file *aub, uint32_t new_id)
{
   assert(aub->num_contexts < MAX_CONTEXT_COUNT);

   struct aub_context *ctx = &aub->contexts[aub->num_contexts++];
   memset(ctx, 0, sizeof(*ctx));
   ctx->id = new_id;

   return ctx;
}

uint32_t
aub_write_context_create(struct aub_file *aub, uint32_t *ctx_id)
{
   uint32_t new_id = ctx_id ? *ctx_id : aub->next_context_handle;

   aub_context_new(aub, new_id);

   if (!ctx_id)
      aub->next_context_handle++;

   return new_id;
}

static struct aub_context *
aub_context_find(struct aub_file *aub, uint32_t id)
{
   for (int i = 0; i < aub->num_contexts; i++) {
      if (aub->contexts[i].id == id)
         return &aub->contexts[i];
   }

   return NULL;
}

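/* Look up the software context and lazily write its per-engine execlist
 * setup the first time it is used on a given engine class.
 */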
static struct aub_hw_context *
aub_write_ensure_context(struct aub_file *aub, uint32_t ctx_id,
                         enum intel_engine_class engine_class)
{
   struct aub_context *ctx = aub_context_find(aub, ctx_id);
   assert(ctx != NULL);

   struct aub_hw_context *hw_ctx = &ctx->hw_contexts[engine_class];
   if (!hw_ctx->initialized)
      write_engine_execlist_setup(aub, ctx->id, hw_ctx, engine_class);

   return hw_ctx;
}

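/* Build the execlist context descriptor from the engine's hardware class,
 * the context's PPHWSP GGTT address and the common CONTEXT_FLAGS.
 */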
static uint64_t
get_context_descriptor(struct aub_file *aub,
                       const struct engine *cs,
                       struct aub_hw_context *hw_ctx)
{
   return cs->hw_class | hw_ctx->pphwsp_addr | CONTEXT_FLAGS;
}

/**
 * Break up large objects into multiple writes.  Otherwise a 128kB VBO
 * would overflow the 16-bit size field in the packet header and
 * everything goes badly after that.
 */
void
aub_write_trace_block(struct aub_file *aub,
                      uint32_t type, void *virtual,
                      uint32_t size, uint64_t gtt_offset)
{
   uint32_t block_size;
   uint32_t subtype = 0;
   static const char null_block[8 * 4096];

   for (uint32_t offset = 0; offset < size; offset += block_size) {
      block_size = min(8 * 4096, size - offset);

      if (aub_use_execlists(aub)) {
         block_size = min(4096, block_size);
         mem_trace_memory_write_header_out(aub,
                                           ppgtt_lookup(aub, gtt_offset + offset),
                                           block_size,
                                           AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_PHYSICAL,
                                           "Trace Block");
      } else {
         dword_out(aub, CMD_AUB_TRACE_HEADER_BLOCK |
                        ((aub->addr_bits > 32 ? 6 : 5) - 2));
         dword_out(aub, AUB_TRACE_MEMTYPE_GTT |
                        type | AUB_TRACE_OP_DATA_WRITE);
         dword_out(aub, subtype);
         dword_out(aub, gtt_offset + offset);
         dword_out(aub, align(block_size, 4));
         if (aub->addr_bits > 32)
            dword_out(aub, (gtt_offset + offset) >> 32);
      }

      if (virtual)
         data_out(aub, ((char *) virtual) + offset, block_size);
      else
         data_out(aub, null_block, block_size);

      /* Pad to a multiple of 4 bytes. */
      data_out(aub, null_block, -block_size & 3);
   }
}

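/* Write an MI_BATCH_BUFFER_START pointing at the user batch into the start
 * of the context's ring, then update the ring-buffer head/tail dwords so
 * only that command gets executed.
 */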
static void
aub_dump_ring_buffer_execlist(struct aub_file *aub,
                              struct aub_hw_context *hw_ctx,
                              const struct engine *cs,
                              uint64_t batch_offset)
{
   mem_trace_memory_write_header_out(aub, hw_ctx->ring_addr, 16,
                                     AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT,
                                     "RING MI_BATCH_BUFFER_START user");
   dword_out(aub, AUB_MI_BATCH_BUFFER_START | MI_BATCH_NON_SECURE_I965 | (3 - 2));
   dword_out(aub, batch_offset & 0xFFFFFFFF);
   dword_out(aub, batch_offset >> 32);
   dword_out(aub, 0 /* MI_NOOP */);

   mem_trace_memory_write_header_out(aub, hw_ctx->ring_addr + 8192 + 20, 4,
                                     AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT,
                                     "RING BUFFER HEAD");
   dword_out(aub, 0); /* RING_BUFFER_HEAD */
   mem_trace_memory_write_header_out(aub, hw_ctx->ring_addr + 8192 + 28, 4,
                                     AUB_MEM_TRACE_MEMORY_ADDRESS_SPACE_GGTT,
                                     "RING BUFFER TAIL");
   dword_out(aub, 16); /* RING_BUFFER_TAIL */
}

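/* Submit a context descriptor: via ELSQ and the control register on Gfx11+,
 * via four ELSP writes on Gfx8-10, then emit a poll on the execlist status
 * register (the exact bit polled differs between the two paths).
 */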
static void
aub_dump_execlist(struct aub_file *aub, const struct engine *cs, uint64_t descriptor)
{
   if (aub->devinfo.ver >= 11) {
      register_write_out(aub, cs->elsq_reg, descriptor & 0xFFFFFFFF);
      register_write_out(aub, cs->elsq_reg + sizeof(uint32_t), descriptor >> 32);
      register_write_out(aub, cs->control_reg, 1);
   } else {
      register_write_out(aub, cs->elsp_reg, 0);
      register_write_out(aub, cs->elsp_reg, 0);
      register_write_out(aub, cs->elsp_reg, descriptor >> 32);
      register_write_out(aub, cs->elsp_reg, descriptor & 0xFFFFFFFF);
   }

   dword_out(aub, CMD_MEM_TRACE_REGISTER_POLL | (5 + 1 - 1));
   dword_out(aub, cs->status_reg);
   dword_out(aub, AUB_MEM_TRACE_REGISTER_SIZE_DWORD |
                  AUB_MEM_TRACE_REGISTER_SPACE_MMIO);
   if (aub->devinfo.ver >= 11) {
      dword_out(aub, 0x00000001);   /* mask lo */
      dword_out(aub, 0x00000000);   /* mask hi */
      dword_out(aub, 0x00000001);
   } else {
      dword_out(aub, 0x00000010);   /* mask lo */
      dword_out(aub, 0x00000000);   /* mask hi */
      dword_out(aub, 0x00000000);
   }
}

static void
aub_dump_ring_buffer_legacy(struct aub_file *aub,
                            uint64_t batch_offset,
                            uint64_t offset,
                            enum intel_engine_class engine_class)
{
   uint32_t ringbuffer[4096];
   unsigned aub_mi_bbs_len;
   int ring_count = 0;
   static const int engine_class_to_ring[] = {
      [INTEL_ENGINE_CLASS_RENDER] = AUB_TRACE_TYPE_RING_PRB0,
      [INTEL_ENGINE_CLASS_VIDEO]  = AUB_TRACE_TYPE_RING_PRB1,
      [INTEL_ENGINE_CLASS_COPY]   = AUB_TRACE_TYPE_RING_PRB2,
   };
   int ring = engine_class_to_ring[engine_class];

   /* Make a ring buffer to execute our batchbuffer. */
   memset(ringbuffer, 0, sizeof(ringbuffer));

   aub_mi_bbs_len = aub->addr_bits > 32 ? 3 : 2;
   ringbuffer[ring_count] = AUB_MI_BATCH_BUFFER_START | (aub_mi_bbs_len - 2);
   aub_write_reloc(&aub->devinfo, &ringbuffer[ring_count + 1], batch_offset);
   ring_count += aub_mi_bbs_len;

   /* Write out the ring.  This appears to trigger execution of
    * the ring in the simulator.
    */
   dword_out(aub, CMD_AUB_TRACE_HEADER_BLOCK |
                  ((aub->addr_bits > 32 ? 6 : 5) - 2));
   dword_out(aub, AUB_TRACE_MEMTYPE_GTT | ring | AUB_TRACE_OP_COMMAND_WRITE);
   dword_out(aub, 0); /* general/surface subtype */
   dword_out(aub, offset);
   dword_out(aub, ring_count * 4);
   if (aub->addr_bits > 32)
      dword_out(aub, offset >> 32);

   data_out(aub, ringbuffer, ring_count * 4);
}

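/* On first use for an engine class, allocate a 4KB hardware status page in
 * the GGTT and program HWS_PGA to point at it.
 */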
static void
aub_write_ensure_hwsp(struct aub_file *aub,
                      enum intel_engine_class engine_class)
{
   uint64_t *hwsp_addr = &aub->engine_setup[engine_class].hwsp_addr;

   if (*hwsp_addr != 0)
      return;

   *hwsp_addr = alloc_ggtt_address(aub, 4096);
   write_hwsp(aub, engine_class);
}

void
aub_write_exec(struct aub_file *aub, uint32_t ctx_id, uint64_t batch_addr,
               uint64_t offset, enum intel_engine_class engine_class)
{
   const struct engine *cs = engine_from_engine_class(engine_class);

   if (aub_use_execlists(aub)) {
      struct aub_hw_context *hw_ctx =
         aub_write_ensure_context(aub, ctx_id, engine_class);
      uint64_t descriptor = get_context_descriptor(aub, cs, hw_ctx);
      aub_write_ensure_hwsp(aub, engine_class);
      aub_dump_ring_buffer_execlist(aub, hw_ctx, cs, batch_addr);
      aub_dump_execlist(aub, cs, descriptor);
   } else {
      /* Dump ring buffer */
      aub_dump_ring_buffer_legacy(aub, batch_addr, offset, engine_class);
   }
   fflush(aub->file);
}

void
aub_write_context_execlists(struct aub_file *aub, uint64_t context_addr,
                            enum intel_engine_class engine_class)
{
   const struct engine *cs = engine_from_engine_class(engine_class);
   uint64_t descriptor = ((uint64_t)1 << 62 | context_addr | CONTEXT_FLAGS);
   aub_dump_execlist(aub, cs, descriptor);
}
843