/*
 * Copyright © 2020 Valve Corporation
 *
 * SPDX-License-Identifier: MIT
 */

#include <inttypes.h>

#include "radv_buffer.h"
#include "radv_cs.h"
#include "radv_debug.h"
#include "radv_entrypoints.h"
#include "radv_perfcounter.h"
#include "radv_spm.h"
#include "radv_sqtt.h"
#include "sid.h"

#include "ac_pm4.h"

#include "vk_command_pool.h"
#include "vk_common_entrypoints.h"

#define SQTT_BUFFER_ALIGN_SHIFT 12

bool
radv_is_instruction_timing_enabled(void)
{
   return debug_get_bool_option("RADV_THREAD_TRACE_INSTRUCTION_TIMING", true);
}

bool
radv_sqtt_queue_events_enabled(void)
{
   return debug_get_bool_option("RADV_THREAD_TRACE_QUEUE_EVENTS", true);
}

static enum radv_queue_family
radv_ip_to_queue_family(enum amd_ip_type t)
{
   switch (t) {
   case AMD_IP_GFX:
      return RADV_QUEUE_GENERAL;
   case AMD_IP_COMPUTE:
      return RADV_QUEUE_COMPUTE;
   case AMD_IP_SDMA:
      return RADV_QUEUE_TRANSFER;
   default:
      unreachable("Unknown IP type");
   }
}

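/* Waits for all prior CS work (plus PS work on non-compute queues) to drain and invalidates
 * the instruction, scalar, vector and L2 caches before the trace state changes.
 */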
static void
radv_emit_wait_for_idle(const struct radv_device *device, struct radeon_cmdbuf *cs, int family)
{
   const struct radv_physical_device *pdev = radv_device_physical(device);
   const enum radv_queue_family qf = radv_ip_to_queue_family(family);
   enum rgp_flush_bits sqtt_flush_bits = 0;
   radv_cs_emit_cache_flush(
      device->ws, cs, pdev->info.gfx_level, NULL, 0, qf,
      (family == RADV_QUEUE_COMPUTE ? RADV_CMD_FLAG_CS_PARTIAL_FLUSH
                                    : (RADV_CMD_FLAG_CS_PARTIAL_FLUSH | RADV_CMD_FLAG_PS_PARTIAL_FLUSH)) |
         RADV_CMD_FLAG_INV_ICACHE | RADV_CMD_FLAG_INV_SCACHE | RADV_CMD_FLAG_INV_VCACHE | RADV_CMD_FLAG_INV_L2,
      &sqtt_flush_bits, 0);
}

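/* Emits the PM4 state that starts SQ thread tracing on this queue. */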
static void
radv_emit_sqtt_start(const struct radv_device *device, struct radeon_cmdbuf *cs, enum radv_queue_family qf)
{
   const struct radv_physical_device *pdev = radv_device_physical(device);
   const bool is_compute_queue = qf == RADV_QUEUE_COMPUTE;
   struct ac_pm4_state *pm4;

   pm4 = ac_pm4_create_sized(&pdev->info, false, 512, is_compute_queue);
   if (!pm4)
      return;

   ac_sqtt_emit_start(&pdev->info, pm4, &device->sqtt, is_compute_queue);
   ac_pm4_finalize(pm4);

   radeon_check_space(device->ws, cs, pm4->ndw);
   radeon_emit_array(cs, pm4->pm4, pm4->ndw);

   ac_pm4_free_state(pm4);
}

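/* Emits the PM4 state that stops SQ thread tracing and waits for the trace to finish; chips
 * with the RB harvest bug need a full wait-for-idle because FINISH_DONE doesn't work there.
 */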
static void
radv_emit_sqtt_stop(const struct radv_device *device, struct radeon_cmdbuf *cs, enum radv_queue_family qf)
{
   const struct radv_physical_device *pdev = radv_device_physical(device);
   const bool is_compute_queue = qf == RADV_QUEUE_COMPUTE;
   struct ac_pm4_state *pm4;

   pm4 = ac_pm4_create_sized(&pdev->info, false, 512, is_compute_queue);
   if (!pm4)
      return;

   ac_sqtt_emit_stop(&pdev->info, pm4, is_compute_queue);
   ac_pm4_finalize(pm4);

   radeon_check_space(device->ws, cs, pm4->ndw);
   radeon_emit_array(cs, pm4->pm4, pm4->ndw);

   ac_pm4_clear_state(pm4, &pdev->info, false, is_compute_queue);

   if (pdev->info.has_sqtt_rb_harvest_bug) {
      /* Some chips with disabled RBs should wait for idle because FINISH_DONE doesn't work. */
      radv_emit_wait_for_idle(device, cs, qf);
   }

   ac_sqtt_emit_wait(&pdev->info, pm4, &device->sqtt, is_compute_queue);
   ac_pm4_finalize(pm4);

   radeon_check_space(device->ws, cs, pm4->ndw);
   radeon_emit_array(cs, pm4->pm4, pm4->ndw);

   ac_pm4_free_state(pm4);
}

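/* Streams arbitrary user data into the thread trace through the SQ_THREAD_TRACE_USERDATA_2/3
 * registers, two dwords per packet. Ignored on SDMA queues, which can't emit these packets.
 */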
void
radv_emit_sqtt_userdata(const struct radv_cmd_buffer *cmd_buffer, const void *data, uint32_t num_dwords)
{
   struct radv_device *device = radv_cmd_buffer_device(cmd_buffer);
   const struct radv_physical_device *pdev = radv_device_physical(device);
   const enum amd_gfx_level gfx_level = pdev->info.gfx_level;
   const enum radv_queue_family qf = cmd_buffer->qf;
   struct radeon_cmdbuf *cs = cmd_buffer->cs;
   const uint32_t *dwords = (uint32_t *)data;

   /* SQTT user data packets aren't supported on SDMA queues. */
   if (cmd_buffer->qf == RADV_QUEUE_TRANSFER)
      return;

   while (num_dwords > 0) {
      uint32_t count = MIN2(num_dwords, 2);

      radeon_check_space(device->ws, cs, 2 + count);

      /* Without the perfctr bit the CP might not always pass the
       * write on correctly. */
      if (pdev->info.gfx_level >= GFX10)
         radeon_set_uconfig_perfctr_reg_seq(gfx_level, qf, cs, R_030D08_SQ_THREAD_TRACE_USERDATA_2, count);
      else
         radeon_set_uconfig_reg_seq(cs, R_030D08_SQ_THREAD_TRACE_USERDATA_2, count);
      radeon_emit_array(cs, dwords, count);

      dwords += count;
      num_dwords -= count;
   }
}

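/* Enables or disables the SQG TOP/BOP events that SQTT relies on, via SPI_CONFIG_CNTL
 * (a privileged register before GFX9).
 */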
void
radv_emit_spi_config_cntl(const struct radv_device *device, struct radeon_cmdbuf *cs, bool enable)
{
   const struct radv_physical_device *pdev = radv_device_physical(device);

   if (pdev->info.gfx_level >= GFX9) {
      uint32_t spi_config_cntl = S_031100_GPR_WRITE_PRIORITY(0x2c688) | S_031100_EXP_PRIORITY_ORDER(3) |
                                 S_031100_ENABLE_SQG_TOP_EVENTS(enable) | S_031100_ENABLE_SQG_BOP_EVENTS(enable);

      if (pdev->info.gfx_level >= GFX10)
         spi_config_cntl |= S_031100_PS_PKR_PRIORITY_CNTL(3);

      radeon_set_uconfig_reg(cs, R_031100_SPI_CONFIG_CNTL, spi_config_cntl);
   } else {
      /* SPI_CONFIG_CNTL is a protected register on GFX6-GFX8. */
      radeon_set_privileged_config_reg(cs, R_009100_SPI_CONFIG_CNTL,
                                       S_009100_ENABLE_SQG_TOP_EVENTS(enable) | S_009100_ENABLE_SQG_BOP_EVENTS(enable));
   }
}

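/* Inhibits or restores clock gating around a capture via RLC_PERFMON_CLK_CNTL; not needed
 * on GFX11+ and unavailable before GFX8.
 */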
void
radv_emit_inhibit_clockgating(const struct radv_device *device, struct radeon_cmdbuf *cs, bool inhibit)
{
   const struct radv_physical_device *pdev = radv_device_physical(device);

   if (pdev->info.gfx_level >= GFX11)
      return; /* not needed */

   if (pdev->info.gfx_level >= GFX10) {
      radeon_set_uconfig_reg(cs, R_037390_RLC_PERFMON_CLK_CNTL, S_037390_PERFMON_CLOCK_STATE(inhibit));
   } else if (pdev->info.gfx_level >= GFX8) {
      radeon_set_uconfig_reg(cs, R_0372FC_RLC_PERFMON_CLK_CNTL, S_0372FC_PERFMON_CLOCK_STATE(inhibit));
   }
}

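/* Hands out an 8-byte slot for a GPU timestamp from a growable, CPU-mapped GTT buffer; when
 * the current buffer is full, a new one of twice the size is allocated and the old one is
 * kept on a list until the next reset.
 */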
VkResult
radv_sqtt_acquire_gpu_timestamp(struct radv_device *device, struct radeon_winsys_bo **gpu_timestamp_bo,
                                uint32_t *gpu_timestamp_offset, void **gpu_timestamp_ptr)
{
   simple_mtx_lock(&device->sqtt_timestamp_mtx);

   if (device->sqtt_timestamp.offset + 8 > device->sqtt_timestamp.size) {
      struct radeon_winsys_bo *bo;
      uint64_t new_size;
      VkResult result;
      uint8_t *map;

      new_size = MAX2(4096, 2 * device->sqtt_timestamp.size);

      result = radv_bo_create(device, NULL, new_size, 8, RADEON_DOMAIN_GTT,
                              RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING, RADV_BO_PRIORITY_SCRATCH, 0,
                              true, &bo);
      if (result != VK_SUCCESS) {
         simple_mtx_unlock(&device->sqtt_timestamp_mtx);
         return result;
      }

      map = radv_buffer_map(device->ws, bo);
      if (!map) {
         radv_bo_destroy(device, NULL, bo);
         simple_mtx_unlock(&device->sqtt_timestamp_mtx);
         return VK_ERROR_OUT_OF_DEVICE_MEMORY;
      }

      if (device->sqtt_timestamp.bo) {
         struct radv_sqtt_timestamp *new_timestamp;

         new_timestamp = malloc(sizeof(*new_timestamp));
         if (!new_timestamp) {
            radv_bo_destroy(device, NULL, bo);
            simple_mtx_unlock(&device->sqtt_timestamp_mtx);
            return VK_ERROR_OUT_OF_HOST_MEMORY;
         }

         memcpy(new_timestamp, &device->sqtt_timestamp, sizeof(*new_timestamp));
         list_add(&new_timestamp->list, &device->sqtt_timestamp.list);
      }

      device->sqtt_timestamp.bo = bo;
      device->sqtt_timestamp.size = new_size;
      device->sqtt_timestamp.offset = 0;
      device->sqtt_timestamp.map = map;
   }

   *gpu_timestamp_bo = device->sqtt_timestamp.bo;
   *gpu_timestamp_offset = device->sqtt_timestamp.offset;
   *gpu_timestamp_ptr = device->sqtt_timestamp.map + device->sqtt_timestamp.offset;

   device->sqtt_timestamp.offset += 8;

   simple_mtx_unlock(&device->sqtt_timestamp_mtx);

   return VK_SUCCESS;
}

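/* Destroys all retired timestamp BOs and rewinds the current one. */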
static void
radv_sqtt_reset_timestamp(struct radv_device *device)
{
   simple_mtx_lock(&device->sqtt_timestamp_mtx);

   list_for_each_entry_safe (struct radv_sqtt_timestamp, ts, &device->sqtt_timestamp.list, list) {
      radv_bo_destroy(device, NULL, ts->bo);
      list_del(&ts->list);
      free(ts);
   }

   device->sqtt_timestamp.offset = 0;

   simple_mtx_unlock(&device->sqtt_timestamp_mtx);
}

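/* Creates the internal command pools (graphics, plus compute unless RADV_DEBUG_NO_COMPUTE_QUEUE
 * is set) and the timestamp state used for SQTT queue events.
 */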
static bool
radv_sqtt_init_queue_event(struct radv_device *device)
{
   const struct radv_physical_device *pdev = radv_device_physical(device);
   const struct radv_instance *instance = radv_physical_device_instance(pdev);
   VkCommandPool cmd_pool;
   VkResult result;

   const VkCommandPoolCreateInfo create_gfx_info = {
      .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
      .queueFamilyIndex = RADV_QUEUE_GENERAL, /* Graphics queue is always the first queue. */
   };

   result = vk_common_CreateCommandPool(radv_device_to_handle(device), &create_gfx_info, NULL, &cmd_pool);
   if (result != VK_SUCCESS)
      return false;

   device->sqtt_command_pool[0] = vk_command_pool_from_handle(cmd_pool);

   if (!(instance->debug_flags & RADV_DEBUG_NO_COMPUTE_QUEUE)) {
      const VkCommandPoolCreateInfo create_comp_info = {
         .sType = VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
         .queueFamilyIndex = RADV_QUEUE_COMPUTE,
      };

      result = vk_common_CreateCommandPool(radv_device_to_handle(device), &create_comp_info, NULL, &cmd_pool);
      if (result != VK_SUCCESS)
         return false;

      device->sqtt_command_pool[1] = vk_command_pool_from_handle(cmd_pool);
   }

   simple_mtx_init(&device->sqtt_command_pool_mtx, mtx_plain);

   simple_mtx_init(&device->sqtt_timestamp_mtx, mtx_plain);
   list_inithead(&device->sqtt_timestamp.list);

   return true;
}

static void
radv_sqtt_finish_queue_event(struct radv_device *device)
{
   if (device->sqtt_timestamp.bo)
      radv_bo_destroy(device, NULL, device->sqtt_timestamp.bo);

   simple_mtx_destroy(&device->sqtt_timestamp_mtx);

   for (unsigned i = 0; i < ARRAY_SIZE(device->sqtt_command_pool); i++)
      vk_common_DestroyCommandPool(radv_device_to_handle(device),
                                   vk_command_pool_to_handle(device->sqtt_command_pool[i]), NULL);

   simple_mtx_destroy(&device->sqtt_command_pool_mtx);
}

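/* Allocates, pins and maps the thread trace BO: one ac_sqtt_data_info slot per SE followed
 * by the per-SE trace buffers, everything aligned to 1 << SQTT_BUFFER_ALIGN_SHIFT.
 */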
static bool
radv_sqtt_init_bo(struct radv_device *device)
{
   const struct radv_physical_device *pdev = radv_device_physical(device);
   unsigned max_se = pdev->info.max_se;
   struct radeon_winsys *ws = device->ws;
   VkResult result;
   uint64_t size;

   /* The buffer size and address need to be aligned in HW regs. Align the
    * size as early as possible so that we do all the allocation & addressing
    * correctly. */
   device->sqtt.buffer_size = align64(device->sqtt.buffer_size, 1u << SQTT_BUFFER_ALIGN_SHIFT);

   /* Compute total size of the thread trace BO for all SEs. */
   size = align64(sizeof(struct ac_sqtt_data_info) * max_se, 1 << SQTT_BUFFER_ALIGN_SHIFT);
   size += device->sqtt.buffer_size * (uint64_t)max_se;

   struct radeon_winsys_bo *bo = NULL;
   result = radv_bo_create(device, NULL, size, 4096, RADEON_DOMAIN_VRAM,
                           RADEON_FLAG_CPU_ACCESS | RADEON_FLAG_NO_INTERPROCESS_SHARING | RADEON_FLAG_ZERO_VRAM,
                           RADV_BO_PRIORITY_SCRATCH, 0, true, &bo);
   device->sqtt.bo = bo;
   if (result != VK_SUCCESS)
      return false;

   result = ws->buffer_make_resident(ws, device->sqtt.bo, true);
   if (result != VK_SUCCESS)
      return false;

   device->sqtt.ptr = radv_buffer_map(ws, device->sqtt.bo);
   if (!device->sqtt.ptr)
      return false;

   device->sqtt.buffer_va = radv_buffer_get_va(device->sqtt.bo);

   return true;
}

static void
radv_sqtt_finish_bo(struct radv_device *device)
{
   struct radeon_winsys *ws = device->ws;

   if (unlikely(device->sqtt.bo)) {
      ws->buffer_make_resident(ws, device->sqtt.bo, false);
      radv_bo_destroy(device, NULL, device->sqtt.bo);
   }
}

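/* Records the queue in the RGP queue info chunk so submissions can be attributed to a
 * universal or compute hardware queue.
 */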
static VkResult
radv_register_queue(struct radv_device *device, struct radv_queue *queue)
{
   struct ac_sqtt *sqtt = &device->sqtt;
   struct rgp_queue_info *queue_info = &sqtt->rgp_queue_info;
   struct rgp_queue_info_record *record;

   record = malloc(sizeof(struct rgp_queue_info_record));
   if (!record)
      return VK_ERROR_OUT_OF_HOST_MEMORY;

   record->queue_id = (uintptr_t)queue;
   record->queue_context = (uintptr_t)queue->hw_ctx;
   if (queue->vk.queue_family_index == RADV_QUEUE_GENERAL) {
      record->hardware_info.queue_type = SQTT_QUEUE_TYPE_UNIVERSAL;
      record->hardware_info.engine_type = SQTT_ENGINE_TYPE_UNIVERSAL;
   } else {
      record->hardware_info.queue_type = SQTT_QUEUE_TYPE_COMPUTE;
      record->hardware_info.engine_type = SQTT_ENGINE_TYPE_COMPUTE;
   }

   simple_mtx_lock(&queue_info->lock);
   list_addtail(&record->list, &queue_info->record);
   queue_info->record_count++;
   simple_mtx_unlock(&queue_info->lock);

   return VK_SUCCESS;
}

static void
radv_unregister_queue(struct radv_device *device, struct radv_queue *queue)
{
   struct ac_sqtt *sqtt = &device->sqtt;
   struct rgp_queue_info *queue_info = &sqtt->rgp_queue_info;

   /* Destroy queue info record. */
   simple_mtx_lock(&queue_info->lock);
   if (queue_info->record_count > 0) {
      list_for_each_entry_safe (struct rgp_queue_info_record, record, &queue_info->record, list) {
         if (record->queue_id == (uintptr_t)queue) {
            queue_info->record_count--;
            list_del(&record->list);
            free(record);
            break;
         }
      }
   }
   simple_mtx_unlock(&queue_info->lock);
}

static void
radv_register_queues(struct radv_device *device, struct ac_sqtt *sqtt)
{
   if (device->queue_count[RADV_QUEUE_GENERAL] == 1)
      radv_register_queue(device, &device->queues[RADV_QUEUE_GENERAL][0]);

   for (uint32_t i = 0; i < device->queue_count[RADV_QUEUE_COMPUTE]; i++)
      radv_register_queue(device, &device->queues[RADV_QUEUE_COMPUTE][i]);
}

static void
radv_unregister_queues(struct radv_device *device, struct ac_sqtt *sqtt)
{
   if (device->queue_count[RADV_QUEUE_GENERAL] == 1)
      radv_unregister_queue(device, &device->queues[RADV_QUEUE_GENERAL][0]);

   for (uint32_t i = 0; i < device->queue_count[RADV_QUEUE_COMPUTE]; i++)
      radv_unregister_queue(device, &device->queues[RADV_QUEUE_COMPUTE][i]);
}

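/* Device-level SQTT setup: picks the per-SE buffer size (RADV_THREAD_TRACE_BUFFER_SIZE,
 * 32 MiB by default), then creates the trace BO, the queue-event state, the performance
 * counters and the RGP queue records.
 */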
bool
radv_sqtt_init(struct radv_device *device)
{
   struct ac_sqtt *sqtt = &device->sqtt;

   /* Default buffer size set to 32MB per SE. */
   device->sqtt.buffer_size = (uint32_t)debug_get_num_option("RADV_THREAD_TRACE_BUFFER_SIZE", 32 * 1024 * 1024);
   device->sqtt.instruction_timing_enabled = radv_is_instruction_timing_enabled();

   if (!radv_sqtt_init_bo(device))
      return false;

   if (!radv_sqtt_init_queue_event(device))
      return false;

   if (!radv_device_acquire_performance_counters(device))
      return false;

   ac_sqtt_init(sqtt);

   radv_register_queues(device, sqtt);

   return true;
}

void
radv_sqtt_finish(struct radv_device *device)
{
   struct ac_sqtt *sqtt = &device->sqtt;
   struct radeon_winsys *ws = device->ws;

   radv_sqtt_finish_bo(device);
   radv_sqtt_finish_queue_event(device);

   for (unsigned i = 0; i < 2; i++) {
      if (device->sqtt.start_cs[i])
         ws->cs_destroy(device->sqtt.start_cs[i]);
      if (device->sqtt.stop_cs[i])
         ws->cs_destroy(device->sqtt.stop_cs[i]);
   }

   radv_unregister_queues(device, sqtt);

   ac_sqtt_finish(sqtt);
}

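/* Recreates the thread trace BO with twice the per-SE buffer size after a capture
 * overflowed it.
 */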
static bool
radv_sqtt_resize_bo(struct radv_device *device)
{
   /* Destroy the previous thread trace BO. */
   radv_sqtt_finish_bo(device);

   /* Double the size of the thread trace buffer per SE. */
   device->sqtt.buffer_size *= 2;

   fprintf(stderr,
           "Failed to get the thread trace because the buffer "
           "was too small, resizing to %d KB\n",
           device->sqtt.buffer_size / 1024);

   /* Re-create the thread trace BO. */
   return radv_sqtt_init_bo(device);
}

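/* Builds, submits and caches the CS that starts a capture on this queue: wait-for-idle,
 * clock gating inhibited, SQG events enabled, optional SPM setup, then the SQTT start
 * packets.
 */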
bool
radv_begin_sqtt(struct radv_queue *queue)
{
   struct radv_device *device = radv_queue_device(queue);
   const struct radv_physical_device *pdev = radv_device_physical(device);
   enum radv_queue_family family = queue->state.qf;
   struct radeon_winsys *ws = device->ws;
   struct radeon_cmdbuf *cs;
   VkResult result;

   /* Destroy the previous start CS and create a new one. */
   if (device->sqtt.start_cs[family]) {
      ws->cs_destroy(device->sqtt.start_cs[family]);
      device->sqtt.start_cs[family] = NULL;
   }

   cs = ws->cs_create(ws, radv_queue_ring(queue), false);
   if (!cs)
      return false;

   radeon_check_space(ws, cs, 512);

   switch (family) {
   case RADV_QUEUE_GENERAL:
      radeon_emit(cs, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
      radeon_emit(cs, CC0_UPDATE_LOAD_ENABLES(1));
      radeon_emit(cs, CC1_UPDATE_SHADOW_ENABLES(1));
      break;
   case RADV_QUEUE_COMPUTE:
      radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
      radeon_emit(cs, 0);
      break;
   default:
      unreachable("Incorrect queue family");
      break;
   }

   /* Make sure to wait-for-idle before starting SQTT. */
   radv_emit_wait_for_idle(device, cs, family);

   /* Disable clock gating before starting SQTT. */
   radv_emit_inhibit_clockgating(device, cs, true);

   /* Enable SQG events that collect thread trace data. */
   radv_emit_spi_config_cntl(device, cs, true);

   radv_perfcounter_emit_spm_reset(cs);

   if (device->spm.bo) {
      /* Enable all shader stages by default. */
      radv_perfcounter_emit_shaders(device, cs, ac_sqtt_get_shader_mask(&pdev->info));

      radv_emit_spm_setup(device, cs, family);
   }

   /* Start SQTT. */
   radv_emit_sqtt_start(device, cs, family);

   if (device->spm.bo) {
      radeon_check_space(ws, cs, 8);
      radv_perfcounter_emit_spm_start(device, cs, family);
   }

   result = ws->cs_finalize(cs);
   if (result != VK_SUCCESS) {
      ws->cs_destroy(cs);
      return false;
   }

   device->sqtt.start_cs[family] = cs;

   return radv_queue_internal_submit(queue, cs);
}

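/* Builds, submits and caches the CS that stops a capture and restores the clock gating
 * and SPI_CONFIG_CNTL state changed by radv_begin_sqtt().
 */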
bool
radv_end_sqtt(struct radv_queue *queue)
{
   struct radv_device *device = radv_queue_device(queue);
   enum radv_queue_family family = queue->state.qf;
   struct radeon_winsys *ws = device->ws;
   struct radeon_cmdbuf *cs;
   VkResult result;

   /* Destroy the previous stop CS and create a new one. */
   if (device->sqtt.stop_cs[family]) {
      ws->cs_destroy(device->sqtt.stop_cs[family]);
      device->sqtt.stop_cs[family] = NULL;
   }

   cs = ws->cs_create(ws, radv_queue_ring(queue), false);
   if (!cs)
      return false;

   radeon_check_space(ws, cs, 512);

   switch (family) {
   case RADV_QUEUE_GENERAL:
      radeon_emit(cs, PKT3(PKT3_CONTEXT_CONTROL, 1, 0));
      radeon_emit(cs, CC0_UPDATE_LOAD_ENABLES(1));
      radeon_emit(cs, CC1_UPDATE_SHADOW_ENABLES(1));
      break;
   case RADV_QUEUE_COMPUTE:
      radeon_emit(cs, PKT3(PKT3_NOP, 0, 0));
      radeon_emit(cs, 0);
      break;
   default:
      unreachable("Incorrect queue family");
      break;
   }

   /* Make sure to wait-for-idle before stopping SQTT. */
   radv_emit_wait_for_idle(device, cs, family);

   if (device->spm.bo) {
      radeon_check_space(ws, cs, 8);
      radv_perfcounter_emit_spm_stop(device, cs, family);
   }

   /* Stop SQTT. */
   radv_emit_sqtt_stop(device, cs, family);

   radv_perfcounter_emit_spm_reset(cs);

   /* Restore previous state by disabling SQG events. */
   radv_emit_spi_config_cntl(device, cs, false);

   /* Restore previous state by re-enabling clock gating. */
   radv_emit_inhibit_clockgating(device, cs, false);

   result = ws->cs_finalize(cs);
   if (result != VK_SUCCESS) {
      ws->cs_destroy(cs);
      return false;
   }

   device->sqtt.stop_cs[family] = cs;

   return radv_queue_internal_submit(queue, cs);
}

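/* Retrieves the captured trace; on failure (typically a too-small buffer), the trace BO is
 * resized so the caller can retry the capture.
 */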
bool
radv_get_sqtt_trace(struct radv_queue *queue, struct ac_sqtt_trace *sqtt_trace)
{
   struct radv_device *device = radv_queue_device(queue);
   const struct radv_physical_device *pdev = radv_device_physical(device);
   const struct radeon_info *gpu_info = &pdev->info;

   if (!ac_sqtt_get_trace(&device->sqtt, gpu_info, sqtt_trace)) {
      if (!radv_sqtt_resize_bo(device))
         fprintf(stderr, "radv: Failed to resize the SQTT buffer.\n");
      return false;
   }

   return true;
}

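/* Drops all per-capture state: clock calibration records, queue event records, GPU
 * timestamps and the memory held by the internal command pools.
 */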
void
radv_reset_sqtt_trace(struct radv_device *device)
{
   struct ac_sqtt *sqtt = &device->sqtt;
   struct rgp_clock_calibration *clock_calibration = &sqtt->rgp_clock_calibration;
   struct rgp_queue_event *queue_event = &sqtt->rgp_queue_event;

   /* Clear clock calibration records. */
   simple_mtx_lock(&clock_calibration->lock);
   list_for_each_entry_safe (struct rgp_clock_calibration_record, record, &clock_calibration->record, list) {
      clock_calibration->record_count--;
      list_del(&record->list);
      free(record);
   }
   simple_mtx_unlock(&clock_calibration->lock);

   /* Clear queue event records. */
   simple_mtx_lock(&queue_event->lock);
   list_for_each_entry_safe (struct rgp_queue_event_record, record, &queue_event->record, list) {
      list_del(&record->list);
      free(record);
   }
   queue_event->record_count = 0;
   simple_mtx_unlock(&queue_event->lock);

   /* Clear timestamps. */
   radv_sqtt_reset_timestamp(device);

   /* Clear timed cmdbufs. */
   simple_mtx_lock(&device->sqtt_command_pool_mtx);
   for (unsigned i = 0; i < ARRAY_SIZE(device->sqtt_command_pool); i++) {
      /* If RADV_DEBUG_NO_COMPUTE_QUEUE is used, there's no compute sqtt command pool */
      if (device->sqtt_command_pool[i])
         vk_common_TrimCommandPool(radv_device_to_handle(device),
                                   vk_command_pool_to_handle(device->sqtt_command_pool[i]), 0);
   }
   simple_mtx_unlock(&device->sqtt_command_pool_mtx);
}

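/* Samples a matching CPU (CLOCK_MONOTONIC) / GPU timestamp pair through
 * vkGetCalibratedTimestampsKHR.
 */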
static VkResult
radv_get_calibrated_timestamps(struct radv_device *device, uint64_t *cpu_timestamp, uint64_t *gpu_timestamp)
{
   uint64_t timestamps[2];
   uint64_t max_deviation;
   VkResult result;

   const VkCalibratedTimestampInfoKHR timestamp_infos[2] = {{
                                                               .sType = VK_STRUCTURE_TYPE_CALIBRATED_TIMESTAMP_INFO_KHR,
                                                               .timeDomain = VK_TIME_DOMAIN_CLOCK_MONOTONIC_KHR,
                                                            },
                                                            {
                                                               .sType = VK_STRUCTURE_TYPE_CALIBRATED_TIMESTAMP_INFO_KHR,
                                                               .timeDomain = VK_TIME_DOMAIN_DEVICE_KHR,
                                                            }};

   result =
      radv_GetCalibratedTimestampsKHR(radv_device_to_handle(device), 2, timestamp_infos, timestamps, &max_deviation);
   if (result != VK_SUCCESS)
      return result;

   *cpu_timestamp = timestamps[0];
   *gpu_timestamp = timestamps[1];

   return result;
}

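/* Records one CPU/GPU clock calibration sample for the RGP trace. */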
bool
radv_sqtt_sample_clocks(struct radv_device *device)
{
   uint64_t cpu_timestamp = 0, gpu_timestamp = 0;
   VkResult result;

   result = radv_get_calibrated_timestamps(device, &cpu_timestamp, &gpu_timestamp);
   if (result != VK_SUCCESS)
      return false;

   return ac_sqtt_add_clock_calibration(&device->sqtt, cpu_timestamp, gpu_timestamp);
}

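/* Allocates and records a small one-time-submit command buffer that writes a GPU timestamp
 * for the given stage into timestamp_bo at timestamp_offset.
 */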
VkResult
radv_sqtt_get_timed_cmdbuf(struct radv_queue *queue, struct radeon_winsys_bo *timestamp_bo, uint32_t timestamp_offset,
                           VkPipelineStageFlags2 timestamp_stage, VkCommandBuffer *pcmdbuf)
{
   struct radv_device *device = radv_queue_device(queue);
   enum radv_queue_family queue_family = queue->state.qf;
   VkCommandBuffer cmdbuf;
   uint64_t timestamp_va;
   VkResult result;

   assert(queue_family == RADV_QUEUE_GENERAL || queue_family == RADV_QUEUE_COMPUTE);

   simple_mtx_lock(&device->sqtt_command_pool_mtx);

   const VkCommandBufferAllocateInfo alloc_info = {
      .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
      .commandPool = vk_command_pool_to_handle(device->sqtt_command_pool[queue_family]),
      .level = VK_COMMAND_BUFFER_LEVEL_PRIMARY,
      .commandBufferCount = 1,
   };

   result = vk_common_AllocateCommandBuffers(radv_device_to_handle(device), &alloc_info, &cmdbuf);
   if (result != VK_SUCCESS)
      goto fail;

   const VkCommandBufferBeginInfo begin_info = {
      .sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
      .flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT,
   };

   result = radv_BeginCommandBuffer(cmdbuf, &begin_info);
   if (result != VK_SUCCESS)
      goto fail;

   radeon_check_space(device->ws, radv_cmd_buffer_from_handle(cmdbuf)->cs, 28);

   timestamp_va = radv_buffer_get_va(timestamp_bo) + timestamp_offset;

   radv_cs_add_buffer(device->ws, radv_cmd_buffer_from_handle(cmdbuf)->cs, timestamp_bo);

   radv_write_timestamp(radv_cmd_buffer_from_handle(cmdbuf), timestamp_va, timestamp_stage);

   result = radv_EndCommandBuffer(cmdbuf);
   if (result != VK_SUCCESS)
      goto fail;

   *pcmdbuf = cmdbuf;

fail:
   simple_mtx_unlock(&device->sqtt_command_pool_mtx);
   return result;
}
788