1 /*
2 * Copyright © 2018 Intel Corporation
3 *
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
10 *
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
13 * Software.
14 *
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21 * IN THE SOFTWARE.
22 */
23
24 #include <dirent.h>
25
26 #include <sys/types.h>
27 #include <sys/stat.h>
28
29 #if defined(MAJOR_IN_SYSMACROS)
30 #include <sys/sysmacros.h>
31 #elif defined(MAJOR_IN_MKDEV)
32 #include <sys/mkdev.h>
33 #endif
34
35 #include <fcntl.h>
36 #include <unistd.h>
37 #include <errno.h>
38
39 #ifndef HAVE_DIRENT_D_TYPE
40 #include <limits.h> // PATH_MAX
41 #endif
42
43 #include "common/intel_gem.h"
44 #include "common/i915/intel_gem.h"
45
46 #include "dev/intel_debug.h"
47 #include "dev/intel_device_info.h"
48
49 #include "perf/i915/intel_perf.h"
50 #include "perf/xe/intel_perf.h"
51 #include "perf/intel_perf.h"
52 #include "perf/intel_perf_common.h"
53 #include "perf/intel_perf_regs.h"
54 #include "perf/intel_perf_mdapi.h"
55 #include "perf/intel_perf_metrics.h"
56 #include "perf/intel_perf_private.h"
57
58 #include "perf/i915/intel_perf.h"
59 #include "perf/xe/intel_perf.h"
60
61 #include "util/bitscan.h"
62 #include "util/macros.h"
63 #include "util/mesa-sha1.h"
64 #include "util/u_debug.h"
65 #include "util/u_math.h"
66
67 #define FILE_DEBUG_FLAG DEBUG_PERFMON
68
69 static bool
70 is_dir_or_link(const struct dirent *entry, const char *parent_dir)
71 {
72 #ifdef HAVE_DIRENT_D_TYPE
73 return entry->d_type == DT_DIR || entry->d_type == DT_LNK;
74 #else
75 struct stat st;
76 char path[PATH_MAX + 1];
77 snprintf(path, sizeof(path), "%s/%s", parent_dir, entry->d_name);
78 lstat(path, &st);
79 return S_ISDIR(st.st_mode) || S_ISLNK(st.st_mode);
80 #endif
81 }
82
83 static bool
84 get_sysfs_dev_dir(struct intel_perf_config *perf, int fd)
85 {
86 struct stat sb;
87 int min, maj;
88 DIR *drmdir;
89 struct dirent *drm_entry;
90 int len;
91
92 perf->sysfs_dev_dir[0] = '\0';
93
94 if (INTEL_DEBUG(DEBUG_NO_OACONFIG))
95 return true;
96
97 if (fstat(fd, &sb)) {
98 DBG("Failed to stat DRM fd\n");
99 return false;
100 }
101
102 maj = major(sb.st_rdev);
103 min = minor(sb.st_rdev);
104
105 if (!S_ISCHR(sb.st_mode)) {
106 DBG("DRM fd is not a character device as expected\n");
107 return false;
108 }
109
110 len = snprintf(perf->sysfs_dev_dir,
111 sizeof(perf->sysfs_dev_dir),
112 "/sys/dev/char/%d:%d/device/drm", maj, min);
113 if (len < 0 || len >= sizeof(perf->sysfs_dev_dir)) {
114 DBG("Failed to concatenate sysfs path to drm device\n");
115 return false;
116 }
117
118 drmdir = opendir(perf->sysfs_dev_dir);
119 if (!drmdir) {
120 DBG("Failed to open %s: %m\n", perf->sysfs_dev_dir);
121 return false;
122 }
123
124 while ((drm_entry = readdir(drmdir))) {
125 if (is_dir_or_link(drm_entry, perf->sysfs_dev_dir) &&
126 strncmp(drm_entry->d_name, "card", 4) == 0)
127 {
128 len = snprintf(perf->sysfs_dev_dir,
129 sizeof(perf->sysfs_dev_dir),
130 "/sys/dev/char/%d:%d/device/drm/%s",
131 maj, min, drm_entry->d_name);
132 closedir(drmdir);
133 if (len < 0 || len >= sizeof(perf->sysfs_dev_dir))
134 return false;
135 else
136 return true;
137 }
138 }
139
140 closedir(drmdir);
141
142 DBG("Failed to find cardX directory under /sys/dev/char/%d:%d/device/drm\n",
143 maj, min);
144
145 return false;
146 }
147
148 static bool
149 read_sysfs_drm_device_file_uint64(struct intel_perf_config *perf,
150 const char *file,
151 uint64_t *value)
152 {
153 char buf[512];
154 int len;
155
156 len = snprintf(buf, sizeof(buf), "%s/%s", perf->sysfs_dev_dir, file);
157 if (len < 0 || len >= sizeof(buf)) {
158 DBG("Failed to concatenate sys filename to read u64 from\n");
159 return false;
160 }
161
162 return read_file_uint64(buf, value);
163 }
164
165 static bool
166 oa_config_enabled(struct intel_perf_config *perf,
167 const struct intel_perf_query_info *query) {
168 // Hide extended metrics unless enabled with env param
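// (perf->enable_all_metrics is set from the INTEL_EXTENDED_METRICS
// environment option read in oa_metrics_available(); when it is false,
// query names beginning with "Ext" are filtered out here.)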
169 bool is_extended_metric = strncmp(query->name, "Ext", 3) == 0;
170
171 return perf->enable_all_metrics || !is_extended_metric;
172 }
173
174 static void
175 register_oa_config(struct intel_perf_config *perf,
176 const struct intel_device_info *devinfo,
177 const struct intel_perf_query_info *query,
178 uint64_t config_id)
179 {
180 if (!oa_config_enabled(perf, query))
181 return;
182
183 struct intel_perf_query_info *registered_query =
184 intel_perf_append_query_info(perf, 0);
185
186 *registered_query = *query;
187 registered_query->oa_metrics_set_id = config_id;
188 DBG("metric set registered: id = %" PRIu64", guid = %s\n",
189 registered_query->oa_metrics_set_id, query->guid);
190 }
191
192 static void
193 enumerate_sysfs_metrics(struct intel_perf_config *perf,
194 const struct intel_device_info *devinfo)
195 {
196 DIR *metricsdir = NULL;
197 struct dirent *metric_entry;
198 char buf[256];
199 int len;
200
201 len = snprintf(buf, sizeof(buf), "%s/metrics", perf->sysfs_dev_dir);
202 if (len < 0 || len >= sizeof(buf)) {
203 DBG("Failed to concatenate path to sysfs metrics/ directory\n");
204 return;
205 }
206
207 metricsdir = opendir(buf);
208 if (!metricsdir) {
209 DBG("Failed to open %s: %m\n", buf);
210 return;
211 }
212
213 while ((metric_entry = readdir(metricsdir))) {
214 struct hash_entry *entry;
215 if (!is_dir_or_link(metric_entry, buf) ||
216 metric_entry->d_name[0] == '.')
217 continue;
218
219 DBG("metric set: %s\n", metric_entry->d_name);
220 entry = _mesa_hash_table_search(perf->oa_metrics_table,
221 metric_entry->d_name);
222 if (entry) {
223 uint64_t id;
224 if (!intel_perf_load_metric_id(perf, metric_entry->d_name, &id)) {
225 DBG("Failed to read metric set id from %s: %m", buf);
226 continue;
227 }
228
229 register_oa_config(perf, devinfo,
230 (const struct intel_perf_query_info *)entry->data, id);
231 } else
232 DBG("metric set not known by mesa (skipping)\n");
233 }
234
235 closedir(metricsdir);
236 }
237
238 static void
239 add_all_metrics(struct intel_perf_config *perf,
240 const struct intel_device_info *devinfo)
241 {
242 hash_table_foreach(perf->oa_metrics_table, entry) {
243 const struct intel_perf_query_info *query = entry->data;
244 register_oa_config(perf, devinfo, query, 0);
245 }
246 }
247
248 static bool
249 kernel_has_dynamic_config_support(struct intel_perf_config *perf, int fd)
250 {
251 switch (perf->devinfo->kmd_type) {
252 case INTEL_KMD_TYPE_I915:
253 return i915_has_dynamic_config_support(perf, fd);
254 case INTEL_KMD_TYPE_XE:
255 return true;
256 default:
257 unreachable("missing");
258 return false;
259 }
260 }
261
262 bool
263 intel_perf_load_metric_id(struct intel_perf_config *perf_cfg,
264 const char *guid,
265 uint64_t *metric_id)
266 {
267 char config_path[280];
268
269 snprintf(config_path, sizeof(config_path), "%s/metrics/%s/id",
270 perf_cfg->sysfs_dev_dir, guid);
271
272 /* Don't recreate already loaded configs. */
273 return read_file_uint64(config_path, metric_id);
274 }
275
276 static uint64_t
277 kmd_add_config(struct intel_perf_config *perf, int fd,
278 const struct intel_perf_registers *config,
279 const char *guid)
280 {
281 switch (perf->devinfo->kmd_type) {
282 case INTEL_KMD_TYPE_I915:
283 return i915_add_config(perf, fd, config, guid);
284 case INTEL_KMD_TYPE_XE:
285 return xe_add_config(perf, fd, config, guid);
286 default:
287 unreachable("missing");
288 return 0;
289 }
290 }
291
292 static void
293 init_oa_configs(struct intel_perf_config *perf, int fd,
294 const struct intel_device_info *devinfo)
295 {
296 hash_table_foreach(perf->oa_metrics_table, entry) {
297 const struct intel_perf_query_info *query = entry->data;
298 uint64_t config_id;
299
300 if (intel_perf_load_metric_id(perf, query->guid, &config_id)) {
301 DBG("metric set: %s (already loaded)\n", query->guid);
302 register_oa_config(perf, devinfo, query, config_id);
303 continue;
304 }
305
306 uint64_t ret = kmd_add_config(perf, fd, &query->config, query->guid);
307 if (ret == 0) {
308 DBG("Failed to load \"%s\" (%s) metrics set in kernel: %s\n",
309 query->name, query->guid, strerror(errno));
310 continue;
311 }
312
313 register_oa_config(perf, devinfo, query, ret);
314 DBG("metric set: %s (added)\n", query->guid);
315 }
316 }
317
318 static void
319 compute_topology_builtins(struct intel_perf_config *perf)
320 {
321 const struct intel_device_info *devinfo = perf->devinfo;
322
323 perf->sys_vars.slice_mask = devinfo->slice_masks;
324 perf->sys_vars.n_eu_slices = devinfo->num_slices;
325
326 perf->sys_vars.n_eu_slice0123 = 0;
327 for (int s = 0; s < MIN2(4, devinfo->max_slices); s++) {
328 if (!intel_device_info_slice_available(devinfo, s))
329 continue;
330
331 for (int ss = 0; ss < devinfo->max_subslices_per_slice; ss++) {
332 if (!intel_device_info_subslice_available(devinfo, s, ss))
333 continue;
334
335 for (int eu = 0; eu < devinfo->max_eus_per_subslice; eu++) {
336 if (intel_device_info_eu_available(devinfo, s, ss, eu))
337 perf->sys_vars.n_eu_slice0123++;
338 }
339 }
340 }
341
342 perf->sys_vars.n_eu_sub_slices = intel_device_info_subslice_total(devinfo);
343 perf->sys_vars.n_eus = intel_device_info_eu_total(devinfo);
344
345 /* The subslice mask builtin contains bits for all slices. Prior to Gfx11
346 * it had groups of 3 bits for each slice; on Gfx11 and above it's 8 bits for
347 * each slice.
348 *
349 * Ideally equations would be updated to have a slice/subslice query
350 * function/operator.
351 */
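   /* For example (illustrative only): subslice 1 of slice 2 maps to bit
    * (2 * 3 + 1) = 7 before Gfx11, and to bit (2 * 8 + 1) = 17 on Gfx11+.
    */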
352 perf->sys_vars.subslice_mask = 0;
353
354 int bits_per_subslice = devinfo->ver >= 11 ? 8 : 3;
355
356 for (int s = 0; s < util_last_bit(devinfo->slice_masks); s++) {
357 for (int ss = 0; ss < (devinfo->subslice_slice_stride * 8); ss++) {
358 if (intel_device_info_subslice_available(devinfo, s, ss))
359 perf->sys_vars.subslice_mask |= 1ULL << (s * bits_per_subslice + ss);
360 }
361 }
362 }
363
364 static bool
365 init_oa_sys_vars(struct intel_perf_config *perf,
366 bool use_register_snapshots)
367 {
368 uint64_t min_freq_mhz = 0, max_freq_mhz = 0;
369
370 if (!INTEL_DEBUG(DEBUG_NO_OACONFIG)) {
371 const char *min_file, *max_file;
372
373 switch (perf->devinfo->kmd_type) {
374 case INTEL_KMD_TYPE_I915:
375 min_file = "gt_min_freq_mhz";
376 max_file = "gt_max_freq_mhz";
377 break;
378 case INTEL_KMD_TYPE_XE:
379 min_file = "device/tile0/gt0/freq0/min_freq";
380 max_file = "device/tile0/gt0/freq0/max_freq";
381 break;
382 default:
383 unreachable("missing");
384 return false;
385 }
386
387 if (!read_sysfs_drm_device_file_uint64(perf, min_file, &min_freq_mhz))
388 return false;
389
390 if (!read_sysfs_drm_device_file_uint64(perf, max_file, &max_freq_mhz))
391 return false;
392 } else {
393 min_freq_mhz = 300;
394 max_freq_mhz = 1000;
395 }
396
397 memset(&perf->sys_vars, 0, sizeof(perf->sys_vars));
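   /* The sysfs files report MHz; the metric equation builtins expect Hz. */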
398 perf->sys_vars.gt_min_freq = min_freq_mhz * 1000000;
399 perf->sys_vars.gt_max_freq = max_freq_mhz * 1000000;
400 perf->sys_vars.query_mode = use_register_snapshots;
401 compute_topology_builtins(perf);
402
403 return true;
404 }
405
406 typedef void (*perf_register_oa_queries_t)(struct intel_perf_config *);
407
408 static perf_register_oa_queries_t
409 get_register_queries_function(const struct intel_device_info *devinfo)
410 {
411 switch (devinfo->platform) {
412 case INTEL_PLATFORM_HSW:
413 return intel_oa_register_queries_hsw;
414 case INTEL_PLATFORM_CHV:
415 return intel_oa_register_queries_chv;
416 case INTEL_PLATFORM_BDW:
417 return intel_oa_register_queries_bdw;
418 case INTEL_PLATFORM_BXT:
419 return intel_oa_register_queries_bxt;
420 case INTEL_PLATFORM_SKL:
421 if (devinfo->gt == 2)
422 return intel_oa_register_queries_sklgt2;
423 if (devinfo->gt == 3)
424 return intel_oa_register_queries_sklgt3;
425 if (devinfo->gt == 4)
426 return intel_oa_register_queries_sklgt4;
427 return NULL;
428 case INTEL_PLATFORM_KBL:
429 if (devinfo->gt == 2)
430 return intel_oa_register_queries_kblgt2;
431 if (devinfo->gt == 3)
432 return intel_oa_register_queries_kblgt3;
433 return NULL;
434 case INTEL_PLATFORM_GLK:
435 return intel_oa_register_queries_glk;
436 case INTEL_PLATFORM_CFL:
437 if (devinfo->gt == 2)
438 return intel_oa_register_queries_cflgt2;
439 if (devinfo->gt == 3)
440 return intel_oa_register_queries_cflgt3;
441 return NULL;
442 case INTEL_PLATFORM_ICL:
443 return intel_oa_register_queries_icl;
444 case INTEL_PLATFORM_EHL:
445 return intel_oa_register_queries_ehl;
446 case INTEL_PLATFORM_TGL:
447 if (devinfo->gt == 1)
448 return intel_oa_register_queries_tglgt1;
449 if (devinfo->gt == 2)
450 return intel_oa_register_queries_tglgt2;
451 return NULL;
452 case INTEL_PLATFORM_RKL:
453 return intel_oa_register_queries_rkl;
454 case INTEL_PLATFORM_DG1:
455 return intel_oa_register_queries_dg1;
456 case INTEL_PLATFORM_ADL:
457 case INTEL_PLATFORM_RPL:
458 return intel_oa_register_queries_adl;
459 case INTEL_PLATFORM_DG2_G10:
460 return intel_oa_register_queries_acmgt3;
461 case INTEL_PLATFORM_DG2_G11:
462 return intel_oa_register_queries_acmgt1;
463 case INTEL_PLATFORM_DG2_G12:
464 return intel_oa_register_queries_acmgt2;
465 case INTEL_PLATFORM_MTL_U:
466 case INTEL_PLATFORM_MTL_H:
467 if (intel_device_info_eu_total(devinfo) <= 64)
468 return intel_oa_register_queries_mtlgt2;
469 if (intel_device_info_eu_total(devinfo) <= 128)
470 return intel_oa_register_queries_mtlgt3;
471 return NULL;
472 case INTEL_PLATFORM_LNL:
473 return intel_oa_register_queries_lnl;
474 default:
475 return NULL;
476 }
477 }
478
479 static int
480 intel_perf_compare_counter_names(const void *v1, const void *v2)
481 {
482 const struct intel_perf_query_counter *c1 = v1;
483 const struct intel_perf_query_counter *c2 = v2;
484
485 return strcmp(c1->name, c2->name);
486 }
487
488 static void
489 sort_query(struct intel_perf_query_info *q)
490 {
491 qsort(q->counters, q->n_counters, sizeof(q->counters[0]),
492 intel_perf_compare_counter_names);
493 }
494
495 static void
496 load_pipeline_statistic_metrics(struct intel_perf_config *perf_cfg,
497 const struct intel_device_info *devinfo)
498 {
499 struct intel_perf_query_info *query =
500 intel_perf_append_query_info(perf_cfg, MAX_STAT_COUNTERS);
501
502 query->kind = INTEL_PERF_QUERY_TYPE_PIPELINE;
503 query->name = "Pipeline Statistics Registers";
504
505 intel_perf_query_add_basic_stat_reg(query, IA_VERTICES_COUNT,
506 "N vertices submitted");
507 intel_perf_query_add_basic_stat_reg(query, IA_PRIMITIVES_COUNT,
508 "N primitives submitted");
509 intel_perf_query_add_basic_stat_reg(query, VS_INVOCATION_COUNT,
510 "N vertex shader invocations");
511
512 if (devinfo->ver == 6) {
513 intel_perf_query_add_stat_reg(query, GFX6_SO_PRIM_STORAGE_NEEDED, 1, 1,
514 "SO_PRIM_STORAGE_NEEDED",
515 "N geometry shader stream-out primitives (total)");
516 intel_perf_query_add_stat_reg(query, GFX6_SO_NUM_PRIMS_WRITTEN, 1, 1,
517 "SO_NUM_PRIMS_WRITTEN",
518 "N geometry shader stream-out primitives (written)");
519 } else {
520 intel_perf_query_add_stat_reg(query, GFX7_SO_PRIM_STORAGE_NEEDED(0), 1, 1,
521 "SO_PRIM_STORAGE_NEEDED (Stream 0)",
522 "N stream-out (stream 0) primitives (total)");
523 intel_perf_query_add_stat_reg(query, GFX7_SO_PRIM_STORAGE_NEEDED(1), 1, 1,
524 "SO_PRIM_STORAGE_NEEDED (Stream 1)",
525 "N stream-out (stream 1) primitives (total)");
526 intel_perf_query_add_stat_reg(query, GFX7_SO_PRIM_STORAGE_NEEDED(2), 1, 1,
527 "SO_PRIM_STORAGE_NEEDED (Stream 2)",
528 "N stream-out (stream 2) primitives (total)");
529 intel_perf_query_add_stat_reg(query, GFX7_SO_PRIM_STORAGE_NEEDED(3), 1, 1,
530 "SO_PRIM_STORAGE_NEEDED (Stream 3)",
531 "N stream-out (stream 3) primitives (total)");
532 intel_perf_query_add_stat_reg(query, GFX7_SO_NUM_PRIMS_WRITTEN(0), 1, 1,
533 "SO_NUM_PRIMS_WRITTEN (Stream 0)",
534 "N stream-out (stream 0) primitives (written)");
535 intel_perf_query_add_stat_reg(query, GFX7_SO_NUM_PRIMS_WRITTEN(1), 1, 1,
536 "SO_NUM_PRIMS_WRITTEN (Stream 1)",
537 "N stream-out (stream 1) primitives (written)");
538 intel_perf_query_add_stat_reg(query, GFX7_SO_NUM_PRIMS_WRITTEN(2), 1, 1,
539 "SO_NUM_PRIMS_WRITTEN (Stream 2)",
540 "N stream-out (stream 2) primitives (written)");
541 intel_perf_query_add_stat_reg(query, GFX7_SO_NUM_PRIMS_WRITTEN(3), 1, 1,
542 "SO_NUM_PRIMS_WRITTEN (Stream 3)",
543 "N stream-out (stream 3) primitives (written)");
544 }
545
546 intel_perf_query_add_basic_stat_reg(query, HS_INVOCATION_COUNT,
547 "N TCS shader invocations");
548 intel_perf_query_add_basic_stat_reg(query, DS_INVOCATION_COUNT,
549 "N TES shader invocations");
550
551 intel_perf_query_add_basic_stat_reg(query, GS_INVOCATION_COUNT,
552 "N geometry shader invocations");
553 intel_perf_query_add_basic_stat_reg(query, GS_PRIMITIVES_COUNT,
554 "N geometry shader primitives emitted");
555
556 intel_perf_query_add_basic_stat_reg(query, CL_INVOCATION_COUNT,
557 "N primitives entering clipping");
558 intel_perf_query_add_basic_stat_reg(query, CL_PRIMITIVES_COUNT,
559 "N primitives leaving clipping");
560
561 if (devinfo->verx10 == 75 || devinfo->ver == 8) {
562 intel_perf_query_add_stat_reg(query, PS_INVOCATION_COUNT, 1, 4,
563 "N fragment shader invocations",
564 "N fragment shader invocations");
565 } else {
566 intel_perf_query_add_basic_stat_reg(query, PS_INVOCATION_COUNT,
567 "N fragment shader invocations");
568 }
569
570 intel_perf_query_add_basic_stat_reg(query, PS_DEPTH_COUNT,
571 "N z-pass fragments");
572
573 if (devinfo->ver >= 7) {
574 intel_perf_query_add_basic_stat_reg(query, CS_INVOCATION_COUNT,
575 "N compute shader invocations");
576 }
577
578 query->data_size = sizeof(uint64_t) * query->n_counters;
579
580 sort_query(query);
581 }
582
583 static inline int
584 compare_str_or_null(const char *s1, const char *s2)
585 {
586 if (s1 == NULL && s2 == NULL)
587 return 0;
588 if (s1 == NULL)
589 return -1;
590 if (s2 == NULL)
591 return 1;
592
593 return strcmp(s1, s2);
594 }
595
596 static int
597 compare_counter_categories_and_names(const void *_c1, const void *_c2)
598 {
599 const struct intel_perf_query_counter_info *c1 = (const struct intel_perf_query_counter_info *)_c1;
600 const struct intel_perf_query_counter_info *c2 = (const struct intel_perf_query_counter_info *)_c2;
601
602 /* pipeline counters don't have an assigned category */
603 int r = compare_str_or_null(c1->counter->category, c2->counter->category);
604 if (r)
605 return r;
606
607 return strcmp(c1->counter->name, c2->counter->name);
608 }
609
610 static void
611 build_unique_counter_list(struct intel_perf_config *perf)
612 {
613 size_t max_counters = 0;
614
615 for (int q = 0; q < perf->n_queries; q++)
616 max_counters += perf->queries[q].n_counters;
617
618 /*
619 * Allocate an array big enough to hold the maximum possible number of counters.
620 * We can't alloc it small and realloc when needed because the hash table
621 * below contains pointers to this array.
622 */
623 struct intel_perf_query_counter_info *counter_infos =
624 rzalloc_array_size(perf, sizeof(counter_infos[0]), max_counters);
625
626 perf->n_counters = 0;
627
628 struct hash_table *counters_table =
629 _mesa_hash_table_create(NULL,
630 _mesa_hash_string,
631 _mesa_key_string_equal);
632 struct hash_entry *entry;
633 for (int q = 0; q < perf->n_queries ; q++) {
634 struct intel_perf_query_info *query = &perf->queries[q];
635
636 for (int c = 0; c < query->n_counters; c++) {
637 struct intel_perf_query_counter *counter;
638 struct intel_perf_query_counter_info *counter_info;
639
640 counter = &query->counters[c];
641 entry = _mesa_hash_table_search(counters_table, counter->symbol_name);
642
643 if (entry) {
644 counter_info = entry->data;
645 BITSET_SET(counter_info->query_mask, q);
646 continue;
647 }
648 assert(perf->n_counters < max_counters);
649
650 counter_info = &counter_infos[perf->n_counters++];
651 counter_info->counter = counter;
652 BITSET_SET(counter_info->query_mask, q);
653
654 counter_info->location.group_idx = q;
655 counter_info->location.counter_idx = c;
656
657 _mesa_hash_table_insert(counters_table, counter->symbol_name, counter_info);
658 }
659 }
660
661 _mesa_hash_table_destroy(counters_table, NULL);
662
663 perf->counter_infos = counter_infos;
664
665 qsort(perf->counter_infos, perf->n_counters, sizeof(perf->counter_infos[0]),
666 compare_counter_categories_and_names);
667 }
668
669 static bool
670 oa_metrics_available(struct intel_perf_config *perf, int fd,
671 const struct intel_device_info *devinfo,
672 bool use_register_snapshots)
673 {
674 perf_register_oa_queries_t oa_register = get_register_queries_function(devinfo);
675 bool oa_metrics_available = false;
676
677 perf->devinfo = devinfo;
678
679 /* Consider an invalid fd as supported. */
680 if (fd == -1) {
681 perf->features_supported = INTEL_PERF_FEATURE_QUERY_PERF;
682 return true;
683 }
684
685 perf->enable_all_metrics = debug_get_bool_option("INTEL_EXTENDED_METRICS", false);
686
687 /* TODO: We should query this from i915?
688 * Looks like Xe2 platforms don't need it, but we don't have a spec quote to
689 * back that up.
690 */
691 if (devinfo->verx10 == 125)
692 perf->oa_timestamp_shift = 1;
693
694 perf->oa_timestamp_mask =
695 0xffffffffffffffffull >> (32 + perf->oa_timestamp_shift);
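   /* With a shift of 1 this yields a 31-bit mask (0x7fffffff); with no shift
    * it is the full 32-bit mask (0xffffffff).
    */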
696
697 switch (devinfo->kmd_type) {
698 case INTEL_KMD_TYPE_I915:
699 oa_metrics_available = i915_oa_metrics_available(perf, fd, use_register_snapshots);
700 break;
701 case INTEL_KMD_TYPE_XE:
702 oa_metrics_available = xe_oa_metrics_available(perf, fd, use_register_snapshots);
703 break;
704 default:
705 unreachable("missing");
706 break;
707 }
708
709 return oa_metrics_available &&
710 oa_register &&
711 get_sysfs_dev_dir(perf, fd) &&
712 init_oa_sys_vars(perf, use_register_snapshots);
713 }
714
715 static void
716 load_oa_metrics(struct intel_perf_config *perf, int fd,
717 const struct intel_device_info *devinfo)
718 {
719 int existing_queries = perf->n_queries;
720
721 perf_register_oa_queries_t oa_register = get_register_queries_function(devinfo);
722
723 perf->oa_metrics_table =
724 _mesa_hash_table_create(perf, _mesa_hash_string,
725 _mesa_key_string_equal);
726
727 /* Index all the metric sets mesa knows about before looking to see what
728 * the kernel is advertising.
729 */
730 oa_register(perf);
731
732 if (!INTEL_DEBUG(DEBUG_NO_OACONFIG)) {
733 if (kernel_has_dynamic_config_support(perf, fd))
734 init_oa_configs(perf, fd, devinfo);
735 else
736 enumerate_sysfs_metrics(perf, devinfo);
737 } else {
738 add_all_metrics(perf, devinfo);
739 }
740
741 /* sort counters in each individual group created by this function by name */
742 for (int i = existing_queries; i < perf->n_queries; ++i)
743 sort_query(&perf->queries[i]);
744
745 /* Select a fallback OA metric. Look for the TestOa metric or use the last
746 * one if not present (on HSW).
747 */
748 for (int i = existing_queries; i < perf->n_queries; i++) {
749 if (perf->queries[i].symbol_name &&
750 strcmp(perf->queries[i].symbol_name, "TestOa") == 0) {
751 perf->fallback_raw_oa_metric = perf->queries[i].oa_metrics_set_id;
752 break;
753 }
754 }
755 if (perf->fallback_raw_oa_metric == 0 && perf->n_queries > 0)
756 perf->fallback_raw_oa_metric = perf->queries[perf->n_queries - 1].oa_metrics_set_id;
757 }
758
759 struct intel_perf_registers *
760 intel_perf_load_configuration(struct intel_perf_config *perf_cfg, int fd, const char *guid)
761 {
762 if (!(perf_cfg->features_supported & INTEL_PERF_FEATURE_QUERY_PERF))
763 return NULL;
764
765 switch (perf_cfg->devinfo->kmd_type) {
766 case INTEL_KMD_TYPE_I915:
767 return i915_perf_load_configurations(perf_cfg, fd, guid);
768 default:
769 unreachable("missing");
770 return NULL;
771 }
772 }
773
774 uint64_t
775 intel_perf_store_configuration(struct intel_perf_config *perf_cfg, int fd,
776 const struct intel_perf_registers *config,
777 const char *guid)
778 {
779 if (guid)
780 return kmd_add_config(perf_cfg, fd, config, guid);
781
782 struct mesa_sha1 sha1_ctx;
783 _mesa_sha1_init(&sha1_ctx);
784
785 if (config->flex_regs) {
786 _mesa_sha1_update(&sha1_ctx, config->flex_regs,
787 sizeof(config->flex_regs[0]) *
788 config->n_flex_regs);
789 }
790 if (config->mux_regs) {
791 _mesa_sha1_update(&sha1_ctx, config->mux_regs,
792 sizeof(config->mux_regs[0]) *
793 config->n_mux_regs);
794 }
795 if (config->b_counter_regs) {
796 _mesa_sha1_update(&sha1_ctx, config->b_counter_regs,
797 sizeof(config->b_counter_regs[0]) *
798 config->n_b_counter_regs);
799 }
800
801 uint8_t hash[20];
802 _mesa_sha1_final(&sha1_ctx, hash);
803
804 char formatted_hash[41];
805 _mesa_sha1_format(formatted_hash, hash);
806
807 char generated_guid[37];
808 snprintf(generated_guid, sizeof(generated_guid),
809 "%.8s-%.4s-%.4s-%.4s-%.12s",
810 &formatted_hash[0], &formatted_hash[8],
811 &formatted_hash[8 + 4], &formatted_hash[8 + 4 + 4],
812 &formatted_hash[8 + 4 + 4 + 4]);
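   /* The generated guid is the first 32 hex characters of the SHA1 of the
    * register lists, laid out like a UUID (8-4-4-4-12); the remaining 8 hex
    * characters of the hash are dropped.
    */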
813
814 /* Check if already present. */
815 uint64_t id;
816 if (intel_perf_load_metric_id(perf_cfg, generated_guid, &id))
817 return id;
818
819 return kmd_add_config(perf_cfg, fd, config, generated_guid);
820 }
821
822 void
823 intel_perf_remove_configuration(struct intel_perf_config *perf_cfg, int fd,
824 uint64_t config_id)
825 {
826 switch (perf_cfg->devinfo->kmd_type) {
827 case INTEL_KMD_TYPE_I915:
828 i915_remove_config(perf_cfg, fd, config_id);
829 break;
830 case INTEL_KMD_TYPE_XE:
831 xe_remove_config(perf_cfg, fd, config_id);
832 break;
833 default:
834 unreachable("missing");
835 }
836 }
837
838 static void
839 get_passes_mask(struct intel_perf_config *perf,
840 const uint32_t *counter_indices,
841 uint32_t counter_indices_count,
842 BITSET_WORD *queries_mask)
843 {
844 /* For each counter, check whether it's already computed by a selected metric
845 * set, or find one that can compute it.
846 */
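   /* Note this is a greedy selection: for an uncovered counter we simply pick
    * the lowest-indexed metric set that exposes it, which keeps the number of
    * passes small but is not guaranteed to be minimal.
    */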
847 for (uint32_t c = 0; c < counter_indices_count; c++) {
848 uint32_t counter_idx = counter_indices[c];
849 assert(counter_idx < perf->n_counters);
850
851 const struct intel_perf_query_counter_info *counter_info =
852 &perf->counter_infos[counter_idx];
853
854 /* Check if the counter is already computed by one of the selected
855 * metric sets. If it is, there is nothing more to do with this counter.
856 */
857 uint32_t match = UINT32_MAX;
858 for (uint32_t w = 0; w < BITSET_WORDS(INTEL_PERF_MAX_METRIC_SETS); w++) {
859 if (queries_mask[w] & counter_info->query_mask[w]) {
860 match = w * BITSET_WORDBITS + ffsll(queries_mask[w] & counter_info->query_mask[w]) - 1;
861 break;
862 }
863 }
864 if (match != UINT32_MAX)
865 continue;
866
867 /* Now go through each metric set and find one that contains this
868 * counter.
869 */
870 bool found = false;
871 for (uint32_t w = 0; w < BITSET_WORDS(INTEL_PERF_MAX_METRIC_SETS); w++) {
872 if (!counter_info->query_mask[w])
873 continue;
874
875 uint32_t query_idx = w * BITSET_WORDBITS + ffsll(counter_info->query_mask[w]) - 1;
876
877 /* Since we already looked for this in the query_mask, it should not
878 * be set.
879 */
880 assert(!BITSET_TEST(queries_mask, query_idx));
881
882 BITSET_SET(queries_mask, query_idx);
883 found = true;
884 break;
885 }
886 assert(found);
887 }
888 }
889
890 uint32_t
891 intel_perf_get_n_passes(struct intel_perf_config *perf,
892 const uint32_t *counter_indices,
893 uint32_t counter_indices_count,
894 struct intel_perf_query_info **pass_queries)
895 {
896 BITSET_DECLARE(queries_mask, INTEL_PERF_MAX_METRIC_SETS);
897 BITSET_ZERO(queries_mask);
898
899 get_passes_mask(perf, counter_indices, counter_indices_count, queries_mask);
900
901 if (pass_queries) {
902 uint32_t pass = 0;
903 for (uint32_t q = 0; q < perf->n_queries; q++) {
904 if (BITSET_TEST(queries_mask, q))
905 pass_queries[pass++] = &perf->queries[q];
906 }
907 }
908
909 return BITSET_COUNT(queries_mask);
910 }
911
912 void
913 intel_perf_get_counters_passes(struct intel_perf_config *perf,
914 const uint32_t *counter_indices,
915 uint32_t counter_indices_count,
916 struct intel_perf_counter_pass *counter_pass)
917 {
918 BITSET_DECLARE(queries_mask, INTEL_PERF_MAX_METRIC_SETS);
919 BITSET_ZERO(queries_mask);
920
921 get_passes_mask(perf, counter_indices, counter_indices_count, queries_mask);
922
923 for (uint32_t i = 0; i < counter_indices_count; i++) {
924 assert(counter_indices[i] < perf->n_counters);
925
926 uint32_t counter_idx = counter_indices[i];
927 counter_pass[i].counter = perf->counter_infos[counter_idx].counter;
928
929 const struct intel_perf_query_counter_info *counter_info =
930 &perf->counter_infos[counter_idx];
931
932 uint32_t query_idx = UINT32_MAX;
933 for (uint32_t w = 0; w < BITSET_WORDS(INTEL_PERF_MAX_METRIC_SETS); w++) {
934 if (counter_info->query_mask[w] & queries_mask[w]) {
935 query_idx = w * BITSET_WORDBITS +
936 ffsll(counter_info->query_mask[w] & queries_mask[w]) - 1;
937 break;
938 }
939 }
940 assert(query_idx != UINT32_MAX);
941
942 counter_pass[i].query = &perf->queries[query_idx];
943 }
944 }
945
946 /* Accumulate 32bits OA counters */
947 static inline void
948 accumulate_uint32(const uint32_t *report0,
949 const uint32_t *report1,
950 uint64_t *accumulator)
951 {
952 *accumulator += (uint32_t)(*report1 - *report0);
953 }
954
955 /* Accumulate 40bits OA counters */
956 static inline void
957 accumulate_uint40(int a_index,
958 const uint32_t *report0,
959 const uint32_t *report1,
960 uint64_t *accumulator)
961 {
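   /* In these 40-bit formats the low 32 bits of A-counter <a_index> sit at
    * dword (a_index + 4) and the extra high byte sits in a byte array
    * starting at dword 40; at most one wrap of the 40-bit value is assumed
    * between the two reports.
    */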
962 const uint8_t *high_bytes0 = (uint8_t *)(report0 + 40);
963 const uint8_t *high_bytes1 = (uint8_t *)(report1 + 40);
964 uint64_t high0 = (uint64_t)(high_bytes0[a_index]) << 32;
965 uint64_t high1 = (uint64_t)(high_bytes1[a_index]) << 32;
966 uint64_t value0 = report0[a_index + 4] | high0;
967 uint64_t value1 = report1[a_index + 4] | high1;
968 uint64_t delta;
969
970 if (value0 > value1)
971 delta = (1ULL << 40) + value1 - value0;
972 else
973 delta = value1 - value0;
974
975 *accumulator += delta;
976 }
977
978 /* Accumulate 64bits OA counters */
979 static inline void
980 accumulate_uint64(const uint32_t *report0,
981 const uint32_t *report1,
982 uint64_t *accumulator)
983 {
984 *accumulator += *((const uint64_t *)report1) - *((const uint64_t *)report0);
985 }
986
987 static void
988 gfx8_read_report_clock_ratios(const uint32_t *report,
989 uint64_t *slice_freq_hz,
990 uint64_t *unslice_freq_hz)
991 {
992 /* The lower 16bits of the RPT_ID field of the OA reports contains a
993 * snapshot of the bits coming from the RP_FREQ_NORMAL register and is
994 * divided this way:
995 *
996 * RPT_ID[31:25]: RP_FREQ_NORMAL[20:14] (low squashed_slice_clock_frequency)
997 * RPT_ID[10:9]: RP_FREQ_NORMAL[22:21] (high squashed_slice_clock_frequency)
998 * RPT_ID[8:0]: RP_FREQ_NORMAL[31:23] (squashed_unslice_clock_frequency)
999 *
1000 * RP_FREQ_NORMAL[31:23]: Software Unslice Ratio Request
1001 * Multiple of 33.33MHz 2xclk (16 MHz 1xclk)
1002 *
1003 * RP_FREQ_NORMAL[22:14]: Software Slice Ratio Request
1004 * Multiple of 33.33MHz 2xclk (16 MHz 1xclk)
1005 */
1006
1007 uint32_t unslice_freq = report[0] & 0x1ff;
1008 uint32_t slice_freq_low = (report[0] >> 25) & 0x7f;
1009 uint32_t slice_freq_high = (report[0] >> 9) & 0x3;
1010 uint32_t slice_freq = slice_freq_low | (slice_freq_high << 7);
1011
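   /* e.g. a squashed ratio of 30 works out to 30 * 16.67MHz ~= 500MHz. */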
1012 *slice_freq_hz = slice_freq * 16666667ULL;
1013 *unslice_freq_hz = unslice_freq * 16666667ULL;
1014 }
1015
1016 void
1017 intel_perf_query_result_read_frequencies(struct intel_perf_query_result *result,
1018 const struct intel_device_info *devinfo,
1019 const uint32_t *start,
1020 const uint32_t *end)
1021 {
1022 /* Slice/Unslice frequency is only available in the OA reports when the
1023 * "Disable OA reports due to clock ratio change" field in
1024 * OA_DEBUG_REGISTER is set to 1. This is how the kernel programs this
1025 * global register (see drivers/gpu/drm/i915/i915_perf.c)
1026 *
1027 * Documentation says this should be available on Gfx9+ but experimentation
1028 * shows that Gfx8 reports similar values, so we enable it there too.
1029 */
1030 if (devinfo->ver < 8)
1031 return;
1032
1033 gfx8_read_report_clock_ratios(start,
1034 &result->slice_frequency[0],
1035 &result->unslice_frequency[0]);
1036 gfx8_read_report_clock_ratios(end,
1037 &result->slice_frequency[1],
1038 &result->unslice_frequency[1]);
1039 }
1040
1041 static inline bool
1042 can_use_mi_rpc_bc_counters(const struct intel_device_info *devinfo)
1043 {
1044 return devinfo->ver <= 11;
1045 }
1046
1047 uint64_t
1048 intel_perf_report_timestamp(const struct intel_perf_query_info *query,
1049 const struct intel_device_info *devinfo,
1050 const uint32_t *report)
1051 {
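   /* Xe2+ reports carry a 64-bit timestamp at dwords 2-3; older OA reports
    * carry a 32-bit timestamp at dword 1. Either way it is scaled down by
    * the oa_timestamp_shift chosen in oa_metrics_available().
    */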
1052 if (query->perf->devinfo->verx10 >= 200) {
1053 uint64_t data_u64 = *((const uint64_t *)&report[2]);
1054 return data_u64 >> query->perf->oa_timestamp_shift;
1055 }
1056
1057 return report[1] >> query->perf->oa_timestamp_shift;
1058 }
1059
1060 void
1061 intel_perf_query_result_accumulate(struct intel_perf_query_result *result,
1062 const struct intel_perf_query_info *query,
1063 const uint32_t *start,
1064 const uint32_t *end)
1065 {
1066 const struct intel_device_info *devinfo = query->perf->devinfo;
1067 int i;
1068
1069 if (query->perf->devinfo->verx10 >= 200) {
1070 if (result->hw_id == INTEL_PERF_INVALID_CTX_ID &&
1071 start[4] != INTEL_PERF_INVALID_CTX_ID)
1072 result->hw_id = start[4];
1073 } else {
1074 if (result->hw_id == INTEL_PERF_INVALID_CTX_ID &&
1075 start[2] != INTEL_PERF_INVALID_CTX_ID)
1076 result->hw_id = start[2];
1077 }
1078
1079 if (result->reports_accumulated == 0)
1080 result->begin_timestamp = intel_perf_report_timestamp(query, devinfo, start);
1081 result->end_timestamp = intel_perf_report_timestamp(query, devinfo, end);
1082 result->reports_accumulated++;
1083
1084 /* The OA format handling below needs to match the format returned by
1085 * intel_perf_get_oa_format()
1086 */
1087 assert(intel_perf_get_oa_format(query->perf) == query->oa_format);
1088 if (query->perf->devinfo->verx10 >= 200) {
1089 /* PEC64u64 */
1090 result->accumulator[query->gpu_time_offset] =
1091 intel_perf_report_timestamp(query, devinfo, end) -
1092 intel_perf_report_timestamp(query, devinfo, start);
1093 accumulate_uint64(start + 6, end + 6, &result->accumulator[query->gpu_clock_offset]);
1094
1095 for (i = 0; i < 64; i++)
1096 accumulate_uint64(start + 8 + (2 * i), end + 8 + (2 * i),
1097 &result->accumulator[query->pec_offset + i]);
1098 } else if (query->perf->devinfo->verx10 >= 125) {
1099 /* I915_OA_FORMAT_A24u40_A14u32_B8_C8 */
1100 result->accumulator[query->gpu_time_offset] =
1101 intel_perf_report_timestamp(query, devinfo, end) -
1102 intel_perf_report_timestamp(query, devinfo, start);
1103
1104 accumulate_uint32(start + 3, end + 3,
1105 result->accumulator + query->gpu_clock_offset); /* clock */
1106
1107 /* A0-A3 counters are 32bits */
1108 for (i = 0; i < 4; i++) {
1109 accumulate_uint32(start + 4 + i, end + 4 + i,
1110 result->accumulator + query->a_offset + i);
1111 }
1112
1113 /* A4-A23 counters are 40bits */
1114 for (i = 4; i < 24; i++) {
1115 accumulate_uint40(i, start, end,
1116 result->accumulator + query->a_offset + i);
1117 }
1118
1119 /* A24-27 counters are 32bits */
1120 for (i = 0; i < 4; i++) {
1121 accumulate_uint32(start + 28 + i, end + 28 + i,
1122 result->accumulator + query->a_offset + 24 + i);
1123 }
1124
1125 /* A28-31 counters are 40bits */
1126 for (i = 28; i < 32; i++) {
1127 accumulate_uint40(i, start, end,
1128 result->accumulator + query->a_offset + i);
1129 }
1130
1131 /* A32-35 counters are 32bits */
1132 for (i = 0; i < 4; i++) {
1133 accumulate_uint32(start + 36 + i, end + 36 + i,
1134 result->accumulator + query->a_offset + 32 + i);
1135 }
1136
1137 if (can_use_mi_rpc_bc_counters(query->perf->devinfo) ||
1138 !query->perf->sys_vars.query_mode) {
1139 /* A36-37 counters are 32bits */
1140 accumulate_uint32(start + 40, end + 40,
1141 result->accumulator + query->a_offset + 36);
1142 accumulate_uint32(start + 46, end + 46,
1143 result->accumulator + query->a_offset + 37);
1144
1145 /* 8x 32bit B counters */
1146 for (i = 0; i < 8; i++) {
1147 accumulate_uint32(start + 48 + i, end + 48 + i,
1148 result->accumulator + query->b_offset + i);
1149 }
1150
1151 /* 8x 32bit C counters... */
1152 for (i = 0; i < 8; i++) {
1153 accumulate_uint32(start + 56 + i, end + 56 + i,
1154 result->accumulator + query->c_offset + i);
1155 }
1156 }
1157 } else if (query->perf->devinfo->verx10 >= 120) {
1158 /* I915_OA_FORMAT_A32u40_A4u32_B8_C8 */
1159 result->accumulator[query->gpu_time_offset] =
1160 intel_perf_report_timestamp(query, devinfo, end) -
1161 intel_perf_report_timestamp(query, devinfo, start);
1162
1163 accumulate_uint32(start + 3, end + 3,
1164 result->accumulator + query->gpu_clock_offset); /* clock */
1165
1166 /* 32x 40bit A counters... */
1167 for (i = 0; i < 32; i++) {
1168 accumulate_uint40(i, start, end,
1169 result->accumulator + query->a_offset + i);
1170 }
1171
1172 /* 4x 32bit A counters... */
1173 for (i = 0; i < 4; i++) {
1174 accumulate_uint32(start + 36 + i, end + 36 + i,
1175 result->accumulator + query->a_offset + 32 + i);
1176 }
1177
1178 if (can_use_mi_rpc_bc_counters(query->perf->devinfo) ||
1179 !query->perf->sys_vars.query_mode) {
1180 /* 8x 32bit B counters */
1181 for (i = 0; i < 8; i++) {
1182 accumulate_uint32(start + 48 + i, end + 48 + i,
1183 result->accumulator + query->b_offset + i);
1184 }
1185
1186 /* 8x 32bit C counters... */
1187 for (i = 0; i < 8; i++) {
1188 accumulate_uint32(start + 56 + i, end + 56 + i,
1189 result->accumulator + query->c_offset + i);
1190 }
1191 }
1192 } else {
1193 /* I915_OA_FORMAT_A45_B8_C8 */
1194 result->accumulator[query->gpu_time_offset] =
1195 intel_perf_report_timestamp(query, devinfo, end) -
1196 intel_perf_report_timestamp(query, devinfo, start);
1197
1198 for (i = 0; i < 61; i++) {
1199 accumulate_uint32(start + 3 + i, end + 3 + i,
1200 result->accumulator + query->a_offset + i);
1201 }
1202 }
1203 }
1204
1205 #define GET_FIELD(word, field) (((word) & field ## _MASK) >> field ## _SHIFT)
1206
1207 void
1208 intel_perf_query_result_read_gt_frequency(struct intel_perf_query_result *result,
1209 const struct intel_device_info *devinfo,
1210 const uint32_t start,
1211 const uint32_t end)
1212 {
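   /* RPSTAT encodes the current GT frequency in 50MHz units on Gfx7/8 and in
    * 50/3 MHz (~16.67MHz) units on Gfx9+, hence the scaling below.
    */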
1213 switch (devinfo->ver) {
1214 case 7:
1215 case 8:
1216 result->gt_frequency[0] = GET_FIELD(start, GFX7_RPSTAT1_CURR_GT_FREQ) * 50ULL;
1217 result->gt_frequency[1] = GET_FIELD(end, GFX7_RPSTAT1_CURR_GT_FREQ) * 50ULL;
1218 break;
1219 case 9:
1220 case 11:
1221 case 12:
1222 case 20:
1223 result->gt_frequency[0] = GET_FIELD(start, GFX9_RPSTAT0_CURR_GT_FREQ) * 50ULL / 3ULL;
1224 result->gt_frequency[1] = GET_FIELD(end, GFX9_RPSTAT0_CURR_GT_FREQ) * 50ULL / 3ULL;
1225 break;
1226 default:
1227 unreachable("unexpected gen");
1228 }
1229
1230 /* Put the numbers into Hz. */
1231 result->gt_frequency[0] *= 1000000ULL;
1232 result->gt_frequency[1] *= 1000000ULL;
1233 }
1234
1235 void
1236 intel_perf_query_result_read_perfcnts(struct intel_perf_query_result *result,
1237 const struct intel_perf_query_info *query,
1238 const uint64_t *start,
1239 const uint64_t *end)
1240 {
1241 for (uint32_t i = 0; i < 2; i++) {
1242 uint64_t v0 = start[i] & PERF_CNT_VALUE_MASK;
1243 uint64_t v1 = end[i] & PERF_CNT_VALUE_MASK;
1244
1245 result->accumulator[query->perfcnt_offset + i] = v0 > v1 ?
1246 (PERF_CNT_VALUE_MASK + 1 + v1 - v0) :
1247 (v1 - v0);
1248 }
1249 }
1250
1251 static uint32_t
1252 query_accumulator_offset(const struct intel_perf_query_info *query,
1253 enum intel_perf_query_field_type type,
1254 uint8_t index)
1255 {
1256 switch (type) {
1257 case INTEL_PERF_QUERY_FIELD_TYPE_SRM_PERFCNT:
1258 return query->perfcnt_offset + index;
1259 case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_A:
1260 return query->a_offset + index;
1261 case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_B:
1262 return query->b_offset + index;
1263 case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_C:
1264 return query->c_offset + index;
1265 case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_PEC:
1266 return query->pec_offset + index;
1267 default:
1268 unreachable("Invalid register type");
1269 return 0;
1270 }
1271 }
1272
1273 void
1274 intel_perf_query_result_accumulate_fields(struct intel_perf_query_result *result,
1275 const struct intel_perf_query_info *query,
1276 const void *start,
1277 const void *end,
1278 bool no_oa_accumulate)
1279 {
1280 const struct intel_perf_query_field_layout *layout = &query->perf->query_layout;
1281 const struct intel_device_info *devinfo = query->perf->devinfo;
1282
1283 for (uint32_t r = 0; r < layout->n_fields; r++) {
1284 const struct intel_perf_query_field *field = &layout->fields[r];
1285
1286 if (field->type == INTEL_PERF_QUERY_FIELD_TYPE_MI_RPC) {
1287 intel_perf_query_result_read_frequencies(result, devinfo,
1288 start + field->location,
1289 end + field->location);
1290 /* no_oa_accumulate=true is used when doing GL perf queries; we
1291 * manually parse the OA reports from the OA buffer and subtract
1292 * unrelated deltas, so don't accumulate the begin/end reports here.
1293 */
1294 if (!no_oa_accumulate) {
1295 intel_perf_query_result_accumulate(result, query,
1296 start + field->location,
1297 end + field->location);
1298 }
1299 } else {
1300 uint64_t v0, v1;
1301
1302 if (field->size == 4) {
1303 v0 = *(const uint32_t *)(start + field->location);
1304 v1 = *(const uint32_t *)(end + field->location);
1305 } else {
1306 assert(field->size == 8);
1307 v0 = *(const uint64_t *)(start + field->location);
1308 v1 = *(const uint64_t *)(end + field->location);
1309 }
1310
1311 if (field->mask) {
1312 v0 = field->mask & v0;
1313 v1 = field->mask & v1;
1314 }
1315
1316 /* RPSTAT is a bit of a special case because its begin/end values
1317 * represent frequencies. We store it in a separate location.
1318 */
1319 if (field->type == INTEL_PERF_QUERY_FIELD_TYPE_SRM_RPSTAT)
1320 intel_perf_query_result_read_gt_frequency(result, devinfo, v0, v1);
1321 else
1322 result->accumulator[query_accumulator_offset(query, field->type, field->index)] = v1 - v0;
1323 }
1324 }
1325 }
1326
1327 void
1328 intel_perf_query_result_clear(struct intel_perf_query_result *result)
1329 {
1330 memset(result, 0, sizeof(*result));
1331 result->hw_id = INTEL_PERF_INVALID_CTX_ID;
1332 }
1333
1334 void
1335 intel_perf_query_result_print_fields(const struct intel_perf_query_info *query,
1336 const void *data)
1337 {
1338 const struct intel_perf_query_field_layout *layout = &query->perf->query_layout;
1339
1340 for (uint32_t r = 0; r < layout->n_fields; r++) {
1341 const struct intel_perf_query_field *field = &layout->fields[r];
1342 const uint32_t *value32 = data + field->location;
1343
1344 switch (field->type) {
1345 case INTEL_PERF_QUERY_FIELD_TYPE_MI_RPC:
1346 fprintf(stderr, "MI_RPC:\n");
1347 fprintf(stderr, " TS: 0x%08x\n", *(value32 + 1));
1348 fprintf(stderr, " CLK: 0x%08x\n", *(value32 + 3));
1349 break;
1350 case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_A:
1351 fprintf(stderr, "A%u: 0x%08x\n", field->index, *value32);
1352 break;
1353 case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_B:
1354 fprintf(stderr, "B%u: 0x%08x\n", field->index, *value32);
1355 break;
1356 case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_C:
1357 fprintf(stderr, "C%u: 0x%08x\n", field->index, *value32);
1358 break;
1359 case INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_PEC: {
1360 const uint64_t *value64 = data + field->location;
1361 fprintf(stderr, "PEC%u: 0x%" PRIx64 "\n", field->index, *value64);
1362 break;
1363 }
1364 default:
1365 break;
1366 }
1367 }
1368 }
1369
1370 static int
1371 intel_perf_compare_query_names(const void *v1, const void *v2)
1372 {
1373 const struct intel_perf_query_info *q1 = v1;
1374 const struct intel_perf_query_info *q2 = v2;
1375
1376 return strcmp(q1->name, q2->name);
1377 }
1378
1379 /* Xe2: (64 x PEC) + SRM_RPSTAT + MI_RPC */
1380 #define MAX_QUERY_FIELDS(devinfo) (devinfo->verx10 >= 200 ? (64 + 2) : (5 + 16))
1381
1382 static inline struct intel_perf_query_field *
1383 add_query_register(struct intel_perf_config *perf_cfg,
1384 enum intel_perf_query_field_type type,
1385 uint32_t offset,
1386 uint16_t size,
1387 uint8_t index)
1388 {
1389 struct intel_perf_query_field_layout *layout = &perf_cfg->query_layout;
1390
1391 /* Align MI_RPC to 64bytes (HW requirement) & 64bit registers to 8bytes
1392 * (shows up nicely in the debugger).
1393 */
1394 if (type == INTEL_PERF_QUERY_FIELD_TYPE_MI_RPC)
1395 layout->size = align(layout->size, 64);
1396 else if (size % 8 == 0)
1397 layout->size = align(layout->size, 8);
1398
1399 assert(layout->n_fields < MAX_QUERY_FIELDS(perf_cfg->devinfo));
1400 layout->fields[layout->n_fields++] = (struct intel_perf_query_field) {
1401 .mmio_offset = offset,
1402 .location = layout->size,
1403 .type = type,
1404 .index = index,
1405 .size = size,
1406 };
1407 layout->size += size;
1408
1409 return &layout->fields[layout->n_fields - 1];
1410 }
1411
1412 static void
1413 intel_perf_init_query_fields(struct intel_perf_config *perf_cfg,
1414 const struct intel_device_info *devinfo,
1415 bool use_register_snapshots)
1416 {
1417 struct intel_perf_query_field_layout *layout = &perf_cfg->query_layout;
1418
1419 layout->n_fields = 0;
1420
1421 /* MI_RPC requires a 64byte alignment. */
1422 layout->alignment = 64;
1423
1424 layout->fields = rzalloc_array(perf_cfg, struct intel_perf_query_field,
1425 MAX_QUERY_FIELDS(devinfo));
1426
1427 add_query_register(perf_cfg, INTEL_PERF_QUERY_FIELD_TYPE_MI_RPC,
1428 0, perf_cfg->oa_sample_size, 0);
1429
1430 if (use_register_snapshots) {
1431 if (devinfo->ver <= 11) {
1432 struct intel_perf_query_field *field =
1433 add_query_register(perf_cfg,
1434 INTEL_PERF_QUERY_FIELD_TYPE_SRM_PERFCNT,
1435 PERF_CNT_1_DW0, 8, 0);
1436 field->mask = PERF_CNT_VALUE_MASK;
1437
1438 field = add_query_register(perf_cfg,
1439 INTEL_PERF_QUERY_FIELD_TYPE_SRM_PERFCNT,
1440 PERF_CNT_2_DW0, 8, 1);
1441 field->mask = PERF_CNT_VALUE_MASK;
1442 }
1443
1444 if (devinfo->ver == 8 && devinfo->platform != INTEL_PLATFORM_CHV) {
1445 add_query_register(perf_cfg,
1446 INTEL_PERF_QUERY_FIELD_TYPE_SRM_RPSTAT,
1447 GFX7_RPSTAT1, 4, 0);
1448 }
1449
1450 if (devinfo->ver >= 9) {
1451 add_query_register(perf_cfg,
1452 INTEL_PERF_QUERY_FIELD_TYPE_SRM_RPSTAT,
1453 GFX9_RPSTAT0, 4, 0);
1454 }
1455
1456 if (!can_use_mi_rpc_bc_counters(devinfo)) {
1457 if (devinfo->ver >= 8 && devinfo->ver <= 11) {
1458 for (uint32_t i = 0; i < GFX8_N_OA_PERF_B32; i++) {
1459 add_query_register(perf_cfg, INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_B,
1460 GFX8_OA_PERF_B32(i), 4, i);
1461 }
1462 for (uint32_t i = 0; i < GFX8_N_OA_PERF_C32; i++) {
1463 add_query_register(perf_cfg, INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_C,
1464 GFX8_OA_PERF_C32(i), 4, i);
1465 }
1466 } else if (devinfo->verx10 == 120) {
1467 for (uint32_t i = 0; i < GFX12_N_OAG_PERF_B32; i++) {
1468 add_query_register(perf_cfg, INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_B,
1469 GFX12_OAG_PERF_B32(i), 4, i);
1470 }
1471 for (uint32_t i = 0; i < GFX12_N_OAG_PERF_C32; i++) {
1472 add_query_register(perf_cfg, INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_C,
1473 GFX12_OAG_PERF_C32(i), 4, i);
1474 }
1475 } else if (devinfo->verx10 == 125) {
1476 add_query_register(perf_cfg, INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_A,
1477 GFX125_OAG_PERF_A36, 4, 36);
1478 add_query_register(perf_cfg, INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_A,
1479 GFX125_OAG_PERF_A37, 4, 37);
1480 for (uint32_t i = 0; i < GFX12_N_OAG_PERF_B32; i++) {
1481 add_query_register(perf_cfg, INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_B,
1482 GFX12_OAG_PERF_B32(i), 4, i);
1483 }
1484 for (uint32_t i = 0; i < GFX12_N_OAG_PERF_C32; i++) {
1485 add_query_register(perf_cfg, INTEL_PERF_QUERY_FIELD_TYPE_SRM_OA_C,
1486 GFX12_OAG_PERF_C32(i), 4, i);
1487 }
1488 }
1489 }
1490 }
1491
1492 /* Align the whole package to 64bytes so that 2 snapshots can be put
1493 * together without extra alignment for the user.
1494 */
1495 layout->size = align(layout->size, 64);
1496 }
1497
1498 static size_t
1499 intel_perf_get_oa_format_size(const struct intel_device_info *devinfo)
1500 {
1501 if (devinfo->verx10 >= 200)
1502 return 576;
1503
1504 return 256;
1505 }
1506
1507 void
1508 intel_perf_init_metrics(struct intel_perf_config *perf_cfg,
1509 const struct intel_device_info *devinfo,
1510 int drm_fd,
1511 bool include_pipeline_statistics,
1512 bool use_register_snapshots)
1513 {
1514 perf_cfg->devinfo = devinfo;
1515 perf_cfg->oa_sample_size = intel_perf_get_oa_format_size(devinfo);
1516
1517 intel_perf_init_query_fields(perf_cfg, devinfo, use_register_snapshots);
1518
1519 if (include_pipeline_statistics) {
1520 load_pipeline_statistic_metrics(perf_cfg, devinfo);
1521 intel_perf_register_mdapi_statistic_query(perf_cfg, devinfo);
1522 }
1523
1524 bool oa_metrics = oa_metrics_available(perf_cfg, drm_fd, devinfo,
1525 use_register_snapshots);
1526 if (oa_metrics)
1527 load_oa_metrics(perf_cfg, drm_fd, devinfo);
1528
1529 /* sort query groups by name */
1530 qsort(perf_cfg->queries, perf_cfg->n_queries,
1531 sizeof(perf_cfg->queries[0]), intel_perf_compare_query_names);
1532
1533 build_unique_counter_list(perf_cfg);
1534
1535 if (oa_metrics)
1536 intel_perf_register_mdapi_oa_query(perf_cfg, devinfo);
1537 }
1538
1539 void
1540 intel_perf_free(struct intel_perf_config *perf_cfg)
1541 {
1542 ralloc_free(perf_cfg);
1543 }
1544
1545 uint64_t
1546 intel_perf_get_oa_format(struct intel_perf_config *perf_cfg)
1547 {
1548 switch (perf_cfg->devinfo->kmd_type) {
1549 case INTEL_KMD_TYPE_I915:
1550 return i915_perf_get_oa_format(perf_cfg);
1551 case INTEL_KMD_TYPE_XE:
1552 return xe_perf_get_oa_format(perf_cfg);
1553 default:
1554 unreachable("missing");
1555 return 0;
1556 }
1557 }
1558
1559 int
1560 intel_perf_stream_open(struct intel_perf_config *perf_config, int drm_fd,
1561 uint32_t ctx_id, uint64_t metrics_set_id,
1562 uint64_t period_exponent, bool hold_preemption,
1563 bool enable)
1564 {
1565 uint64_t report_format = intel_perf_get_oa_format(perf_config);
1566
1567 switch (perf_config->devinfo->kmd_type) {
1568 case INTEL_KMD_TYPE_I915:
1569 return i915_perf_stream_open(perf_config, drm_fd, ctx_id, metrics_set_id,
1570 report_format, period_exponent,
1571 hold_preemption, enable);
1572 case INTEL_KMD_TYPE_XE:
1573 return xe_perf_stream_open(perf_config, drm_fd, ctx_id, metrics_set_id,
1574 report_format, period_exponent,
1575 hold_preemption, enable);
1576 default:
1577 unreachable("missing");
1578 return 0;
1579 }
1580 }
1581
1582 /*
1583 * Read perf stream samples.
1584 *
1585 * buffer will be filled with multiple struct intel_perf_record_header + data.
1586 *
1587 * Returns 0 if no sample is available, a negative errno value if an error
1588 * happened, or the number of bytes read on success.
1589 */
1590 int
1591 intel_perf_stream_read_samples(struct intel_perf_config *perf_config,
1592 int perf_stream_fd, uint8_t *buffer,
1593 size_t buffer_len)
1594 {
1595 switch (perf_config->devinfo->kmd_type) {
1596 case INTEL_KMD_TYPE_I915:
1597 return i915_perf_stream_read_samples(perf_config, perf_stream_fd, buffer, buffer_len);
1598 case INTEL_KMD_TYPE_XE:
1599 return xe_perf_stream_read_samples(perf_config, perf_stream_fd, buffer, buffer_len);
1600 default:
1601 unreachable("missing");
1602 return -1;
1603 }
1604 }
1605
1606 int
1607 intel_perf_stream_set_state(struct intel_perf_config *perf_config,
1608 int perf_stream_fd, bool enable)
1609 {
1610 switch (perf_config->devinfo->kmd_type) {
1611 case INTEL_KMD_TYPE_I915:
1612 return i915_perf_stream_set_state(perf_stream_fd, enable);
1613 case INTEL_KMD_TYPE_XE:
1614 return xe_perf_stream_set_state(perf_stream_fd, enable);
1615 default:
1616 unreachable("missing");
1617 return -1;
1618 }
1619 }
1620
1621 int
1622 intel_perf_stream_set_metrics_id(struct intel_perf_config *perf_config,
1623 int perf_stream_fd, uint64_t metrics_set_id)
1624 {
1625 switch (perf_config->devinfo->kmd_type) {
1626 case INTEL_KMD_TYPE_I915:
1627 return i915_perf_stream_set_metrics_id(perf_stream_fd, metrics_set_id);
1628 case INTEL_KMD_TYPE_XE:
1629 return xe_perf_stream_set_metrics_id(perf_stream_fd, metrics_set_id);
1630 default:
1631 unreachable("missing");
1632 return -1;
1633 }
1634 }
1635