Lines Matching +full:static +full:- +full:config in drivers/gpu/drm/amd/amdgpu/amdgpu_pmu.c

60 static ssize_t amdgpu_pmu_event_show(struct device *dev,  in amdgpu_pmu_event_show()
68 if (!amdgpu_pmu_attr->type) in amdgpu_pmu_event_show()
69 return sprintf(buf, "%s\n", amdgpu_pmu_attr->event_str); in amdgpu_pmu_event_show()
72 amdgpu_pmu_attr->event_str, amdgpu_pmu_attr->type); in amdgpu_pmu_event_show()
75 static LIST_HEAD(amdgpu_pmu_list);
80 const char *config; member
99 * - PMU typed
105 * - Event config typed
114 static struct amdgpu_pmu_attr amdgpu_pmu_formats[NUM_FORMATS_AMDGPU_PMU] = {
115 { .name = "event", .config = "config:0-7" },
116 { .name = "instance", .config = "config:8-15" },
117 { .name = "umask", .config = "config:16-23"},
118 { .name = "type", .config = "config:56-63"}
122 static struct amdgpu_pmu_attr vega20_events[NUM_EVENTS_VEGA20_MAX] = {
124 .config = "event=0x7,instance=0x46,umask=0x2" },
126 .config = "event=0x7,instance=0x47,umask=0x2" }
129 static struct amdgpu_pmu_type vega20_types[NUM_EVENT_TYPES_VEGA20] = {
134 static struct amdgpu_pmu_config vega20_config = {
144 static struct amdgpu_pmu_attr df_vega20_formats[NUM_FORMATS_DF_VEGA20] = {
145 { .name = "event", .config = "config:0-7" },
146 { .name = "instance", .config = "config:8-15" },
147 { .name = "umask", .config = "config:16-23"}
150 static struct amdgpu_pmu_attr df_vega20_events[NUM_EVENTS_DF_VEGA20] = {
152 .config = "event=0x7,instance=0x46,umask=0x2" },
154 .config = "event=0x7,instance=0x47,umask=0x2" },
156 .config = "event=0x7,instance=0x46,umask=0x4" },
158 .config = "event=0x7,instance=0x47,umask=0x4" },
160 .config = "event=0xb,instance=0x46,umask=0x4" },
162 .config = "event=0xb,instance=0x47,umask=0x4" },
164 .config = "event=0xb,instance=0x46,umask=0x8" },
166 .config = "event=0xb,instance=0x47,umask=0x8" }
169 static struct amdgpu_pmu_config df_vega20_config = {
179 static struct amdgpu_pmu_attr arcturus_events[NUM_EVENTS_ARCTURUS_MAX] = {
181 .config = "event=0x7,instance=0x4b,umask=0x2" },
183 .config = "event=0x7,instance=0x4c,umask=0x2" },
185 .config = "event=0x7,instance=0x4d,umask=0x2" },
187 .config = "event=0x7,instance=0x4e,umask=0x2" },
189 .config = "event=0x7,instance=0x4f,umask=0x2" },
191 .config = "event=0x7,instance=0x50,umask=0x2" }
194 static struct amdgpu_pmu_type arcturus_types[NUM_EVENT_TYPES_ARCTURUS] = {
199 static struct amdgpu_pmu_config arcturus_config = {
209 static int amdgpu_perf_event_init(struct perf_event *event) in amdgpu_perf_event_init()
211 struct hw_perf_event *hwc = &event->hw; in amdgpu_perf_event_init()
214 if (event->attr.type != event->pmu->type) in amdgpu_perf_event_init()
215 return -ENOENT; in amdgpu_perf_event_init()
217 /* update the hw_perf_event struct with config data */ in amdgpu_perf_event_init()
218 hwc->config = event->attr.config; in amdgpu_perf_event_init()
219 hwc->config_base = AMDGPU_PMU_PERF_TYPE_NONE; in amdgpu_perf_event_init()
225 static void amdgpu_perf_start(struct perf_event *event, int flags) in amdgpu_perf_start()
227 struct hw_perf_event *hwc = &event->hw; in amdgpu_perf_start()
228 struct amdgpu_pmu_entry *pe = container_of(event->pmu, in amdgpu_perf_start()
233 if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED))) in amdgpu_perf_start()
236 if ((!pe->adev->df.funcs) || in amdgpu_perf_start()
237 (!pe->adev->df.funcs->pmc_start)) in amdgpu_perf_start()
240 WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); in amdgpu_perf_start()
241 hwc->state = 0; in amdgpu_perf_start()
243 switch (hwc->config_base) { in amdgpu_perf_start()
247 target_cntr = pe->adev->df.funcs->pmc_start(pe->adev, in amdgpu_perf_start()
248 hwc->config, 0 /* unused */, in amdgpu_perf_start()
253 hwc->idx = target_cntr; in amdgpu_perf_start()
256 pe->adev->df.funcs->pmc_start(pe->adev, hwc->config, in amdgpu_perf_start()
257 hwc->idx, 0); in amdgpu_perf_start()
267 static void amdgpu_perf_read(struct perf_event *event) in amdgpu_perf_read()
269 struct hw_perf_event *hwc = &event->hw; in amdgpu_perf_read()
270 struct amdgpu_pmu_entry *pe = container_of(event->pmu, in amdgpu_perf_read()
275 if ((!pe->adev->df.funcs) || in amdgpu_perf_read()
276 (!pe->adev->df.funcs->pmc_get_count)) in amdgpu_perf_read()
279 prev = local64_read(&hwc->prev_count); in amdgpu_perf_read()
281 switch (hwc->config_base) { in amdgpu_perf_read()
284 pe->adev->df.funcs->pmc_get_count(pe->adev, in amdgpu_perf_read()
285 hwc->config, hwc->idx, &count); in amdgpu_perf_read()
291 } while (!local64_try_cmpxchg(&hwc->prev_count, &prev, count)); in amdgpu_perf_read()
293 local64_add(count - prev, &event->count); in amdgpu_perf_read()
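
Note: amdgpu_perf_read() above folds in only the delta between the freshly read hardware value and a saved previous value, retrying with a compare-exchange so that concurrent readers never account the same interval twice. A minimal userspace analogue of that pattern, using C11 atomics in place of the kernel's local64_t; all names and the raw_read() stub are illustrative assumptions:

	/*
	 * Userspace analogue of the prev_count / cmpxchg accumulation used in
	 * amdgpu_perf_read(): only the reader that wins the compare-exchange
	 * folds in the delta, so no interval is accounted twice.
	 */
	#include <stdatomic.h>
	#include <stdint.h>
	#include <stdio.h>

	struct demo_counter {
		_Atomic uint64_t prev_count;	/* last hardware value folded in */
		_Atomic uint64_t total;		/* accumulated event count */
	};

	static uint64_t raw_read(void)
	{
		/* stands in for the df.funcs->pmc_get_count() hardware read */
		static _Atomic uint64_t fake_hw;

		return atomic_fetch_add(&fake_hw, 3) + 3;
	}

	static void demo_read(struct demo_counter *c)
	{
		uint64_t prev, count;

		prev = atomic_load(&c->prev_count);
		do {
			count = raw_read();
		} while (!atomic_compare_exchange_weak(&c->prev_count, &prev, count));

		atomic_fetch_add(&c->total, count - prev);
	}

	int main(void)
	{
		struct demo_counter c = { 0, 0 };

		demo_read(&c);
		demo_read(&c);
		printf("total = %llu\n", (unsigned long long)atomic_load(&c.total));
		return 0;
	}
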
297 static void amdgpu_perf_stop(struct perf_event *event, int flags) in amdgpu_perf_stop()
299 struct hw_perf_event *hwc = &event->hw; in amdgpu_perf_stop()
300 struct amdgpu_pmu_entry *pe = container_of(event->pmu, in amdgpu_perf_stop()
304 if (hwc->state & PERF_HES_UPTODATE) in amdgpu_perf_stop()
307 if ((!pe->adev->df.funcs) || in amdgpu_perf_stop()
308 (!pe->adev->df.funcs->pmc_stop)) in amdgpu_perf_stop()
311 switch (hwc->config_base) { in amdgpu_perf_stop()
314 pe->adev->df.funcs->pmc_stop(pe->adev, hwc->config, hwc->idx, in amdgpu_perf_stop()
321 WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED); in amdgpu_perf_stop()
322 hwc->state |= PERF_HES_STOPPED; in amdgpu_perf_stop()
324 if (hwc->state & PERF_HES_UPTODATE) in amdgpu_perf_stop()
328 hwc->state |= PERF_HES_UPTODATE; in amdgpu_perf_stop()
332 static int amdgpu_perf_add(struct perf_event *event, int flags) in amdgpu_perf_add()
334 struct hw_perf_event *hwc = &event->hw; in amdgpu_perf_add()
336 struct amdgpu_pmu_entry *pe = container_of(event->pmu, in amdgpu_perf_add()
340 if ((!pe->adev->df.funcs) || in amdgpu_perf_add()
341 (!pe->adev->df.funcs->pmc_start)) in amdgpu_perf_add()
342 return -EINVAL; in amdgpu_perf_add()
344 switch (pe->pmu_perf_type) { in amdgpu_perf_add()
346 hwc->config_base = AMDGPU_PMU_EVENT_CONFIG_TYPE_DF; in amdgpu_perf_add()
349 hwc->config_base = (hwc->config >> in amdgpu_perf_add()
355 event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED; in amdgpu_perf_add()
357 switch (hwc->config_base) { in amdgpu_perf_add()
360 target_cntr = pe->adev->df.funcs->pmc_start(pe->adev, in amdgpu_perf_add()
361 hwc->config, 0 /* unused */, in amdgpu_perf_add()
366 hwc->idx = target_cntr; in amdgpu_perf_add()
383 static void amdgpu_perf_del(struct perf_event *event, int flags) in amdgpu_perf_del()
385 struct hw_perf_event *hwc = &event->hw; in amdgpu_perf_del()
386 struct amdgpu_pmu_entry *pe = container_of(event->pmu, in amdgpu_perf_del()
389 if ((!pe->adev->df.funcs) || in amdgpu_perf_del()
390 (!pe->adev->df.funcs->pmc_stop)) in amdgpu_perf_del()
395 switch (hwc->config_base) { in amdgpu_perf_del()
398 pe->adev->df.funcs->pmc_stop(pe->adev, hwc->config, hwc->idx, in amdgpu_perf_del()
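
Note: amdgpu_perf_event_init/add/del/start/stop/read above are the standard perf_event PMU callbacks. The listing only shows the opening of the initializer at line 494 ("pmu_entry->pmu = (struct pmu){"); a plausible wiring, given the callbacks defined in this file, is sketched below, and the exact field list is an assumption rather than a verbatim copy:

	/*
	 * Plausible wiring of the callbacks above into the struct pmu that
	 * init_pmu_entry_by_type_and_add() initializes at line 494.  The
	 * field list is assumed from the callbacks present in this file.
	 */
	pmu_entry->pmu = (struct pmu){
		.event_init	= amdgpu_perf_event_init,
		.add		= amdgpu_perf_add,
		.del		= amdgpu_perf_del,
		.start		= amdgpu_perf_start,
		.stop		= amdgpu_perf_stop,
		.read		= amdgpu_perf_read,
		.task_ctx_nr	= perf_invalid_context,
	};
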
408 static void amdgpu_pmu_create_event_attrs_by_type( in amdgpu_pmu_create_event_attrs_by_type()
421 attr_group->attrs[i] = &pmu_attr->attr.attr; in amdgpu_pmu_create_event_attrs_by_type()
422 sysfs_attr_init(&pmu_attr->attr.attr); in amdgpu_pmu_create_event_attrs_by_type()
423 pmu_attr->attr.attr.name = events[i].name; in amdgpu_pmu_create_event_attrs_by_type()
424 pmu_attr->attr.attr.mode = 0444; in amdgpu_pmu_create_event_attrs_by_type()
425 pmu_attr->attr.show = amdgpu_pmu_event_show; in amdgpu_pmu_create_event_attrs_by_type()
426 pmu_attr->event_str = events[i].config; in amdgpu_pmu_create_event_attrs_by_type()
427 pmu_attr->type = type; in amdgpu_pmu_create_event_attrs_by_type()
432 static void amdgpu_pmu_create_attrs(struct attribute_group *attr_group, in amdgpu_pmu_create_attrs()
442 static int amdgpu_pmu_alloc_pmu_attrs( in amdgpu_pmu_alloc_pmu_attrs()
447 struct amdgpu_pmu_config *config) in amdgpu_pmu_alloc_pmu_attrs() argument
449 *fmt_attr = kcalloc(config->num_formats, sizeof(**fmt_attr), in amdgpu_pmu_alloc_pmu_attrs()
453 return -ENOMEM; in amdgpu_pmu_alloc_pmu_attrs()
455 fmt_attr_group->attrs = kcalloc(config->num_formats + 1, in amdgpu_pmu_alloc_pmu_attrs()
456 sizeof(*fmt_attr_group->attrs), GFP_KERNEL); in amdgpu_pmu_alloc_pmu_attrs()
458 if (!fmt_attr_group->attrs) in amdgpu_pmu_alloc_pmu_attrs()
461 *evt_attr = kcalloc(config->num_events, sizeof(**evt_attr), GFP_KERNEL); in amdgpu_pmu_alloc_pmu_attrs()
466 evt_attr_group->attrs = kcalloc(config->num_events + 1, in amdgpu_pmu_alloc_pmu_attrs()
467 sizeof(*evt_attr_group->attrs), GFP_KERNEL); in amdgpu_pmu_alloc_pmu_attrs()
469 if (!evt_attr_group->attrs) in amdgpu_pmu_alloc_pmu_attrs()
476 kfree(fmt_attr_group->attrs); in amdgpu_pmu_alloc_pmu_attrs()
479 return -ENOMEM; in amdgpu_pmu_alloc_pmu_attrs()
483 static int init_pmu_entry_by_type_and_add(struct amdgpu_pmu_entry *pmu_entry, in init_pmu_entry_by_type_and_add()
484 struct amdgpu_pmu_config *config) in init_pmu_entry_by_type_and_add() argument
487 &pmu_entry->fmt_attr_group, in init_pmu_entry_by_type_and_add()
488 &pmu_entry->evt_attr_group, in init_pmu_entry_by_type_and_add()
494 pmu_entry->pmu = (struct pmu){ in init_pmu_entry_by_type_and_add()
504 ret = amdgpu_pmu_alloc_pmu_attrs(&pmu_entry->fmt_attr_group, in init_pmu_entry_by_type_and_add()
505 &pmu_entry->fmt_attr, in init_pmu_entry_by_type_and_add()
506 &pmu_entry->evt_attr_group, in init_pmu_entry_by_type_and_add()
507 &pmu_entry->evt_attr, in init_pmu_entry_by_type_and_add()
508 config); in init_pmu_entry_by_type_and_add()
513 amdgpu_pmu_create_attrs(&pmu_entry->fmt_attr_group, pmu_entry->fmt_attr, in init_pmu_entry_by_type_and_add()
514 config->formats, config->num_formats); in init_pmu_entry_by_type_and_add()
516 if (pmu_entry->pmu_perf_type == AMDGPU_PMU_PERF_TYPE_ALL) { in init_pmu_entry_by_type_and_add()
519 for (i = 0; i < config->num_types; i++) { in init_pmu_entry_by_type_and_add()
521 &pmu_entry->evt_attr_group, in init_pmu_entry_by_type_and_add()
522 pmu_entry->evt_attr, in init_pmu_entry_by_type_and_add()
523 config->events, in init_pmu_entry_by_type_and_add()
526 config->types[i].num_of_type, in init_pmu_entry_by_type_and_add()
527 config->types[i].type); in init_pmu_entry_by_type_and_add()
528 total_num_events += config->types[i].num_of_type; in init_pmu_entry_by_type_and_add()
531 amdgpu_pmu_create_attrs(&pmu_entry->evt_attr_group, in init_pmu_entry_by_type_and_add()
532 pmu_entry->evt_attr, in init_pmu_entry_by_type_and_add()
533 config->events, config->num_events); in init_pmu_entry_by_type_and_add()
534 total_num_events = config->num_events; in init_pmu_entry_by_type_and_add()
537 pmu_entry->pmu.attr_groups = kmemdup(attr_groups, sizeof(attr_groups), in init_pmu_entry_by_type_and_add()
540 if (!pmu_entry->pmu.attr_groups) { in init_pmu_entry_by_type_and_add()
541 ret = -ENOMEM; in init_pmu_entry_by_type_and_add()
545 snprintf(pmu_name, PMU_NAME_SIZE, "%s_%d", pmu_entry->pmu_file_prefix, in init_pmu_entry_by_type_and_add()
546 adev_to_drm(pmu_entry->adev)->primary->index); in init_pmu_entry_by_type_and_add()
548 ret = perf_pmu_register(&pmu_entry->pmu, pmu_name, -1); in init_pmu_entry_by_type_and_add()
553 if (pmu_entry->pmu_perf_type != AMDGPU_PMU_PERF_TYPE_ALL) in init_pmu_entry_by_type_and_add()
555 pmu_entry->pmu_type_name, total_num_events); in init_pmu_entry_by_type_and_add()
560 list_add_tail(&pmu_entry->entry, &amdgpu_pmu_list); in init_pmu_entry_by_type_and_add()
564 kfree(pmu_entry->pmu.attr_groups); in init_pmu_entry_by_type_and_add()
566 kfree(pmu_entry->fmt_attr_group.attrs); in init_pmu_entry_by_type_and_add()
567 kfree(pmu_entry->fmt_attr); in init_pmu_entry_by_type_and_add()
568 kfree(pmu_entry->evt_attr_group.attrs); in init_pmu_entry_by_type_and_add()
569 kfree(pmu_entry->evt_attr); in init_pmu_entry_by_type_and_add()
572 pmu_entry->pmu_type_name); in init_pmu_entry_by_type_and_add()
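
Note: once perf_pmu_register() succeeds, the PMU appears under /sys/bus/event_source/devices/ with the name built above (pmu_file_prefix plus the DRM primary index) and with the format/ and events/ groups populated by the attribute code in this file. A hedged userspace sketch that resolves the dynamic perf type id from sysfs and opens one counter with perf_event_open(2); the directory name and the raw config value are illustrative assumptions:

	/*
	 * Userspace sketch: open one counter on the dynamically registered
	 * PMU.  The sysfs directory name ("amdgpu_df_0") and the config
	 * value are assumptions; adjust to the device actually present.
	 */
	#include <linux/perf_event.h>
	#include <sys/ioctl.h>
	#include <sys/syscall.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	int main(void)
	{
		struct perf_event_attr attr;
		uint64_t count;
		FILE *f;
		int type, fd;

		/* dynamic PMUs export their perf type id in sysfs */
		f = fopen("/sys/bus/event_source/devices/amdgpu_df_0/type", "r");
		if (!f || fscanf(f, "%d", &type) != 1)
			return 1;
		fclose(f);

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = type;
		attr.config = 0x24607;	/* event=0x7, instance=0x46, umask=0x2 */
		attr.disabled = 1;

		/* system-wide counter on CPU 0; these counters are not per-task */
		fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
		if (fd < 0)
			return 1;

		ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
		sleep(1);
		ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

		if (read(fd, &count, sizeof(count)) == sizeof(count))
			printf("count = %llu\n", (unsigned long long)count);
		close(fd);
		return 0;
	}
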
582 if (pe->adev != adev) in amdgpu_pmu_fini()
584 list_del(&pe->entry); in amdgpu_pmu_fini()
585 perf_pmu_unregister(&pe->pmu); in amdgpu_pmu_fini()
586 kfree(pe->pmu.attr_groups); in amdgpu_pmu_fini()
587 kfree(pe->fmt_attr_group.attrs); in amdgpu_pmu_fini()
588 kfree(pe->fmt_attr); in amdgpu_pmu_fini()
589 kfree(pe->evt_attr_group.attrs); in amdgpu_pmu_fini()
590 kfree(pe->evt_attr); in amdgpu_pmu_fini()
595 static struct amdgpu_pmu_entry *create_pmu_entry(struct amdgpu_device *adev, in create_pmu_entry()
607 pmu_entry->adev = adev; in create_pmu_entry()
608 pmu_entry->fmt_attr_group.name = "format"; in create_pmu_entry()
609 pmu_entry->fmt_attr_group.attrs = NULL; in create_pmu_entry()
610 pmu_entry->evt_attr_group.name = "events"; in create_pmu_entry()
611 pmu_entry->evt_attr_group.attrs = NULL; in create_pmu_entry()
612 pmu_entry->pmu_perf_type = pmu_type; in create_pmu_entry()
613 pmu_entry->pmu_type_name = pmu_type_name; in create_pmu_entry()
614 pmu_entry->pmu_file_prefix = pmu_file_prefix; in create_pmu_entry()
625 switch (adev->asic_type) { in amdgpu_pmu_init()
631 return -ENOMEM; in amdgpu_pmu_init()
646 return -ENOMEM; in amdgpu_pmu_init()
663 return -ENOMEM; in amdgpu_pmu_init()
670 return -ENOMEM; in amdgpu_pmu_init()
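
Note: only the switch head and the -ENOMEM returns of amdgpu_pmu_init() matched. Given the per-ASIC config tables above, the dispatch presumably creates one or two PMU entries per supported ASIC and pairs each with its config table; the sketch below is an assumed shape (case labels, name prefixes and the exact pairing are guesses, and error unwinding is omitted), not the driver's actual body:

	/*
	 * Assumed shape of the amdgpu_pmu_init() dispatch; illustrative only.
	 */
	switch (adev->asic_type) {
	case CHIP_VEGA20:
		/* dedicated DF pmu plus a combined "all types" pmu */
		pmu_entry = create_pmu_entry(adev, AMDGPU_PMU_PERF_TYPE_DF,
					     "DF", "amdgpu_df");
		if (!pmu_entry)
			return -ENOMEM;
		ret = init_pmu_entry_by_type_and_add(pmu_entry, &df_vega20_config);
		if (ret)
			return ret;

		pmu_entry = create_pmu_entry(adev, AMDGPU_PMU_PERF_TYPE_ALL,
					     "", "amdgpu");
		if (!pmu_entry)
			return -ENOMEM;
		ret = init_pmu_entry_by_type_and_add(pmu_entry, &vega20_config);
		break;
	case CHIP_ARCTURUS:
		pmu_entry = create_pmu_entry(adev, AMDGPU_PMU_PERF_TYPE_ALL,
					     "", "amdgpu");
		if (!pmu_entry)
			return -ENOMEM;
		ret = init_pmu_entry_by_type_and_add(pmu_entry, &arcturus_config);
		break;
	default:
		ret = 0;
		break;
	}
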