Lines Matching full:tad_pmu

7 #define pr_fmt(fmt) "tad_pmu: " fmt
24 #define to_tad_pmu(p) (container_of(p, struct tad_pmu, pmu))
30 struct tad_pmu { struct
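The matched definitions above (the pr_fmt prefix, the to_tad_pmu() accessor, and the struct tad_pmu definition) come from the top of the driver, most likely drivers/perf/marvell_cn10k_tad_pmu.c. A minimal sketch of how they plausibly fit together follows; the field order, the tad_region type, the TAD_MAX_COUNTERS value, and the TAD_PFC()/TAD_PRF() register offsets are assumptions for illustration, while the field names themselves all appear in the matches below.

#include <linux/bitmap.h>
#include <linux/io.h>
#include <linux/perf_event.h>

#define TAD_MAX_COUNTERS        8       /* assumed number of counters per TAD */

/* Per-counter register offsets; the exact values are assumptions */
#define TAD_PFC(counter)        (0x800 + ((counter) << 3))      /* counter value */
#define TAD_PRF(counter)        (0x900 + ((counter) << 3))      /* event select */

#define to_tad_pmu(p) (container_of(p, struct tad_pmu, pmu))

/* One MMIO window per TAD block (struct name assumed) */
struct tad_region {
        void __iomem *base;
};

struct tad_pmu {
        struct pmu pmu;
        struct tad_region *regions;             /* per-TAD register windows */
        u32 region_cnt;                         /* number of TAD blocks */
        unsigned int cpu;                       /* CPU all events run on */
        struct hlist_node node;                 /* cpuhp instance linkage */
        struct perf_event *events[TAD_MAX_COUNTERS];
        DECLARE_BITMAP(counters_map, TAD_MAX_COUNTERS);
};

The later sketches in this listing reuse these definitions.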
53 struct tad_pmu *tad_pmu = to_tad_pmu(event->pmu); in tad_pmu_event_counter_read() local
61 for (i = 0, new = 0; i < tad_pmu->region_cnt; i++) in tad_pmu_event_counter_read()
62 new += readq(tad_pmu->regions[i].base + in tad_pmu_event_counter_read()
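The two lines matched inside tad_pmu_event_counter_read() show that one logical counter value is the sum of readq() reads from every TAD region. A hedged reconstruction of the whole callback, assuming the usual prev_count/cmpxchg update loop and the TAD_PFC() offset macro sketched above:

/* Sketch only: TAD_PFC() and the local64 bookkeeping are assumptions */
static void tad_pmu_event_counter_read(struct perf_event *event)
{
        struct tad_pmu *tad_pmu = to_tad_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        u32 counter_idx = hwc->idx;
        u64 prev, new;
        int i;

        do {
                prev = local64_read(&hwc->prev_count);
                /* A logical counter is the sum of the same physical
                 * counter across every TAD region. */
                for (i = 0, new = 0; i < tad_pmu->region_cnt; i++)
                        new += readq(tad_pmu->regions[i].base +
                                     TAD_PFC(counter_idx));
        } while (local64_cmpxchg(&hwc->prev_count, prev, new) != prev);

        local64_add(new - prev, &event->count);
}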
71 struct tad_pmu *tad_pmu = to_tad_pmu(event->pmu); in tad_pmu_event_counter_stop() local
79 for (i = 0; i < tad_pmu->region_cnt; i++) { in tad_pmu_event_counter_stop()
80 writeq_relaxed(0, tad_pmu->regions[i].base + in tad_pmu_event_counter_stop()
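tad_pmu_event_counter_stop() clears a per-region register for the counter. A sketch, assuming that a zero event select (TAD_PRF()) halts counting and that the perf state flags are updated in the usual way:

/* Sketch: the TAD_PRF() macro and the final read/state update are assumed */
static void tad_pmu_event_counter_stop(struct perf_event *event, int flags)
{
        struct tad_pmu *tad_pmu = to_tad_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        u32 counter_idx = hwc->idx;
        int i;

        /* Clearing the event select stops the counter in every region */
        for (i = 0; i < tad_pmu->region_cnt; i++) {
                writeq_relaxed(0, tad_pmu->regions[i].base +
                               TAD_PRF(counter_idx));
        }

        tad_pmu_event_counter_read(event);
        hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}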
90 struct tad_pmu *tad_pmu = to_tad_pmu(event->pmu); in tad_pmu_event_counter_start() local
100 for (i = 0; i < tad_pmu->region_cnt; i++) in tad_pmu_event_counter_start()
101 writeq_relaxed(0, tad_pmu->regions[i].base + in tad_pmu_event_counter_start()
107 for (i = 0; i < tad_pmu->region_cnt; i++) { in tad_pmu_event_counter_start()
109 writeq_relaxed(reg_val, tad_pmu->regions[i].base + in tad_pmu_event_counter_start()
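The start path writes zero in one loop and then a reg_val derived from the event in a second loop across all regions. A sketch, assuming the first loop clears the counter (TAD_PFC()) and the second arms it by programming the event select (TAD_PRF()); the event-code mask is also an assumption:

/* Sketch: zero the counter, then arm it, in every TAD region */
static void tad_pmu_event_counter_start(struct perf_event *event, int flags)
{
        struct tad_pmu *tad_pmu = to_tad_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        u32 event_idx = event->attr.config;
        u32 counter_idx = hwc->idx;
        u64 reg_val;
        int i;

        hwc->state = 0;

        /* Zero the counter in every region before arming it */
        for (i = 0; i < tad_pmu->region_cnt; i++)
                writeq_relaxed(0, tad_pmu->regions[i].base +
                               TAD_PFC(counter_idx));

        /* A non-zero event select starts counting (mask assumed) */
        for (i = 0; i < tad_pmu->region_cnt; i++) {
                reg_val = event_idx & 0xFF;
                writeq_relaxed(reg_val, tad_pmu->regions[i].base +
                               TAD_PRF(counter_idx));
        }
}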
116 struct tad_pmu *tad_pmu = to_tad_pmu(event->pmu); in tad_pmu_event_counter_del() local
121 tad_pmu->events[idx] = NULL; in tad_pmu_event_counter_del()
122 clear_bit(idx, tad_pmu->counters_map); in tad_pmu_event_counter_del()
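Deleting an event releases its slot in events[] and counters_map. A sketch that also stops the counter first, which is the conventional ordering, though the stop call itself is not in the matched lines:

/* Sketch: stop the counter, then release its slot */
static void tad_pmu_event_counter_del(struct perf_event *event, int flags)
{
        struct tad_pmu *tad_pmu = to_tad_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;

        tad_pmu_event_counter_stop(event, flags | PERF_EF_UPDATE);
        tad_pmu->events[idx] = NULL;
        clear_bit(idx, tad_pmu->counters_map);
}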
127 struct tad_pmu *tad_pmu = to_tad_pmu(event->pmu); in tad_pmu_event_counter_add() local
132 idx = find_first_zero_bit(tad_pmu->counters_map, TAD_MAX_COUNTERS); in tad_pmu_event_counter_add()
136 set_bit(idx, tad_pmu->counters_map); in tad_pmu_event_counter_add()
140 tad_pmu->events[idx] = event; in tad_pmu_event_counter_add()
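Adding an event claims the first free bit in counters_map, with TAD_MAX_COUNTERS as the search bound, and records the event in events[]. A sketch, with the error path and the PERF_EF_START handling assumed:

/* Sketch: claim a free hardware counter for this event */
static int tad_pmu_event_counter_add(struct perf_event *event, int flags)
{
        struct tad_pmu *tad_pmu = to_tad_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        int idx;

        idx = find_first_zero_bit(tad_pmu->counters_map, TAD_MAX_COUNTERS);
        if (idx == TAD_MAX_COUNTERS)
                return -EAGAIN;         /* all counters in use */

        set_bit(idx, tad_pmu->counters_map);

        hwc->idx = idx;
        hwc->state = PERF_HES_STOPPED;
        tad_pmu->events[idx] = event;

        if (flags & PERF_EF_START)
                tad_pmu_event_counter_start(event, flags);

        return 0;
}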
150 struct tad_pmu *tad_pmu = to_tad_pmu(event->pmu); in tad_pmu_event_init() local
161 event->cpu = tad_pmu->cpu; in tad_pmu_event_init()
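The only matched line in tad_pmu_event_init() rebinds the event to the PMU's designated CPU, which is the usual pattern for an uncore PMU. A sketch, with the validity checks assumed:

/* Sketch: the checks before the CPU rebind are assumptions */
static int tad_pmu_event_init(struct perf_event *event)
{
        struct tad_pmu *tad_pmu = to_tad_pmu(event->pmu);

        /* Only handle events created for this PMU's type */
        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        /* Uncore-style PMU: pin every event to the designated CPU */
        event->cpu = tad_pmu->cpu;
        event->hw.idx = -1;

        return 0;
}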
259 struct tad_pmu *tad_pmu = to_tad_pmu(dev_get_drvdata(dev)); in tad_pmu_cpumask_show() local
261 return cpumap_print_to_pagebuf(true, buf, cpumask_of(tad_pmu->cpu)); in tad_pmu_cpumask_show()
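tad_pmu_cpumask_show() is already almost fully visible in the matches: it prints the single CPU the PMU is bound to. A sketch of the sysfs wiring around it; the attribute and group names are assumptions:

/* Sketch: "cpumask" sysfs attribute; wiring around the show routine assumed */
static ssize_t tad_pmu_cpumask_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        struct tad_pmu *tad_pmu = to_tad_pmu(dev_get_drvdata(dev));

        return cpumap_print_to_pagebuf(true, buf, cpumask_of(tad_pmu->cpu));
}

static DEVICE_ATTR(cpumask, 0444, tad_pmu_cpumask_show, NULL);

static struct attribute *tad_pmu_cpumask_attrs[] = {
        &dev_attr_cpumask.attr,
        NULL,
};

static const struct attribute_group tad_pmu_cpumask_attr_group = {
        .attrs = tad_pmu_cpumask_attrs,
};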
294 struct tad_pmu *tad_pmu; in tad_pmu_probe() local
303 tad_pmu = devm_kzalloc(&pdev->dev, sizeof(*tad_pmu), GFP_KERNEL); in tad_pmu_probe()
304 if (!tad_pmu) in tad_pmu_probe()
307 platform_set_drvdata(pdev, tad_pmu); in tad_pmu_probe()
359 tad_pmu->regions = regions; in tad_pmu_probe()
360 tad_pmu->region_cnt = tad_cnt; in tad_pmu_probe()
362 tad_pmu->pmu = (struct pmu) { in tad_pmu_probe()
378 tad_pmu->pmu.attr_groups = tad_pmu_attr_groups; in tad_pmu_probe()
380 tad_pmu->pmu.attr_groups = ody_tad_pmu_attr_groups; in tad_pmu_probe()
382 tad_pmu->cpu = raw_smp_processor_id(); in tad_pmu_probe()
386 &tad_pmu->node); in tad_pmu_probe()
393 ret = perf_pmu_register(&tad_pmu->pmu, name, -1); in tad_pmu_probe()
396 &tad_pmu->node); in tad_pmu_probe()
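Taken together, the probe matches outline the setup sequence: allocate the per-device state, record the mapped regions, fill in the struct pmu callbacks, pick an attribute-group layout, bind to the probing CPU, register with CPU hotplug, and finally register the PMU, unwinding the hotplug instance if that fails. A condensed sketch; tad_pmu_map_regions() and the is_odyssey flag are hypothetical, the PMU name and tad_pmu_cpuhp_state are assumed, and the firmware-property parsing is elided:

/* Condensed sketch of tad_pmu_probe(); most error paths are omitted */
static int tad_pmu_probe(struct platform_device *pdev)
{
        struct tad_region *regions;
        struct tad_pmu *tad_pmu;
        u32 tad_cnt;
        int ret;

        tad_pmu = devm_kzalloc(&pdev->dev, sizeof(*tad_pmu), GFP_KERNEL);
        if (!tad_pmu)
                return -ENOMEM;

        platform_set_drvdata(pdev, tad_pmu);

        /* Hypothetical helper: map each TAD block and count them */
        ret = tad_pmu_map_regions(pdev, &regions, &tad_cnt);
        if (ret)
                return ret;

        tad_pmu->regions = regions;
        tad_pmu->region_cnt = tad_cnt;

        tad_pmu->pmu = (struct pmu) {
                .module         = THIS_MODULE,
                .task_ctx_nr    = perf_invalid_context,
                .capabilities   = PERF_PMU_CAP_NO_EXCLUDE |
                                  PERF_PMU_CAP_NO_INTERRUPT,
                .event_init     = tad_pmu_event_init,
                .add            = tad_pmu_event_counter_add,
                .del            = tad_pmu_event_counter_del,
                .start          = tad_pmu_event_counter_start,
                .stop           = tad_pmu_event_counter_stop,
                .read           = tad_pmu_event_counter_read,
        };

        /* Hypothetical flag: how CN10K and Odyssey parts are told apart
         * is not visible in the matched lines. */
        bool is_odyssey = false;

        if (!is_odyssey)
                tad_pmu->pmu.attr_groups = tad_pmu_attr_groups;
        else
                tad_pmu->pmu.attr_groups = ody_tad_pmu_attr_groups;

        tad_pmu->cpu = raw_smp_processor_id();

        ret = cpuhp_state_add_instance_nocalls(tad_pmu_cpuhp_state,
                                               &tad_pmu->node);
        if (ret)
                return ret;

        ret = perf_pmu_register(&tad_pmu->pmu, "tad", -1); /* name assumed */
        if (ret)
                cpuhp_state_remove_instance_nocalls(tad_pmu_cpuhp_state,
                                                    &tad_pmu->node);

        return ret;
}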
403 struct tad_pmu *pmu = platform_get_drvdata(pdev); in tad_pmu_remove()
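Removal mirrors probe in reverse: drop the hotplug instance, then unregister the PMU. A sketch, assuming the classic int-returning platform remove callback (newer kernels use a void variant):

/* Sketch: teardown in the reverse order of probe */
static int tad_pmu_remove(struct platform_device *pdev)
{
        struct tad_pmu *pmu = platform_get_drvdata(pdev);

        cpuhp_state_remove_instance_nocalls(tad_pmu_cpuhp_state,
                                            &pmu->node);
        perf_pmu_unregister(&pmu->pmu);

        return 0;
}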
451 struct tad_pmu *pmu = hlist_entry_safe(node, struct tad_pmu, node); in tad_pmu_offline_cpu()
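The hotplug callback recovers the tad_pmu from its hlist node and, if the owning CPU is going away, migrates the perf context to another online CPU. A sketch along those lines; everything past the matched first line is assumed:

/* Sketch: move the PMU context off a CPU that is going offline */
static int tad_pmu_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
        struct tad_pmu *pmu = hlist_entry_safe(node, struct tad_pmu, node);
        unsigned int target;

        if (cpu != pmu->cpu)
                return 0;

        target = cpumask_any_but(cpu_online_mask, cpu);
        if (target >= nr_cpu_ids)
                return 0;

        perf_pmu_migrate_context(&pmu->pmu, cpu, target);
        pmu->cpu = target;

        return 0;
}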