// SPDX-License-Identifier: GPL-2.0-only
/*
 * HiSilicon SoC CPA (Coherency Protocol Agent) hardware event counters support
 *
 * Copyright (C) 2022 HiSilicon Limited
 * Author: Qi Liu <[email protected]>
 *
 * This code is based on the uncore PMUs like arm-cci and arm-ccn.
 */

#define pr_fmt(fmt) "cpa pmu: " fmt
#include <linux/acpi.h>
#include <linux/bug.h>
#include <linux/cpuhotplug.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/list.h>
#include <linux/smp.h>

#include "hisi_uncore_pmu.h"

/* CPA register definition */
#define CPA_PERF_CTRL		0x1c00
#define CPA_EVENT_CTRL		0x1c04
#define CPA_INT_MASK		0x1c70
#define CPA_INT_STATUS		0x1c78
#define CPA_INT_CLEAR		0x1c7c
#define CPA_EVENT_TYPE0		0x1c80
#define CPA_VERSION		0x1cf0
#define CPA_CNT0_LOWER		0x1d00
#define CPA_CFG_REG		0x0534

/* CPA operation command */
#define CPA_PERF_CTRL_EN	BIT_ULL(0)
#define CPA_EVTYPE_MASK		0xffUL
#define CPA_PM_CTRL		BIT_ULL(9)

/* CPA has 8 counters */
#define CPA_NR_COUNTERS		0x8
#define CPA_COUNTER_BITS	64
#define CPA_NR_EVENTS		0xff
#define CPA_REG_OFFSET		0x8

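/*
 * Each of the 8 counters is 64 bits wide and occupies CPA_REG_OFFSET (8)
 * bytes starting at CPA_CNT0_LOWER, so a counter is read and written with
 * a single readq()/writeq() at its computed offset.
 */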
static u32 hisi_cpa_pmu_get_counter_offset(int idx)
{
	return (CPA_CNT0_LOWER + idx * CPA_REG_OFFSET);
}

static u64 hisi_cpa_pmu_read_counter(struct hisi_pmu *cpa_pmu,
				     struct hw_perf_event *hwc)
{
	return readq(cpa_pmu->base + hisi_cpa_pmu_get_counter_offset(hwc->idx));
}

static void hisi_cpa_pmu_write_counter(struct hisi_pmu *cpa_pmu,
				       struct hw_perf_event *hwc, u64 val)
{
	writeq(val, cpa_pmu->base + hisi_cpa_pmu_get_counter_offset(hwc->idx));
}

static void hisi_cpa_pmu_write_evtype(struct hisi_pmu *cpa_pmu, int idx,
				      u32 type)
{
	u32 reg, reg_idx, shift, val;

	/*
	 * Select the appropriate event select register (CPA_EVENT_TYPE0/1).
	 * There are 2 event select registers for the 8 hardware counters.
	 * The event code is 8 bits wide: the first 4 hardware counters use
	 * CPA_EVENT_TYPE0 and the last 4 use CPA_EVENT_TYPE1. For example,
	 * idx = 6 selects CPA_EVENT_TYPE1 with a shift of 16 bits.
	 */
	reg = CPA_EVENT_TYPE0 + (idx / 4) * 4;
	reg_idx = idx % 4;
	shift = CPA_REG_OFFSET * reg_idx;

	/* Write event code to CPA_EVENT_TYPEx Register */
	val = readl(cpa_pmu->base + reg);
	val &= ~(CPA_EVTYPE_MASK << shift);
	val |= type << shift;
	writel(val, cpa_pmu->base + reg);
}

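/*
 * Counting is started and stopped globally for all 8 counters via the
 * CPA_PERF_CTRL_EN bit in CPA_PERF_CTRL; individual counters are gated by
 * their enable bits in CPA_EVENT_CTRL below.
 */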
static void hisi_cpa_pmu_start_counters(struct hisi_pmu *cpa_pmu)
{
	u32 val;

	val = readl(cpa_pmu->base + CPA_PERF_CTRL);
	val |= CPA_PERF_CTRL_EN;
	writel(val, cpa_pmu->base + CPA_PERF_CTRL);
}

static void hisi_cpa_pmu_stop_counters(struct hisi_pmu *cpa_pmu)
{
	u32 val;

	val = readl(cpa_pmu->base + CPA_PERF_CTRL);
	val &= ~(CPA_PERF_CTRL_EN);
	writel(val, cpa_pmu->base + CPA_PERF_CTRL);
}

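/*
 * Setting CPA_PM_CTRL in CPA_CFG_REG disables the agent's power management
 * so the CPA does not enter a low-power state while counters are in use;
 * the bit is cleared again when the PMU is removed or probing fails.
 */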
static void hisi_cpa_pmu_disable_pm(struct hisi_pmu *cpa_pmu)
{
	u32 val;

	val = readl(cpa_pmu->base + CPA_CFG_REG);
	val |= CPA_PM_CTRL;
	writel(val, cpa_pmu->base + CPA_CFG_REG);
}

static void hisi_cpa_pmu_enable_pm(struct hisi_pmu *cpa_pmu)
{
	u32 val;

	val = readl(cpa_pmu->base + CPA_CFG_REG);
	val &= ~(CPA_PM_CTRL);
	writel(val, cpa_pmu->base + CPA_CFG_REG);
}

static void hisi_cpa_pmu_enable_counter(struct hisi_pmu *cpa_pmu,
					struct hw_perf_event *hwc)
{
	u32 val;

	/* Enable counter index in CPA_EVENT_CTRL register */
	val = readl(cpa_pmu->base + CPA_EVENT_CTRL);
	val |= 1 << hwc->idx;
	writel(val, cpa_pmu->base + CPA_EVENT_CTRL);
}

static void hisi_cpa_pmu_disable_counter(struct hisi_pmu *cpa_pmu,
					 struct hw_perf_event *hwc)
{
	u32 val;

	/* Clear counter index in CPA_EVENT_CTRL register */
	val = readl(cpa_pmu->base + CPA_EVENT_CTRL);
	val &= ~(1UL << hwc->idx);
	writel(val, cpa_pmu->base + CPA_EVENT_CTRL);
}

static void hisi_cpa_pmu_enable_counter_int(struct hisi_pmu *cpa_pmu,
					     struct hw_perf_event *hwc)
{
	u32 val;

	/* Write 0 to enable interrupt */
	val = readl(cpa_pmu->base + CPA_INT_MASK);
	val &= ~(1UL << hwc->idx);
	writel(val, cpa_pmu->base + CPA_INT_MASK);
}

static void hisi_cpa_pmu_disable_counter_int(struct hisi_pmu *cpa_pmu,
					      struct hw_perf_event *hwc)
{
	u32 val;

	/* Write 1 to mask interrupt */
	val = readl(cpa_pmu->base + CPA_INT_MASK);
	val |= 1 << hwc->idx;
	writel(val, cpa_pmu->base + CPA_INT_MASK);
}

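/*
 * CPA_INT_STATUS reports one overflow-pending bit per counter; writing the
 * same bit to CPA_INT_CLEAR acknowledges it. Both are consumed by the
 * shared overflow interrupt handler set up in hisi_uncore_pmu_init_irq().
 */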
static u32 hisi_cpa_pmu_get_int_status(struct hisi_pmu *cpa_pmu)
{
	return readl(cpa_pmu->base + CPA_INT_STATUS);
}

static void hisi_cpa_pmu_clear_int_status(struct hisi_pmu *cpa_pmu, int idx)
{
	writel(1 << idx, cpa_pmu->base + CPA_INT_CLEAR);
}

static const struct acpi_device_id hisi_cpa_pmu_acpi_match[] = {
	{ "HISI0281", },
	{}
};
MODULE_DEVICE_TABLE(acpi, hisi_cpa_pmu_acpi_match);

static int hisi_cpa_pmu_init_data(struct platform_device *pdev,
				  struct hisi_pmu *cpa_pmu)
{
	hisi_uncore_pmu_init_topology(cpa_pmu, &pdev->dev);

	if (cpa_pmu->topo.sicl_id < 0) {
		dev_err(&pdev->dev, "Cannot read sicl-id\n");
		return -EINVAL;
	}

	if (cpa_pmu->topo.index_id < 0) {
		dev_err(&pdev->dev, "Cannot read idx-id\n");
		return -EINVAL;
	}

	cpa_pmu->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(cpa_pmu->base))
		return PTR_ERR(cpa_pmu->base);

	cpa_pmu->identifier = readl(cpa_pmu->base + CPA_VERSION);

	return 0;
}

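/*
 * sysfs interface: the "format" and "events" groups below are exported
 * under the PMU named in probe (hisi_sicl<i>_cpa<j>), so an event can be
 * counted with e.g. "perf stat -e hisi_sicl0_cpa0/cpa_cycles/"; the sicl
 * and cpa indices depend on the platform topology.
 */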
static struct attribute *hisi_cpa_pmu_format_attr[] = {
	HISI_PMU_FORMAT_ATTR(event, "config:0-15"),
	NULL
};

static const struct attribute_group hisi_cpa_pmu_format_group = {
	.name = "format",
	.attrs = hisi_cpa_pmu_format_attr,
};

static struct attribute *hisi_cpa_pmu_events_attr[] = {
	HISI_PMU_EVENT_ATTR(cpa_cycles, 0x00),
	HISI_PMU_EVENT_ATTR(cpa_p1_wr_dat, 0x61),
	HISI_PMU_EVENT_ATTR(cpa_p1_rd_dat, 0x62),
	HISI_PMU_EVENT_ATTR(cpa_p0_wr_dat, 0xE1),
	HISI_PMU_EVENT_ATTR(cpa_p0_rd_dat, 0xE2),
	NULL
};

static const struct attribute_group hisi_cpa_pmu_events_group = {
	.name = "events",
	.attrs = hisi_cpa_pmu_events_attr,
};

static const struct attribute_group *hisi_cpa_pmu_attr_groups[] = {
	&hisi_cpa_pmu_format_group,
	&hisi_cpa_pmu_events_group,
	&hisi_pmu_cpumask_attr_group,
	&hisi_pmu_identifier_group,
	NULL
};

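/* Hook the CPA register accessors into the common hisi_uncore PMU framework */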
static const struct hisi_uncore_ops hisi_uncore_cpa_pmu_ops = {
	.write_evtype = hisi_cpa_pmu_write_evtype,
	.get_event_idx = hisi_uncore_pmu_get_event_idx,
	.start_counters = hisi_cpa_pmu_start_counters,
	.stop_counters = hisi_cpa_pmu_stop_counters,
	.enable_counter = hisi_cpa_pmu_enable_counter,
	.disable_counter = hisi_cpa_pmu_disable_counter,
	.enable_counter_int = hisi_cpa_pmu_enable_counter_int,
	.disable_counter_int = hisi_cpa_pmu_disable_counter_int,
	.write_counter = hisi_cpa_pmu_write_counter,
	.read_counter = hisi_cpa_pmu_read_counter,
	.get_int_status = hisi_cpa_pmu_get_int_status,
	.clear_int_status = hisi_cpa_pmu_clear_int_status,
};

static int hisi_cpa_pmu_dev_probe(struct platform_device *pdev,
				  struct hisi_pmu *cpa_pmu)
{
	int ret;

	ret = hisi_cpa_pmu_init_data(pdev, cpa_pmu);
	if (ret)
		return ret;

	ret = hisi_uncore_pmu_init_irq(cpa_pmu, pdev);
	if (ret)
		return ret;

	cpa_pmu->counter_bits = CPA_COUNTER_BITS;
	cpa_pmu->check_event = CPA_NR_EVENTS;
	cpa_pmu->pmu_events.attr_groups = hisi_cpa_pmu_attr_groups;
	cpa_pmu->ops = &hisi_uncore_cpa_pmu_ops;
	cpa_pmu->num_counters = CPA_NR_COUNTERS;
	cpa_pmu->dev = &pdev->dev;
	cpa_pmu->on_cpu = -1;

	return 0;
}

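/*
 * Probe order: map registers and read topology, request the overflow IRQ,
 * disable power management, add the CPU hotplug instance and finally
 * register the PMU; power management is re-enabled if either of the last
 * two steps fails (and again on removal).
 */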
static int hisi_cpa_pmu_probe(struct platform_device *pdev)
{
	struct hisi_pmu *cpa_pmu;
	char *name;
	int ret;

	cpa_pmu = devm_kzalloc(&pdev->dev, sizeof(*cpa_pmu), GFP_KERNEL);
	if (!cpa_pmu)
		return -ENOMEM;

	ret = hisi_cpa_pmu_dev_probe(pdev, cpa_pmu);
	if (ret)
		return ret;

	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sicl%d_cpa%d",
			      cpa_pmu->topo.sicl_id, cpa_pmu->topo.index_id);
	if (!name)
		return -ENOMEM;

	hisi_pmu_init(cpa_pmu, THIS_MODULE);

	/* Power Management should be disabled before using CPA PMU. */
	hisi_cpa_pmu_disable_pm(cpa_pmu);
	ret = cpuhp_state_add_instance(CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE,
				       &cpa_pmu->node);
	if (ret) {
		dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
		hisi_cpa_pmu_enable_pm(cpa_pmu);
		return ret;
	}

	ret = perf_pmu_register(&cpa_pmu->pmu, name, -1);
	if (ret) {
		dev_err(cpa_pmu->dev, "PMU register failed\n");
		cpuhp_state_remove_instance_nocalls(
			CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE, &cpa_pmu->node);
		hisi_cpa_pmu_enable_pm(cpa_pmu);
		return ret;
	}

	platform_set_drvdata(pdev, cpa_pmu);
	return ret;
}

static void hisi_cpa_pmu_remove(struct platform_device *pdev)
{
	struct hisi_pmu *cpa_pmu = platform_get_drvdata(pdev);

	perf_pmu_unregister(&cpa_pmu->pmu);
	cpuhp_state_remove_instance_nocalls(CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE,
					    &cpa_pmu->node);
	hisi_cpa_pmu_enable_pm(cpa_pmu);
}

static struct platform_driver hisi_cpa_pmu_driver = {
	.driver = {
		.name = "hisi_cpa_pmu",
		.acpi_match_table = ACPI_PTR(hisi_cpa_pmu_acpi_match),
		.suppress_bind_attrs = true,
	},
	.probe = hisi_cpa_pmu_probe,
	.remove = hisi_cpa_pmu_remove,
};

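/*
 * The multi-instance hotplug state is set up before the platform driver
 * registers so that probe can add per-PMU instances; the online/offline
 * callbacks pick a hosting CPU and migrate the perf context when that CPU
 * goes offline. Module exit tears the two down in reverse order.
 */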
static int __init hisi_cpa_pmu_module_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE,
				      "AP_PERF_ARM_HISI_CPA_ONLINE",
				      hisi_uncore_pmu_online_cpu,
				      hisi_uncore_pmu_offline_cpu);
	if (ret) {
		pr_err("setup hotplug failed: %d\n", ret);
		return ret;
	}

	ret = platform_driver_register(&hisi_cpa_pmu_driver);
	if (ret)
		cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE);

	return ret;
}
module_init(hisi_cpa_pmu_module_init);

static void __exit hisi_cpa_pmu_module_exit(void)
{
	platform_driver_unregister(&hisi_cpa_pmu_driver);
	cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_HISI_CPA_ONLINE);
}
module_exit(hisi_cpa_pmu_module_exit);

MODULE_IMPORT_NS("HISI_PMU");
MODULE_DESCRIPTION("HiSilicon SoC CPA PMU driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Qi Liu <[email protected]>");