1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * HiSilicon SoC UC (unified cache) uncore Hardware event counters support
4 *
5 * Copyright (C) 2023 HiSilicon Limited
6 *
7 * This code is based on the uncore PMUs like hisi_uncore_l3c_pmu.
8 */
9 #include <linux/cpuhotplug.h>
10 #include <linux/interrupt.h>
11 #include <linux/irq.h>
12 #include <linux/list.h>
13 #include <linux/mod_devicetable.h>
14
15 #include "hisi_uncore_pmu.h"
16
17 /* Dynamic CPU hotplug state used by UC PMU */
18 static enum cpuhp_state hisi_uc_pmu_online;
19
20 /* UC register definition */
21 #define HISI_UC_INT_MASK_REG 0x0800
22 #define HISI_UC_INT_STS_REG 0x0808
23 #define HISI_UC_INT_CLEAR_REG 0x080c
24 #define HISI_UC_TRACETAG_CTRL_REG 0x1b2c
25 #define HISI_UC_TRACETAG_REQ_MSK GENMASK(9, 7)
26 #define HISI_UC_TRACETAG_MARK_EN BIT(0)
27 #define HISI_UC_TRACETAG_REQ_EN (HISI_UC_TRACETAG_MARK_EN | BIT(2))
28 #define HISI_UC_TRACETAG_SRCID_EN BIT(3)
29 #define HISI_UC_SRCID_CTRL_REG 0x1b40
30 #define HISI_UC_SRCID_MSK GENMASK(14, 1)
31 #define HISI_UC_EVENT_CTRL_REG 0x1c00
32 #define HISI_UC_EVENT_TRACETAG_EN BIT(29)
33 #define HISI_UC_EVENT_URING_MSK GENMASK(28, 27)
34 #define HISI_UC_EVENT_GLB_EN BIT(26)
35 #define HISI_UC_VERSION_REG 0x1cf0
36 #define HISI_UC_EVTYPE_REGn(n) (0x1d00 + (n) * 4)
37 #define HISI_UC_EVTYPE_MASK GENMASK(7, 0)
38 #define HISI_UC_CNTR_REGn(n) (0x1e00 + (n) * 8)
39
40 #define HISI_UC_NR_COUNTERS 0x8
41 #define HISI_UC_V2_NR_EVENTS 0xFF
42 #define HISI_UC_CNTR_REG_BITS 64
43
44 #define HISI_UC_RD_REQ_TRACETAG 0x4
45 #define HISI_UC_URING_EVENT_MIN 0x47
46 #define HISI_UC_URING_EVENT_MAX 0x59
47
48 HISI_PMU_EVENT_ATTR_EXTRACTOR(rd_req_en, config1, 0, 0);
49 HISI_PMU_EVENT_ATTR_EXTRACTOR(uring_channel, config1, 5, 4);
50 HISI_PMU_EVENT_ATTR_EXTRACTOR(srcid, config1, 19, 6);
51 HISI_PMU_EVENT_ATTR_EXTRACTOR(srcid_en, config1, 20, 20);
52
hisi_uc_pmu_check_filter(struct perf_event * event)53 static int hisi_uc_pmu_check_filter(struct perf_event *event)
54 {
55 struct hisi_pmu *uc_pmu = to_hisi_pmu(event->pmu);
56
57 if (hisi_get_srcid_en(event) && !hisi_get_rd_req_en(event)) {
58 dev_err(uc_pmu->dev,
59 "rcid_en depends on rd_req_en being enabled!\n");
60 return -EINVAL;
61 }
62
63 if (!hisi_get_uring_channel(event))
64 return 0;
65
66 if ((HISI_GET_EVENTID(event) < HISI_UC_URING_EVENT_MIN) ||
67 (HISI_GET_EVENTID(event) > HISI_UC_URING_EVENT_MAX))
68 dev_warn(uc_pmu->dev,
69 "Only events: [%#x ~ %#x] support channel filtering!",
70 HISI_UC_URING_EVENT_MIN, HISI_UC_URING_EVENT_MAX);
71
72 return 0;
73 }
74
/*
 * Enable the read-request tracetag when the event requested rd_req_en.
 *
 * NOTE(review): the tracetag control register appears to be shared across
 * events on this PMU — if the read-request type is already programmed we
 * leave the register untouched.  Only the read-request type is supported
 * by the hardware as a tracetag source.
 */
static void hisi_uc_pmu_config_req_tracetag(struct perf_event *event)
{
	struct hisi_pmu *uc_pmu = to_hisi_pmu(event->pmu);
	u32 val;

	if (!hisi_get_rd_req_en(event))
		return;

	val = readl(uc_pmu->base + HISI_UC_TRACETAG_CTRL_REG);

	/* The request-type has been configured */
	if (FIELD_GET(HISI_UC_TRACETAG_REQ_MSK, val) == HISI_UC_RD_REQ_TRACETAG)
		return;

	/* Set request-type for tracetag, only read request is supported! */
	val &= ~HISI_UC_TRACETAG_REQ_MSK;
	val |= FIELD_PREP(HISI_UC_TRACETAG_REQ_MSK, HISI_UC_RD_REQ_TRACETAG);
	val |= HISI_UC_TRACETAG_REQ_EN;
	writel(val, uc_pmu->base + HISI_UC_TRACETAG_CTRL_REG);
}
95
/*
 * Undo hisi_uc_pmu_config_req_tracetag(): clear both the request-type
 * field and the tracetag request-enable bits.  A no-op if the event did
 * not request rd_req_en or if another path already cleared the field.
 */
static void hisi_uc_pmu_clear_req_tracetag(struct perf_event *event)
{
	struct hisi_pmu *uc_pmu = to_hisi_pmu(event->pmu);
	u32 val;

	if (!hisi_get_rd_req_en(event))
		return;

	val = readl(uc_pmu->base + HISI_UC_TRACETAG_CTRL_REG);

	/* Do nothing, the request-type tracetag has been cleaned up */
	if (FIELD_GET(HISI_UC_TRACETAG_REQ_MSK, val) == 0)
		return;

	/* Clear request-type */
	val &= ~HISI_UC_TRACETAG_REQ_MSK;
	val &= ~HISI_UC_TRACETAG_REQ_EN;
	writel(val, uc_pmu->base + HISI_UC_TRACETAG_CTRL_REG);
}
115
/*
 * Enable source-id filtering for the event, if srcid_en was requested.
 *
 * Programming order: first turn on the srcid tracetag enable, then write
 * the srcid match value, and finally make sure the read-request tracetag
 * is enabled — srcid filtering depends on it (enforced by
 * hisi_uc_pmu_check_filter()).
 */
static void hisi_uc_pmu_config_srcid_tracetag(struct perf_event *event)
{
	struct hisi_pmu *uc_pmu = to_hisi_pmu(event->pmu);
	u32 val;

	if (!hisi_get_srcid_en(event))
		return;

	val = readl(uc_pmu->base + HISI_UC_TRACETAG_CTRL_REG);

	/* Do nothing, the source id has been configured */
	if (FIELD_GET(HISI_UC_TRACETAG_SRCID_EN, val))
		return;

	/* Enable source id tracetag */
	val |= HISI_UC_TRACETAG_SRCID_EN;
	writel(val, uc_pmu->base + HISI_UC_TRACETAG_CTRL_REG);

	/* Program the source id to match against */
	val = readl(uc_pmu->base + HISI_UC_SRCID_CTRL_REG);
	val &= ~HISI_UC_SRCID_MSK;
	val |= FIELD_PREP(HISI_UC_SRCID_MSK, hisi_get_srcid(event));
	writel(val, uc_pmu->base + HISI_UC_SRCID_CTRL_REG);

	/* Depend on request-type tracetag enabled */
	hisi_uc_pmu_config_req_tracetag(event);
}
142
/*
 * Undo hisi_uc_pmu_config_srcid_tracetag() in reverse order of setup:
 * clear the request-type tracetag first, then disable the srcid tracetag
 * enable bit, then wipe the srcid match value.
 */
static void hisi_uc_pmu_clear_srcid_tracetag(struct perf_event *event)
{
	struct hisi_pmu *uc_pmu = to_hisi_pmu(event->pmu);
	u32 val;

	if (!hisi_get_srcid_en(event))
		return;

	val = readl(uc_pmu->base + HISI_UC_TRACETAG_CTRL_REG);

	/* Do nothing, the source id has been cleaned up */
	if (FIELD_GET(HISI_UC_TRACETAG_SRCID_EN, val) == 0)
		return;

	hisi_uc_pmu_clear_req_tracetag(event);

	/* Disable source id tracetag */
	val &= ~HISI_UC_TRACETAG_SRCID_EN;
	writel(val, uc_pmu->base + HISI_UC_TRACETAG_CTRL_REG);

	/* Clear the programmed source id match value */
	val = readl(uc_pmu->base + HISI_UC_SRCID_CTRL_REG);
	val &= ~HISI_UC_SRCID_MSK;
	writel(val, uc_pmu->base + HISI_UC_SRCID_CTRL_REG);
}
167
hisi_uc_pmu_config_uring_channel(struct perf_event * event)168 static void hisi_uc_pmu_config_uring_channel(struct perf_event *event)
169 {
170 struct hisi_pmu *uc_pmu = to_hisi_pmu(event->pmu);
171 u32 uring_channel = hisi_get_uring_channel(event);
172 u32 val;
173
174 /* Do nothing if not being set or is set explicitly to zero (default) */
175 if (uring_channel == 0)
176 return;
177
178 val = readl(uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
179
180 /* Do nothing, the uring_channel has been configured */
181 if (uring_channel == FIELD_GET(HISI_UC_EVENT_URING_MSK, val))
182 return;
183
184 val &= ~HISI_UC_EVENT_URING_MSK;
185 val |= FIELD_PREP(HISI_UC_EVENT_URING_MSK, uring_channel);
186 writel(val, uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
187 }
188
hisi_uc_pmu_clear_uring_channel(struct perf_event * event)189 static void hisi_uc_pmu_clear_uring_channel(struct perf_event *event)
190 {
191 struct hisi_pmu *uc_pmu = to_hisi_pmu(event->pmu);
192 u32 val;
193
194 /* Do nothing if not being set or is set explicitly to zero (default) */
195 if (hisi_get_uring_channel(event) == 0)
196 return;
197
198 val = readl(uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
199
200 /* Do nothing, the uring_channel has been cleaned up */
201 if (FIELD_GET(HISI_UC_EVENT_URING_MSK, val) == 0)
202 return;
203
204 val &= ~HISI_UC_EVENT_URING_MSK;
205 writel(val, uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
206 }
207
hisi_uc_pmu_enable_filter(struct perf_event * event)208 static void hisi_uc_pmu_enable_filter(struct perf_event *event)
209 {
210 if (event->attr.config1 == 0)
211 return;
212
213 hisi_uc_pmu_config_uring_channel(event);
214 hisi_uc_pmu_config_req_tracetag(event);
215 hisi_uc_pmu_config_srcid_tracetag(event);
216 }
217
hisi_uc_pmu_disable_filter(struct perf_event * event)218 static void hisi_uc_pmu_disable_filter(struct perf_event *event)
219 {
220 if (event->attr.config1 == 0)
221 return;
222
223 hisi_uc_pmu_clear_srcid_tracetag(event);
224 hisi_uc_pmu_clear_req_tracetag(event);
225 hisi_uc_pmu_clear_uring_channel(event);
226 }
227
/*
 * Program the event code for hardware counter @idx.
 *
 * Event codes are 8 bits wide and packed four per 32-bit select register,
 * so counter idx lives in register (idx / 4) at byte lane (idx % 4).
 */
static void hisi_uc_pmu_write_evtype(struct hisi_pmu *uc_pmu, int idx, u32 type)
{
	u32 val;

	/*
	 * Select the appropriate event select register.
	 * There are 2 32-bit event select registers for the
	 * 8 hardware counters, each event code is 8-bit wide.
	 */
	val = readl(uc_pmu->base + HISI_UC_EVTYPE_REGn(idx / 4));
	val &= ~(HISI_UC_EVTYPE_MASK << HISI_PMU_EVTYPE_SHIFT(idx));
	val |= (type << HISI_PMU_EVTYPE_SHIFT(idx));
	writel(val, uc_pmu->base + HISI_UC_EVTYPE_REGn(idx / 4));
}
242
hisi_uc_pmu_start_counters(struct hisi_pmu * uc_pmu)243 static void hisi_uc_pmu_start_counters(struct hisi_pmu *uc_pmu)
244 {
245 u32 val;
246
247 val = readl(uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
248 val |= HISI_UC_EVENT_GLB_EN;
249 writel(val, uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
250 }
251
hisi_uc_pmu_stop_counters(struct hisi_pmu * uc_pmu)252 static void hisi_uc_pmu_stop_counters(struct hisi_pmu *uc_pmu)
253 {
254 u32 val;
255
256 val = readl(uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
257 val &= ~HISI_UC_EVENT_GLB_EN;
258 writel(val, uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
259 }
260
hisi_uc_pmu_enable_counter(struct hisi_pmu * uc_pmu,struct hw_perf_event * hwc)261 static void hisi_uc_pmu_enable_counter(struct hisi_pmu *uc_pmu,
262 struct hw_perf_event *hwc)
263 {
264 u32 val;
265
266 /* Enable counter index */
267 val = readl(uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
268 val |= (1 << hwc->idx);
269 writel(val, uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
270 }
271
hisi_uc_pmu_disable_counter(struct hisi_pmu * uc_pmu,struct hw_perf_event * hwc)272 static void hisi_uc_pmu_disable_counter(struct hisi_pmu *uc_pmu,
273 struct hw_perf_event *hwc)
274 {
275 u32 val;
276
277 /* Clear counter index */
278 val = readl(uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
279 val &= ~(1 << hwc->idx);
280 writel(val, uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
281 }
282
hisi_uc_pmu_read_counter(struct hisi_pmu * uc_pmu,struct hw_perf_event * hwc)283 static u64 hisi_uc_pmu_read_counter(struct hisi_pmu *uc_pmu,
284 struct hw_perf_event *hwc)
285 {
286 return readq(uc_pmu->base + HISI_UC_CNTR_REGn(hwc->idx));
287 }
288
hisi_uc_pmu_get_glb_en_state(struct hisi_pmu * uc_pmu)289 static bool hisi_uc_pmu_get_glb_en_state(struct hisi_pmu *uc_pmu)
290 {
291 u32 val;
292
293 val = readl(uc_pmu->base + HISI_UC_EVENT_CTRL_REG);
294 return !!FIELD_GET(HISI_UC_EVENT_GLB_EN, val);
295 }
296
/* Write a 64-bit value directly into hardware counter hwc->idx */
static void hisi_uc_pmu_write_counter_normal(struct hisi_pmu *uc_pmu,
					     struct hw_perf_event *hwc, u64 val)
{
	void __iomem *cntr = uc_pmu->base + HISI_UC_CNTR_REGn(hwc->idx);

	writeq(val, cntr);
}
302
/*
 * Workaround for erratum 162700402 on UC PMU v2 (see the comment in
 * hisi_uc_pmu_write_counter()): the counter has no clock while the PMU
 * is globally disabled, so temporarily enable the PMU around the write
 * and then disable it again.  The start/write/stop order here is the
 * workaround itself and must not be reordered.
 */
static void hisi_uc_pmu_write_counter_quirk_v2(struct hisi_pmu *uc_pmu,
					       struct hw_perf_event *hwc, u64 val)
{
	hisi_uc_pmu_start_counters(uc_pmu);
	hisi_uc_pmu_write_counter_normal(uc_pmu, hwc, val);
	hisi_uc_pmu_stop_counters(uc_pmu);
}
310
/*
 * Set the value of hardware counter hwc->idx, choosing the plain write or
 * the v2 erratum workaround depending on PMU version and current global
 * enable state.
 */
static void hisi_uc_pmu_write_counter(struct hisi_pmu *uc_pmu,
				      struct hw_perf_event *hwc, u64 val)
{
	bool enable = hisi_uc_pmu_get_glb_en_state(uc_pmu);
	bool erratum = uc_pmu->identifier == HISI_PMU_V2;

	/*
	 * HiSilicon UC PMU v2 suffers the erratum 162700402 that the
	 * PMU counter cannot be set due to the lack of clock under power
	 * saving mode. This will lead to error or inaccurate counts.
	 * The clock can be enabled by the PMU global enabling control.
	 * The irq handler and pmu_start() will call the function to set
	 * period. If the function under irq context, the PMU has been
	 * enabled therefore we set counter directly. Other situations
	 * the PMU is disabled, we need to enable it to turn on the
	 * counter clock to set period, and then restore PMU enable
	 * status, the counter can hold its value without a clock.
	 */
	if (enable || !erratum)
		hisi_uc_pmu_write_counter_normal(uc_pmu, hwc, val);
	else
		hisi_uc_pmu_write_counter_quirk_v2(uc_pmu, hwc, val);
}
334
hisi_uc_pmu_enable_counter_int(struct hisi_pmu * uc_pmu,struct hw_perf_event * hwc)335 static void hisi_uc_pmu_enable_counter_int(struct hisi_pmu *uc_pmu,
336 struct hw_perf_event *hwc)
337 {
338 u32 val;
339
340 val = readl(uc_pmu->base + HISI_UC_INT_MASK_REG);
341 val &= ~(1 << hwc->idx);
342 writel(val, uc_pmu->base + HISI_UC_INT_MASK_REG);
343 }
344
hisi_uc_pmu_disable_counter_int(struct hisi_pmu * uc_pmu,struct hw_perf_event * hwc)345 static void hisi_uc_pmu_disable_counter_int(struct hisi_pmu *uc_pmu,
346 struct hw_perf_event *hwc)
347 {
348 u32 val;
349
350 val = readl(uc_pmu->base + HISI_UC_INT_MASK_REG);
351 val |= (1 << hwc->idx);
352 writel(val, uc_pmu->base + HISI_UC_INT_MASK_REG);
353 }
354
hisi_uc_pmu_get_int_status(struct hisi_pmu * uc_pmu)355 static u32 hisi_uc_pmu_get_int_status(struct hisi_pmu *uc_pmu)
356 {
357 return readl(uc_pmu->base + HISI_UC_INT_STS_REG);
358 }
359
hisi_uc_pmu_clear_int_status(struct hisi_pmu * uc_pmu,int idx)360 static void hisi_uc_pmu_clear_int_status(struct hisi_pmu *uc_pmu, int idx)
361 {
362 writel(1 << idx, uc_pmu->base + HISI_UC_INT_CLEAR_REG);
363 }
364
/*
 * Read the topology identifiers and map the register space for one UC PMU
 * device.  All three topology IDs (SCCL, CCL, sub-id) are mandatory: they
 * name the PMU instance in sysfs, so a missing one is a fatal -EINVAL.
 *
 * Returns 0 on success or a negative errno.
 */
static int hisi_uc_pmu_init_data(struct platform_device *pdev,
				 struct hisi_pmu *uc_pmu)
{
	hisi_uncore_pmu_init_topology(uc_pmu, &pdev->dev);

	/*
	 * Use SCCL (Super CPU Cluster) ID and CCL (CPU Cluster) ID to
	 * identify the topology information of UC PMU devices in the chip.
	 * They have some CCLs per SCCL and then 4 UC PMU per CCL.
	 */
	if (uc_pmu->topo.sccl_id < 0) {
		dev_err(&pdev->dev, "Can not read uc sccl-id!\n");
		return -EINVAL;
	}

	if (uc_pmu->topo.ccl_id < 0) {
		dev_err(&pdev->dev, "Can not read uc ccl-id!\n");
		return -EINVAL;
	}

	if (uc_pmu->topo.sub_id < 0) {
		dev_err(&pdev->dev, "Can not read sub-id!\n");
		return -EINVAL;
	}

	uc_pmu->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(uc_pmu->base)) {
		dev_err(&pdev->dev, "ioremap failed for uc_pmu resource\n");
		return PTR_ERR(uc_pmu->base);
	}

	/* Hardware version, also used to select the v2 erratum workaround */
	uc_pmu->identifier = readl(uc_pmu->base + HISI_UC_VERSION_REG);

	return 0;
}
400
/* Event/filter config fields exposed under sysfs "format" directory */
static struct attribute *hisi_uc_pmu_format_attr[] = {
	HISI_PMU_FORMAT_ATTR(event, "config:0-7"),
	HISI_PMU_FORMAT_ATTR(rd_req_en, "config1:0-0"),
	HISI_PMU_FORMAT_ATTR(uring_channel, "config1:4-5"),
	HISI_PMU_FORMAT_ATTR(srcid, "config1:6-19"),
	HISI_PMU_FORMAT_ATTR(srcid_en, "config1:20-20"),
	NULL
};

static const struct attribute_group hisi_uc_pmu_format_group = {
	.name = "format",
	.attrs = hisi_uc_pmu_format_attr,
};

/* Named hardware events exposed under sysfs "events" directory */
static struct attribute *hisi_uc_pmu_events_attr[] = {
	HISI_PMU_EVENT_ATTR(sq_time, 0x00),
	HISI_PMU_EVENT_ATTR(pq_time, 0x01),
	HISI_PMU_EVENT_ATTR(hbm_time, 0x02),
	HISI_PMU_EVENT_ATTR(iq_comp_time_cring, 0x03),
	HISI_PMU_EVENT_ATTR(iq_comp_time_uring, 0x05),
	HISI_PMU_EVENT_ATTR(cpu_rd, 0x10),
	HISI_PMU_EVENT_ATTR(cpu_rd64, 0x17),
	HISI_PMU_EVENT_ATTR(cpu_rs64, 0x19),
	HISI_PMU_EVENT_ATTR(cpu_mru, 0x1c),
	HISI_PMU_EVENT_ATTR(cycles, 0x95),
	HISI_PMU_EVENT_ATTR(spipe_hit, 0xb3),
	HISI_PMU_EVENT_ATTR(hpipe_hit, 0xdb),
	HISI_PMU_EVENT_ATTR(cring_rxdat_cnt, 0xfa),
	HISI_PMU_EVENT_ATTR(cring_txdat_cnt, 0xfb),
	HISI_PMU_EVENT_ATTR(uring_rxdat_cnt, 0xfc),
	HISI_PMU_EVENT_ATTR(uring_txdat_cnt, 0xfd),
	NULL
};

static const struct attribute_group hisi_uc_pmu_events_group = {
	.name = "events",
	.attrs = hisi_uc_pmu_events_attr,
};

/* All sysfs groups registered with the perf PMU */
static const struct attribute_group *hisi_uc_pmu_attr_groups[] = {
	&hisi_uc_pmu_format_group,
	&hisi_uc_pmu_events_group,
	&hisi_pmu_cpumask_attr_group,
	&hisi_pmu_identifier_group,
	NULL
};
447
/* Hook table plugged into the shared hisi_uncore_pmu framework */
static const struct hisi_uncore_ops hisi_uncore_uc_pmu_ops = {
	.check_filter		= hisi_uc_pmu_check_filter,
	.write_evtype		= hisi_uc_pmu_write_evtype,
	.get_event_idx		= hisi_uncore_pmu_get_event_idx,
	.start_counters		= hisi_uc_pmu_start_counters,
	.stop_counters		= hisi_uc_pmu_stop_counters,
	.enable_counter		= hisi_uc_pmu_enable_counter,
	.disable_counter	= hisi_uc_pmu_disable_counter,
	.enable_counter_int	= hisi_uc_pmu_enable_counter_int,
	.disable_counter_int	= hisi_uc_pmu_disable_counter_int,
	.write_counter		= hisi_uc_pmu_write_counter,
	.read_counter		= hisi_uc_pmu_read_counter,
	.get_int_status		= hisi_uc_pmu_get_int_status,
	.clear_int_status	= hisi_uc_pmu_clear_int_status,
	.enable_filter		= hisi_uc_pmu_enable_filter,
	.disable_filter		= hisi_uc_pmu_disable_filter,
};
465
/*
 * First-stage probe: read topology/registers, request the overflow IRQ
 * and fill in the hisi_pmu descriptor.  on_cpu is set to -1 so that the
 * cpuhp online callback later picks the CPU to run the PMU on.
 *
 * Returns 0 on success or a negative errno.
 */
static int hisi_uc_pmu_dev_probe(struct platform_device *pdev,
				 struct hisi_pmu *uc_pmu)
{
	int ret;

	ret = hisi_uc_pmu_init_data(pdev, uc_pmu);
	if (ret)
		return ret;

	ret = hisi_uncore_pmu_init_irq(uc_pmu, pdev);
	if (ret)
		return ret;

	uc_pmu->pmu_events.attr_groups = hisi_uc_pmu_attr_groups;
	uc_pmu->check_event = HISI_UC_EVTYPE_MASK;
	uc_pmu->ops = &hisi_uncore_uc_pmu_ops;
	uc_pmu->counter_bits = HISI_UC_CNTR_REG_BITS;
	uc_pmu->num_counters = HISI_UC_NR_COUNTERS;
	uc_pmu->dev = &pdev->dev;
	uc_pmu->on_cpu = -1;

	return 0;
}
489
/* devm action: drop this PMU's instance from the cpuhp multi-state */
static void hisi_uc_pmu_remove_cpuhp_instance(void *hotplug_node)
{
	cpuhp_state_remove_instance_nocalls(hisi_uc_pmu_online, hotplug_node);
}
494
/* devm action: unregister the perf PMU on device teardown */
static void hisi_uc_pmu_unregister_pmu(void *pmu)
{
	perf_pmu_unregister(pmu);
}
499
/*
 * Platform-driver probe.  All teardown is handled through devm actions,
 * registered immediately after the resource they undo, so the error paths
 * can simply return and still unwind in the correct (reverse) order.
 */
static int hisi_uc_pmu_probe(struct platform_device *pdev)
{
	struct hisi_pmu *uc_pmu;
	char *name;
	int ret;

	uc_pmu = devm_kzalloc(&pdev->dev, sizeof(*uc_pmu), GFP_KERNEL);
	if (!uc_pmu)
		return -ENOMEM;

	platform_set_drvdata(pdev, uc_pmu);

	ret = hisi_uc_pmu_dev_probe(pdev, uc_pmu);
	if (ret)
		return ret;

	/* Instance name encodes the topology: hisi_sccl<X>_uc<Y>_<Z> */
	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "hisi_sccl%d_uc%d_%d",
			      uc_pmu->topo.sccl_id, uc_pmu->topo.ccl_id,
			      uc_pmu->topo.sub_id);
	if (!name)
		return -ENOMEM;

	ret = cpuhp_state_add_instance(hisi_uc_pmu_online, &uc_pmu->node);
	if (ret)
		return dev_err_probe(&pdev->dev, ret, "Error registering hotplug\n");

	ret = devm_add_action_or_reset(&pdev->dev,
				       hisi_uc_pmu_remove_cpuhp_instance,
				       &uc_pmu->node);
	if (ret)
		return ret;

	hisi_pmu_init(uc_pmu, THIS_MODULE);

	ret = perf_pmu_register(&uc_pmu->pmu, name, -1);
	if (ret)
		return ret;

	/* Ensure the PMU is unregistered before the cpuhp instance goes */
	return devm_add_action_or_reset(&pdev->dev,
					hisi_uc_pmu_unregister_pmu,
					&uc_pmu->pmu);
}
542
/* ACPI HID of the UC PMU device as described by platform firmware */
static const struct acpi_device_id hisi_uc_pmu_acpi_match[] = {
	{ "HISI0291", },
	{}
};
MODULE_DEVICE_TABLE(acpi, hisi_uc_pmu_acpi_match);
548
/* No .remove callback: all teardown is done via devm actions in probe */
static struct platform_driver hisi_uc_pmu_driver = {
	.driver = {
		.name = "hisi_uc_pmu",
		.acpi_match_table = hisi_uc_pmu_acpi_match,
		/*
		 * We have not worked out a safe bind/unbind process,
		 * Forcefully unbinding during sampling will lead to a
		 * kernel panic, so this is not supported yet.
		 */
		.suppress_bind_attrs = true,
	},
	.probe = hisi_uc_pmu_probe,
};
562
/*
 * Module init: allocate a dynamic cpuhp multi-state first (each probed
 * device adds an instance to it), then register the platform driver.
 * The state is torn down again if driver registration fails.
 */
static int __init hisi_uc_pmu_module_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
				      "perf/hisi/uc:online",
				      hisi_uncore_pmu_online_cpu,
				      hisi_uncore_pmu_offline_cpu);
	if (ret < 0) {
		pr_err("UC PMU: Error setup hotplug, ret = %d\n", ret);
		return ret;
	}
	/* On success, ret holds the dynamically allocated state number */
	hisi_uc_pmu_online = ret;

	ret = platform_driver_register(&hisi_uc_pmu_driver);
	if (ret)
		cpuhp_remove_multi_state(hisi_uc_pmu_online);

	return ret;
}
module_init(hisi_uc_pmu_module_init);
584
/* Module exit: unregister the driver, then free the cpuhp multi-state */
static void __exit hisi_uc_pmu_module_exit(void)
{
	platform_driver_unregister(&hisi_uc_pmu_driver);
	cpuhp_remove_multi_state(hisi_uc_pmu_online);
}
module_exit(hisi_uc_pmu_module_exit);

MODULE_IMPORT_NS("HISI_PMU");
MODULE_DESCRIPTION("HiSilicon SoC UC uncore PMU driver");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Junhao He <[email protected]>");
596