
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Perf: amd_iommu - AMD IOMMU Performance Counter PMU implementation
 */

#include <linux/perf_event.h>
#include <linux/init.h>
#include <linux/cpumask.h>
#include <linux/slab.h>
#include <linux/amd-iommu.h>

#include "../perf_event.h"
#include "iommu.h"
/* iommu pmu conf masks */
#define GET_CSOURCE(x)     ((x)->conf & 0xFFULL)
#define GET_DEVID(x)       (((x)->conf >> 8)  & 0xFFFFULL)
#define GET_DOMID(x)       (((x)->conf >> 24) & 0xFFFFULL)
#define GET_PASID(x)       (((x)->conf >> 40) & 0xFFFFFULL)

/* iommu pmu conf1 masks */
#define GET_DEVID_MASK(x)  ((x)->conf1 & 0xFFFFULL)
#define GET_DOMID_MASK(x)  (((x)->conf1 >> 16) & 0xFFFFULL)
#define GET_PASID_MASK(x)  (((x)->conf1 >> 32) & 0xFFFFFULL)
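
/*
 * For reference, these macros decode the user-visible config/config1
 * layout that the "format" attributes below export:
 *
 *   config:  [7:0] csource, [23:8] devid, [39:24] domid, [59:40] pasid
 *   config1: [15:0] devid_mask, [31:16] domid_mask, [51:32] pasid_mask
 */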

#define IOMMU_NAME_SIZE 16

struct perf_amd_iommu {
	struct list_head list;
	struct pmu pmu;
	struct amd_iommu *iommu;
	char name[IOMMU_NAME_SIZE];
	u8 max_banks;
	u8 max_counters;
	u64 cntr_assign_mask;
	raw_spinlock_t lock;
};

static LIST_HEAD(perf_amd_iommu_list);

/*---------------------------------------------
 * sysfs format attributes
 *---------------------------------------------*/
PMU_FORMAT_ATTR(csource,    "config:0-7");
PMU_FORMAT_ATTR(devid,      "config:8-23");
PMU_FORMAT_ATTR(domid,      "config:24-39");
PMU_FORMAT_ATTR(pasid,      "config:40-59");
PMU_FORMAT_ATTR(devid_mask, "config1:0-15");
PMU_FORMAT_ATTR(domid_mask, "config1:16-31");
PMU_FORMAT_ATTR(pasid_mask, "config1:32-51");
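
/*
 * Illustrative use of these fields from user space (the field values here
 * are made up; the PMU instance name comes from init_one_iommu() below):
 *
 *   perf stat -e 'amd_iommu_0/csource=0x01,devid=0x0005,devid_mask=0xffff/'
 *
 * csource selects the counter event source; devid/domid/pasid plus their
 * config1 masks restrict counting to matching requests.
 */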

static struct attribute *iommu_format_attrs[] = {
	&format_attr_csource.attr,
	&format_attr_devid.attr,
	&format_attr_pasid.attr,
	&format_attr_domid.attr,
	&format_attr_devid_mask.attr,
	&format_attr_pasid_mask.attr,
	&format_attr_domid_mask.attr,
	NULL,
};

static struct attribute_group amd_iommu_format_group = {
	.name = "format",
	.attrs = iommu_format_attrs,
};

/*---------------------------------------------
 * sysfs events attributes
 *---------------------------------------------*/
static struct attribute_group amd_iommu_events_group = {
	.name = "events",
};

struct amd_iommu_event_desc {
	struct device_attribute attr;
	const char *event;
};

static ssize_t _iommu_event_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct amd_iommu_event_desc *event =
		container_of(attr, struct amd_iommu_event_desc, attr);
	return sprintf(buf, "%s\n", event->event);
}

#define AMD_IOMMU_EVENT_DESC(_name, _event)			\
{								\
	.attr  = __ATTR(_name, 0444, _iommu_event_show, NULL),	\
	.event = _event,					\
}

/*---------------------------------------------
 * sysfs cpumask attributes
 *---------------------------------------------*/
static cpumask_t iommu_cpumask;

static ssize_t _iommu_cpumask_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return cpumap_print_to_pagebuf(true, buf, &iommu_cpumask);
}
static DEVICE_ATTR(cpumask, S_IRUGO, _iommu_cpumask_show, NULL);

static struct attribute *iommu_cpumask_attrs[] = {
	&dev_attr_cpumask.attr,
	NULL,
};

static struct attribute_group amd_iommu_cpumask_group = {
	.attrs = iommu_cpumask_attrs,
};

/*---------------------------------------------*/

static int get_next_avail_iommu_bnk_cntr(struct perf_event *event)
{
	struct perf_amd_iommu *piommu = container_of(event->pmu, struct perf_amd_iommu, pmu);
	int max_cntrs = piommu->max_counters;
	int max_banks = piommu->max_banks;
	u32 shift, bank, cntr;
	unsigned long flags;
	int retval;

	raw_spin_lock_irqsave(&piommu->lock, flags);

	for (bank = 0; bank < max_banks; bank++) {
		for (cntr = 0; cntr < max_cntrs; cntr++) {
			shift = bank + (bank * 3) + cntr;
			if (piommu->cntr_assign_mask & BIT_ULL(shift)) {
				continue;
			} else {
				piommu->cntr_assign_mask |= BIT_ULL(shift);
				event->hw.iommu_bank = bank;
				event->hw.iommu_cntr = cntr;
				retval = 0;
				goto out;
			}
		}
	}
	retval = -ENOSPC;
out:
	raw_spin_unlock_irqrestore(&piommu->lock, flags);

	return retval;
}
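
/*
 * The bookkeeping above packs four counters per bank into cntr_assign_mask:
 * shift = bank + bank * 3 + cntr is simply bank * 4 + cntr. For example,
 * bank 2, counter 1 claims bit 2 + 6 + 1 = 9.
 */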

static int clear_avail_iommu_bnk_cntr(struct perf_amd_iommu *perf_iommu,
				      u8 bank, u8 cntr)
{
	unsigned long flags;
	int max_banks, max_cntrs;
	int shift = 0;

	max_banks = perf_iommu->max_banks;
	max_cntrs = perf_iommu->max_counters;

	if ((bank > max_banks) || (cntr > max_cntrs))
		return -EINVAL;

	shift = bank + cntr + (bank * 3);

	raw_spin_lock_irqsave(&perf_iommu->lock, flags);
	perf_iommu->cntr_assign_mask &= ~(1ULL << shift);
	raw_spin_unlock_irqrestore(&perf_iommu->lock, flags);

	return 0;
}

static int perf_iommu_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/* test the event attr type check for PMU enumeration */
	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/*
	 * IOMMU counters are shared across all cores.
	 * Therefore, it does not support per-process mode.
	 * Also, it does not support event sampling mode.
	 */
	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EINVAL;

	if (event->cpu < 0)
		return -EINVAL;

	/* update the hw_perf_event struct with the iommu config data */
	hwc->conf  = event->attr.config;
	hwc->conf1 = event->attr.config1;

	return 0;
}

static inline struct amd_iommu *perf_event_2_iommu(struct perf_event *ev)
{
	return (container_of(ev->pmu, struct perf_amd_iommu, pmu))->iommu;
}

static void perf_iommu_enable_event(struct perf_event *ev)
{
	struct amd_iommu *iommu = perf_event_2_iommu(ev);
	struct hw_perf_event *hwc = &ev->hw;
	u8 bank = hwc->iommu_bank;
	u8 cntr = hwc->iommu_cntr;
	u64 reg;

	/* Select the event source for this bank/counter. */
	reg = GET_CSOURCE(hwc);
	amd_iommu_pc_set_reg(iommu, bank, cntr, IOMMU_PC_COUNTER_SRC_REG, &reg);

	/* Program the devid/pasid/domid match values and masks. */
	reg = GET_DEVID(hwc) | (GET_DEVID_MASK(hwc) << 32);
	amd_iommu_pc_set_reg(iommu, bank, cntr, IOMMU_PC_DEVID_MATCH_REG, &reg);

	reg = GET_PASID(hwc) | (GET_PASID_MASK(hwc) << 32);
	amd_iommu_pc_set_reg(iommu, bank, cntr, IOMMU_PC_PASID_MATCH_REG, &reg);

	reg = GET_DOMID(hwc) | (GET_DOMID_MASK(hwc) << 32);
	amd_iommu_pc_set_reg(iommu, bank, cntr, IOMMU_PC_DOMID_MATCH_REG, &reg);
}

static void perf_iommu_disable_event(struct perf_event *event)
{
	struct amd_iommu *iommu = perf_event_2_iommu(event);
	struct hw_perf_event *hwc = &event->hw;
	u64 reg = 0ULL;

	amd_iommu_pc_set_reg(iommu, hwc->iommu_bank, hwc->iommu_cntr,
			     IOMMU_PC_COUNTER_SRC_REG, &reg);
}

static void perf_iommu_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
		return;

	WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
	hwc->state = 0;

	/*
	 * To account for power-gating, which prevents write to
	 * the counter, we need to enable the counter
	 * before setting up counter register.
	 */
	perf_iommu_enable_event(event);

	if (flags & PERF_EF_RELOAD) {
		u64 count = 0;
		struct amd_iommu *iommu = perf_event_2_iommu(event);

		amd_iommu_pc_set_reg(iommu, hwc->iommu_bank, hwc->iommu_cntr,
				     IOMMU_PC_COUNTER_REG, &count);
	}

	perf_event_update_userpage(event);
}

static void perf_iommu_read(struct perf_event *event)
{
	u64 count;
	struct hw_perf_event *hwc = &event->hw;
	struct amd_iommu *iommu = perf_event_2_iommu(event);

	if (amd_iommu_pc_get_reg(iommu, hwc->iommu_bank, hwc->iommu_cntr,
				 IOMMU_PC_COUNTER_REG, &count))
		return;

	/* IOMMU pc counter register is only 48 bits */
	count &= GENMASK_ULL(47, 0);

	local64_add(count, &event->count);
}

static void perf_iommu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (hwc->state & PERF_HES_UPTODATE)
		return;

	/*
	 * To account for power-gating, in which reading the counter would
	 * return zero, we need to read the register before disabling.
	 */
	perf_iommu_read(event);
	hwc->state |= PERF_HES_UPTODATE;

	perf_iommu_disable_event(event);
	WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
	hwc->state |= PERF_HES_STOPPED;
}
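
/*
 * On the start/stop state machine above: PERF_HES_STOPPED means the
 * hardware counter is no longer counting, and PERF_HES_UPTODATE means
 * event->count already holds the final value. Reading before disabling
 * matters because a power-gated counter would read back as zero.
 */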

static int perf_iommu_add(struct perf_event *event, int flags)
{
	int retval;

	event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	/* request an iommu bank/counter */
	retval = get_next_avail_iommu_bnk_cntr(event);
	if (retval)
		return retval;

	if (flags & PERF_EF_START)
		perf_iommu_start(event, PERF_EF_RELOAD);

	return 0;
}

static void perf_iommu_del(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_amd_iommu *perf_iommu =
		container_of(event->pmu, struct perf_amd_iommu, pmu);

	perf_iommu_stop(event, PERF_EF_UPDATE);

	/* clear the assigned iommu bank/counter */
	clear_avail_iommu_bnk_cntr(perf_iommu,
				   hwc->iommu_bank, hwc->iommu_cntr);

	perf_event_update_userpage(event);
}

static __init int _init_events_attrs(void)
{
	int i = 0, j;
	struct attribute **attrs;

	while (amd_iommu_v2_event_descs[i].attr.attr.name)
		i++;

	/* kcalloc() zeroes the array, so the extra slot NULL-terminates it */
	attrs = kcalloc(i + 1, sizeof(*attrs), GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;

	for (j = 0; j < i; j++)
		attrs[j] = &amd_iommu_v2_event_descs[j].attr.attr;

	amd_iommu_events_group.attrs = attrs;
	return 0;
}
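
/*
 * The init code below copies a pmu template into each per-IOMMU instance.
 * A minimal sketch of that template, assuming only the callbacks and
 * attribute groups defined above (exact capability flags vary by kernel
 * version):
 */
static const struct attribute_group *amd_iommu_attr_groups[] = {
	&amd_iommu_format_group,
	&amd_iommu_cpumask_group,
	&amd_iommu_events_group,
	NULL,
};

static const struct pmu iommu_pmu __initconst = {
	.event_init	= perf_iommu_event_init,
	.add		= perf_iommu_add,
	.del		= perf_iommu_del,
	.start		= perf_iommu_start,
	.stop		= perf_iommu_stop,
	.read		= perf_iommu_read,
	.task_ctx_nr	= perf_invalid_context,
	.attr_groups	= amd_iommu_attr_groups,
};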

static __init int init_one_iommu(unsigned int idx)
{
	struct perf_amd_iommu *perf_iommu;
	int ret;

	perf_iommu = kzalloc(sizeof(struct perf_amd_iommu), GFP_KERNEL);
	if (!perf_iommu)
		return -ENOMEM;

	raw_spin_lock_init(&perf_iommu->lock);

	perf_iommu->pmu          = iommu_pmu;
	perf_iommu->iommu        = get_amd_iommu(idx);
	perf_iommu->max_banks    = amd_iommu_pc_get_max_banks(idx);
	perf_iommu->max_counters = amd_iommu_pc_get_max_counters(idx);

	if (!perf_iommu->iommu ||
	    !perf_iommu->max_banks ||
	    !perf_iommu->max_counters) {
		kfree(perf_iommu);
		return -EINVAL;
	}

	snprintf(perf_iommu->name, IOMMU_NAME_SIZE, "amd_iommu_%u", idx);

	ret = perf_pmu_register(&perf_iommu->pmu, perf_iommu->name, -1);
	if (!ret) {
		pr_info("Detected AMD IOMMU #%d (%d banks, %d counters/bank).\n",
			idx, perf_iommu->max_banks, perf_iommu->max_counters);
		list_add_tail(&perf_iommu->list, &perf_amd_iommu_list);
	} else {
		kfree(perf_iommu);
	}
	return ret;
}

static __init int amd_iommu_pc_init(void)
{
	unsigned int i, cnt = 0;
	int ret;

	/* Make sure the IOMMU PC resource is available */
	if (!amd_iommu_pc_supported())
		return -ENODEV;

	ret = _init_events_attrs();
	if (ret)
		return ret;

	/* An IOMMU PMU is per-IOMMU; allocate one instance for each IOMMU. */
	for (i = 0; i < amd_iommu_get_num_iommus(); i++) {
		if (!init_one_iommu(i))
			cnt++;
	}

	if (!cnt) {
		kfree(amd_iommu_events_group.attrs);
		return -ENODEV;
	}

	/* Init cpumask attributes to only core 0 */
	cpumask_set_cpu(0, &iommu_cpumask);
	return 0;
}
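
/*
 * Once perf_pmu_register() has succeeded for an instance, it appears under
 * /sys/bus/event_source/devices/amd_iommu_<idx>/ with the format, events,
 * and cpumask attribute groups defined above.
 */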