Lines matching +full:rtl +full:-intc in arch/arc/kernel/perf_event.c (the ARC PCT performance-counters driver). Only the matched lines are shown, grouped by function; short elisions are marked with /* ... */ comments.
// SPDX-License-Identifier: GPL-2.0+
// Copyright (C) 2013-2018 Synopsys, Inc. (www.synopsys.com)

/* (based on a specific RTL build) */
/* arc_pmu_ev_hw_map[]: generic perf events -> ARC PCT condition names */
[PERF_COUNT_ARC_BPOK] = "bpok",           /* NP-NT, PT-T, PNT-NT */
[PERF_COUNT_HW_BRANCH_MISSES] = "bpfail", /* NP-T, PT-NT, PNT-T */
[PERF_COUNT_ARC_DCLM] = "dclm",           /* D-cache Load Miss */
[PERF_COUNT_ARC_DCSM] = "dcsm",           /* D-cache Store Miss */
[PERF_COUNT_ARC_ICM] = "icm",             /* I-cache Miss */
[PERF_COUNT_ARC_EDTLB] = "edtlb",         /* D-TLB Miss */
[PERF_COUNT_ARC_EITLB] = "eitlb",         /* I-TLB Miss */
[PERF_COUNT_HW_CACHE_MISSES] = "dclm",    /* D-cache Load Miss */
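This table is consulted in reverse at probe time: the driver reads each condition name out of the PCT block and records the condition index for every generic event whose name matches (the store into ev_hw_idx[] from arc_pmu_map_hw_event() appears further down). A minimal userspace sketch of that matching idea; map_generic_event() is a hypothetical name, not the kernel function:

#include <string.h>

static int map_generic_event(const char *const map[], int nmap,
                             const char *cond_name, int cond_idx,
                             int ev_hw_idx[])
{
        int i, found = 0;

        for (i = 0; i < nmap; i++) {
                /* an empty slot means "no ARC condition for this event" */
                if (map[i] && !strcmp(map[i], cond_name)) {
                        ev_hw_idx[i] = cond_idx; /* generic event i is countable */
                        found = 1;
                }
        }
        return found;
}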
/* callchain_trace(): depth-limited unwinder callback */
struct perf_callchain_entry_ctx *entry = ctrl->perf_stuff;

if (ctrl->depth++ < 3)
        return 0;

return -1;
/* arc_perf_event_update() */
u64 prev_raw_count = local64_read(&hwc->prev_count);
u64 new_raw_count = arc_pmu_read_counter(idx);
s64 delta = new_raw_count - prev_raw_count;

/*
 * We aren't afraid of hwc->prev_count changing beneath our feet
 * because there's no way for us to re-enter this function anytime.
 */
local64_set(&hwc->prev_count, new_raw_count);
local64_add(delta, &event->count);
local64_sub(delta, &hwc->period_left);
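The three local64 operations are the standard perf delta-accumulation pattern: compute the raw delta since the last read, fold it into the event count, and burn it off period_left. A self-contained userspace model of the same arithmetic; model_counter/model_update are illustrative names, not kernel API, and it assumes the counter is read at least once per wrap:

#include <stdint.h>
#include <stdio.h>

struct model_counter {
        uint64_t prev_raw;   /* last raw value read from the counter */
        uint64_t count;      /* total accumulated events */
        int64_t period_left; /* events remaining in the sample period */
};

static void model_update(struct model_counter *c, uint64_t new_raw)
{
        int64_t delta = (int64_t)(new_raw - c->prev_raw); /* wrap-safe in 2's complement */

        c->prev_raw = new_raw;
        c->count += delta;
        c->period_left -= delta;
}

int main(void)
{
        struct model_counter c = { .prev_raw = 0, .count = 0, .period_left = 1000 };

        model_update(&c, 600);
        model_update(&c, 900);
        printf("count=%llu left=%lld\n",
               (unsigned long long)c.count, (long long)c.period_left); /* 900, 100 */
        return 0;
}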
/* arc_pmu_read() */
arc_perf_event_update(event, &event->hw, event->hw.idx);
/* arc_pmu_cache_event(): validate a PERF_TYPE_HW_CACHE config */
if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
        return -EINVAL;
if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
        return -EINVAL;
if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
        return -EINVAL;
if (ret == CACHE_OP_UNSUPPORTED)
        return -ENOENT;
/* arc_pmu_event_init() */
struct hw_perf_event *hwc = &event->hw;
int ret;

if (!is_sampling_event(event)) {
        hwc->sample_period = arc_pmu->max_period;
        hwc->last_period = hwc->sample_period;
        local64_set(&hwc->period_left, hwc->sample_period);
}

hwc->config = 0;

/* "exclude user" means "count kernel only" */
if (event->attr.exclude_user)
        hwc->config |= ARC_REG_PCT_CONFIG_KERN;

/* "exclude kernel" means "count user only" */
if (event->attr.exclude_kernel)
        hwc->config |= ARC_REG_PCT_CONFIG_USER;

switch (event->attr.type) {
case PERF_TYPE_HARDWARE:
        if (event->attr.config >= PERF_COUNT_HW_MAX)
                return -ENOENT;
        if (arc_pmu->ev_hw_idx[event->attr.config] < 0)
                return -ENOENT;
        hwc->config |= arc_pmu->ev_hw_idx[event->attr.config];
        pr_debug("init event %d with h/w %08x \'%s\'\n",
                 (int)event->attr.config, (int)hwc->config,
                 arc_pmu_ev_hw_map[event->attr.config]);
        return 0;

case PERF_TYPE_HW_CACHE:
        ret = arc_pmu_cache_event(event->attr.config);
        if (ret < 0)
                return ret;
        hwc->config |= arc_pmu->ev_hw_idx[ret];
        pr_debug("init cache event with h/w %08x \'%s\'\n",
                 (int)hwc->config, arc_pmu_ev_hw_map[ret]);
        return 0;

case PERF_TYPE_RAW:
        if (event->attr.config >= arc_pmu->n_events)
                return -ENOENT;
        hwc->config |= event->attr.config;
        pr_debug("init raw event with idx %lld \'%s\'\n",
                 event->attr.config,
                 arc_pmu->raw_entry[event->attr.config].name);
        return 0;

default:
        return -ENOENT;
}
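For context, this is the kernel side of an ordinary perf_event_open() request. A userspace sketch that exercises the PERF_TYPE_HARDWARE path above; it is generic Linux code, nothing ARC-specific is assumed, and on ARC the driver would resolve PERF_COUNT_HW_BRANCH_MISSES to the "bpfail" condition from the map:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <string.h>
#include <stdio.h>

int main(void)
{
        struct perf_event_attr attr;
        long long count;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_BRANCH_MISSES; /* -> "bpfail" on ARC */
        attr.disabled = 1;
        attr.exclude_kernel = 1; /* event_init() maps this to ARC_REG_PCT_CONFIG_USER */

        fd = syscall(__NR_perf_event_open, &attr, 0 /* self */,
                     -1 /* any CPU */, -1 /* no group */, 0);
        if (fd < 0)
                return 1;

        ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
        /* ... workload under measurement ... */
        ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

        if (read(fd, &count, sizeof(count)) == sizeof(count))
                printf("branch misses: %lld\n", count);
        close(fd);
        return 0;
}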
/* arc_pmu_event_set_period() */
struct hw_perf_event *hwc = &event->hw;
s64 left = local64_read(&hwc->period_left);
s64 period = hwc->sample_period;
int idx = hwc->idx;
int overflow = 0;
u64 value;

if (unlikely(left <= -period)) {
        /* left underflowed by (at least) a whole period */
        left = period;
        local64_set(&hwc->period_left, left);
        hwc->last_period = period;
        overflow = 1;
} else if (unlikely(left <= 0)) {
        /* left underflowed by less than a period */
        left += period;
        local64_set(&hwc->period_left, left);
        hwc->last_period = period;
        overflow = 1;
}

if (left > arc_pmu->max_period)
        left = arc_pmu->max_period;

value = arc_pmu->max_period - left;
local64_set(&hwc->prev_count, value);
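Worked example of the reload math: the counter is preloaded with (max_period - left), so it reaches max_period, the overflow threshold programmed in arc_pmu_add() below, after exactly `left` more events. A tiny standalone check, assuming the 48-bit counter configuration:

#include <stdint.h>
#include <assert.h>

int main(void)
{
        uint64_t max_period = (1ULL << 47) - 1; /* as computed in probe for 48-bit counters */
        int64_t left = 100000;                  /* events until the next sample */
        uint64_t value = max_period - left;     /* preload value */

        assert(value + left == max_period);     /* overflow fires after `left` ticks */
        return 0;
}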
/* arc_pmu_start() */
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;

if (WARN_ON_ONCE(idx == -1))
        return;

if (flags & PERF_EF_RELOAD)
        WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

hwc->state = 0;

/* ... */
write_aux_reg(ARC_REG_PCT_INDEX, idx);          /* counter # */
write_aux_reg(ARC_REG_PCT_CONFIG, hwc->config); /* condition */
/* arc_pmu_stop() */
struct hw_perf_event *hwc = &event->hw;
int idx = hwc->idx;

if (!(event->hw.state & PERF_HES_STOPPED)) {
        /* ... stop the hw counter ... */
        event->hw.state |= PERF_HES_STOPPED;
}

if ((flags & PERF_EF_UPDATE) &&
    !(event->hw.state & PERF_HES_UPTODATE)) {
        arc_perf_event_update(event, &event->hw, idx);
        event->hw.state |= PERF_HES_UPTODATE;
}
/* arc_pmu_del() */
__clear_bit(event->hw.idx, pmu_cpu->used_mask);
pmu_cpu->act_counter[event->hw.idx] = 0;
/* arc_pmu_add() */
struct hw_perf_event *hwc = &event->hw;
int idx;

idx = ffz(pmu_cpu->used_mask[0]);
if (idx == arc_pmu->n_counters)
        return -EAGAIN;

__set_bit(idx, pmu_cpu->used_mask);
hwc->idx = idx;

pmu_cpu->act_counter[idx] = event;

/* Mimic full counter overflow as other arches do */
write_aux_reg(ARC_REG_PCT_INT_CNTL,
              lower_32_bits(arc_pmu->max_period));
write_aux_reg(ARC_REG_PCT_INT_CNTH,
              upper_32_bits(arc_pmu->max_period));

local64_set(&hwc->prev_count, 0);

hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
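Counter allocation is a plain first-free-bit search over a bitmap. A userspace sketch of the same idiom; alloc_counter() is a hypothetical name, and __builtin_ctzl(~x) stands in for the kernel's ffz() (it is undefined only when x is all-ones, which n_counters < BITS_PER_LONG rules out here):

#include <stdio.h>

static int alloc_counter(unsigned long *used_mask, int n_counters)
{
        int idx = __builtin_ctzl(~used_mask[0]); /* first zero (free) bit */

        if (idx >= n_counters)
                return -1; /* all counters busy: the driver returns -EAGAIN */

        used_mask[0] |= 1UL << idx;
        return idx;
}

int main(void)
{
        unsigned long mask = 0;
        int a = alloc_counter(&mask, 2);
        int b = alloc_counter(&mask, 2);
        int c = alloc_counter(&mask, 2);

        printf("%d %d %d\n", a, b, c); /* 0 1 -1 */
        return 0;
}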
/* arc_pmu_intr() */
arc_pmu_disable(&arc_pmu->pmu);

/* Now we need to re-enable interrupt for the counter */
write_aux_reg(ARC_REG_PCT_INT_CTRL,
              read_aux_reg(ARC_REG_PCT_INT_CTRL) | BIT(idx));

event = pmu_cpu->act_counter[idx];
hwc = &event->hw;

WARN_ON_ONCE(hwc->idx != idx);

arc_perf_event_update(event, &event->hw, event->hw.idx);
perf_sample_data_init(&data, 0, hwc->last_period);

arc_pmu_enable(&arc_pmu->pmu);
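This handler follows the usual perf overflow-IRQ shape: globally disable the PMU, then for each counter with its "interrupt active" bit set, ack the interrupt, re-enable it (the hardware clears the per-counter enable bit along with the active bit, hence the comment above), fold the counted delta into the event, arm the next sample period, and hand the sample to the perf core (this is where perf_event_overflow() is typically called) before re-enabling the PMU.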
PMU_FORMAT_ATTR(event, "config:0-14");
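This single format attribute tells the perf tool how raw events are encoded: bits 0..14 of perf_event_attr::config carry the PCT condition index, matching the "event=0x%04llx" string emitted by arc_pmu_events_sysfs_show() below. Together with the per-condition sysfs events this allows, for example, perf stat -e arc_pct/dclm/ or an explicit perf stat -e arc_pct/event=0x12/ (0x12 being an arbitrary example index).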
/* arc_pmu_events_sysfs_show() */
return sprintf(page, "event=0x%04llx\n", pmu_attr->id);

/* We don't add attrs here as we don't have a pre-defined list of perf events */
/* arc_pmu_add_raw_event_attr() */
memmove(arc_pmu->raw_entry[j].name, str, ARCPMU_EVENT_NAME_LEN - 1);
arc_pmu->attr[j].attr.attr.name = arc_pmu->raw_entry[j].name;
arc_pmu->attr[j].attr.attr.mode = VERIFY_OCTAL_PERMISSIONS(0444);
arc_pmu->attr[j].attr.show = arc_pmu_events_sysfs_show;
arc_pmu->attr[j].id = j;
arc_pmu->attrs[j] = &(arc_pmu->attr[j].attr.attr);
/* arc_pmu_raw_alloc() */
arc_pmu->attr = devm_kmalloc_array(dev, arc_pmu->n_events + 1,
                                   sizeof(*arc_pmu->attr),
                                   GFP_KERNEL | __GFP_ZERO);
if (!arc_pmu->attr)
        return -ENOMEM;

arc_pmu->attrs = devm_kmalloc_array(dev, arc_pmu->n_events + 1,
                                    sizeof(*arc_pmu->attrs),
                                    GFP_KERNEL | __GFP_ZERO);
if (!arc_pmu->attrs)
        return -ENOMEM;

arc_pmu->raw_entry = devm_kmalloc_array(dev, arc_pmu->n_events,
                                        sizeof(*arc_pmu->raw_entry),
                                        GFP_KERNEL | __GFP_ZERO);
if (!arc_pmu->raw_entry)
        return -ENOMEM;
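The +1 on the first two allocations is there because struct attribute_group expects its attrs pointer array to be NULL-terminated; allocating n_events + 1 zeroed slots leaves that sentinel in place, and sizing arc_pmu->attr the same way keeps the two arrays index-aligned. raw_entry needs no sentinel, so it is sized at exactly n_events.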
/* arc_pmu_map_hw_event(): a condition name matched the generic-event table */
arc_pmu->ev_hw_idx[i] = j;
/* arc_pmu_device_probe() */
int i, has_interrupts, irq = -1;

READ_BCR(ARC_REG_PCT_BUILD, pct_bcr);
if (!pct_bcr.v)
        return -ENODEV;
if (WARN(pct_bcr.c > ARC_PERF_MAX_COUNTERS, ...))
        return -EINVAL;

READ_BCR(ARC_REG_CC_BUILD, cc_bcr);
if (WARN(!cc_bcr.v, ...))
        return -EINVAL;

arc_pmu = devm_kzalloc(&pdev->dev, sizeof(struct arc_pmu), GFP_KERNEL);
if (!arc_pmu)
        return -ENOMEM;

arc_pmu->n_events = cc_bcr.c;

if (arc_pmu_raw_alloc(&pdev->dev))
        return -ENOMEM;

has_interrupts = is_isa_arcv2() ? pct_bcr.i : 0;

arc_pmu->n_counters = pct_bcr.c;
counter_size = 32 + (pct_bcr.s << 4);   /* in bits */

arc_pmu->max_period = (1ULL << counter_size) / 2 - 1ULL;

pr_info("ARC perf\t: %d counters (%d bits), %d conditions%s\n",
        arc_pmu->n_counters, counter_size, cc_bcr.c,
        has_interrupts ? ", [overflow IRQ support]" : "");

cc_name.str[ARCPMU_EVENT_NAME_LEN - 1] = 0;
for (i = 0; i < PERF_COUNT_ARC_HW_MAX; i++)
        arc_pmu->ev_hw_idx[i] = -1;

/* ... read each condition name from hardware and register it ... */

arc_pmu_events_attr_gr.attrs = arc_pmu->attrs;
arc_pmu->attr_groups[ARCPMU_ATTR_GR_EVENTS] = &arc_pmu_events_attr_gr;
arc_pmu->attr_groups[ARCPMU_ATTR_GR_FORMATS] = &arc_pmu_format_attr_gr;

arc_pmu->pmu = (struct pmu) {
        /* .event_init, .add, .del, .start, .stop, .read, ... */
        .attr_groups    = arc_pmu->attr_groups,
};

if (has_interrupts) {
        irq = platform_get_irq(pdev, 0);
        arc_pmu->irq = irq;

        /* intc map function ensures irq_set_percpu_devid() is called */
        if (request_percpu_irq(irq, arc_pmu_intr, "ARC perf counters",
                               this_cpu_ptr(&arc_pmu_cpu)))
                irq = -1;
}

if (irq == -1)
        arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;

/*
 * The perf parser doesn't really like the '-' symbol in event names, so
 * let's use '_' in the PMU name, which becomes the kernel event prefix.
 */
return perf_pmu_register(&arc_pmu->pmu, "arc_pct", PERF_TYPE_RAW);
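max_period is deliberately half the counter range; the usual reason for this pattern is that programmed counter values then stay in the lower half, so the s64 delta in arc_perf_event_update() can never be misread as negative. A worked example, assuming the 48-bit configuration (pct_bcr.s == 1, so counter_size = 32 + (1 << 4) = 48):

#include <stdio.h>
#include <inttypes.h>

int main(void)
{
        int counter_size = 48;
        uint64_t max_period = (1ULL << counter_size) / 2 - 1ULL;

        /* 2^48 / 2 - 1 = 2^47 - 1 = 140737488355327 */
        printf("max_period = %" PRIu64 "\n", max_period);
        return 0;
}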
832 { .compatible = "snps,arc700-pct" },
833 { .compatible = "snps,archs-pct" },
840 .name = "arc-pct",