Lines Matching +full:riscv +full:- +full:j +full:- +full:extension
1 // SPDX-License-Identifier: GPL-2.0
3 * RISC-V performance counter support.
11 #define pr_fmt(fmt) "riscv-pmu-sbi: " fmt
62 PMU_FORMAT_ATTR(event, "config:0-47");
63 PMU_FORMAT_ATTR(firmware, "config:62-63");
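These two format strings partition the 64-bit perf config word: bits 0-47 carry the event code and bits 62-63 select the event class. A minimal user-space sketch of building a matching perf_event_attr follows; the helper name, the event code, and the class value are illustrative assumptions, not part of this file.

/*
 * Illustrative sketch only: assemble a PERF_TYPE_RAW attribute whose config
 * layout matches the "event" (config:0-47) and "firmware" (config:62-63)
 * format fields above.
 */
#include <linux/perf_event.h>
#include <stdint.h>
#include <sys/syscall.h>
#include <unistd.h>

static long open_raw_counter(uint64_t event_code, uint64_t event_class)
{
	struct perf_event_attr attr = { 0 };

	attr.type = PERF_TYPE_RAW;
	attr.size = sizeof(attr);
	attr.config = event_code & ((1ULL << 48) - 1);	/* "event" field    */
	attr.config |= (event_class & 0x3) << 62;	/* "firmware" field */

	/* Count for the calling task on any CPU. */
	return syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
}

The perf tool assembles the same config word when an event is written against these named fields, e.g. a cpu/event=...,firmware=.../ style specification.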
90 * RISC-V doesn't have heterogeneous harts yet. This needs to be part of
306 0, cmask, 0, edata->event_idx, 0, 0); in pmu_sbi_check_event()
312 edata->event_idx = -ENOENT; in pmu_sbi_check_event()
322 for (int j = 0; j < ARRAY_SIZE(pmu_cache_event_map[i]); j++) in pmu_sbi_check_std_events() local
323 for (int k = 0; k < ARRAY_SIZE(pmu_cache_event_map[i][j]); k++) in pmu_sbi_check_std_events()
324 pmu_sbi_check_event(&pmu_cache_event_map[i][j][k]); in pmu_sbi_check_std_events()
342 return info->type == SBI_PMU_CTR_TYPE_FW; in pmu_sbi_ctr_is_fw()
357 return -EINVAL; in riscv_pmu_get_hpm_info()
363 if (!hpm_width && info->csr != CSR_CYCLE && info->csr != CSR_INSTRET) in riscv_pmu_get_hpm_info()
364 hpm_width = info->width; in riscv_pmu_get_hpm_info()
365 if (info->type == SBI_PMU_CTR_TYPE_HW) in riscv_pmu_get_hpm_info()
378 return pmu_ctr_list[event->hw.idx].csr - CSR_CYCLE; in pmu_sbi_csr_index()
386 if (event->attr.config1 & RISCV_PMU_CONFIG1_GUEST_EVENTS) in pmu_sbi_get_filter_flags()
388 if (event->attr.exclude_kernel) in pmu_sbi_get_filter_flags()
390 if (event->attr.exclude_user) in pmu_sbi_get_filter_flags()
392 if (guest_events && event->attr.exclude_hv) in pmu_sbi_get_filter_flags()
394 if (event->attr.exclude_host) in pmu_sbi_get_filter_flags()
396 if (event->attr.exclude_guest) in pmu_sbi_get_filter_flags()
404 struct hw_perf_event *hwc = &event->hw; in pmu_sbi_ctr_get_idx()
405 struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu); in pmu_sbi_ctr_get_idx()
406 struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events); in pmu_sbi_ctr_get_idx()
409 uint64_t cbase = 0, cmask = rvpmu->cmask; in pmu_sbi_ctr_get_idx()
419 if ((hwc->flags & PERF_EVENT_FLAG_LEGACY) && (event->attr.type == PERF_TYPE_HARDWARE)) { in pmu_sbi_ctr_get_idx()
420 if (event->attr.config == PERF_COUNT_HW_CPU_CYCLES) { in pmu_sbi_ctr_get_idx()
423 } else if (event->attr.config == PERF_COUNT_HW_INSTRUCTIONS) { in pmu_sbi_ctr_get_idx()
425 cmask = BIT(CSR_INSTRET - CSR_CYCLE); in pmu_sbi_ctr_get_idx()
432 cmask, cflags, hwc->event_base, hwc->config, in pmu_sbi_ctr_get_idx()
433 hwc->config >> 32); in pmu_sbi_ctr_get_idx()
436 cmask, cflags, hwc->event_base, hwc->config, 0); in pmu_sbi_ctr_get_idx()
440 hwc->event_base, hwc->config); in pmu_sbi_ctr_get_idx()
445 if (!test_bit(idx, &rvpmu->cmask) || !pmu_ctr_list[idx].value) in pmu_sbi_ctr_get_idx()
446 return -ENOENT; in pmu_sbi_ctr_get_idx()
450 if (!test_and_set_bit(idx, cpuc->used_fw_ctrs)) in pmu_sbi_ctr_get_idx()
453 if (!test_and_set_bit(idx, cpuc->used_hw_ctrs)) in pmu_sbi_ctr_get_idx()
457 return -ENOENT; in pmu_sbi_ctr_get_idx()
463 struct hw_perf_event *hwc = &event->hw; in pmu_sbi_ctr_clear_idx()
464 struct riscv_pmu *rvpmu = to_riscv_pmu(event->pmu); in pmu_sbi_ctr_clear_idx()
465 struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events); in pmu_sbi_ctr_clear_idx()
466 int idx = hwc->idx; in pmu_sbi_ctr_clear_idx()
469 clear_bit(idx, cpuc->used_fw_ctrs); in pmu_sbi_ctr_clear_idx()
471 clear_bit(idx, cpuc->used_hw_ctrs); in pmu_sbi_ctr_clear_idx()
480 return -EINVAL; in pmu_event_find_cache()
484 return -EINVAL; in pmu_event_find_cache()
488 return -EINVAL; in pmu_event_find_cache()
497 u32 type = event->attr.type; in pmu_sbi_is_fw_event()
498 u64 config = event->attr.config; in pmu_sbi_is_fw_event()
508 u32 type = event->attr.type; in pmu_sbi_event_map()
509 u64 config = event->attr.config; in pmu_sbi_event_map()
510 int ret = -ENOENT; in pmu_sbi_event_map()
521 return -EINVAL; in pmu_sbi_event_map()
522 ret = pmu_hw_event_map[event->attr.config].event_idx; in pmu_sbi_event_map()
532 * 00 - Hardware raw event in pmu_sbi_event_map()
533 * 10 - SBI firmware events in pmu_sbi_event_map()
534 * 11 - RISC-V platform-specific firmware event in pmu_sbi_event_map()
539 /* Return an error if any of bits [48-63] are set, as that is not allowed by the spec */ in pmu_sbi_event_map()
550 * For RISC-V platform-specific firmware events in pmu_sbi_event_map()
551 * Event code - 0xFFFF in pmu_sbi_event_map()
552 * Event data - raw event encoding in pmu_sbi_event_map()
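To make the decode concrete, here is a small sketch that follows the comment above: config[63:62] selects the class, config[47:0] carries the event code, and for a plain hardware raw event any of bits [48-63] being set is rejected. The constants and return values are illustrative stand-ins, not the driver's actual macros or event indices.

/*
 * Illustrative decode of the raw config classes described above. The macro
 * names and return values are stand-ins, not the driver's actual definitions.
 */
#include <stdint.h>

#define RAW_EVENT_BITS		48	/* event code lives in config[47:0]      */
#define RAW_CLASS_SHIFT		62	/* class selector lives in config[63:62] */

static int64_t decode_raw_config(uint64_t config, uint64_t *event_data)
{
	uint64_t event_mask = (1ULL << RAW_EVENT_BITS) - 1;

	switch (config >> RAW_CLASS_SHIFT) {
	case 0:	/* 00 - hardware raw event */
		/* Reject the event if any of bits [48-63] are set. */
		if (config & ~event_mask)
			return -1;
		*event_data = config & event_mask;
		return 0;
	case 2:	/* 10 - SBI firmware event, code taken from the low bits */
		*event_data = 0;
		return config & 0xFFFF;
	case 3:	/* 11 - platform-specific firmware event: fixed event code
		 *      0xFFFF, raw encoding passed through as event data */
		*event_data = config & event_mask;
		return 0xFFFF;
	default:
		return -1;
	}
}

This mirrors the shape of the class handling in pmu_sbi_event_map() without reproducing its exact constants.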
573 struct cpu_hw_events *cpu_hw_evt = per_cpu_ptr(pmu->hw_events, cpu); in pmu_sbi_snapshot_free()
575 if (!cpu_hw_evt->snapshot_addr) in pmu_sbi_snapshot_free()
578 free_page((unsigned long)cpu_hw_evt->snapshot_addr); in pmu_sbi_snapshot_free()
579 cpu_hw_evt->snapshot_addr = NULL; in pmu_sbi_snapshot_free()
580 cpu_hw_evt->snapshot_addr_phys = 0; in pmu_sbi_snapshot_free()
590 struct cpu_hw_events *cpu_hw_evt = per_cpu_ptr(pmu->hw_events, cpu); in pmu_sbi_snapshot_alloc()
595 return -ENOMEM; in pmu_sbi_snapshot_alloc()
597 cpu_hw_evt->snapshot_addr = page_to_virt(snapshot_page); in pmu_sbi_snapshot_alloc()
598 cpu_hw_evt->snapshot_addr_phys = page_to_phys(snapshot_page); in pmu_sbi_snapshot_alloc()
623 cpu_hw_evt = per_cpu_ptr(pmu->hw_events, cpu); in pmu_sbi_snapshot_setup()
624 if (!cpu_hw_evt->snapshot_addr_phys) in pmu_sbi_snapshot_setup()
625 return -EINVAL; in pmu_sbi_snapshot_setup()
627 if (cpu_hw_evt->snapshot_set_done) in pmu_sbi_snapshot_setup()
632 cpu_hw_evt->snapshot_addr_phys, in pmu_sbi_snapshot_setup()
633 (u64)(cpu_hw_evt->snapshot_addr_phys) >> 32, 0, 0, 0, 0); in pmu_sbi_snapshot_setup()
636 cpu_hw_evt->snapshot_addr_phys, 0, 0, 0, 0, 0); in pmu_sbi_snapshot_setup()
645 memset(cpu_hw_evt->snapshot_cval_shcopy, 0, sizeof(u64) * RISCV_MAX_COUNTERS); in pmu_sbi_snapshot_setup()
646 cpu_hw_evt->snapshot_set_done = true; in pmu_sbi_snapshot_setup()
653 struct hw_perf_event *hwc = &event->hw; in pmu_sbi_ctr_read()
654 int idx = hwc->idx; in pmu_sbi_ctr_read()
657 struct riscv_pmu *pmu = to_riscv_pmu(event->pmu); in pmu_sbi_ctr_read()
658 struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events); in pmu_sbi_ctr_read()
659 struct riscv_pmu_snapshot_data *sdata = cpu_hw_evt->snapshot_addr; in pmu_sbi_ctr_read()
663 if (sbi_pmu_snapshot_available() && (hwc->state & PERF_HES_STOPPED)) { in pmu_sbi_ctr_read()
664 val = sdata->ctr_values[idx]; in pmu_sbi_ctr_read()
670 hwc->idx, 0, 0, 0, 0, 0); in pmu_sbi_ctr_read()
677 hwc->idx, 0, 0, 0, 0, 0); in pmu_sbi_ctr_read()
697 if (event->hw.idx != -1) in pmu_sbi_set_scounteren()
706 if (event->hw.idx != -1) in pmu_sbi_reset_scounteren()
714 struct hw_perf_event *hwc = &event->hw; in pmu_sbi_ctr_start()
719 ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, hwc->idx, in pmu_sbi_ctr_start()
722 ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_START, hwc->idx, in pmu_sbi_ctr_start()
727 hwc->idx, sbi_err_map_linux_errno(ret.error)); in pmu_sbi_ctr_start()
729 if ((hwc->flags & PERF_EVENT_FLAG_USER_ACCESS) && in pmu_sbi_ctr_start()
730 (hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT)) in pmu_sbi_ctr_start()
737 struct hw_perf_event *hwc = &event->hw; in pmu_sbi_ctr_stop()
738 struct riscv_pmu *pmu = to_riscv_pmu(event->pmu); in pmu_sbi_ctr_stop()
739 struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events); in pmu_sbi_ctr_stop()
740 struct riscv_pmu_snapshot_data *sdata = cpu_hw_evt->snapshot_addr; in pmu_sbi_ctr_stop()
742 if ((hwc->flags & PERF_EVENT_FLAG_USER_ACCESS) && in pmu_sbi_ctr_stop()
743 (hwc->flags & PERF_EVENT_FLAG_USER_READ_CNT)) in pmu_sbi_ctr_stop()
749 ret = sbi_ecall(SBI_EXT_PMU, SBI_EXT_PMU_COUNTER_STOP, hwc->idx, 1, flag, 0, 0, 0); in pmu_sbi_ctr_stop()
752 * The counter snapshot is based on the index base specified by hwc->idx. in pmu_sbi_ctr_stop()
755 * the counter value to shared memory. However, if hwc->idx is zero, the counter in pmu_sbi_ctr_stop()
759 if (hwc->idx > 0) { in pmu_sbi_ctr_stop()
760 sdata->ctr_values[hwc->idx] = sdata->ctr_values[0]; in pmu_sbi_ctr_stop()
761 sdata->ctr_values[0] = 0; in pmu_sbi_ctr_stop()
766 hwc->idx, sbi_err_map_linux_errno(ret.error)); in pmu_sbi_ctr_stop()
789 return -ENOMEM; in pmu_sbi_get_ctrinfo()
819 0, pmu->cmask, SBI_PMU_STOP_FLAG_RESET, 0, 0, 0); in pmu_sbi_stop_all()
824 struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events); in pmu_sbi_stop_hw_ctrs()
825 struct riscv_pmu_snapshot_data *sdata = cpu_hw_evt->snapshot_addr; in pmu_sbi_stop_hw_ctrs()
835 memset(cpu_hw_evt->snapshot_cval_shcopy, 0, sizeof(u64) * RISCV_MAX_COUNTERS); in pmu_sbi_stop_hw_ctrs()
840 cpu_hw_evt->used_hw_ctrs[i], flag, 0, 0, 0); in pmu_sbi_stop_hw_ctrs()
843 for_each_set_bit(idx, &cpu_hw_evt->used_hw_ctrs[i], BITS_PER_LONG) in pmu_sbi_stop_hw_ctrs()
844 cpu_hw_evt->snapshot_cval_shcopy[i * BITS_PER_LONG + idx] = in pmu_sbi_stop_hw_ctrs()
845 sdata->ctr_values[idx]; in pmu_sbi_stop_hw_ctrs()
847 temp_ctr_overflow_mask |= sdata->ctr_overflow_mask << (i * BITS_PER_LONG); in pmu_sbi_stop_hw_ctrs()
853 for_each_set_bit(idx, cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS) in pmu_sbi_stop_hw_ctrs()
854 sdata->ctr_values[idx] = cpu_hw_evt->snapshot_cval_shcopy[idx]; in pmu_sbi_stop_hw_ctrs()
856 sdata->ctr_overflow_mask = temp_ctr_overflow_mask; in pmu_sbi_stop_hw_ctrs()
878 ctr_start_mask = cpu_hw_evt->used_hw_ctrs[i] & ~ctr_ovf_mask; in pmu_sbi_start_ovf_ctrs_sbi()
887 event = cpu_hw_evt->events[idx]; in pmu_sbi_start_ovf_ctrs_sbi()
888 hwc = &event->hw; in pmu_sbi_start_ovf_ctrs_sbi()
890 init_val = local64_read(&hwc->prev_count) & max_period; in pmu_sbi_start_ovf_ctrs_sbi()
913 struct riscv_pmu_snapshot_data *sdata = cpu_hw_evt->snapshot_addr; in pmu_sbi_start_ovf_ctrs_snapshot()
915 for_each_set_bit(idx, cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS) { in pmu_sbi_start_ovf_ctrs_snapshot()
917 event = cpu_hw_evt->events[idx]; in pmu_sbi_start_ovf_ctrs_snapshot()
918 hwc = &event->hw; in pmu_sbi_start_ovf_ctrs_snapshot()
920 init_val = local64_read(&hwc->prev_count) & max_period; in pmu_sbi_start_ovf_ctrs_snapshot()
921 cpu_hw_evt->snapshot_cval_shcopy[idx] = init_val; in pmu_sbi_start_ovf_ctrs_snapshot()
924 * We do not need to update the non-overflow counters; the previous in pmu_sbi_start_ovf_ctrs_snapshot()
931 for_each_set_bit(idx, &cpu_hw_evt->used_hw_ctrs[i], BITS_PER_LONG) in pmu_sbi_start_ovf_ctrs_snapshot()
932 sdata->ctr_values[idx] = in pmu_sbi_start_ovf_ctrs_snapshot()
933 cpu_hw_evt->snapshot_cval_shcopy[idx + i * BITS_PER_LONG]; in pmu_sbi_start_ovf_ctrs_snapshot()
936 cpu_hw_evt->used_hw_ctrs[i], flag, 0, 0, 0); in pmu_sbi_start_ovf_ctrs_snapshot()
943 struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events); in pmu_sbi_start_overflow_mask()
964 struct riscv_pmu_snapshot_data *sdata = cpu_hw_evt->snapshot_addr; in pmu_sbi_ovf_handler()
970 fidx = find_first_bit(cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS); in pmu_sbi_ovf_handler()
976 event = cpu_hw_evt->events[fidx]; in pmu_sbi_ovf_handler()
982 pmu = to_riscv_pmu(event->pmu); in pmu_sbi_ovf_handler()
987 overflow = sdata->ctr_overflow_mask; in pmu_sbi_ovf_handler()
1003 for_each_set_bit(lidx, cpu_hw_evt->used_hw_ctrs, RISCV_MAX_COUNTERS) { in pmu_sbi_ovf_handler()
1004 struct perf_event *event = cpu_hw_evt->events[lidx]; in pmu_sbi_ovf_handler()
1012 if (!info || info->type != SBI_PMU_CTR_TYPE_HW) in pmu_sbi_ovf_handler()
1020 hidx = info->csr - CSR_CYCLE; in pmu_sbi_ovf_handler()
1031 hw_evt = &event->hw; in pmu_sbi_ovf_handler()
1033 hw_evt->state |= PERF_HES_STOPPED; in pmu_sbi_ovf_handler()
1035 hw_evt->state |= PERF_HES_UPTODATE; in pmu_sbi_ovf_handler()
1036 perf_sample_data_init(&data, 0, hw_evt->last_period); in pmu_sbi_ovf_handler()
1039 * Unlike other ISAs, RISC-V doesn't have to disable interrupts in pmu_sbi_ovf_handler()
1049 hw_evt->state = 0; in pmu_sbi_ovf_handler()
1053 perf_sample_event_took(sched_clock() - start_clock); in pmu_sbi_ovf_handler()
1061 struct cpu_hw_events *cpu_hw_evt = this_cpu_ptr(pmu->hw_events); in pmu_sbi_starting_cpu()
1076 cpu_hw_evt->irq = riscv_pmu_irq; in pmu_sbi_starting_cpu()
1105 struct cpu_hw_events __percpu *hw_events = pmu->hw_events; in pmu_sbi_setup_irqs()
1127 return -EOPNOTSUPP; in pmu_sbi_setup_irqs()
1133 return -ENODEV; in pmu_sbi_setup_irqs()
1139 return -ENODEV; in pmu_sbi_setup_irqs()
1142 ret = request_percpu_irq(riscv_pmu_irq, pmu_sbi_ovf_handler, "riscv-pmu", hw_events); in pmu_sbi_setup_irqs()
1156 struct cpu_hw_events *cpuc = this_cpu_ptr(rvpmu->hw_events); in riscv_pm_pmu_notify()
1157 int enabled = bitmap_weight(cpuc->used_hw_ctrs, RISCV_MAX_COUNTERS); in riscv_pm_pmu_notify()
1165 event = cpuc->events[idx]; in riscv_pm_pmu_notify()
1193 pmu->riscv_pm_nb.notifier_call = riscv_pm_pmu_notify; in riscv_pm_pmu_register()
1194 return cpu_pm_register_notifier(&pmu->riscv_pm_nb); in riscv_pm_pmu_register()
1199 cpu_pm_unregister_notifier(&pmu->riscv_pm_nb); in riscv_pm_pmu_unregister()
1215 cpuhp_state_remove_instance(CPUHP_AP_PERF_RISCV_STARTING, &pmu->node); in riscv_pmu_destroy()
1225 event->hw.flags |= PERF_EVENT_FLAG_NO_USER_ACCESS; in pmu_sbi_event_init()
1227 event->hw.flags |= PERF_EVENT_FLAG_USER_ACCESS; in pmu_sbi_event_init()
1229 event->hw.flags |= PERF_EVENT_FLAG_LEGACY; in pmu_sbi_event_init()
1234 if (event->hw.flags & PERF_EVENT_FLAG_NO_USER_ACCESS) in pmu_sbi_event_mapped()
1237 if (event->hw.flags & PERF_EVENT_FLAG_LEGACY) { in pmu_sbi_event_mapped()
1238 if (event->attr.config != PERF_COUNT_HW_CPU_CYCLES && in pmu_sbi_event_mapped()
1239 event->attr.config != PERF_COUNT_HW_INSTRUCTIONS) { in pmu_sbi_event_mapped()
1253 event->hw.flags |= PERF_EVENT_FLAG_USER_READ_CNT; in pmu_sbi_event_mapped()
1262 if (event->hw.flags & PERF_EVENT_FLAG_USER_ACCESS) in pmu_sbi_event_mapped()
1269 if (event->hw.flags & PERF_EVENT_FLAG_NO_USER_ACCESS) in pmu_sbi_event_unmapped()
1272 if (event->hw.flags & PERF_EVENT_FLAG_LEGACY) { in pmu_sbi_event_unmapped()
1273 if (event->attr.config != PERF_COUNT_HW_CPU_CYCLES && in pmu_sbi_event_unmapped()
1274 event->attr.config != PERF_COUNT_HW_INSTRUCTIONS) { in pmu_sbi_event_unmapped()
1285 event->hw.flags &= ~PERF_EVENT_FLAG_USER_READ_CNT; in pmu_sbi_event_unmapped()
1287 if (event->hw.flags & PERF_EVENT_FLAG_USER_ACCESS) in pmu_sbi_event_unmapped()
1335 int ret = -ENODEV; in pmu_sbi_device_probe()
1338 pr_info("SBI PMU extension is available\n"); in pmu_sbi_device_probe()
1341 return -ENOMEM; in pmu_sbi_device_probe()
1345 pr_err("SBI PMU extension doesn't provide any counters\n"); in pmu_sbi_device_probe()
1361 pr_info("Perf sampling/filtering is not supported as sscof extension is not available\n"); in pmu_sbi_device_probe()
1362 pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT; in pmu_sbi_device_probe()
1363 pmu->pmu.capabilities |= PERF_PMU_CAP_NO_EXCLUDE; in pmu_sbi_device_probe()
1366 pmu->pmu.attr_groups = riscv_pmu_attr_groups; in pmu_sbi_device_probe()
1367 pmu->pmu.parent = &pdev->dev; in pmu_sbi_device_probe()
1368 pmu->cmask = cmask; in pmu_sbi_device_probe()
1369 pmu->ctr_start = pmu_sbi_ctr_start; in pmu_sbi_device_probe()
1370 pmu->ctr_stop = pmu_sbi_ctr_stop; in pmu_sbi_device_probe()
1371 pmu->event_map = pmu_sbi_event_map; in pmu_sbi_device_probe()
1372 pmu->ctr_get_idx = pmu_sbi_ctr_get_idx; in pmu_sbi_device_probe()
1373 pmu->ctr_get_width = pmu_sbi_ctr_get_width; in pmu_sbi_device_probe()
1374 pmu->ctr_clear_idx = pmu_sbi_ctr_clear_idx; in pmu_sbi_device_probe()
1375 pmu->ctr_read = pmu_sbi_ctr_read; in pmu_sbi_device_probe()
1376 pmu->event_init = pmu_sbi_event_init; in pmu_sbi_device_probe()
1377 pmu->event_mapped = pmu_sbi_event_mapped; in pmu_sbi_device_probe()
1378 pmu->event_unmapped = pmu_sbi_event_unmapped; in pmu_sbi_device_probe()
1379 pmu->csr_index = pmu_sbi_csr_index; in pmu_sbi_device_probe()
1385 ret = perf_pmu_register(&pmu->pmu, "cpu", PERF_TYPE_RAW); in pmu_sbi_device_probe()
1418 ret = cpuhp_state_add_instance(CPUHP_AP_PERF_RISCV_STARTING, &pmu->node); in pmu_sbi_device_probe()
1456 "perf/riscv/pmu:starting", in pmu_sbi_devinit()
1468 pdev = platform_device_register_simple(RISCV_PMU_SBI_PDEV_NAME, -1, NULL, 0); in pmu_sbi_devinit()