Lines matching full:pmc (arch/riscv/kvm/vcpu_pmu.c)
37 static u64 kvm_pmu_get_sample_period(struct kvm_pmc *pmc) in kvm_pmu_get_sample_period() argument
39 u64 counter_val_mask = GENMASK(pmc->cinfo.width, 0); in kvm_pmu_get_sample_period()
42 if (!pmc->counter_val) in kvm_pmu_get_sample_period()
45 sample_period = (-pmc->counter_val) & counter_val_mask; in kvm_pmu_get_sample_period()
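The matched lines compute a perf sample period as the two's-complement distance from the current virtual counter value to overflow, truncated to the counter's width. A sketch of the whole helper under that reading; the zero-value branch is not in the listing and is an assumption (a zeroed counter needs a full-width period):

    static u64 kvm_pmu_get_sample_period(struct kvm_pmc *pmc)
    {
    	u64 counter_val_mask = GENMASK(pmc->cinfo.width, 0);
    	u64 sample_period;

    	if (!pmc->counter_val)
    		/* assumed: a zero counter overflows after a full 2^(width+1) run */
    		sample_period = counter_val_mask + 1;
    	else
    		sample_period = (-pmc->counter_val) & counter_val_mask;

    	return sample_period;
    }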
78 static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc) in kvm_pmu_release_perf_event() argument
80 if (pmc->perf_event) { in kvm_pmu_release_perf_event()
81 perf_event_disable(pmc->perf_event); in kvm_pmu_release_perf_event()
82 perf_event_release_kernel(pmc->perf_event); in kvm_pmu_release_perf_event()
83 pmc->perf_event = NULL; in kvm_pmu_release_perf_event()
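All four statements of the release helper appear in the listing, so the function can be restored essentially verbatim (only the braces are filled in): it disables the perf event before releasing it and clears the stale pointer so later paths do not touch a freed event.

    static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
    {
    	if (pmc->perf_event) {
    		perf_event_disable(pmc->perf_event);
    		perf_event_release_kernel(pmc->perf_event);
    		pmc->perf_event = NULL;
    	}
    }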
204 struct kvm_pmc *pmc; in pmu_fw_ctr_read_hi() local
217 pmc = &kvpmu->pmc[cidx]; in pmu_fw_ctr_read_hi()
219 if (pmc->cinfo.type != SBI_PMU_CTR_TYPE_FW) in pmu_fw_ctr_read_hi()
222 fevent_code = get_event_code(pmc->event_idx); in pmu_fw_ctr_read_hi()
223 pmc->counter_val = kvpmu->fw_event[fevent_code].value; in pmu_fw_ctr_read_hi()
225 *out_val = pmc->counter_val >> 32; in pmu_fw_ctr_read_hi()
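pmu_fw_ctr_read_hi serves the RV32-only "read upper half" SBI call for firmware counters: it refreshes the virtual counter from kvpmu->fw_event[] and returns bits 63:32. A sketch; the validation ahead of the matched lines is an assumption, not shown in the listing:

    static int pmu_fw_ctr_read_hi(struct kvm_vcpu *vcpu, unsigned long cidx,
    			      unsigned long *out_val)
    {
    	struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
    	struct kvm_pmc *pmc;
    	int fevent_code;

    	if (cidx >= kvm_pmu_num_counters(kvpmu))	/* assumed bounds check */
    		return -EINVAL;

    	pmc = &kvpmu->pmc[cidx];

    	if (pmc->cinfo.type != SBI_PMU_CTR_TYPE_FW)
    		return -EINVAL;

    	fevent_code = get_event_code(pmc->event_idx);
    	pmc->counter_val = kvpmu->fw_event[fevent_code].value;

    	*out_val = pmc->counter_val >> 32;

    	return 0;
    }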
234 struct kvm_pmc *pmc; in pmu_ctr_read() local
243 pmc = &kvpmu->pmc[cidx]; in pmu_ctr_read()
245 if (pmc->cinfo.type == SBI_PMU_CTR_TYPE_FW) { in pmu_ctr_read()
246 fevent_code = get_event_code(pmc->event_idx); in pmu_ctr_read()
247 pmc->counter_val = kvpmu->fw_event[fevent_code].value; in pmu_ctr_read()
248 } else if (pmc->perf_event) { in pmu_ctr_read()
249 pmc->counter_val += perf_event_read_value(pmc->perf_event, &enabled, &running); in pmu_ctr_read()
253 *out_val = pmc->counter_val; in pmu_ctr_read()
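pmu_ctr_read handles both counter types: firmware counters are re-read from kvpmu->fw_event[], while hardware counters accumulate the delta reported by perf_event_read_value(). A sketch under the same assumption about elided validation:

    static int pmu_ctr_read(struct kvm_vcpu *vcpu, unsigned long cidx,
    			unsigned long *out_val)
    {
    	struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);
    	struct kvm_pmc *pmc;
    	u64 enabled, running;
    	int fevent_code;

    	pmc = &kvpmu->pmc[cidx];

    	if (pmc->cinfo.type == SBI_PMU_CTR_TYPE_FW) {
    		fevent_code = get_event_code(pmc->event_idx);
    		pmc->counter_val = kvpmu->fw_event[fevent_code].value;
    	} else if (pmc->perf_event) {
    		pmc->counter_val += perf_event_read_value(pmc->perf_event,
    							  &enabled, &running);
    	}
    	*out_val = pmc->counter_val;

    	return 0;
    }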
272 struct kvm_pmc *pmc = perf_event->overflow_handler_context; in kvm_riscv_pmu_overflow() local
273 struct kvm_vcpu *vcpu = pmc->vcpu; in kvm_riscv_pmu_overflow()
303 set_bit(pmc->idx, kvpmu->pmc_overflown); in kvm_riscv_pmu_overflow()
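The overflow handler runs in perf context with the guest counter as its context pointer: it recovers the pmc and vcpu and marks the counter in kvpmu->pmc_overflown. The SBI PMU overflow model additionally requires injecting an overflow interrupt into the guest; that step, and the stop/period-reload sequence between the matched lines, are assumptions in this sketch:

    static void kvm_riscv_pmu_overflow(struct perf_event *perf_event,
    				   struct perf_sample_data *data,
    				   struct pt_regs *regs)
    {
    	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
    	struct kvm_vcpu *vcpu = pmc->vcpu;
    	struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);

    	/* assumed: stop the event and reload its sample period here */

    	set_bit(pmc->idx, kvpmu->pmc_overflown);
    	/* assumed: raise the guest's PMU overflow interrupt */
    	kvm_riscv_vcpu_set_interrupt(vcpu, IRQ_PMU_OVF);
    }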
309 static long kvm_pmu_create_perf_event(struct kvm_pmc *pmc, struct perf_event_attr *attr, in kvm_pmu_create_perf_event() argument
315 kvm_pmu_release_perf_event(pmc); in kvm_pmu_create_perf_event()
319 pmc->counter_val = 0; in kvm_pmu_create_perf_event()
326 attr->sample_period = kvm_pmu_get_sample_period(pmc); in kvm_pmu_create_perf_event()
328 event = perf_event_create_kernel_counter(attr, -1, current, kvm_riscv_pmu_overflow, pmc); in kvm_pmu_create_perf_event()
334 pmc->perf_event = event; in kvm_pmu_create_perf_event()
336 perf_event_enable(pmc->perf_event); in kvm_pmu_create_perf_event()
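kvm_pmu_create_perf_event tears down any event already bound to the counter before installing a new one: release, optionally clear the virtual counter, derive the initial sample period from it, create the kernel counter with kvm_riscv_pmu_overflow as the overflow callback, then enable. A sketch; the flag names guarding the clear and enable steps are taken from the SBI PMU ABI and are assumptions as far as this listing goes:

    static long kvm_pmu_create_perf_event(struct kvm_pmc *pmc, struct perf_event_attr *attr,
    				      unsigned long flags, unsigned long eidx,
    				      unsigned long evtdata)
    {
    	struct perf_event *event;

    	kvm_pmu_release_perf_event(pmc);

    	if (flags & SBI_PMU_CFG_FLAG_CLEAR_VALUE)	/* assumed guard */
    		pmc->counter_val = 0;

    	attr->sample_period = kvm_pmu_get_sample_period(pmc);

    	event = perf_event_create_kernel_counter(attr, -1, current,
    						 kvm_riscv_pmu_overflow, pmc);
    	if (IS_ERR(event))
    		return PTR_ERR(event);

    	pmc->perf_event = event;
    	if (flags & SBI_PMU_CFG_FLAG_AUTO_START)	/* assumed guard */
    		perf_event_enable(pmc->perf_event);

    	return 0;
    }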
475 retdata->out_val = kvpmu->pmc[cidx].cinfo.value; in kvm_riscv_vcpu_pmu_ctr_info()
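The single match in kvm_riscv_vcpu_pmu_ctr_info is the whole useful body: counter descriptions are precomputed into pmc->cinfo at init time and handed back as one packed word. A minimal sketch, with the index check and the SBI error convention both assumed:

    int kvm_riscv_vcpu_pmu_ctr_info(struct kvm_vcpu *vcpu, unsigned long cidx,
    				struct kvm_vcpu_sbi_return *retdata)
    {
    	struct kvm_pmu *kvpmu = vcpu_to_pmu(vcpu);

    	if (cidx >= kvm_pmu_num_counters(kvpmu)) {	/* assumed bounds check */
    		retdata->err_val = SBI_ERR_INVALID_PARAM;	/* assumed error path */
    		return 0;
    	}

    	retdata->out_val = kvpmu->pmc[cidx].cinfo.value;

    	return 0;
    }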
486 struct kvm_pmc *pmc; in kvm_riscv_vcpu_pmu_ctr_start() local
514 pmc = &kvpmu->pmc[pmc_index]; in kvm_riscv_vcpu_pmu_ctr_start()
516 pmc->counter_val = ival; in kvm_riscv_vcpu_pmu_ctr_start()
519 pmc->counter_val = kvpmu->sdata->ctr_values[i]; in kvm_riscv_vcpu_pmu_ctr_start()
522 if (pmc->cinfo.type == SBI_PMU_CTR_TYPE_FW) { in kvm_riscv_vcpu_pmu_ctr_start()
523 fevent_code = get_event_code(pmc->event_idx); in kvm_riscv_vcpu_pmu_ctr_start()
536 kvpmu->fw_event[fevent_code].value = pmc->counter_val; in kvm_riscv_vcpu_pmu_ctr_start()
537 } else if (pmc->perf_event) { in kvm_riscv_vcpu_pmu_ctr_start()
538 if (unlikely(pmc->started)) { in kvm_riscv_vcpu_pmu_ctr_start()
542 perf_event_period(pmc->perf_event, kvm_pmu_get_sample_period(pmc)); in kvm_riscv_vcpu_pmu_ctr_start()
543 perf_event_enable(pmc->perf_event); in kvm_riscv_vcpu_pmu_ctr_start()
544 pmc->started = true; in kvm_riscv_vcpu_pmu_ctr_start()
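kvm_riscv_vcpu_pmu_ctr_start iterates the counters selected by the SBI call. Each counter's initial value comes either from the ival argument or from the shared-memory snapshot (kvpmu->sdata->ctr_values[i]); firmware counters just latch the value into fw_event[], while hardware counters get a recomputed sample period and are enabled, guarding against double starts. A fragment of the per-counter loop; the flag names, the in-use check, and the already-started error code are assumptions based on the SBI PMU ABI:

    	for_each_set_bit(i, &ctr_mask, RISCV_MAX_COUNTERS) {
    		pmc_index = i + ctr_base;
    		if (!test_bit(pmc_index, kvpmu->pmc_in_use))	/* assumed guard */
    			continue;
    		pmc = &kvpmu->pmc[pmc_index];
    		if (flags & SBI_PMU_START_FLAG_SET_INIT_VALUE)		/* assumed guard */
    			pmc->counter_val = ival;
    		else if (flags & SBI_PMU_START_FLAG_INIT_SNAPSHOT)	/* assumed guard */
    			pmc->counter_val = kvpmu->sdata->ctr_values[i];

    		if (pmc->cinfo.type == SBI_PMU_CTR_TYPE_FW) {
    			fevent_code = get_event_code(pmc->event_idx);
    			kvpmu->fw_event[fevent_code].value = pmc->counter_val;
    		} else if (pmc->perf_event) {
    			if (unlikely(pmc->started)) {
    				sbiret = SBI_ERR_ALREADY_STARTED;	/* assumed error code */
    				continue;
    			}
    			perf_event_period(pmc->perf_event, kvm_pmu_get_sample_period(pmc));
    			perf_event_enable(pmc->perf_event);
    			pmc->started = true;
    		}
    	}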
563 struct kvm_pmc *pmc; in kvm_riscv_vcpu_pmu_ctr_stop() local
583 pmc = &kvpmu->pmc[pmc_index]; in kvm_riscv_vcpu_pmu_ctr_stop()
584 if (pmc->cinfo.type == SBI_PMU_CTR_TYPE_FW) { in kvm_riscv_vcpu_pmu_ctr_stop()
585 fevent_code = get_event_code(pmc->event_idx); in kvm_riscv_vcpu_pmu_ctr_stop()
595 } else if (pmc->perf_event) { in kvm_riscv_vcpu_pmu_ctr_stop()
596 if (pmc->started) { in kvm_riscv_vcpu_pmu_ctr_stop()
598 perf_event_disable(pmc->perf_event); in kvm_riscv_vcpu_pmu_ctr_stop()
599 pmc->started = false; in kvm_riscv_vcpu_pmu_ctr_stop()
606 kvm_pmu_release_perf_event(pmc); in kvm_riscv_vcpu_pmu_ctr_stop()
612 if (pmc->cinfo.type == SBI_PMU_CTR_TYPE_FW) in kvm_riscv_vcpu_pmu_ctr_stop()
613 pmc->counter_val = kvpmu->fw_event[fevent_code].value; in kvm_riscv_vcpu_pmu_ctr_stop()
614 else if (pmc->perf_event) in kvm_riscv_vcpu_pmu_ctr_stop()
615 pmc->counter_val += perf_event_read_value(pmc->perf_event, in kvm_riscv_vcpu_pmu_ctr_stop()
624 kvpmu->sdata->ctr_values[i] = pmc->counter_val; in kvm_riscv_vcpu_pmu_ctr_stop()
629 pmc->event_idx = SBI_PMU_EVENT_IDX_INVALID; in kvm_riscv_vcpu_pmu_ctr_stop()
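The stop path mirrors start: firmware counters are marked stopped in fw_event[], hardware counters are disabled via their perf event, and a reset request drops the counter's binding entirely (kvm_pmu_release_perf_event() plus event_idx = SBI_PMU_EVENT_IDX_INVALID). With the snapshot flag, the final value, firmware or hardware, is folded back and written to the shared-memory area. A fragment of the per-counter loop; the flag names and the fw_event started field are assumptions:

    		pmc = &kvpmu->pmc[pmc_index];
    		if (pmc->cinfo.type == SBI_PMU_CTR_TYPE_FW) {
    			fevent_code = get_event_code(pmc->event_idx);
    			kvpmu->fw_event[fevent_code].started = false;	/* assumed field */
    		} else if (pmc->perf_event) {
    			if (pmc->started) {
    				perf_event_disable(pmc->perf_event);
    				pmc->started = false;
    			}
    			if (flags & SBI_PMU_STOP_FLAG_RESET)		/* assumed guard */
    				kvm_pmu_release_perf_event(pmc);
    		}

    		if (flags & SBI_PMU_STOP_FLAG_TAKE_SNAPSHOT) {		/* assumed guard */
    			if (pmc->cinfo.type == SBI_PMU_CTR_TYPE_FW)
    				pmc->counter_val = kvpmu->fw_event[fevent_code].value;
    			else if (pmc->perf_event)
    				pmc->counter_val += perf_event_read_value(pmc->perf_event,
    									  &enabled, &running);
    			kvpmu->sdata->ctr_values[i] = pmc->counter_val;
    		}

    		if (flags & SBI_PMU_STOP_FLAG_RESET)
    			pmc->event_idx = SBI_PMU_EVENT_IDX_INVALID;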
664 struct kvm_pmc *pmc = NULL; in kvm_riscv_vcpu_pmu_ctr_cfg_match() local
711 pmc = &kvpmu->pmc[ctr_idx]; in kvm_riscv_vcpu_pmu_ctr_cfg_match()
712 pmc->idx = ctr_idx; in kvm_riscv_vcpu_pmu_ctr_cfg_match()
718 ret = kvm_pmu_create_perf_event(pmc, &attr, flags, eidx, evtdata); in kvm_riscv_vcpu_pmu_ctr_cfg_match()
726 pmc->event_idx = eidx; in kvm_riscv_vcpu_pmu_ctr_cfg_match()
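In kvm_riscv_vcpu_pmu_ctr_cfg_match, once a free counter satisfying the constraints has been chosen (ctr_idx), the pmc is bound to it, a perf event is created, and the SBI event index is recorded so later read/start/stop calls can recover the event type. A condensed sketch of the tail of the function; the counter-selection step is elided and the firmware-event predicate is an assumption:

    	pmc = &kvpmu->pmc[ctr_idx];
    	pmc->idx = ctr_idx;

    	if (!is_fw_event) {	/* assumed predicate: only HW events need a perf event */
    		ret = kvm_pmu_create_perf_event(pmc, &attr, flags, eidx, evtdata);
    		if (ret)
    			return ret;
    	}

    	pmc->event_idx = eidx;
    	retdata->out_val = ctr_idx;	/* report the chosen counter to the guest */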
762 struct kvm_pmc *pmc; in kvm_riscv_vcpu_pmu_init() local
799 pmc = &kvpmu->pmc[i]; in kvm_riscv_vcpu_pmu_init()
800 pmc->idx = i; in kvm_riscv_vcpu_pmu_init()
801 pmc->event_idx = SBI_PMU_EVENT_IDX_INVALID; in kvm_riscv_vcpu_pmu_init()
802 pmc->vcpu = vcpu; in kvm_riscv_vcpu_pmu_init()
804 pmc->cinfo.type = SBI_PMU_CTR_TYPE_HW; in kvm_riscv_vcpu_pmu_init()
807 pmc->cinfo.width = 63; in kvm_riscv_vcpu_pmu_init()
809 pmc->cinfo.width = hpm_width; in kvm_riscv_vcpu_pmu_init()
816 pmc->cinfo.csr = CSR_CYCLE + i; in kvm_riscv_vcpu_pmu_init()
818 pmc->cinfo.type = SBI_PMU_CTR_TYPE_FW; in kvm_riscv_vcpu_pmu_init()
819 pmc->cinfo.width = 63; in kvm_riscv_vcpu_pmu_init()
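Initialization lays the counter array out as num_hw_ctrs hardware counters followed by firmware counters. Hardware counters take their width from the host (63 for the fixed counters, hpm_width for the programmable ones) and a CSR number assigned sequentially from CSR_CYCLE; firmware counters are all 63-bit. A fragment of the per-counter loop; the cutoff of three fixed slots is an assumption:

    		pmc = &kvpmu->pmc[i];
    		pmc->idx = i;
    		pmc->event_idx = SBI_PMU_EVENT_IDX_INVALID;
    		pmc->vcpu = vcpu;
    		if (i < kvpmu->num_hw_ctrs) {
    			pmc->cinfo.type = SBI_PMU_CTR_TYPE_HW;
    			if (i < 3)	/* assumed: cycle, time, instret slots */
    				pmc->cinfo.width = 63;
    			else
    				pmc->cinfo.width = hpm_width;
    			/*
    			 * CSR numbers are assigned sequentially; they carry no
    			 * relation to the underlying host counters.
    			 */
    			pmc->cinfo.csr = CSR_CYCLE + i;
    		} else {
    			pmc->cinfo.type = SBI_PMU_CTR_TYPE_FW;
    			pmc->cinfo.width = 63;
    		}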
829 struct kvm_pmc *pmc; in kvm_riscv_vcpu_pmu_deinit() local
836 pmc = &kvpmu->pmc[i]; in kvm_riscv_vcpu_pmu_deinit()
837 pmc->counter_val = 0; in kvm_riscv_vcpu_pmu_deinit()
838 kvm_pmu_release_perf_event(pmc); in kvm_riscv_vcpu_pmu_deinit()
839 pmc->event_idx = SBI_PMU_EVENT_IDX_INVALID; in kvm_riscv_vcpu_pmu_deinit()
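Teardown walks every counter, releasing any live perf event and restoring the reset state so the vCPU's PMU can be reinitialized cleanly. The loop body follows directly from the three matched lines:

    	for (i = 0; i < kvm_pmu_num_counters(kvpmu); i++) {
    		pmc = &kvpmu->pmc[i];
    		pmc->counter_val = 0;
    		kvm_pmu_release_perf_event(pmc);
    		pmc->event_idx = SBI_PMU_EVENT_IDX_INVALID;
    	}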