Lines Matching full:pmc

25 static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc);
26 static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc);
27 static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc);
29 static struct kvm_vcpu *kvm_pmc_to_vcpu(const struct kvm_pmc *pmc) in kvm_pmc_to_vcpu() argument
31 return container_of(pmc, struct kvm_vcpu, arch.pmu.pmc[pmc->idx]); in kvm_pmc_to_vcpu()
36 return &vcpu->arch.pmu.pmc[cnt_idx]; in kvm_vcpu_idx_to_pmc()
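
The two helpers above map between a vCPU and its counters in both directions without storing a back-pointer in each kvm_pmc: the counter's idx names its slot in the vCPU's pmc[] array, and container_of() recovers the enclosing kvm_vcpu from the address of that slot. Below is a minimal userspace sketch of the same pattern; the struct names and layout are simplified stand-ins, not the real KVM types, and the variable array index inside offsetof() relies on the same GCC/Clang extension the kernel code depends on.

#include <stddef.h>
#include <stdio.h>

/* Hypothetical, stripped-down stand-ins for kvm_pmc / kvm_vcpu. */
struct pmc {
        unsigned int idx;
};

struct vcpu {
        int id;
        struct pmc pmc[32];
};

/*
 * Same trick as the kernel's container_of(): subtract the member's
 * offset from the member's address to get back to the container.
 * The non-constant array index is accepted by GCC/Clang's
 * __builtin_offsetof as an extension, exactly as in the kernel code.
 */
#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

static struct vcpu *pmc_to_vcpu(struct pmc *c)
{
        /* c->idx tells us which array slot we were handed. */
        return container_of(c, struct vcpu, pmc[c->idx]);
}

static struct pmc *vcpu_idx_to_pmc(struct vcpu *vcpu, unsigned int i)
{
        return &vcpu->pmc[i];
}

int main(void)
{
        struct vcpu v = { .id = 7 };

        for (unsigned int i = 0; i < 32; i++)
                v.pmc[i].idx = i;

        /* Round trip: vcpu -> pmc -> vcpu. */
        struct pmc *c = vcpu_idx_to_pmc(&v, 5);
        printf("recovered vcpu id = %d\n", pmc_to_vcpu(c)->id);
        return 0;
}

The round trip is pure pointer arithmetic, so each counter only needs to carry its index and the owning vCPU is recomputed on demand.
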
81 * @pmc: counter context
83 static bool kvm_pmc_is_64bit(struct kvm_pmc *pmc) in kvm_pmc_is_64bit() argument
85 struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc); in kvm_pmc_is_64bit()
87 return (pmc->idx == ARMV8_PMU_CYCLE_IDX || in kvm_pmc_is_64bit()
91 static bool kvm_pmc_has_64bit_overflow(struct kvm_pmc *pmc) in kvm_pmc_has_64bit_overflow() argument
93 struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc); in kvm_pmc_has_64bit_overflow()
96 if (kvm_pmu_counter_is_hyp(vcpu, pmc->idx)) in kvm_pmc_has_64bit_overflow()
99 return (pmc->idx < ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LP)) || in kvm_pmc_has_64bit_overflow()
100 (pmc->idx == ARMV8_PMU_CYCLE_IDX && (val & ARMV8_PMU_PMCR_LC)); in kvm_pmc_has_64bit_overflow()
103 static bool kvm_pmu_counter_can_chain(struct kvm_pmc *pmc) in kvm_pmu_counter_can_chain() argument
105 return (!(pmc->idx & 1) && (pmc->idx + 1) < ARMV8_PMU_CYCLE_IDX && in kvm_pmu_counter_can_chain()
106 !kvm_pmc_has_64bit_overflow(pmc)); in kvm_pmu_counter_can_chain()
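
The three predicates above separate two notions of counter width and derive the chaining rule from them: kvm_pmc_is_64bit() says whether the counter holds 64 bits of state, kvm_pmc_has_64bit_overflow() says whether it also overflows at bit 63 (PMCR_EL0.LP for event counters, PMCR_EL0.LC for the cycle counter), and kvm_pmu_counter_can_chain() allows only an even-numbered event counter that still overflows at 32 bits to chain into its odd neighbour. Here is a standalone sketch of that decision logic, with the sysreg plumbing replaced by plain parameters and the EL2-reserved counter special case (kvm_pmu_counter_is_hyp()) left out; the constants are assumed to match the kernel's definitions.

#include <stdbool.h>
#include <stdint.h>

/* Assumed architectural constants (mirroring the kernel's macros). */
#define CYCLE_IDX       31u             /* ARMV8_PMU_CYCLE_IDX */
#define PMCR_LC         (1u << 6)       /* 64-bit overflow, cycle counter */
#define PMCR_LP         (1u << 7)       /* 64-bit overflow, event counters */

/* Does counter 'idx' overflow at bit 63 rather than bit 31? */
static bool has_64bit_overflow(unsigned int idx, uint64_t pmcr)
{
        return (idx < CYCLE_IDX && (pmcr & PMCR_LP)) ||
               (idx == CYCLE_IDX && (pmcr & PMCR_LC));
}

/*
 * An even-numbered event counter can "chain": its 32-bit overflow is
 * forwarded as an increment of the next (odd) counter. That only makes
 * sense if the odd neighbour exists below the cycle counter and the
 * counter still overflows at 32 bits.
 */
static bool counter_can_chain(unsigned int idx, uint64_t pmcr)
{
        return !(idx & 1) && (idx + 1) < CYCLE_IDX &&
               !has_64bit_overflow(idx, pmcr);
}

Chaining requires 32-bit overflow because its whole purpose is to propagate the carry out of bit 31; once a counter overflows at bit 63 there is no carry to forward.
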
119 static u64 kvm_pmc_read_evtreg(const struct kvm_pmc *pmc) in kvm_pmc_read_evtreg() argument
121 return __vcpu_sys_reg(kvm_pmc_to_vcpu(pmc), counter_index_to_evtreg(pmc->idx)); in kvm_pmc_read_evtreg()
124 static u64 kvm_pmu_get_pmc_value(struct kvm_pmc *pmc) in kvm_pmu_get_pmc_value() argument
126 struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc); in kvm_pmu_get_pmc_value()
129 reg = counter_index_to_reg(pmc->idx); in kvm_pmu_get_pmc_value()
136 if (pmc->perf_event) in kvm_pmu_get_pmc_value()
137 counter += perf_event_read_value(pmc->perf_event, &enabled, in kvm_pmu_get_pmc_value()
140 if (!kvm_pmc_is_64bit(pmc)) in kvm_pmu_get_pmc_value()
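
kvm_pmu_get_pmc_value() above adds two pieces of state: the value last saved in the vCPU's counter register and, if a perf event currently backs the counter, the live count read via perf_event_read_value(). The sum is then truncated to 32 bits unless the counter is architecturally 64-bit. A small sketch of just that arithmetic, with the perf delta passed in as a plain parameter:

#include <stdbool.h>
#include <stdint.h>

/*
 * Guest-visible counter value = value saved in the counter register
 * plus whatever the backing perf event has accumulated since the
 * counter was last (re)programmed. 32-bit counters wrap at bit 31
 * from the guest's point of view.
 */
static uint64_t pmc_value(uint64_t saved_reg, bool have_perf_event,
                          uint64_t perf_delta, bool is_64bit)
{
        uint64_t counter = saved_reg;

        if (have_perf_event)
                counter += perf_delta;

        if (!is_64bit)
                counter &= 0xffffffffull;       /* lower_32_bits() */

        return counter;
}
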
159 static void kvm_pmu_set_pmc_value(struct kvm_pmc *pmc, u64 val, bool force) in kvm_pmu_set_pmc_value() argument
161 struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc); in kvm_pmu_set_pmc_value()
164 kvm_pmu_release_perf_event(pmc); in kvm_pmu_set_pmc_value()
166 reg = counter_index_to_reg(pmc->idx); in kvm_pmu_set_pmc_value()
168 if (vcpu_mode_is_32bit(vcpu) && pmc->idx != ARMV8_PMU_CYCLE_IDX && in kvm_pmu_set_pmc_value()
183 kvm_pmu_create_perf_event(pmc); in kvm_pmu_set_pmc_value()
202 * @pmc: The PMU counter pointer
204 static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc) in kvm_pmu_release_perf_event() argument
206 if (pmc->perf_event) { in kvm_pmu_release_perf_event()
207 perf_event_disable(pmc->perf_event); in kvm_pmu_release_perf_event()
208 perf_event_release_kernel(pmc->perf_event); in kvm_pmu_release_perf_event()
209 pmc->perf_event = NULL; in kvm_pmu_release_perf_event()
215 * @pmc: The PMU counter pointer
219 static void kvm_pmu_stop_counter(struct kvm_pmc *pmc) in kvm_pmu_stop_counter() argument
221 struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc); in kvm_pmu_stop_counter()
224 if (!pmc->perf_event) in kvm_pmu_stop_counter()
227 val = kvm_pmu_get_pmc_value(pmc); in kvm_pmu_stop_counter()
229 reg = counter_index_to_reg(pmc->idx); in kvm_pmu_stop_counter()
233 kvm_pmu_release_perf_event(pmc); in kvm_pmu_stop_counter()
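
kvm_pmu_stop_counter() above works in a fixed order: snapshot the counter (saved register plus live perf delta), write the snapshot back into the emulated counter register, and only then release the perf event, so no increments are lost across a reprogram. A compressed sketch of that ordering, using hypothetical stripped-down state rather than the real structures:

#include <stdint.h>

/* Hypothetical per-counter state for illustration only. */
struct sketch_pmc {
        uint64_t saved_reg;     /* the emulated counter sysreg */
        uint64_t perf_delta;    /* stands in for the live perf_event count */
        int has_event;
};

/*
 * Conceptual order in kvm_pmu_stop_counter(): fold the live perf
 * count into the emulated register first, then tear the event down.
 */
static void stop_counter(struct sketch_pmc *pmc)
{
        if (!pmc->has_event)
                return;

        pmc->saved_reg += pmc->perf_delta;      /* snapshot ... */
        pmc->perf_delta = 0;
        pmc->has_event = 0;                     /* ... then release */
}
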
247 pmu->pmc[i].idx = i; in kvm_pmu_vcpu_init()
331 static void kvm_pmc_enable_perf_event(struct kvm_pmc *pmc) in kvm_pmc_enable_perf_event() argument
333 if (!pmc->perf_event) { in kvm_pmc_enable_perf_event()
334 kvm_pmu_create_perf_event(pmc); in kvm_pmc_enable_perf_event()
338 perf_event_enable(pmc->perf_event); in kvm_pmc_enable_perf_event()
339 if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE) in kvm_pmc_enable_perf_event()
343 static void kvm_pmc_disable_perf_event(struct kvm_pmc *pmc) in kvm_pmc_disable_perf_event() argument
345 if (pmc->perf_event) in kvm_pmc_disable_perf_event()
346 perf_event_disable(pmc->perf_event); in kvm_pmc_disable_perf_event()
357 struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, i); in kvm_pmu_reprogram_counter_mask() local
362 if (kvm_pmu_counter_is_enabled(pmc)) in kvm_pmu_reprogram_counter_mask()
363 kvm_pmc_enable_perf_event(pmc); in kvm_pmu_reprogram_counter_mask()
365 kvm_pmc_disable_perf_event(pmc); in kvm_pmu_reprogram_counter_mask()
499 struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, i); in kvm_pmu_counter_increment() local
510 if (!kvm_pmc_is_64bit(pmc)) in kvm_pmu_counter_increment()
515 if (kvm_pmc_has_64bit_overflow(pmc) ? reg : lower_32_bits(reg)) in kvm_pmu_counter_increment()
521 if (kvm_pmu_counter_can_chain(pmc)) in kvm_pmu_counter_increment()
528 static u64 compute_period(struct kvm_pmc *pmc, u64 counter) in compute_period() argument
532 if (kvm_pmc_is_64bit(pmc) && kvm_pmc_has_64bit_overflow(pmc)) in compute_period()
547 struct kvm_pmc *pmc = perf_event->overflow_handler_context; in kvm_pmu_perf_overflow() local
549 struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc); in kvm_pmu_perf_overflow()
550 int idx = pmc->idx; in kvm_pmu_perf_overflow()
559 period = compute_period(pmc, local64_read(&perf_event->count)); in kvm_pmu_perf_overflow()
567 if (kvm_pmu_counter_can_chain(pmc)) in kvm_pmu_perf_overflow()
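
compute_period() above answers "how many increments remain until this counter overflows?", and kvm_pmu_perf_overflow() uses the result as the backing perf event's next sample period; when the overflowing counter can chain, its odd neighbour is incremented as well. The period is the two's-complement distance to the next wrap: -counter modulo 2^32 for a counter that overflows at 32 bits, modulo 2^64 otherwise. A hedged sketch of the arithmetic, with the width decision collapsed into one parameter:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Distance from the current counter value to the next overflow. */
static uint64_t compute_period(uint64_t counter, bool overflow_at_64bit)
{
        if (overflow_at_64bit)
                return -counter;                        /* mod 2^64 */

        return (-counter) & 0xffffffffull;              /* mod 2^32 */
}

int main(void)
{
        /* A 32-bit counter at 0xfffffff0 overflows after 16 more events. */
        printf("%llu\n",
               (unsigned long long)compute_period(0xfffffff0ull, false));
        return 0;
}
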
633 static bool kvm_pmu_counter_is_enabled(struct kvm_pmc *pmc) in kvm_pmu_counter_is_enabled() argument
635 struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc); in kvm_pmu_counter_is_enabled()
638 if (!(__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(pmc->idx))) in kvm_pmu_counter_is_enabled()
641 if (kvm_pmu_counter_is_hyp(vcpu, pmc->idx)) in kvm_pmu_counter_is_enabled()
647 static bool kvm_pmc_counts_at_el0(struct kvm_pmc *pmc) in kvm_pmc_counts_at_el0() argument
649 u64 evtreg = kvm_pmc_read_evtreg(pmc); in kvm_pmc_counts_at_el0()
656 static bool kvm_pmc_counts_at_el1(struct kvm_pmc *pmc) in kvm_pmc_counts_at_el1() argument
658 u64 evtreg = kvm_pmc_read_evtreg(pmc); in kvm_pmc_counts_at_el1()
665 static bool kvm_pmc_counts_at_el2(struct kvm_pmc *pmc) in kvm_pmc_counts_at_el2() argument
667 struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc); in kvm_pmc_counts_at_el2()
670 if (!kvm_pmu_counter_is_hyp(vcpu, pmc->idx) && (mdcr & MDCR_EL2_HPMD)) in kvm_pmc_counts_at_el2()
673 return kvm_pmc_read_evtreg(pmc) & ARMV8_PMU_INCLUDE_EL2; in kvm_pmc_counts_at_el2()
678 * @pmc: Counter context
680 static void kvm_pmu_create_perf_event(struct kvm_pmc *pmc) in kvm_pmu_create_perf_event() argument
682 struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc); in kvm_pmu_create_perf_event()
688 evtreg = kvm_pmc_read_evtreg(pmc); in kvm_pmu_create_perf_event()
690 kvm_pmu_stop_counter(pmc); in kvm_pmu_create_perf_event()
691 if (pmc->idx == ARMV8_PMU_CYCLE_IDX) in kvm_pmu_create_perf_event()
716 attr.disabled = !kvm_pmu_counter_is_enabled(pmc); in kvm_pmu_create_perf_event()
717 attr.exclude_user = !kvm_pmc_counts_at_el0(pmc); in kvm_pmu_create_perf_event()
727 attr.exclude_kernel = !kvm_pmc_counts_at_el2(pmc); in kvm_pmu_create_perf_event()
729 attr.exclude_kernel = !kvm_pmc_counts_at_el1(pmc); in kvm_pmu_create_perf_event()
736 if (kvm_pmc_is_64bit(pmc)) in kvm_pmu_create_perf_event()
739 attr.sample_period = compute_period(pmc, kvm_pmu_get_pmc_value(pmc)); in kvm_pmu_create_perf_event()
742 kvm_pmu_perf_overflow, pmc); in kvm_pmu_create_perf_event()
750 pmc->perf_event = event; in kvm_pmu_create_perf_event()
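
kvm_pmu_create_perf_event() above translates the guest's view of a counter (event number, enable bit, and EL filtering read from its event-type register) into a host perf_event_attr, installs kvm_pmu_perf_overflow() as the overflow callback via perf_event_create_kernel_counter(), and stashes the resulting event in pmc->perf_event. The core of the translation is inverting the guest's "count at ELx" filters into perf's exclude_* flags; which EL counts as "kernel" depends on whether the vCPU is currently running its guest hypervisor at virtual EL2. A hedged sketch of that translation, using a simplified attribute struct rather than the real perf_event_attr:

#include <stdbool.h>
#include <stdint.h>

/* Simplified stand-in for the handful of perf_event_attr fields used here. */
struct sketch_attr {
        bool disabled;          /* counter not currently enabled by the guest */
        bool exclude_user;      /* do not count guest EL0 */
        bool exclude_kernel;    /* do not count guest EL1 (or virtual EL2) */
        uint64_t sample_period; /* increments until the emulated overflow */
};

/*
 * Guest filtering says "count at ELx?"; perf wants the inverse,
 * "exclude ELx?". Whether "kernel" means the guest's EL1 or its
 * virtual EL2 depends on whether the vCPU is in a hyp context.
 */
static void fill_attr(struct sketch_attr *attr, bool enabled,
                      bool counts_at_el0, bool counts_at_el1,
                      bool counts_at_el2, bool vcpu_in_vel2,
                      uint64_t period)
{
        attr->disabled = !enabled;
        attr->exclude_user = !counts_at_el0;

        if (vcpu_in_vel2)
                attr->exclude_kernel = !counts_at_el2;
        else
                attr->exclude_kernel = !counts_at_el1;

        attr->sample_period = period;
}

The same inversion explains kvm_pmu_nested_transition() at the end of the listing: on a switch between EL1 and virtual EL2, only counters whose EL1 and EL2 filters disagree need their perf events recreated, because only for those does the exclude_kernel decision change.
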
759 * When OS accesses PMXEVTYPER_EL0, that means it wants to set a PMC to count an
766 struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, select_idx); in kvm_pmu_set_counter_event_type() local
772 reg = counter_index_to_evtreg(pmc->idx); in kvm_pmu_set_counter_event_type()
775 kvm_pmu_create_perf_event(pmc); in kvm_pmu_set_counter_event_type()
1239 struct kvm_pmc *pmc = kvm_vcpu_idx_to_pmc(vcpu, i); in kvm_pmu_nested_transition() local
1246 if (kvm_pmc_counts_at_el1(pmc) == kvm_pmc_counts_at_el2(pmc)) in kvm_pmu_nested_transition()
1249 kvm_pmu_create_perf_event(pmc); in kvm_pmu_nested_transition()