Lines Matching full:pmc

58 * - There are three types of index to access perf counters (PMC):
68 * 3. Global PMC Index (named pmc): pmc is an index specific to PMU
69 * code. Each pmc, stored in kvm_pmc.idx field, is unique across
71 * between pmc and perf counters is as the following:
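The fragments above describe the global PMC index ("pmc", stored in kvm_pmc.idx) that is unique across both gp and fixed counters. A minimal userspace sketch of that mapping, assuming the conventional layout where gp counters occupy indices starting at 0 and fixed counters start at KVM_FIXED_PMC_BASE_IDX (taken to be 32 here, as the idx == 32 test further down suggests):

#include <stdio.h>

/* Illustrative value only; the real constant lives in the KVM headers. */
#define KVM_FIXED_PMC_BASE_IDX 32

/* Map an (is_fixed, hardware index) pair to the global pmc idx used by pmu.c. */
static int global_pmc_idx(int is_fixed, int hw_idx)
{
        return is_fixed ? KVM_FIXED_PMC_BASE_IDX + hw_idx : hw_idx;
}

int main(void)
{
        printf("gp counter 1    -> pmc idx %d\n", global_pmc_idx(0, 1));
        printf("fixed counter 0 -> pmc idx %d\n", global_pmc_idx(1, 0));
        return 0;
}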
99 static inline void __kvm_perf_overflow(struct kvm_pmc *pmc, bool in_pmi) in __kvm_perf_overflow() argument
101 struct kvm_pmu *pmu = pmc_to_pmu(pmc); in __kvm_perf_overflow()
104 if (pmc->perf_event && pmc->perf_event->attr.precise_ip) { in __kvm_perf_overflow()
119 __set_bit(pmc->idx, (unsigned long *)&pmu->global_status); in __kvm_perf_overflow()
122 if (pmc->intr && !skip_pmi) in __kvm_perf_overflow()
123 kvm_make_request(KVM_REQ_PMI, pmc->vcpu); in __kvm_perf_overflow()
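A rough userspace model of the overflow bookkeeping shown above: record the overflow in a global_status bitmap, then decide whether a PMI should be injected, skipping it for PEBS (precise_ip) counters when already inside a PMI. The struct and helper names are simplified stand-ins, not the kernel types:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct toy_pmc {
        int      idx;
        bool     precise_ip;     /* stand-in for perf_event->attr.precise_ip */
        bool     intr;
        uint64_t *global_status;
};

/* Mirrors the visible flow of __kvm_perf_overflow(): mark the overflow in
 * global_status and report whether a PMI should be requested. */
static bool toy_overflow(struct toy_pmc *pmc, bool in_pmi)
{
        /* PEBS counters report through the PEBS buffer; when already in a
         * PMI the interrupt is skipped for them (simplified). */
        bool skip_pmi = in_pmi && pmc->precise_ip;

        *pmc->global_status |= 1ull << pmc->idx;

        return pmc->intr && !skip_pmi;
}

int main(void)
{
        uint64_t status = 0;
        struct toy_pmc pmc = { .idx = 32, .intr = true, .global_status = &status };
        bool inject = toy_overflow(&pmc, false);

        printf("inject PMI: %d, global_status: %#llx\n",
               inject, (unsigned long long)status);
        return 0;
}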
130 struct kvm_pmc *pmc = perf_event->overflow_handler_context; in kvm_perf_overflow() local
137 if (test_and_set_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi)) in kvm_perf_overflow()
140 __kvm_perf_overflow(pmc, true); in kvm_perf_overflow()
142 kvm_make_request(KVM_REQ_PMU, pmc->vcpu); in kvm_perf_overflow()
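The test_and_set_bit() on reprogram_pmi above both defers the heavy reprogramming work out of the overflow callback (to KVM_REQ_PMU processing) and de-duplicates requests for the same counter. A compact illustration of that pattern in plain C (not the kernel's atomic bitops):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t reprogram_pending;   /* stand-in for pmu->reprogram_pmi */

/* Returns true if the counter was already queued for reprogramming. */
static bool queue_reprogram(int idx)
{
        bool was_set = reprogram_pending & (1ull << idx);

        reprogram_pending |= 1ull << idx;
        return was_set;
}

int main(void)
{
        /* The first overflow queues the work... */
        if (!queue_reprogram(3))
                printf("counter 3 queued, request KVM_REQ_PMU\n");
        /* ...a second overflow before the vCPU has processed it is dropped. */
        if (queue_reprogram(3))
                printf("counter 3 already pending, nothing to do\n");
        return 0;
}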
145 static u64 pmc_get_pebs_precise_level(struct kvm_pmc *pmc) in pmc_get_pebs_precise_level() argument
154 if ((pmc->idx == 0 && x86_match_cpu(vmx_pebs_pdist_cpu)) || in pmc_get_pebs_precise_level()
155 (pmc->idx == 32 && x86_match_cpu(vmx_pebs_pdir_cpu))) in pmc_get_pebs_precise_level()
167 static u64 get_sample_period(struct kvm_pmc *pmc, u64 counter_value) in get_sample_period() argument
169 u64 sample_period = (-counter_value) & pmc_bitmask(pmc); in get_sample_period()
172 sample_period = pmc_bitmask(pmc) + 1; in get_sample_period()
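The sample period above is simply the distance from the current counter value to the next wrap under the counter's width mask, with a full period substituted when the counter is zero. A worked standalone example, assuming a 48-bit counter width purely for illustration:

#include <stdint.h>
#include <stdio.h>

/* Distance from the current counter value to the next overflow of a
 * 'width'-bit counter.  Mirrors the arithmetic of get_sample_period(). */
static uint64_t sample_period(uint64_t counter, unsigned int width)
{
        uint64_t mask = (width < 64) ? (1ull << width) - 1 : ~0ull;
        uint64_t period = (-counter) & mask;

        /* counter == 0 would yield a period of 0; use a full wrap instead. */
        if (!period)
                period = mask + 1;
        return period;
}

int main(void)
{
        /* 16 events until a 48-bit counter overflows. */
        printf("%llu\n", (unsigned long long)sample_period(0xfffffffffff0ull, 48));
        /* A zero counter gets a full 2^48 period. */
        printf("%llu\n", (unsigned long long)sample_period(0, 48));
        return 0;
}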
176 static int pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type, u64 config, in pmc_reprogram_counter() argument
180 struct kvm_pmu *pmu = pmc_to_pmu(pmc); in pmc_reprogram_counter()
192 bool pebs = test_bit(pmc->idx, (unsigned long *)&pmu->pebs_enable); in pmc_reprogram_counter()
194 attr.sample_period = get_sample_period(pmc, pmc->counter); in pmc_reprogram_counter()
212 attr.precise_ip = pmc_get_pebs_precise_level(pmc); in pmc_reprogram_counter()
216 kvm_perf_overflow, pmc); in pmc_reprogram_counter()
218 pr_debug_ratelimited("kvm_pmu: event creation failed %ld for pmc->idx = %d\n", in pmc_reprogram_counter()
219 PTR_ERR(event), pmc->idx); in pmc_reprogram_counter()
223 pmc->perf_event = event; in pmc_reprogram_counter()
224 pmc_to_pmu(pmc)->event_count++; in pmc_reprogram_counter()
225 pmc->is_paused = false; in pmc_reprogram_counter()
226 pmc->intr = intr || pebs; in pmc_reprogram_counter()
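pmc_reprogram_counter() fills a struct perf_event_attr (type, config, sample period, precise_ip for PEBS) and creates an in-kernel perf event with kvm_perf_overflow() as its callback. The same attribute fields exist for userspace perf_event_open(), so a loose userspace analogue of the setup looks like the sketch below; the config value and period are placeholders, and guest-specific details (host/guest exclusion, the PEBS decision) are omitted:

#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
        struct perf_event_attr attr;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_RAW;       /* raw eventsel, as in pmu.c */
        attr.config = 0xc0;              /* e.g. instructions retired; placeholder */
        attr.sample_period = 100000;     /* stand-in for get_sample_period() */
        attr.exclude_kernel = 1;         /* analogous to the OS/USR filtering */
        attr.precise_ip = 0;             /* >0 would request PEBS-backed sampling */

        fd = syscall(SYS_perf_event_open, &attr, 0 /* self */, -1 /* any cpu */,
                     -1 /* no group */, 0);
        if (fd < 0)
                perror("perf_event_open");
        else
                close(fd);
        return 0;
}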
230 static bool pmc_pause_counter(struct kvm_pmc *pmc) in pmc_pause_counter() argument
232 u64 counter = pmc->counter; in pmc_pause_counter()
236 if (pmc->perf_event && !pmc->is_paused) in pmc_pause_counter()
237 counter += perf_event_pause(pmc->perf_event, true); in pmc_pause_counter()
245 prev_counter = counter & pmc_bitmask(pmc); in pmc_pause_counter()
247 counter += pmc->emulated_counter; in pmc_pause_counter()
248 pmc->counter = counter & pmc_bitmask(pmc); in pmc_pause_counter()
250 pmc->emulated_counter = 0; in pmc_pause_counter()
251 pmc->is_paused = true; in pmc_pause_counter()
253 return pmc->counter < prev_counter; in pmc_pause_counter()
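pmc_pause_counter() folds the accrued hardware count and the emulated count into pmc->counter, and the final '< prev_counter' comparison reports whether adding the emulated events wrapped the counter, i.e. an overflow that still has to be delivered to the guest. A small model of that wrap check (the width and helper name are illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* 'width'-bit counter: fold in emulated events and report a wrap-around. */
static bool fold_emulated(uint64_t *counter, uint64_t emulated, unsigned int width)
{
        uint64_t mask = (width < 64) ? (1ull << width) - 1 : ~0ull;
        uint64_t prev = *counter & mask;

        *counter = (prev + emulated) & mask;
        return *counter < prev;          /* wrapped => overflow to emulate */
}

int main(void)
{
        uint64_t counter = 0xfffffffffffeull;   /* 2 below a 48-bit overflow */
        bool overflow = fold_emulated(&counter, 3, 48);

        printf("overflow: %d, counter now %#llx\n",
               overflow, (unsigned long long)counter);
        return 0;
}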
256 static bool pmc_resume_counter(struct kvm_pmc *pmc) in pmc_resume_counter() argument
258 if (!pmc->perf_event) in pmc_resume_counter()
262 if (is_sampling_event(pmc->perf_event) && in pmc_resume_counter()
263 perf_event_period(pmc->perf_event, in pmc_resume_counter()
264 get_sample_period(pmc, pmc->counter))) in pmc_resume_counter()
267 if (test_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->pebs_enable) != in pmc_resume_counter()
268 (!!pmc->perf_event->attr.precise_ip)) in pmc_resume_counter()
272 perf_event_enable(pmc->perf_event); in pmc_resume_counter()
273 pmc->is_paused = false; in pmc_resume_counter()
278 static void pmc_release_perf_event(struct kvm_pmc *pmc) in pmc_release_perf_event() argument
280 if (pmc->perf_event) { in pmc_release_perf_event()
281 perf_event_release_kernel(pmc->perf_event); in pmc_release_perf_event()
282 pmc->perf_event = NULL; in pmc_release_perf_event()
283 pmc->current_config = 0; in pmc_release_perf_event()
284 pmc_to_pmu(pmc)->event_count--; in pmc_release_perf_event()
288 static void pmc_stop_counter(struct kvm_pmc *pmc) in pmc_stop_counter() argument
290 if (pmc->perf_event) { in pmc_stop_counter()
291 pmc->counter = pmc_read_counter(pmc); in pmc_stop_counter()
292 pmc_release_perf_event(pmc); in pmc_stop_counter()
296 static void pmc_update_sample_period(struct kvm_pmc *pmc) in pmc_update_sample_period() argument
298 if (!pmc->perf_event || pmc->is_paused || in pmc_update_sample_period()
299 !is_sampling_event(pmc->perf_event)) in pmc_update_sample_period()
302 perf_event_period(pmc->perf_event, in pmc_update_sample_period()
303 get_sample_period(pmc, pmc->counter)); in pmc_update_sample_period()
306 void pmc_write_counter(struct kvm_pmc *pmc, u64 val) in pmc_write_counter() argument
316 pmc->emulated_counter = 0; in pmc_write_counter()
317 pmc->counter += val - pmc_read_counter(pmc); in pmc_write_counter()
318 pmc->counter &= pmc_bitmask(pmc); in pmc_write_counter()
319 pmc_update_sample_period(pmc); in pmc_write_counter()
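A guest write to the counter is applied as a delta against the live value returned by pmc_read_counter(), so counts accumulated by a still-running perf event are not lost, and pending emulated events are dropped because the guest has overwritten them. A standalone illustration of that delta update with stand-in fields:

#include <stdint.h>
#include <stdio.h>

struct toy_counter {
        uint64_t base;       /* stand-in for pmc->counter */
        uint64_t hw_count;   /* stand-in for the running perf event's count */
        uint64_t emulated;   /* stand-in for pmc->emulated_counter */
};

static uint64_t toy_read(const struct toy_counter *c, uint64_t mask)
{
        return (c->base + c->hw_count + c->emulated) & mask;
}

/* Guest writes 'val': drop emulated events, keep the hardware delta intact. */
static void toy_write(struct toy_counter *c, uint64_t val, uint64_t mask)
{
        c->emulated = 0;
        c->base = (c->base + val - toy_read(c, mask)) & mask;
}

int main(void)
{
        uint64_t mask = (1ull << 48) - 1;
        struct toy_counter c = { .base = 100, .hw_count = 7, .emulated = 2 };

        toy_write(&c, 0, mask);
        /* The guest now reads back exactly the value it wrote. */
        printf("read after write(0): %llu\n", (unsigned long long)toy_read(&c, mask));
        return 0;
}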
429 static bool check_pmu_event_filter(struct kvm_pmc *pmc) in check_pmu_event_filter() argument
432 struct kvm *kvm = pmc->vcpu->kvm; in check_pmu_event_filter()
438 if (pmc_is_gp(pmc)) in check_pmu_event_filter()
439 return is_gp_event_allowed(filter, pmc->eventsel); in check_pmu_event_filter()
441 return is_fixed_event_allowed(filter, pmc->idx); in check_pmu_event_filter()
444 static bool pmc_event_is_allowed(struct kvm_pmc *pmc) in pmc_event_is_allowed() argument
446 return pmc_is_globally_enabled(pmc) && pmc_speculative_in_use(pmc) && in pmc_event_is_allowed()
447 check_pmu_event_filter(pmc); in pmc_event_is_allowed()
450 static int reprogram_counter(struct kvm_pmc *pmc) in reprogram_counter() argument
452 struct kvm_pmu *pmu = pmc_to_pmu(pmc); in reprogram_counter()
453 u64 eventsel = pmc->eventsel; in reprogram_counter()
458 emulate_overflow = pmc_pause_counter(pmc); in reprogram_counter()
460 if (!pmc_event_is_allowed(pmc)) in reprogram_counter()
464 __kvm_perf_overflow(pmc, false); in reprogram_counter()
469 if (pmc_is_fixed(pmc)) { in reprogram_counter()
471 pmc->idx - KVM_FIXED_PMC_BASE_IDX); in reprogram_counter()
481 if (pmc->current_config == new_config && pmc_resume_counter(pmc)) in reprogram_counter()
484 pmc_release_perf_event(pmc); in reprogram_counter()
486 pmc->current_config = new_config; in reprogram_counter()
488 return pmc_reprogram_counter(pmc, PERF_TYPE_RAW, in reprogram_counter()
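reprogram_counter() is the single choke point visible above: pause the counter (learning whether an overflow must be emulated), check the filter and enable bits, then either resume the existing perf event when the configuration is unchanged or release it and create a new one. A condensed model of that decision flow, with stub predicates standing in for the kernel helpers:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stub predicates; in the kernel these are pmc_pause_counter(),
 * pmc_event_is_allowed() and pmc_resume_counter(). */
static bool toy_pause(void)   { return false; } /* overflow to emulate?   */
static bool toy_allowed(void) { return true;  } /* filter + enable bits    */
static bool toy_resume(void)  { return true;  } /* paused event reusable?  */

static void toy_reprogram(uint64_t *current_config, uint64_t new_config)
{
        bool emulate_overflow = toy_pause();

        if (!toy_allowed())
                return;                          /* leave the counter stopped */

        if (emulate_overflow)
                printf("inject emulated overflow first\n");

        /* Fast path: same config and the paused event can simply resume. */
        if (*current_config == new_config && toy_resume())
                return;

        /* Slow path: drop the old perf event and create a fresh one. */
        printf("recreate perf event for config %#llx\n",
               (unsigned long long)new_config);
        *current_config = new_config;
}

int main(void)
{
        uint64_t cur = 0xc0;

        toy_reprogram(&cur, 0xc0);   /* unchanged: reuse    */
        toy_reprogram(&cur, 0x2e);   /* changed: recreate   */
        return 0;
}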
499 struct kvm_pmc *pmc; in kvm_pmu_handle_event() local
512 kvm_for_each_pmc(pmu, pmc, bit, bitmap) { in kvm_pmu_handle_event()
519 if (reprogram_counter(pmc)) in kvm_pmu_handle_event()
520 set_bit(pmc->idx, pmu->reprogram_pmi); in kvm_pmu_handle_event()
540 * i.e. an invalid PMC results in a #GP, not #VMEXIT. in kvm_pmu_check_rdpmc_early()
585 struct kvm_pmc *pmc; in kvm_pmu_rdpmc() local
594 pmc = kvm_pmu_call(rdpmc_ecx_to_pmc)(vcpu, idx, &mask); in kvm_pmu_rdpmc()
595 if (!pmc) in kvm_pmu_rdpmc()
603 *data = pmc_read_counter(pmc) & mask; in kvm_pmu_rdpmc()
632 struct kvm_pmc *pmc = kvm_pmu_call(msr_idx_to_pmc)(vcpu, msr); in kvm_pmu_mark_pmc_in_use() local
634 if (pmc) in kvm_pmu_mark_pmc_in_use()
635 __set_bit(pmc->idx, pmu->pmc_in_use); in kvm_pmu_mark_pmc_in_use()
725 struct kvm_pmc *pmc; in kvm_pmu_reset() local
732 kvm_for_each_pmc(pmu, pmc, i, pmu->all_valid_pmc_idx) { in kvm_pmu_reset()
733 pmc_stop_counter(pmc); in kvm_pmu_reset()
734 pmc->counter = 0; in kvm_pmu_reset()
735 pmc->emulated_counter = 0; in kvm_pmu_reset()
737 if (pmc_is_gp(pmc)) in kvm_pmu_reset()
738 pmc->eventsel = 0; in kvm_pmu_reset()
806 struct kvm_pmc *pmc = NULL; in kvm_pmu_cleanup() local
815 kvm_for_each_pmc(pmu, pmc, i, bitmask) { in kvm_pmu_cleanup()
816 if (pmc->perf_event && !pmc_speculative_in_use(pmc)) in kvm_pmu_cleanup()
817 pmc_stop_counter(pmc); in kvm_pmu_cleanup()
830 static void kvm_pmu_incr_counter(struct kvm_pmc *pmc) in kvm_pmu_incr_counter() argument
832 pmc->emulated_counter++; in kvm_pmu_incr_counter()
833 kvm_pmu_request_counter_reprogram(pmc); in kvm_pmu_incr_counter()
836 static inline bool cpl_is_matched(struct kvm_pmc *pmc) in cpl_is_matched() argument
841 if (pmc_is_gp(pmc)) { in cpl_is_matched()
842 config = pmc->eventsel; in cpl_is_matched()
846 config = fixed_ctrl_field(pmc_to_pmu(pmc)->fixed_ctr_ctrl, in cpl_is_matched()
847 pmc->idx - KVM_FIXED_PMC_BASE_IDX); in cpl_is_matched()
859 return (kvm_x86_call(get_cpl)(pmc->vcpu) == 0) ? select_os : in cpl_is_matched()
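For a gp counter, cpl_is_matched() boils down to checking the OS and USR enable bits of the eventsel against the current guest CPL (fixed counters use the corresponding fixed_ctr_ctrl field instead). A tiny illustration, using the standard eventsel bit positions (USR = bit 16, OS = bit 17) rather than the kernel's named constants:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define EVENTSEL_USR (1ull << 16)   /* count at CPL > 0  */
#define EVENTSEL_OS  (1ull << 17)   /* count at CPL == 0 */

static bool cpl_matches(uint64_t eventsel, int cpl)
{
        return cpl == 0 ? !!(eventsel & EVENTSEL_OS)
                        : !!(eventsel & EVENTSEL_USR);
}

int main(void)
{
        uint64_t eventsel = 0xc0 | EVENTSEL_USR;   /* user-only counting */

        printf("CPL0 counted: %d, CPL3 counted: %d\n",
               cpl_matches(eventsel, 0), cpl_matches(eventsel, 3));
        return 0;
}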
867 struct kvm_pmc *pmc; in kvm_pmu_trigger_event() local
878 kvm_for_each_pmc(pmu, pmc, i, bitmap) { in kvm_pmu_trigger_event()
893 if (((pmc->eventsel ^ eventsel) & AMD64_RAW_EVENT_MASK_NB) || in kvm_pmu_trigger_event()
894 !pmc_event_is_allowed(pmc) || !cpl_is_matched(pmc)) in kvm_pmu_trigger_event()
897 kvm_pmu_incr_counter(pmc); in kvm_pmu_trigger_event()
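The XOR-and-mask test above compares only the event-select and unit-mask fields of the counter's eventsel against the emulated event, deliberately ignoring control bits such as EN/INT/OS/USR and the counter mask (the kernel expresses this with AMD64_RAW_EVENT_MASK_NB). A minimal version of that comparison; the mask value here is illustrative of that layout:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Event code (bits 0-7 and, on AMD, 32-35) plus unit mask (bits 8-15). */
#define EVENT_AND_UMASK_MASK ((0xfull << 32) | 0xffffull)

/* Does this counter count the emulated event, regardless of the
 * enable/interrupt/CPL/cmask control bits in its eventsel? */
static bool counts_event(uint64_t counter_eventsel, uint64_t event)
{
        return !((counter_eventsel ^ event) & EVENT_AND_UMASK_MASK);
}

int main(void)
{
        uint64_t eventsel = 0x5300c4;   /* event 0xc4 with EN/INT/OS/USR set */

        printf("%d %d\n", counts_event(eventsel, 0xc4),   /* 1: same event      */
                          counts_event(eventsel, 0xc5));  /* 0: different event */
        return 0;
}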