Lines Matching +full:spe +full:- +full:pmu

1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2015 - ARM Ltd
8 #include <hyp/sysreg-sr.h>
10 #include <linux/arm-smccc.h>
26 #include <asm/debug-monitors.h>
31 /* Non-VHE specific context */
99 ___activate_traps(vcpu, vcpu->arch.hcr_el2); in __activate_traps()
106 struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt; in __activate_traps()
145 write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2); in __deactivate_traps()
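The matches at lines 99 and 145 come from the paired trap-switch helpers (line 106 sits in an errata-workaround block inside __activate_traps() that is elided here). A minimal sketch of how the pair plausibly fits together; only the two matched lines are verbatim, and the signatures and the __activate_traps_common()/___deactivate_traps() wrappers are assumptions:

static void __activate_traps(struct kvm_vcpu *vcpu)
{
	/* Verbatim match (line 99): install the vcpu's cached HCR_EL2. */
	___activate_traps(vcpu, vcpu->arch.hcr_el2);
	__activate_traps_common(vcpu);		/* assumed shared helper */
}

static void __deactivate_traps(struct kvm_vcpu *vcpu)
{
	___deactivate_traps(vcpu);		/* assumed shared helper */

	/* Verbatim match (line 145): restore the host HCR_EL2 captured at init. */
	write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2);
}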
151 /* Save VGICv3 state on non-VHE systems */
155 __vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3); in __hyp_vgic_save_state()
156 __vgic_v3_deactivate_traps(&vcpu->arch.vgic_cpu.vgic_v3); in __hyp_vgic_save_state()
160 /* Restore VGICv3 state on non-VHE systems */
164 __vgic_v3_activate_traps(&vcpu->arch.vgic_cpu.vgic_v3); in __hyp_vgic_restore_state()
165 __vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3); in __hyp_vgic_restore_state()
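Both VGIC helpers have the same shape, and the ordering is deliberately mirrored: state is saved before traps are dropped, and traps are raised before state is reloaded. A sketch assuming the usual GICv3 CPU-interface static-branch guard around the four matched calls:

static void __hyp_vgic_save_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
		__vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3);
		__vgic_v3_deactivate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
	}
}

static void __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
		__vgic_v3_activate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
		__vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3);
	}
}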
175 struct kvm_pmu_events *pmu = &vcpu->arch.pmu.events; in __pmu_switch_to_guest()
177 if (pmu->events_host) in __pmu_switch_to_guest()
178 write_sysreg(pmu->events_host, pmcntenclr_el0); in __pmu_switch_to_guest()
180 if (pmu->events_guest) in __pmu_switch_to_guest()
181 write_sysreg(pmu->events_guest, pmcntenset_el0); in __pmu_switch_to_guest()
183 return (pmu->events_host || pmu->events_guest); in __pmu_switch_to_guest()
191 struct kvm_pmu_events *pmu = &vcpu->arch.pmu.events; in __pmu_switch_to_host()
193 if (pmu->events_guest) in __pmu_switch_to_host()
194 write_sysreg(pmu->events_guest, pmcntenclr_el0); in __pmu_switch_to_host()
196 if (pmu->events_host) in __pmu_switch_to_host()
197 write_sysreg(pmu->events_host, pmcntenset_el0); in __pmu_switch_to_host()
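PMCNTENSET_EL0 and PMCNTENCLR_EL0 are write-one-to-set / write-one-to-clear views of the same counter-enable mask, so each direction of the switch costs at most two register writes. A sketch of the full pair, with the signatures and the CONFIG_HW_PERF_EVENTS guard assumed; the bodies are essentially the matched lines above:

#ifdef CONFIG_HW_PERF_EVENTS
/* Disable host-only events, enable guest-only events. */
static bool __pmu_switch_to_guest(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu_events *pmu = &vcpu->arch.pmu.events;

	if (pmu->events_host)
		write_sysreg(pmu->events_host, pmcntenclr_el0);
	if (pmu->events_guest)
		write_sysreg(pmu->events_guest, pmcntenset_el0);

	/* Tell the caller whether a switch back is needed on guest exit. */
	return (pmu->events_host || pmu->events_guest);
}

/* Mirror image: disable guest-only events, re-enable host-only ones. */
static void __pmu_switch_to_host(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu_events *pmu = &vcpu->arch.pmu.events;

	if (pmu->events_guest)
		write_sysreg(pmu->events_guest, pmcntenclr_el0);
	if (pmu->events_host)
		write_sysreg(pmu->events_host, pmcntenset_el0);
}
#endif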
268 * As we have caught the guest red-handed, decide that it isn't in fixup_guest_exit()
270 * can try and fix it by re-initializing the vcpu with in fixup_guest_exit()
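The two truncated comment lines belong to the protected-VM exit fixup: on an illegal exit the vcpu is marked uninitialized so it cannot simply be re-run. A sketch of how the surrounding code plausibly reads; the completed comment text, the vcpu_clear_flag()/VCPU_INITIALIZED mechanism, and the ARM_EXCEPTION_IL return value are assumptions, not quotes of the file:

	/*
	 * As we have caught the guest red-handed, decide that it isn't
	 * fit for purpose anymore by making the vcpu invalid. The VMM
	 * can try and fix it by re-initializing the vcpu with
	 * KVM_ARM_VCPU_INIT, however, this is likely not possible for
	 * protected VMs.
	 */
	vcpu_clear_flag(vcpu, VCPU_INITIALIZED);	/* assumed */
	ret = ARM_EXCEPTION_IL;				/* assumed */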
282 /* Switch to the guest for legacy non-VHE systems */
303 host_ctxt->__hyp_running_vcpu = vcpu; in __kvm_vcpu_run()
304 guest_ctxt = &vcpu->arch.ctxt; in __kvm_vcpu_run()
310 * We must flush and disable the SPE buffer for nVHE, as in __kvm_vcpu_run()
320 * ongoing page-table walks that have started before we in __kvm_vcpu_run()
322 * above disabling of SPE and TRBE. in __kvm_vcpu_run()
324 * See DDI0487I.a D8.1.5 "Out-of-context translation regimes", in __kvm_vcpu_run()
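The sequence these comments describe is: save the host sysregs, drain and disable the SPE/TRBE buffers while the host translation regime is still live, then issue a DSB so that any page-table walks those units started have completed before the guest's MMU state is loaded. A sketch under those assumptions; the helper names are assumed and only the comment lines above are verbatim:

	__sysreg_save_state_nvhe(host_ctxt);	/* assumed: snapshot host EL1 state */

	/* Drain and disable SPE (and TRBE) before the host regime goes away. */
	__debug_save_host_buffers_nvhe(vcpu);	/* assumed helper name */

	/*
	 * Complete walks started by SPE/TRBE before the trap to EL2
	 * (DDI0487 "Out-of-context translation regimes").
	 */
	dsb(nsh);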
332 * We must restore the 32-bit state before the sysregs, thanks in __kvm_vcpu_run()
333 * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72). in __kvm_vcpu_run()
336 * and #1319367 (A72), we must ensure that all VM-related sysregs are in __kvm_vcpu_run()
342 mmu = kern_hyp_va(vcpu->arch.hw_mmu); in __kvm_vcpu_run()
343 __load_stage2(mmu, kern_hyp_va(mmu->arch)); in __kvm_vcpu_run()
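Per the erratum notes above, the restore order is fixed: 32-bit state first, then the guest's EL1 sysregs, and only then stage-2 translation and the traps. A sketch of that ordering; the last two lines are the verbatim matches, the rest is assumed:

	__sysreg32_restore_state(vcpu);		/* assumed: 32-bit state first (#852523/#853709) */
	__sysreg_restore_state_nvhe(guest_ctxt);/* assumed: all VM sysregs before S2 is on */

	mmu = kern_hyp_va(vcpu->arch.hw_mmu);		/* verbatim, line 342 */
	__load_stage2(mmu, kern_hyp_va(mmu->arch));	/* verbatim, line 343 */
	__activate_traps(vcpu);				/* assumed to follow */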
380 * This must come after restoring the host sysregs, since a non-VHE in __kvm_vcpu_run()
381 * system may enable SPE here and make use of the TTBRs. in __kvm_vcpu_run()
392 host_ctxt->__hyp_running_vcpu = NULL; in __kvm_vcpu_run()
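On the way back out the ordering inverts: the host sysregs are restored before the SPE/TRBE buffers are re-enabled (the comment at lines 380-381 gives the reason: SPE may immediately start using the host TTBRs), the PMU is switched back, and the running-vcpu marker is cleared. A sketch with the helper names and the pmu_switch_needed flag assumed:

	__sysreg_restore_state_nvhe(host_ctxt);	/* assumed: host TTBRs valid again */

	/* Only now may SPE/TRBE be re-enabled (see comment above). */
	__debug_restore_host_buffers_nvhe(vcpu);	/* assumed helper name */

	if (pmu_switch_needed)			/* assumed: saved from entry */
		__pmu_switch_to_host(vcpu);

	host_ctxt->__hyp_running_vcpu = NULL;	/* verbatim, line 392 */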
406 vcpu = host_ctxt->__hyp_running_vcpu; in hyp_panic()
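hyp_panic() relies on the same __hyp_running_vcpu bookkeeping: a non-NULL value means the panic hit while a guest was resident, so traps and guest state must be unwound before the panic is reported. A sketch with the per-CPU lookup, signature, and cleanup calls all assumed; only the marked line corresponds to the match above:

asmlinkage void __noreturn hyp_panic(void)	/* signature assumed */
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_vcpu *vcpu;

	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;	/* assumed lookup */
	vcpu = host_ctxt->__hyp_running_vcpu;			/* verbatim, line 406 */

	/* Non-NULL only if we panicked with a guest context loaded. */
	if (vcpu) {
		__deactivate_traps(vcpu);		/* assumed unwind */
		__sysreg_restore_state_nvhe(host_ctxt);	/* assumed unwind */
	}

	/* Report the panic and park the CPU (elided). */
	unreachable();
}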