Lines Matching +full:re +full:- +full:started
1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2015 - ARM Ltd
27 host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt; in enter_vmid_context()
28 vcpu = host_ctxt->__hyp_running_vcpu; in enter_vmid_context()
29 cxt->mmu = NULL; in enter_vmid_context()
34 * - ensure that the page table updates are visible to all in enter_vmid_context()
35 * CPUs, for which a dsb(DOMAIN-st) is what we need, DOMAIN in enter_vmid_context()
39 * - complete any speculative page table walk started before in enter_vmid_context()
45 * Inner-Shareable and Non-Shareable, as specified by the in enter_vmid_context()
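The comment fragment above (source lines 34-45) lays out two barrier requirements and notes that they compose into a single dsb whose domain depends on the invalidation scope. A minimal sketch of that composition, assuming a caller-provided 'nsh' flag that is not visible in these matches:

	/*
	 * Make the page table updates visible (the DOMAIN-st half) and
	 * complete any speculative walk started at EL1 (the nsh half).
	 * The composition is a full dsb in the relevant domain:
	 * inner-shareable for a broadcast invalidation, non-shareable
	 * for a purely local one.
	 */
	if (nsh)
		dsb(nsh);
	else
		dsb(ish);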
54 * If we're already in the desired context, then there's nothing to do. in enter_vmid_context()
58 * We're in guest context. However, for this to work, this needs in enter_vmid_context()
62 if (mmu == vcpu->arch.hw_mmu || WARN_ON(mmu != host_s2_mmu)) in enter_vmid_context()
65 cxt->mmu = vcpu->arch.hw_mmu; in enter_vmid_context()
67 /* We're in host context. */ in enter_vmid_context()
71 cxt->mmu = host_s2_mmu; in enter_vmid_context()
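The matches from source lines 54-71 are the two halves of the context check: with a running vCPU we are in guest context, otherwise in host context, and in both cases the function bails out early if the requested MMU is already the live one. A hedged reconstruction of how those branches plausibly fit together (the host-context early return is not among the matches):

	if (vcpu) {
		/* Guest context: the only foreign MMU we accept is the host's. */
		if (mmu == vcpu->arch.hw_mmu || WARN_ON(mmu != host_s2_mmu))
			return;
		cxt->mmu = vcpu->arch.hw_mmu;
	} else {
		/* Host context: nothing to do if the host stage-2 is already loaded. */
		if (mmu == host_s2_mmu)
			return;
		cxt->mmu = host_s2_mmu;
	}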
79 * avoid a Stage-1 walk with the old VMID while we have in enter_vmid_context()
81 * We're guaranteed that the host S1 MMU is enabled, so in enter_vmid_context()
86 val = cxt->tcr = read_sysreg_el1(SYS_TCR); in enter_vmid_context()
92 val = cxt->sctlr = read_sysreg_el1(SYS_SCTLR); in enter_vmid_context()
100 cxt->sctlr = SCTLR_ELx_M; in enter_vmid_context()
113 __load_stage2(mmu, kern_hyp_va(mmu->arch)); in enter_vmid_context()
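Source lines 79-113 cover the ARM64_WORKAROUND_SPECULATIVE_AT path (erratum 1319367) and the final switch to the target stage-2. The matched lines show TCR_EL1 and SCTLR_EL1 being saved into the context; the mask updates in the sketch below are inferred from the comment about setting the EPD bits rather than taken from the matches:

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		u64 val;

		/*
		 * Save TCR_EL1 and set EPD0/EPD1 so that no further
		 * Stage-1 TLB fill can happen with the old VMID.
		 */
		val = cxt->tcr = read_sysreg_el1(SYS_TCR);
		val |= TCR_EPD1_MASK | TCR_EPD0_MASK;
		write_sysreg_el1(val, SYS_TCR);

		/*
		 * Save SCTLR_EL1; the walk-suppression trick relies on
		 * the Stage-1 MMU looking enabled.
		 */
		val = cxt->sctlr = read_sysreg_el1(SYS_SCTLR);
		val |= SCTLR_ELx_M;
		write_sysreg_el1(val, SYS_SCTLR);

		isb();
	}

	/* Only now load the VMID/page tables we actually want to invalidate. */
	__load_stage2(mmu, kern_hyp_va(mmu->arch));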
120 struct kvm_s2_mmu *mmu = cxt->mmu; in exit_vmid_context()
124 host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt; in exit_vmid_context()
125 vcpu = host_ctxt->__hyp_running_vcpu; in exit_vmid_context()
131 __load_stage2(mmu, kern_hyp_va(mmu->arch)); in exit_vmid_context()
139 if (!(cxt->sctlr & SCTLR_ELx_M)) { in exit_vmid_context()
140 write_sysreg_el1(cxt->sctlr, SYS_SCTLR); in exit_vmid_context()
144 write_sysreg_el1(cxt->tcr, SYS_TCR); in exit_vmid_context()
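exit_vmid_context() (source lines 120-144) undoes the switch: the previous translation regime is reloaded, and on SPECULATIVE_AT-affected CPUs the saved SCTLR_EL1 is restored first, but only if the MMU had genuinely been off, followed by the original TCR_EL1 with the EPD bits cleared again. A sketch of that restore ordering, with the surrounding condition assumed rather than shown in the matches:

	/* Put the previously active VMID/page tables back. */
	__load_stage2(mmu, kern_hyp_va(mmu->arch));

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		/* Make sure the VTTBR write has taken effect. */
		isb();

		/* If the S1 MMU really was off, restore SCTLR_EL1 first... */
		if (!(cxt->sctlr & SCTLR_ELx_M)) {
			write_sysreg_el1(cxt->sctlr, SYS_SCTLR);
			isb();
		}

		/* ...then bring back the original TCR_EL1. */
		write_sysreg_el1(cxt->tcr, SYS_TCR);
	}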
158 * Instead, we invalidate Stage-2 for this IPA, and the in __kvm_tlb_flush_vmid_ipa()
159 * whole of Stage-1. Weep... in __kvm_tlb_flush_vmid_ipa()
165 * We have to ensure completion of the invalidation at Stage-2, in __kvm_tlb_flush_vmid_ipa()
167 * complete (S1 + S2) walk based on the old Stage-2 mapping if in __kvm_tlb_flush_vmid_ipa()
168 * the Stage-1 invalidation happened first. in __kvm_tlb_flush_vmid_ipa()
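The comment in __kvm_tlb_flush_vmid_ipa() (source lines 158-168) explains both the over-invalidation (Stage-2 by IPA, plus the whole of Stage-1) and the required ordering: the Stage-2 invalidation must complete before Stage-1 is touched, or another CPU could refill a combined S1+S2 entry from the stale Stage-2 mapping. A sketch of that sequence using the generic arm64 TLBI helpers (the function's exact level handling is not shown in the matches):

	/* TLBI takes the IPA shifted right by 12, per the encoding. */
	ipa >>= 12;

	/* Invalidate the Stage-2 entry for this IPA, broadcast (IS). */
	__tlbi_level(ipas2e1is, ipa, level);

	/*
	 * Wait for the Stage-2 invalidation to complete before touching
	 * Stage-1, so no walker can cache a combined walk based on the
	 * old Stage-2 mapping.
	 */
	dsb(ish);

	/* Now invalidate the whole of Stage-1 for this VMID. */
	__tlbi(vmalle1is);
	dsb(ish);
	isb();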
188 * Instead, we invalidate Stage-2 for this IPA, and the in __kvm_tlb_flush_vmid_ipa_nsh()
189 * whole of Stage-1. Weep... in __kvm_tlb_flush_vmid_ipa_nsh()
195 * We have to ensure completion of the invalidation at Stage-2, in __kvm_tlb_flush_vmid_ipa_nsh()
197 * complete (S1 + S2) walk based on the old Stage-2 mapping if in __kvm_tlb_flush_vmid_ipa_nsh()
198 * the Stage-1 invalidation happened first. in __kvm_tlb_flush_vmid_ipa_nsh()
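The _nsh variant (source lines 188-198) repeats the same argument, but the invalidation only has to be observed by the local CPU, so the non-shareable TLBI encodings and barriers would be the natural fit, for example:

	ipa >>= 12;
	__tlbi_level(ipas2e1, ipa, level);	/* Stage-2, this CPU only */
	dsb(nsh);				/* S2 done before S1 starts */
	__tlbi(vmalle1);			/* whole Stage-1 for this VMID */
	dsb(nsh);
	isb();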