Lines matching full:mmu (how these matches fit together is sketched after the listing)

14 	struct kvm_s2_mmu	*mmu;  member
19 static void enter_vmid_context(struct kvm_s2_mmu *mmu, in enter_vmid_context() argument
23 struct kvm_s2_mmu *host_s2_mmu = &host_mmu.arch.mmu; in enter_vmid_context()
29 cxt->mmu = NULL; in enter_vmid_context()
62 if (mmu == vcpu->arch.hw_mmu || WARN_ON(mmu != host_s2_mmu)) in enter_vmid_context()
65 cxt->mmu = vcpu->arch.hw_mmu; in enter_vmid_context()
68 if (mmu == host_s2_mmu) in enter_vmid_context()
71 cxt->mmu = host_s2_mmu; in enter_vmid_context()
81 * We're guaranteed that the host S1 MMU is enabled, so in enter_vmid_context()
83 * TLB fill. For guests, we ensure that the S1 MMU is in enter_vmid_context()
99 /* The host S1 MMU is always enabled. */ in enter_vmid_context()
113 __load_stage2(mmu, kern_hyp_va(mmu->arch)); in enter_vmid_context()
120 struct kvm_s2_mmu *mmu = cxt->mmu; in exit_vmid_context() local
127 if (!mmu) in exit_vmid_context()
131 __load_stage2(mmu, kern_hyp_va(mmu->arch)); in exit_vmid_context()
148 void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, in __kvm_tlb_flush_vmid_ipa() argument
154 enter_vmid_context(mmu, &cxt, false); in __kvm_tlb_flush_vmid_ipa()
178 void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu, in __kvm_tlb_flush_vmid_ipa_nsh() argument
184 enter_vmid_context(mmu, &cxt, true); in __kvm_tlb_flush_vmid_ipa_nsh()
208 void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu, in __kvm_tlb_flush_vmid_range() argument
222 enter_vmid_context(mmu, &cxt, false); in __kvm_tlb_flush_vmid_range()
235 void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu) in __kvm_tlb_flush_vmid() argument
240 enter_vmid_context(mmu, &cxt, false); in __kvm_tlb_flush_vmid()
249 void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu) in __kvm_flush_cpu_context() argument
254 enter_vmid_context(mmu, &cxt, false); in __kvm_flush_cpu_context()
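
The enter/exit pair is the core of what these matches show: enter_vmid_context() records in cxt->mmu which stage-2 context must be restored afterwards, leaving it NULL when the requested MMU is already live, and exit_vmid_context() undoes the switch. The following is a minimal sketch of that control flow, not a copy of the file: it assumes the nVHE hyp environment (struct kvm_s2_mmu, struct tlb_inv_context, host_mmu, __load_stage2(), kern_hyp_va()), elides the S1 MMU save/restore and barrier handling referred to by the comments above, and uses a hypothetical get_running_vcpu_or_null() helper to stand in for the per-CPU vCPU lookup.

static void enter_vmid_context(struct kvm_s2_mmu *mmu,
			       struct tlb_inv_context *cxt,
			       bool nsh)
{
	struct kvm_s2_mmu *host_s2_mmu = &host_mmu.arch.mmu;
	/* Hypothetical stand-in for the per-CPU "running vCPU" lookup. */
	struct kvm_vcpu *vcpu = get_running_vcpu_or_null();

	cxt->mmu = NULL;

	if (vcpu) {
		/* Guest context: the vCPU's stage-2 is already live. */
		if (mmu == vcpu->arch.hw_mmu || WARN_ON(mmu != host_s2_mmu))
			return;
		cxt->mmu = vcpu->arch.hw_mmu;
	} else {
		/* Host context: the host's stage-2 is already live. */
		if (mmu == host_s2_mmu)
			return;
		cxt->mmu = host_s2_mmu;
	}

	/* ... save the current S1 MMU state and block further TLB fill ... */

	__load_stage2(mmu, kern_hyp_va(mmu->arch));
}

static void exit_vmid_context(struct tlb_inv_context *cxt)
{
	struct kvm_s2_mmu *mmu = cxt->mmu;

	/* NULL means the requested context was already loaded: nothing to undo. */
	if (!mmu)
		return;

	__load_stage2(mmu, kern_hyp_va(mmu->arch));

	/* ... restore the saved S1 MMU state ... */
}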
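Every __kvm_tlb_flush_*() entry point in the listing then follows the same bracketing pattern: switch to the requested VMID, perform the invalidation plus barriers, switch back. The sketch below shows that shape for __kvm_tlb_flush_vmid(); it assumes the arm64 __tlbi()/dsb()/isb() macros, and the exact TLBI operations and barrier sequence differ per helper (and per kernel version), so treat it as illustrative rather than authoritative.

void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu)
{
	struct tlb_inv_context cxt;

	/* Make @mmu's VMID the live stage-2 context (no-op if it already is). */
	enter_vmid_context(mmu, &cxt, false);

	__tlbi(vmalls12e1is);	/* invalidate all S1+S2 entries for this VMID */
	dsb(ish);		/* wait for the invalidation to complete */
	isb();

	/* Put the previously live stage-2 context back. */
	exit_vmid_context(&cxt);
}

The ranged and per-IPA variants (__kvm_tlb_flush_vmid_range(), __kvm_tlb_flush_vmid_ipa(), __kvm_tlb_flush_vmid_ipa_nsh()) differ only in the invalidation they issue between the same enter/exit calls, with the nsh variant passing true to request non-shareable barriers.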