Lines Matching full:mmu

14 	struct kvm_s2_mmu	*mmu;  member
20 static void enter_vmid_context(struct kvm_s2_mmu *mmu, in enter_vmid_context() argument
28 if (vcpu && mmu != vcpu->arch.hw_mmu) in enter_vmid_context()
29 cxt->mmu = vcpu->arch.hw_mmu; in enter_vmid_context()
31 cxt->mmu = NULL; in enter_vmid_context()
41 * allocate IPA->PA walks, so we enable the S1 MMU... in enter_vmid_context()
63 __load_stage2(mmu, mmu->arch); in enter_vmid_context()
79 /* ... and the stage-2 MMU context that we switched away from */ in exit_vmid_context()
80 if (cxt->mmu) in exit_vmid_context()
81 __load_stage2(cxt->mmu, cxt->mmu->arch); in exit_vmid_context()
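
The enter_vmid_context()/exit_vmid_context() matches above outline a save-and-restore pattern: when the stage-2 MMU being operated on is not the one the running vCPU currently has loaded, the live context is remembered in cxt->mmu and reloaded on exit. Below is a minimal standalone model of that pattern; struct s2_mmu, load_stage2() and the hw_mmu variable are simplified stand-ins for illustration, not the kernel's own types or helpers.

/*
 * Standalone model of the save/switch/restore pattern suggested by the
 * enter_vmid_context()/exit_vmid_context() lines above. All types and the
 * load_stage2() helper are simplified stand-ins, not the kernel's.
 */
#include <stdio.h>
#include <stddef.h>

struct s2_mmu { int vmid; };

struct tlb_inv_context {
        struct s2_mmu *mmu;     /* context switched away from, or NULL */
};

static struct s2_mmu *hw_mmu;   /* stand-in for vcpu->arch.hw_mmu */

static void load_stage2(struct s2_mmu *mmu)
{
        hw_mmu = mmu;
        printf("stage-2 context: VMID %d\n", mmu->vmid);
}

static void enter_vmid_context(struct s2_mmu *mmu, struct tlb_inv_context *cxt)
{
        /* Remember the live context only if we are actually switching. */
        if (hw_mmu && mmu != hw_mmu)
                cxt->mmu = hw_mmu;
        else
                cxt->mmu = NULL;

        load_stage2(mmu);
}

static void exit_vmid_context(struct tlb_inv_context *cxt)
{
        /* ... and restore the stage-2 context we switched away from. */
        if (cxt->mmu)
                load_stage2(cxt->mmu);
}

int main(void)
{
        struct s2_mmu guest_a = { .vmid = 1 }, guest_b = { .vmid = 2 };
        struct tlb_inv_context cxt;

        load_stage2(&guest_a);              /* vCPU is running guest A   */
        enter_vmid_context(&guest_b, &cxt); /* switch to guest B's VMID  */
        /* ... TLB maintenance for guest B would go here ...             */
        exit_vmid_context(&cxt);            /* back to guest A           */
        return 0;
}
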
92 void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, in __kvm_tlb_flush_vmid_ipa() argument
100 enter_vmid_context(mmu, &cxt); in __kvm_tlb_flush_vmid_ipa()
124 void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu, in __kvm_tlb_flush_vmid_ipa_nsh() argument
132 enter_vmid_context(mmu, &cxt); in __kvm_tlb_flush_vmid_ipa_nsh()
156 void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu, in __kvm_tlb_flush_vmid_range() argument
172 enter_vmid_context(mmu, &cxt); in __kvm_tlb_flush_vmid_range()
185 void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu) in __kvm_tlb_flush_vmid() argument
192 enter_vmid_context(mmu, &cxt); in __kvm_tlb_flush_vmid()
201 void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu) in __kvm_flush_cpu_context() argument
206 enter_vmid_context(mmu, &cxt); in __kvm_flush_cpu_context()
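
Each flush helper matched above (__kvm_tlb_flush_vmid_ipa(), __kvm_tlb_flush_vmid_ipa_nsh(), __kvm_tlb_flush_vmid_range(), __kvm_tlb_flush_vmid(), __kvm_flush_cpu_context()) enters the VMID context before the invalidation and, presumably, restores it afterwards. A condensed, self-contained sketch of that shape follows; the invalidate_ipa() step is a placeholder for the real TLBI sequence, and the condensed enter/exit helpers mirror the fuller model above.

/*
 * Sketch of the wrapper shape shared by the flush helpers listed above:
 * enter_vmid_context() / do the invalidation / exit_vmid_context().
 * Types, helpers and the invalidation step are simplified stand-ins.
 */
#include <stdio.h>
#include <stddef.h>

struct s2_mmu { int vmid; };
struct tlb_inv_context { struct s2_mmu *mmu; };

static struct s2_mmu *hw_mmu;   /* currently loaded stage-2 context */

static void enter_vmid_context(struct s2_mmu *mmu, struct tlb_inv_context *cxt)
{
        cxt->mmu = (hw_mmu && hw_mmu != mmu) ? hw_mmu : NULL;
        hw_mmu = mmu;
}

static void exit_vmid_context(struct tlb_inv_context *cxt)
{
        if (cxt->mmu)
                hw_mmu = cxt->mmu;
}

/* Placeholder for the TLBI instruction sequence of a real helper. */
static void invalidate_ipa(unsigned long long ipa)
{
        printf("invalidate IPA 0x%llx under VMID %d\n", ipa, hw_mmu->vmid);
}

/* Mirrors the __kvm_tlb_flush_vmid_ipa()-style call pattern. */
static void flush_vmid_ipa_sketch(struct s2_mmu *mmu, unsigned long long ipa)
{
        struct tlb_inv_context cxt;

        enter_vmid_context(mmu, &cxt); /* switch to the target VMID    */
        invalidate_ipa(ipa);           /* do the actual invalidation   */
        exit_vmid_context(&cxt);       /* restore the previous context */
}

int main(void)
{
        struct s2_mmu guest = { .vmid = 7 };

        hw_mmu = &guest;
        flush_vmid_ipa_sketch(&guest, 0x80000000ULL);
        return 0;
}
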
232 int __kvm_tlbi_s1e2(struct kvm_s2_mmu *mmu, u64 va, u64 sys_encoding) in __kvm_tlbi_s1e2() argument
244 if (mmu) in __kvm_tlbi_s1e2()
245 enter_vmid_context(mmu, &cxt); in __kvm_tlbi_s1e2()
364 if (mmu) in __kvm_tlbi_s1e2()
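
__kvm_tlbi_s1e2() differs from the helpers above in that its mmu argument may be NULL: the VMID context is entered only when a stage-2 MMU is supplied (line 244), and the second "if (mmu)" at line 364 presumably guards the matching exit, so with mmu == NULL the stage-1 invalidation runs in whatever context is already loaded. A minimal sketch of just that control flow, with stand-in types and a placeholder invalidation:

/*
 * Sketch of the optional-context pattern visible in the __kvm_tlbi_s1e2()
 * matches: enter/exit the VMID context only when mmu is non-NULL.
 * Types, helpers and the invalidation are simplified stand-ins.
 */
#include <stdio.h>
#include <stddef.h>

struct s2_mmu { int vmid; };
struct tlb_inv_context { struct s2_mmu *mmu; };

static void enter_vmid_context(struct s2_mmu *mmu, struct tlb_inv_context *cxt)
{
        cxt->mmu = NULL;        /* condensed; see the model further up */
        printf("entered VMID %d context\n", mmu->vmid);
}

static void exit_vmid_context(struct tlb_inv_context *cxt)
{
        (void)cxt;
        printf("restored previous context\n");
}

static int tlbi_s1e2_sketch(struct s2_mmu *mmu, unsigned long long va)
{
        struct tlb_inv_context cxt;

        if (mmu)                                /* optional context switch */
                enter_vmid_context(mmu, &cxt);

        printf("stage-1 invalidate VA 0x%llx\n", va);   /* placeholder TLBI */

        if (mmu)                                /* matching conditional exit */
                exit_vmid_context(&cxt);

        return 0;
}

int main(void)
{
        struct s2_mmu guest = { .vmid = 3 };

        tlbi_s1e2_sketch(NULL, 0x12345000ULL);   /* current context         */
        tlbi_s1e2_sketch(&guest, 0x12345000ULL); /* guest's stage-2 context */
        return 0;
}
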