Lines matching the whole word "fault" in KVM's x86 MMU internals (arch/x86/kvm/mmu/mmu_internal.h):

From the struct kvm_page_fault definition:

	/*
	 * Maximum page size that can be created for this fault; input to
	 * FNAME(fetch), direct_map() and kvm_tdp_mmu_map().
	 */
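This comment documents the max_level field of struct kvm_page_fault. For orientation, a pared-down sketch of the struct as it looks in recent kernels; the exact member list varies by version, so take the fields shown here as an approximation rather than the authoritative layout:

	struct kvm_page_fault {
		/* Raw faulting address plus decoded error-code bits. */
		const gpa_t addr;
		const bool exec;
		const bool write;
		const bool is_private;

		/* Derived while the fault is handled. */
		gfn_t gfn;
		struct kvm_memory_slot *slot;

		/* Maximum page size that can be created for this fault. */
		u8 max_level;
		/* Level at which the fault actually got mapped. */
		u8 goal_level;
	};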
int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
From the comment describing the return codes of the page-fault handlers (elided lines marked with ...):

	/*
	 ...
	 * RET_PF_CONTINUE: So far, so good, keep handling the page fault.
	 * RET_PF_RETRY: let CPU fault again on the address.
	 * RET_PF_EMULATE: mmio page fault, emulate the instruction directly.
	 ...
	 * RET_PF_INVALID: the spte is invalid, let the real page fault path update it.
	 ...
	 */
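These comment lines document the RET_PF_* codes that the fault handlers return and kvm_mmu_do_page_fault() propagates. A sketch of the corresponding anonymous enum as it appears in recent kernels; the exact member set varies by version (RET_PF_WRITE_PROTECTED, for instance, is a relatively new addition), so treat this as an assumption:

	enum {
		RET_PF_CONTINUE = 0,	/* keep handling the fault */
		RET_PF_RETRY,		/* let the CPU fault again */
		RET_PF_EMULATE,		/* MMIO: emulate the instruction */
		RET_PF_WRITE_PROTECTED,	/* gfn is write-protected */
		RET_PF_INVALID,		/* SPTE invalid, take the slow path */
		RET_PF_FIXED,		/* the faulting entry has been fixed */
		RET_PF_SPURIOUS,	/* already fixed, e.g. by another vCPU */
	};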
static inline void kvm_mmu_prepare_memory_fault_exit(struct kvm_vcpu *vcpu,
						     struct kvm_page_fault *fault)
{
	kvm_prepare_memory_fault_exit(vcpu, fault->gfn << PAGE_SHIFT,
				      PAGE_SIZE, fault->write, fault->exec,
				      fault->is_private);
}
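This helper fills in the KVM_EXIT_MEMORY_FAULT information that userspace sees when KVM_RUN returns. A minimal sketch of the VMM side, using only the documented kvm_run UAPI fields; the handler name is hypothetical:

	#include <stdbool.h>
	#include <stdio.h>
	#include <linux/kvm.h>

	/* Hypothetical VMM-side handler, called after KVM_RUN comes back
	 * with -EFAULT and the kvm_run exit fields filled in. */
	static void handle_memory_fault_exit(struct kvm_run *run)
	{
		if (run->exit_reason != KVM_EXIT_MEMORY_FAULT)
			return;

		/* gpa/size describe the faulting range; the PRIVATE flag
		 * says the guest accessed it as private memory. */
		bool private = run->memory_fault.flags &
			       KVM_MEMORY_EXIT_FLAG_PRIVATE;

		/* A VMM typically reacts by converting the range with
		 * KVM_SET_MEMORY_ATTRIBUTES and re-entering the guest. */
		fprintf(stderr, "memory fault: gpa=%#llx size=%#llx (%s)\n",
			(unsigned long long)run->memory_fault.gpa,
			(unsigned long long)run->memory_fault.size,
			private ? "private" : "shared");
	}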
From kvm_mmu_do_page_fault() (elided lines marked with ...):

	struct kvm_page_fault fault = {
		...
	};
	...
	/*
	 * Things like memslots don't understand the concept of a shared bit.
	 * Strip it so that the GFN can be used like normal, and the
	 * fault.addr can be used when the shared bit is needed.
	 */
	fault.gfn = gpa_to_gfn(fault.addr) & ~kvm_gfn_direct_bits(vcpu->kvm);
	fault.slot = kvm_vcpu_gfn_to_memslot(vcpu, fault.gfn);
	...
	if (IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) && fault.is_tdp)
		r = kvm_tdp_page_fault(vcpu, &fault);
	else
		r = vcpu->arch.mmu->page_fault(vcpu, &fault);
	...
	if (r == RET_PF_EMULATE && fault.is_private) {
		...
		kvm_mmu_prepare_memory_fault_exit(vcpu, &fault);
		...
	}
	...
	if (fault.write_fault_to_shadow_pgtable && emulation_type)
		...
	*level = fault.goal_level;
	...
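Two details in this excerpt deserve a note. The IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) check replaces the indirect mmu->page_fault() call with a direct call to kvm_tdp_page_fault() in the common TDP case, because retpolines make indirect calls expensive. And the gfn computation masks off kvm_gfn_direct_bits(), which on TDX-style VMs removes the shared bit from the GFN so that memslot lookups see a canonical frame number. A toy, self-contained illustration of that masking; the bit position chosen is an assumption for the example, not a property of any real VM:

	#include <stdint.h>
	#include <stdio.h>

	#define PAGE_SHIFT	12

	int main(void)
	{
		/* Hypothetical: shared bit at GPA bit 51 => GFN bit 39. */
		uint64_t gfn_direct_bits = 1ULL << (51 - PAGE_SHIFT);
		uint64_t gpa = (1ULL << 51) | 0x1234000;	/* shared access */

		uint64_t raw_gfn = gpa >> PAGE_SHIFT;
		uint64_t gfn = raw_gfn & ~gfn_direct_bits;	/* canonical GFN */

		printf("raw gfn %#llx -> memslot gfn %#llx\n",
		       (unsigned long long)raw_gfn,
		       (unsigned long long)gfn);
		return 0;
	}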
void kvm_mmu_hugepage_adjust(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);
void disallowed_hugepage_adjust(struct kvm_page_fault *fault, u64 spte, int cur_level);
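These two declarations close the excerpt: kvm_mmu_hugepage_adjust() picks the largest level a fault may be mapped at, and disallowed_hugepage_adjust() demotes the level during the page-table walk when a huge mapping cannot be used. A minimal sketch of the clamping idea only, under assumed names (pick_goal_level and huge_disallowed are illustrative, not kernel functions):

	#include <stdbool.h>

	/* Hypothetical sketch: start from the maximum level the fault
	 * allows and demote until the mapping is legal. */
	static int pick_goal_level(int max_level, int host_level,
				   bool (*huge_disallowed)(int level))
	{
		/* Cannot map larger than the host backing allows. */
		int level = max_level < host_level ? max_level : host_level;

		/* Fall back to smaller pages while huge mappings are
		 * disallowed, e.g. due to write-protected shadow pages. */
		while (level > 1 && huge_disallowed(level))
			level--;

		return level;	/* plays the role of fault->goal_level */
	}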