Lines Matching +full:page +full:- +full:level

1 /* SPDX-License-Identifier: GPL-2.0 */
17 /* Page table builder macros common to shadow (host) PTEs and guest PTEs. */
19 #define __PT_LEVEL_SHIFT(level, bits_per_level) \
20 (PAGE_SHIFT + ((level) - 1) * (bits_per_level))
21 #define __PT_INDEX(address, level, bits_per_level) \
22 (((address) >> __PT_LEVEL_SHIFT(level, bits_per_level)) & ((1 << (bits_per_level)) - 1))
24 #define __PT_LVL_ADDR_MASK(base_addr_mask, level, bits_per_level) \
25 ((base_addr_mask) & ~((1ULL << (PAGE_SHIFT + (((level) - 1) * (bits_per_level)))) - 1))
27 #define __PT_LVL_OFFSET_MASK(base_addr_mask, level, bits_per_level) \
28 ((base_addr_mask) & ((1ULL << (PAGE_SHIFT + (((level) - 1) * (bits_per_level)))) - 1))
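
The __PT_* builder macros above are pure arithmetic, so their effect is easy to check in isolation. Below is a minimal userspace sketch, assuming 4 KiB base pages (PAGE_SHIFT == 12) and 9 bits per table level as in x86-64 long-mode paging; the sample address and the all-ones base_addr_mask are made up for illustration, not taken from the file.

/* Minimal userspace sketch of the __PT_* helpers, assuming 4 KiB pages
 * (PAGE_SHIFT == 12) and 9 bits per table level as in x86-64 long-mode
 * paging. Not kernel code, just the arithmetic. */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

#define __PT_LEVEL_SHIFT(level, bits_per_level) \
	(PAGE_SHIFT + ((level) - 1) * (bits_per_level))
#define __PT_INDEX(address, level, bits_per_level) \
	(((address) >> __PT_LEVEL_SHIFT(level, bits_per_level)) & \
	 ((1 << (bits_per_level)) - 1))
#define __PT_LVL_OFFSET_MASK(base_addr_mask, level, bits_per_level) \
	((base_addr_mask) & \
	 ((1ULL << (PAGE_SHIFT + (((level) - 1) * (bits_per_level)))) - 1))

int main(void)
{
	uint64_t addr = 0x7f1234567000ULL;	/* arbitrary example address */
	int level;

	/* Shift and table index at each of the four levels: level 1 maps
	 * 4 KiB, level 2 maps 2 MiB, level 3 maps 1 GiB. */
	for (level = 1; level <= 4; level++)
		printf("level %d: shift %2d, index %llu\n", level,
		       __PT_LEVEL_SHIFT(level, 9),
		       (unsigned long long)__PT_INDEX(addr, level, 9));

	/* Byte offset of addr inside its level-2 (2 MiB) region, using an
	 * all-ones base_addr_mask so only the level mask matters here. */
	printf("2M offset: 0x%llx\n",
	       (unsigned long long)(addr & __PT_LVL_OFFSET_MASK(~0ULL, 2, 9)));

	return 0;
}
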
34 * bit, and thus are guaranteed to be non-zero when valid. And, when a guest
57 * 64-bit kernels, keep it that way unless there's a reason not to.
72 * The shadow page can't be replaced by an equivalent huge page
73 * because it is being used to map an executable page in the guest
74 * and the NX huge page mitigation is enabled.
79 * The following two entries are used to key the shadow page in the
89 * SPTE. KVM shadows two types of guest translations: nGPA -> GPA
90 * (shadow EPT/NPT) and GVA -> GPA (traditional shadow paging). In both
112 * visited this page.
117 * Page table page of external PT.
130 * huge page. A shadow page will have nx_huge_page_disallowed set but
131 * not be on the list if a huge page is disallowed for other reasons,
138 * Used out of the mmu-lock to avoid reading spte values while an
145 /* Used for freeing the page asynchronously if it is a TDP MMU page. */
159 return kvm_mmu_role_as_id(sp->role);
164 return sp->role.is_mirror;
171 * TDX module will initialize the page by itself.
173 * KVM only interacts with sp->spt for private EPT operations.
175 sp->external_spt = kvm_mmu_memory_cache_alloc(&vcpu->arch.mmu_external_spt_cache);
182 * at its "natural" GFN, no mask needs to be applied to them - and, dually,
193 * When using the EPT page-modification log, the GPAs in the CPU dirty
197 * being enabled is mandatory as the bits used to denote WP-only SPTEs
198 * are reserved for PAE paging (32-bit KVM).
200 return kvm_x86_ops.cpu_dirty_log_size && sp->role.guest_mode;
203 static inline gfn_t gfn_round_for_level(gfn_t gfn, int level)
205 return gfn & -KVM_PAGES_PER_HPAGE(level);
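
gfn_round_for_level() relies on KVM_PAGES_PER_HPAGE() being a power of two, so negating it yields a mask that clears the low bits and rounds the GFN down to the first page of its huge page. A self-contained sketch of that trick follows; the pages_per_hpage() helper assumes 4 KiB base pages and 9 bits per level, which is an assumption about KVM_PAGES_PER_HPAGE() rather than a definition taken from this file.

/* Userspace sketch of gfn_round_for_level(): gfn & -pages rounds a GFN
 * down to the start of the huge page containing it, because negating a
 * power of two produces a mask with the low log2(pages) bits clear.
 * Assumes 4 KiB base pages, so a level-2 (2 MiB) page spans 512 GFNs. */
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;

/* Assumed stand-in for KVM_PAGES_PER_HPAGE() with 9 bits per level. */
static inline gfn_t pages_per_hpage(int level)
{
	return 1ULL << ((level - 1) * 9);
}

static inline gfn_t gfn_round_for_level(gfn_t gfn, int level)
{
	return gfn & -pages_per_hpage(level);
}

int main(void)
{
	gfn_t gfn = 0x123456;	/* arbitrary example GFN */
	int level;

	for (level = 1; level <= 3; level++)
		printf("level %d: 0x%llx -> 0x%llx\n", level,
		       (unsigned long long)gfn,
		       (unsigned long long)gfn_round_for_level(gfn, level));
	return 0;
}
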
217 /* Flush the given page (huge or not) of guest memory. */
218 static inline void kvm_flush_remote_tlbs_gfn(struct kvm *kvm, gfn_t gfn, int level)
220 kvm_flush_remote_tlbs_range(kvm, gfn_round_for_level(gfn, level),
221 KVM_PAGES_PER_HPAGE(level));
229 return READ_ONCE(nx_huge_pages) && !kvm->arch.disable_nx_huge_pages;
257 * Maximum page size that can be created for this fault; input to
263 * Page size that can be created based on the max_level and the
264 * page size used by the host mapping.
269 * Page size that will be created based on the req_level and
275 * Shifted addr, or result of guest page table walk if addr is a gva. In
288 struct page *refcounted_page;
294 * is changing its own translation in the guest page tables.
305 * RET_PF_CONTINUE: So far, so good, keep handling the page fault.
307 * RET_PF_EMULATE: mmio page fault, emulate the instruction directly.
308 * RET_PF_WRITE_PROTECTED: the gfn is write-protected, either unprotect the
310 * RET_PF_INVALID: the spte is invalid, let the real page fault path update it.
318 * on -errno return values.
332 * - efficient machine code when checking for CONTINUE, e.g.
333 * "TEST %rax, %rax, JNZ", as all "stop!" values are non-zero,
334 * - kvm_mmu_do_page_fault() to return other RET_PF_* as a positive value.
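
The RET_PF_* comments above spell out the encoding contract: RET_PF_CONTINUE must be zero so the common "keep going?" check compiles to a bare TEST plus conditional jump, and every other code is a small positive value so it can coexist with -errno returns. A sketch of that scheme, restricted to the codes visible in the matched lines, with a hypothetical handle_fault_step() caller:

/* Sketch of the RET_PF_* encoding described above: RET_PF_CONTINUE is 0
 * so the "did anything go wrong yet?" check compiles to a single TEST +
 * conditional jump, and the remaining codes are positive so callers can
 * still tell them apart from -errno values. Only the codes visible in
 * the matched lines are listed; the real enum has more members. */
enum ret_pf_sketch {
	RET_PF_CONTINUE = 0,	/* so far so good, keep handling the fault */
	RET_PF_EMULATE,		/* MMIO fault, emulate the instruction */
	RET_PF_WRITE_PROTECTED,	/* the gfn is write-protected */
	RET_PF_INVALID,		/* SPTE invalid, take the real fault path */
};

/* Hypothetical caller: any non-zero code stops further fault handling. */
static int handle_fault_step(int r)
{
	if (r != RET_PF_CONTINUE)	/* one TEST, one conditional jump */
		return r;		/* propagate the positive RET_PF_* code */
	/* ...keep handling the page fault... */
	return RET_PF_CONTINUE;
}
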
341 kvm_prepare_memory_fault_exit(vcpu, fault->gfn << PAGE_SHIFT,
342 PAGE_SIZE, fault->write, fault->exec,
343 fault->is_private);
348 int *emulation_type, u8 *level)
359 .is_tdp = likely(vcpu->arch.mmu->page_fault == kvm_tdp_page_fault),
361 is_nx_huge_page_enabled(vcpu->kvm),
372 if (vcpu->arch.mmu->root_role.direct) {
378 fault.gfn = gpa_to_gfn(fault.addr) & ~kvm_gfn_direct_bits(vcpu->kvm);
389 r = vcpu->arch.mmu->page_fault(vcpu, &fault);
399 return -EFAULT;
404 if (level)
405 *level = fault.goal_level;
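
The final matches show the tail of kvm_mmu_do_page_fault()'s contract: optional out-pointers for emulation_type and the resulting mapping level, -EFAULT when a memory-fault exit is prepared, and positive RET_PF_* codes otherwise. The sketch below imitates that calling convention with a hypothetical do_fault() stand-in, since the function's leading parameters are not among the matched lines.

/* Hypothetical caller shape mirroring kvm_mmu_do_page_fault()'s tail:
 * out-pointers that are only written when non-NULL, and a return value
 * that is either a negative errno or a positive RET_PF_*-style code.
 * do_fault() is a stand-in, not the real KVM function. */
#include <stddef.h>
#include <stdint.h>

typedef uint8_t u8;

static int do_fault(int *emulation_type, u8 *level)
{
	u8 goal_level = 2;		/* pretend a 2 MiB mapping was installed */

	if (emulation_type)		/* out-params are optional... */
		*emulation_type = 0;
	if (level)			/* ...mirroring "if (level) *level = ..." */
		*level = goal_level;
	return 1;			/* positive success-style code */
}

static int caller(void)
{
	u8 level;
	int r = do_fault(NULL, &level);	/* this caller ignores emulation_type */

	if (r < 0)
		return r;		/* negative errno, e.g. -EFAULT */
	/* r is a positive code; level reports the mapping size that was used. */
	return 0;
}
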