/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_MMU_H
#define __KVM_X86_MMU_H

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"
#include "x86.h"
#include "cpuid.h"

extern bool __read_mostly enable_mmio_caching;

#define PT_WRITABLE_SHIFT 1
#define PT_USER_SHIFT 2

#define PT_PRESENT_MASK (1ULL << 0)
#define PT_WRITABLE_MASK (1ULL << PT_WRITABLE_SHIFT)
#define PT_USER_MASK (1ULL << PT_USER_SHIFT)
#define PT_PWT_MASK (1ULL << 3)
#define PT_PCD_MASK (1ULL << 4)
#define PT_ACCESSED_SHIFT 5
#define PT_ACCESSED_MASK (1ULL << PT_ACCESSED_SHIFT)
#define PT_DIRTY_SHIFT 6
#define PT_DIRTY_MASK (1ULL << PT_DIRTY_SHIFT)
#define PT_PAGE_SIZE_SHIFT 7
#define PT_PAGE_SIZE_MASK (1ULL << PT_PAGE_SIZE_SHIFT)
#define PT_PAT_MASK (1ULL << 7)
#define PT_GLOBAL_MASK (1ULL << 8)
#define PT64_NX_SHIFT 63
#define PT64_NX_MASK (1ULL << PT64_NX_SHIFT)

#define PT_PAT_SHIFT 7
#define PT_DIR_PAT_SHIFT 12
#define PT_DIR_PAT_MASK (1ULL << PT_DIR_PAT_SHIFT)

#define PT64_ROOT_5LEVEL 5
#define PT64_ROOT_4LEVEL 4
#define PT32_ROOT_LEVEL 2
#define PT32E_ROOT_LEVEL 3

#define KVM_MMU_CR4_ROLE_BITS (X86_CR4_PSE | X86_CR4_PAE | X86_CR4_LA57 | \
			       X86_CR4_SMEP | X86_CR4_SMAP | X86_CR4_PKE)

#define KVM_MMU_CR0_ROLE_BITS (X86_CR0_PG | X86_CR0_WP)
#define KVM_MMU_EFER_ROLE_BITS (EFER_LME | EFER_NX)
static __always_inline u64 rsvd_bits(int s, int e)
{
	BUILD_BUG_ON(__builtin_constant_p(e) && __builtin_constant_p(s) && e < s);

	if (__builtin_constant_p(e))
		BUILD_BUG_ON(e > 63);
	else
		e &= 63;

	if (e < s)
		return 0;

	return ((2ULL << (e - s)) - 1) << s;
}
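
/*
 * Illustrative example (not part of the original source): rsvd_bits(36, 51)
 * returns 0x000ffff000000000ULL, i.e. bits 51:36 set, the shape of mask used
 * to flag physical-address bits above a 36-bit MAXPHYADDR.  Note the
 * "2ULL << (e - s)" form: it avoids the undefined 64-bit shift that
 * "1ULL << (e - s + 1)" would trigger for rsvd_bits(0, 63).
 */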

static inline gfn_t kvm_mmu_max_gfn(void)
{
	/*
	 * Note that this uses the host MAXPHYADDR, not the guest's.
	 * EPT/NPT cannot support GPAs that would exceed host.MAXPHYADDR;
	 * assuming KVM is running on bare metal, guest accesses beyond
	 * host.MAXPHYADDR will hit a #PF(RSVD) and never cause a vmexit
	 * (either EPT Violation/Misconfig or #NPF), and so KVM will never
	 * install a SPTE for such addresses.  If KVM is running as a VM
	 * itself, on the other hand, it might see a MAXPHYADDR that is less
	 * than hardware's real MAXPHYADDR.  Using the host MAXPHYADDR
	 * disallows such SPTEs entirely and simplifies the TDP MMU.
	 */
	int max_gpa_bits = likely(tdp_enabled) ? kvm_host.maxphyaddr : 52;

	return (1ULL << (max_gpa_bits - PAGE_SHIFT)) - 1;
}
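
/*
 * Worked example (illustrative): with TDP enabled on a host whose MAXPHYADDR
 * is 46, and with 4KiB base pages (PAGE_SHIFT == 12), the maximum GFN is
 * (1ULL << (46 - 12)) - 1 = 0x3ffffffff.
 */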

u8 kvm_mmu_get_max_tdp_level(void);

void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask);
void kvm_mmu_set_me_spte_mask(u64 me_value, u64 me_mask);
void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only);

void kvm_init_mmu(struct kvm_vcpu *vcpu);
void kvm_init_shadow_npt_mmu(struct kvm_vcpu *vcpu, unsigned long cr0,
			     unsigned long cr4, u64 efer, gpa_t nested_cr3);
void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly,
			     int huge_page_level, bool accessed_dirty,
			     gpa_t new_eptp);
bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
				u64 fault_address, char *insn, int insn_len);
void __kvm_mmu_refresh_passthrough_bits(struct kvm_vcpu *vcpu,
					struct kvm_mmu *mmu);

int kvm_mmu_load(struct kvm_vcpu *vcpu);
void kvm_mmu_unload(struct kvm_vcpu *vcpu);
void kvm_mmu_free_obsolete_roots(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu);
void kvm_mmu_sync_prev_roots(struct kvm_vcpu *vcpu);
void kvm_mmu_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
			 int bytes);

static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
{
	/*
	 * Checking root.hpa is sufficient even when KVM has a mirror root.
	 * We can have either:
	 * (1) mirror_root_hpa = INVALID_PAGE, root.hpa = INVALID_PAGE
	 * (2) mirror_root_hpa = root,         root.hpa = INVALID_PAGE
	 * (3) mirror_root_hpa = root1,        root.hpa = root2
	 * We don't ever have:
	 *     mirror_root_hpa = INVALID_PAGE, root.hpa = root
	 */
	if (likely(vcpu->arch.mmu->root.hpa != INVALID_PAGE))
		return 0;

	return kvm_mmu_load(vcpu);
}
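
/*
 * Usage sketch (illustrative, not taken from this file): callers on the
 * vcpu-run path make sure a valid root is loaded before entering the guest:
 *
 *	r = kvm_mmu_reload(vcpu);
 *	if (unlikely(r))
 *		goto out;
 *
 * where "out" is a hypothetical error label in the caller.
 */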

static inline unsigned long kvm_get_pcid(struct kvm_vcpu *vcpu, gpa_t cr3)
{
	BUILD_BUG_ON((X86_CR3_PCID_MASK & PAGE_MASK) != 0);

	return kvm_is_cr4_bit_set(vcpu, X86_CR4_PCIDE)
	       ? cr3 & X86_CR3_PCID_MASK
	       : 0;
}
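
/*
 * Illustrative example: with CR4.PCIDE = 1 and CR3 = 0x12345005, the PCID
 * lives in CR3 bits 11:0, so kvm_get_pcid() returns 5.  With CR4.PCIDE = 0
 * the PCID field is architecturally meaningless and 0 is returned.
 */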

static inline unsigned long kvm_get_active_pcid(struct kvm_vcpu *vcpu)
{
	return kvm_get_pcid(vcpu, kvm_read_cr3(vcpu));
}

static inline unsigned long kvm_get_active_cr3_lam_bits(struct kvm_vcpu *vcpu)
{
	if (!guest_cpu_cap_has(vcpu, X86_FEATURE_LAM))
		return 0;

	return kvm_read_cr3(vcpu) & (X86_CR3_LAM_U48 | X86_CR3_LAM_U57);
}

static inline void kvm_mmu_load_pgd(struct kvm_vcpu *vcpu)
{
	u64 root_hpa = vcpu->arch.mmu->root.hpa;

	if (!VALID_PAGE(root_hpa))
		return;

	kvm_x86_call(load_mmu_pgd)(vcpu, root_hpa,
				   vcpu->arch.mmu->root_role.level);
}

static inline void kvm_mmu_refresh_passthrough_bits(struct kvm_vcpu *vcpu,
						    struct kvm_mmu *mmu)
{
	/*
	 * When EPT is enabled, KVM may pass CR0.WP through to the guest, i.e.
	 * @mmu's snapshot of CR0.WP and thus all related paging metadata may
	 * be stale.  Refresh CR0.WP and the metadata on-demand when checking
	 * for permission faults.  Exempt nested MMUs, i.e. MMUs for shadowing
	 * nEPT and nNPT, as CR0.WP is ignored in both cases.  Note, KVM does
	 * need to refresh nested_mmu, a.k.a. the walker used to translate L2
	 * GVAs to GPAs, as that "MMU" needs to honor L2's CR0.WP.
	 */
	if (!tdp_enabled || mmu == &vcpu->arch.guest_mmu)
		return;

	__kvm_mmu_refresh_passthrough_bits(vcpu, mmu);
}

/*
 * Check if a given access (described through the I/D, W/R and U/S bits of a
 * page fault error code pfec) causes a permission fault with the given PTE
 * access rights (in ACC_* format).
 *
 * Return zero if the access does not fault; return the page fault error code
 * if the access faults.
 */
static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
				  unsigned pte_access, unsigned pte_pkey,
				  u64 access)
{
	/* strip nested paging fault error codes */
	unsigned int pfec = access;
	unsigned long rflags = kvm_x86_call(get_rflags)(vcpu);

	/*
	 * For explicit supervisor accesses, SMAP is disabled if EFLAGS.AC = 1.
	 * For implicit supervisor accesses, SMAP cannot be overridden.
	 *
	 * SMAP applies to supervisor accesses only; for user accesses,
	 * not_smap can be set or clear without any bearing on the result.
	 *
	 * We put the SMAP checking bit in place of the PFERR_RSVD_MASK bit;
	 * this bit will always be zero in pfec, but it will be one in index
	 * if SMAP checks are being disabled.
	 */
	u64 implicit_access = access & PFERR_IMPLICIT_ACCESS;
	bool not_smap = ((rflags & X86_EFLAGS_AC) | implicit_access) == X86_EFLAGS_AC;
	int index = (pfec | (not_smap ? PFERR_RSVD_MASK : 0)) >> 1;
	u32 errcode = PFERR_PRESENT_MASK;
	bool fault;

	kvm_mmu_refresh_passthrough_bits(vcpu, mmu);

	fault = (mmu->permissions[index] >> pte_access) & 1;

	WARN_ON(pfec & (PFERR_PK_MASK | PFERR_RSVD_MASK));
	if (unlikely(mmu->pkru_mask)) {
		u32 pkru_bits, offset;

		/*
		 * PKRU is a 32-bit register: 16 protection domains with 2
		 * attribute bits each.  pte_pkey is the index of the PTE's
		 * protection domain, so pte_pkey * 2 is the index of the
		 * domain's first bit in PKRU.
		 */
		pkru_bits = (vcpu->arch.pkru >> (pte_pkey * 2)) & 3;

		/* clear present bit, replace PFEC.RSVD with ACC_USER_MASK. */
		offset = (pfec & ~1) | ((pte_access & PT_USER_MASK) ? PFERR_RSVD_MASK : 0);

		pkru_bits &= mmu->pkru_mask >> offset;
		errcode |= -pkru_bits & PFERR_PK_MASK;
		fault |= (pkru_bits != 0);
	}

	return -(u32)fault & errcode;
}
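
/*
 * Worked example (illustrative): for a user-mode write, pfec has
 * PFERR_WRITE_MASK and PFERR_USER_MASK set (0x6).  A user access never has
 * PFERR_IMPLICIT_ACCESS, and not_smap has no bearing on the result; assuming
 * EFLAGS.AC = 0, index = 0x6 >> 1 = 3, and the access faults iff bit
 * pte_access of mmu->permissions[3] is set.
 */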

bool kvm_mmu_may_ignore_guest_pat(void);

int kvm_mmu_post_init_vm(struct kvm *kvm);
void kvm_mmu_pre_destroy_vm(struct kvm *kvm);

static inline bool kvm_shadow_root_allocated(struct kvm *kvm)
{
	/*
	 * Read shadow_root_allocated before related pointers. Hence, threads
	 * reading shadow_root_allocated in any lock context are guaranteed to
	 * see the pointers. Pairs with smp_store_release in
	 * mmu_first_shadow_root_alloc.
	 */
	return smp_load_acquire(&kvm->arch.shadow_root_allocated);
}
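
/*
 * Illustrative sketch of the pairing described above: the allocation side
 * (mmu_first_shadow_root_alloc, per the comment) populates the rmaps and
 * other shadow-MMU state first, then does
 *
 *	smp_store_release(&kvm->arch.shadow_root_allocated, true);
 *
 * so any reader that observes the flag as true via smp_load_acquire() is
 * guaranteed to also observe the fully-initialized pointers.
 */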

#ifdef CONFIG_X86_64
extern bool tdp_mmu_enabled;
#else
#define tdp_mmu_enabled false
#endif

static inline bool kvm_memslots_have_rmaps(struct kvm *kvm)
{
	return !tdp_mmu_enabled || kvm_shadow_root_allocated(kvm);
}

static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
{
	/* KVM_HPAGE_GFN_SHIFT(PG_LEVEL_4K) must be 0. */
	return (gfn >> KVM_HPAGE_GFN_SHIFT(level)) -
		(base_gfn >> KVM_HPAGE_GFN_SHIFT(level));
}

static inline unsigned long
__kvm_mmu_slot_lpages(struct kvm_memory_slot *slot, unsigned long npages,
		      int level)
{
	return gfn_to_index(slot->base_gfn + npages - 1,
			    slot->base_gfn, level) + 1;
}

static inline unsigned long
kvm_mmu_slot_lpages(struct kvm_memory_slot *slot, int level)
{
	return __kvm_mmu_slot_lpages(slot, slot->npages, level);
}
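
/*
 * Worked example (illustrative): a slot with base_gfn = 0 and npages = 1024
 * (4MiB of 4KiB pages) at PG_LEVEL_2M, where KVM_HPAGE_GFN_SHIFT() is 9,
 * covers gfn_to_index(1023, 0, PG_LEVEL_2M) + 1 = (1023 >> 9) + 1 = 2
 * 2MiB-sized entries.
 */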

static inline void kvm_update_page_stats(struct kvm *kvm, int level, int count)
{
	atomic64_add(count, &kvm->stat.pages[level - 1]);
}

gpa_t translate_nested_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u64 access,
			   struct x86_exception *exception);

static inline gpa_t kvm_translate_gpa(struct kvm_vcpu *vcpu,
				      struct kvm_mmu *mmu,
				      gpa_t gpa, u64 access,
				      struct x86_exception *exception)
{
	if (mmu != &vcpu->arch.nested_mmu)
		return gpa;
	return translate_nested_gpa(vcpu, gpa, access, exception);
}

static inline bool kvm_has_mirrored_tdp(const struct kvm *kvm)
{
	return kvm->arch.vm_type == KVM_X86_TDX_VM;
}

static inline gfn_t kvm_gfn_direct_bits(const struct kvm *kvm)
{
	return kvm->arch.gfn_direct_bits;
}

static inline bool kvm_is_addr_direct(struct kvm *kvm, gpa_t gpa)
{
	gpa_t gpa_direct_bits = gfn_to_gpa(kvm_gfn_direct_bits(kvm));

	return !gpa_direct_bits || (gpa & gpa_direct_bits);
}

static inline bool kvm_is_gfn_alias(struct kvm *kvm, gfn_t gfn)
{
	return gfn & kvm_gfn_direct_bits(kvm);
}
#endif