/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <[email protected]>
 *
 * Derived from arch/arm/include/kvm_emulate.h
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <[email protected]>
 */

#ifndef __ARM64_KVM_EMULATE_H__
#define __ARM64_KVM_EMULATE_H__

#include <linux/bitfield.h>
#include <linux/kvm_host.h>

#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_nested.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
#include <asm/virt.h>

#define CURRENT_EL_SP_EL0_VECTOR	0x0
#define CURRENT_EL_SP_ELx_VECTOR	0x200
#define LOWER_EL_AArch64_VECTOR		0x400
#define LOWER_EL_AArch32_VECTOR		0x600

enum exception_type {
	except_type_sync	= 0,
	except_type_irq		= 0x80,
	except_type_fiq		= 0x100,
	except_type_serror	= 0x180,
};
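
/*
 * Illustrative note: the vector offsets above combine additively with the
 * per-source base. For example, a synchronous exception taken from a lower
 * EL using AArch64 is injected at VBAR_ELx + LOWER_EL_AArch64_VECTOR +
 * except_type_sync = VBAR_ELx + 0x400, and a SError from the same source
 * at VBAR_ELx + 0x580.
 */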

#define kvm_exception_type_names		\
	{ except_type_sync,	"SYNC"   },	\
	{ except_type_irq,	"IRQ"    },	\
	{ except_type_fiq,	"FIQ"    },	\
	{ except_type_serror,	"SERROR" }

bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu);

void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_size_fault(struct kvm_vcpu *vcpu);

void kvm_vcpu_wfi(struct kvm_vcpu *vcpu);

void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu);
int kvm_inject_nested_sync(struct kvm_vcpu *vcpu, u64 esr_el2);
int kvm_inject_nested_irq(struct kvm_vcpu *vcpu);

static inline void kvm_inject_nested_sve_trap(struct kvm_vcpu *vcpu)
{
	u64 esr = FIELD_PREP(ESR_ELx_EC_MASK, ESR_ELx_EC_SVE) |
		  ESR_ELx_IL;

	kvm_inject_nested_sync(vcpu, esr);
}

#if defined(__KVM_VHE_HYPERVISOR__) || defined(__KVM_NVHE_HYPERVISOR__)
static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.hcr_el2 & HCR_RW);
}
#else
static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
	return vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT);
}
#endif

static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
	if (!vcpu_has_run_once(vcpu))
		vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;

	/*
	 * For non-FWB CPUs, we trap VM ops (HCR_EL2.TVM) until M+C
	 * get set in SCTLR_EL1 such that we can detect when the guest
	 * MMU gets turned on and do the necessary cache maintenance
	 * then.
	 */
	if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
		vcpu->arch.hcr_el2 |= HCR_TVM;
}

static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu->arch.hcr_el2;
}

static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~HCR_TWE;
	if (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count) ||
	    vcpu->kvm->arch.vgic.nassgireq)
		vcpu->arch.hcr_el2 &= ~HCR_TWI;
	else
		vcpu->arch.hcr_el2 |= HCR_TWI;
}

static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= HCR_TWE;
	vcpu->arch.hcr_el2 |= HCR_TWI;
}

static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.vsesr_el2;
}

static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
{
	vcpu->arch.vsesr_el2 = vsesr;
}

static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->pc;
}

static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->pstate;
}

static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
	return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
}

static __always_inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return kvm_condition_valid32(vcpu);

	return true;
}

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
	*vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT;
}

/*
 * vcpu_get_reg and vcpu_set_reg should always be passed a register number
 * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
 * AArch32 with banked registers.
 */
static __always_inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
						  u8 reg_num)
{
	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs[reg_num];
}

static __always_inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
					 unsigned long val)
{
	if (reg_num != 31)
		vcpu_gp_regs(vcpu)->regs[reg_num] = val;
}
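
/*
 * Usage sketch (illustrative, for an emulated MMIO data abort): register
 * number 31 encodes XZR/WZR here, so reads yield 0 and writes are
 * discarded, as implemented above.
 *
 *	int rt = kvm_vcpu_dabt_get_rd(vcpu);		// defined below
 *	unsigned long val = vcpu_get_reg(vcpu, rt);	// 0 if rt == 31
 *	vcpu_set_reg(vcpu, rt, val);			// no-op if rt == 31
 */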

static inline bool vcpu_is_el2_ctxt(const struct kvm_cpu_context *ctxt)
{
	switch (ctxt->regs.pstate & (PSR_MODE32_BIT | PSR_MODE_MASK)) {
	case PSR_MODE_EL2h:
	case PSR_MODE_EL2t:
		return true;
	default:
		return false;
	}
}

static inline bool vcpu_is_el2(const struct kvm_vcpu *vcpu)
{
	return vcpu_is_el2_ctxt(&vcpu->arch.ctxt);
}

static inline bool vcpu_el2_e2h_is_set(const struct kvm_vcpu *vcpu)
{
	return (!cpus_have_final_cap(ARM64_HAS_HCR_NV1) ||
		(__vcpu_sys_reg(vcpu, HCR_EL2) & HCR_E2H));
}

static inline bool vcpu_el2_tge_is_set(const struct kvm_vcpu *vcpu)
{
	return ctxt_sys_reg(&vcpu->arch.ctxt, HCR_EL2) & HCR_TGE;
}

static inline bool is_hyp_ctxt(const struct kvm_vcpu *vcpu)
{
	bool e2h, tge;
	u64 hcr;

	if (!vcpu_has_nv(vcpu))
		return false;

	hcr = __vcpu_sys_reg(vcpu, HCR_EL2);

	e2h = (hcr & HCR_E2H);
	tge = (hcr & HCR_TGE);

	/*
	 * We are in a hypervisor context if the vcpu mode is EL2, or if
	 * both the E2H and TGE bits are set. The latter means we are in
	 * the user space of the VHE kernel. ARMv8.1 ARM describes this
	 * as 'InHost'.
	 *
	 * Note that the HCR_EL2.{E2H,TGE}={0,1} isn't really handled in
	 * the rest of the KVM code, and will result in a misbehaving
	 * guest.
	 */
	return vcpu_is_el2(vcpu) || (e2h && tge);
}

static inline bool vcpu_is_host_el0(const struct kvm_vcpu *vcpu)
{
	return is_hyp_ctxt(vcpu) && !vcpu_is_el2(vcpu);
}
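
/*
 * A sketch of the resulting contexts, assuming vcpu_has_nv(vcpu):
 *
 *	vcpu mode | E2H | TGE | is_hyp_ctxt() | vcpu_is_host_el0()
 *	EL2{h,t}  |  x  |  x  |     true      |       false
 *	EL0       |  1  |  1  |     true      |       true  ('InHost')
 *	other     |  -  |  -  |     false     |       false
 */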

/*
 * The layout of SPSR for an AArch32 state is different when observed from an
 * AArch64 SPSR_ELx or an AArch32 SPSR_*. This function generates the AArch32
 * view given an AArch64 view.
 *
 * In ARM DDI 0487E.a see:
 *
 * - The AArch64 view (SPSR_EL2) in section C5.2.18, page C5-426
 * - The AArch32 view (SPSR_abt) in section G8.2.126, page G8-6256
 * - The AArch32 view (SPSR_und) in section G8.2.132, page G8-6280
 *
 * Which show the following differences:
 *
 * | Bit | AA64 | AA32 | Notes                       |
 * +-----+------+------+-----------------------------+
 * | 24  | DIT  | J    | J is RES0 in ARMv8          |
 * | 21  | SS   | DIT  | SS doesn't exist in AArch32 |
 *
 * ... and all other bits are (currently) common.
 */
static inline unsigned long host_spsr_to_spsr32(unsigned long spsr)
{
	const unsigned long overlap = BIT(24) | BIT(21);
	unsigned long dit = !!(spsr & PSR_AA32_DIT_BIT);

	spsr &= ~overlap;

	spsr |= dit << 21;

	return spsr;
}
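
/*
 * Worked example (assuming PSR_AA32_DIT_BIT names the AArch64-view DIT,
 * i.e. bit 24, as the table above implies): an input of BIT(24) has DIT
 * set and SS clear, so host_spsr_to_spsr32(0x01000000) clears bit 24 and
 * returns 0x00200000, with DIT now at the AArch32 position (bit 21).
 */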

static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
{
	u32 mode;

	if (vcpu_mode_is_32bit(vcpu)) {
		mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
		return mode > PSR_AA32_MODE_USR;
	}

	mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;

	return mode != PSR_MODE_EL0t;
}

static __always_inline u64 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.esr_el2;
}

static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
	u64 esr = kvm_vcpu_get_esr(vcpu);

	if (esr & ESR_ELx_CV)
		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;

	return -1;
}

static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.far_el2;
}

static __always_inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
	return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}
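
/*
 * Background note: HPFAR_EL2.FIPA starts at bit 4 and holds the faulting
 * IPA from bit 12 upwards, so masking and shifting left by 8 reconstructs
 * the IPA with the low 12 bits (the page offset) zeroed.
 */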

static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.disr_el1;
}

static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_xVC_IMM_MASK;
}

static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_ISV);
}

static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
}

static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SSE);
}

static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SF);
}

static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
	return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}

static __always_inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);
}

/* Always check for S1PTW *before* using this. */
static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR;
}

static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_CM);
}

static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
	return 1 << ((kvm_vcpu_get_esr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}
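
/*
 * For reference, ESR_ELx.SAS decodes as 0b00->1, 0b01->2, 0b10->4 and
 * 0b11->8 bytes, hence the "1 <<" above: a faulting 32-bit access such
 * as "str w1, [x0]" reports SAS=0b10 and kvm_vcpu_dabt_get_as() returns 4.
 */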

/* This one is not specific to Data Abort */
static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_IL);
}

static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
	return ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
}

static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}

static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
}

static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
}

static inline
bool kvm_vcpu_trap_is_permission_fault(const struct kvm_vcpu *vcpu)
{
	return esr_fsc_is_permission_fault(kvm_vcpu_get_esr(vcpu));
}

static inline
bool kvm_vcpu_trap_is_translation_fault(const struct kvm_vcpu *vcpu)
{
	return esr_fsc_is_translation_fault(kvm_vcpu_get_esr(vcpu));
}

static inline
u64 kvm_vcpu_trap_get_perm_fault_granule(const struct kvm_vcpu *vcpu)
{
	unsigned long esr = kvm_vcpu_get_esr(vcpu);

	BUG_ON(!esr_fsc_is_permission_fault(esr));
	return BIT(ARM64_HW_PGTABLE_LEVEL_SHIFT(esr & ESR_ELx_FSC_LEVEL));
}
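
/*
 * Worked example, assuming 4KiB pages (PAGE_SHIFT == 12): a level 3
 * permission fault yields ARM64_HW_PGTABLE_LEVEL_SHIFT(3) =
 * (12 - 3) * (4 - 3) + 3 = 12, i.e. a 4KiB granule, while a level 2
 * fault yields (12 - 3) * (4 - 2) + 3 = 21, i.e. a 2MiB block.
 */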

static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu)
{
	switch (kvm_vcpu_trap_get_fault(vcpu)) {
	case ESR_ELx_FSC_EXTABT:
	case ESR_ELx_FSC_SEA_TTW(-1) ... ESR_ELx_FSC_SEA_TTW(3):
	case ESR_ELx_FSC_SECC:
	case ESR_ELx_FSC_SECC_TTW(-1) ... ESR_ELx_FSC_SECC_TTW(3):
		return true;
	default:
		return false;
	}
}

static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
{
	u64 esr = kvm_vcpu_get_esr(vcpu);
	return ESR_ELx_SYS64_ISS_RT(esr);
}

static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_abt_iss1tw(vcpu)) {
		/*
		 * Only a permission fault on a S1PTW should be
		 * considered as a write. Otherwise, page tables baked
		 * in a read-only memslot will result in an exception
		 * being delivered in the guest.
		 *
		 * The drawback is that we end-up faulting twice if the
		 * guest is using any of HW AF/DB: a translation fault
		 * to map the page containing the PT (read only at
		 * first), then a permission fault to allow the flags
		 * to be set.
		 */
		return kvm_vcpu_trap_is_permission_fault(vcpu);
	}

	if (kvm_vcpu_trap_is_iabt(vcpu))
		return false;

	return kvm_vcpu_dabt_iswrite(vcpu);
}

static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
	return __vcpu_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}

static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu)) {
		*vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT;
	} else {
		u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
		sctlr |= SCTLR_ELx_EE;
		vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);
	}
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT);

	if (vcpu_mode_priv(vcpu))
		return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & SCTLR_ELx_EE);
	else
		return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & SCTLR_EL1_E0E);
}

static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return be16_to_cpu(data & 0xffff);
		case 4:
			return be32_to_cpu(data & 0xffffffff);
		default:
			return be64_to_cpu(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return le16_to_cpu(data & 0xffff);
		case 4:
			return le32_to_cpu(data & 0xffffffff);
		default:
			return le64_to_cpu(data);
		}
	}

	return data;		/* Leave LE untouched */
}

static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_be16(data & 0xffff);
		case 4:
			return cpu_to_be32(data & 0xffffffff);
		default:
			return cpu_to_be64(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_le16(data & 0xffff);
		case 4:
			return cpu_to_le32(data & 0xffffffff);
		default:
			return cpu_to_le64(data);
		}
	}

	return data;		/* Leave LE untouched */
}
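
/*
 * Worked example (illustrative): on a little-endian host, a big-endian
 * vcpu storing w0 == 0x11223344 to an emulated device gives
 * vcpu_data_guest_to_host(vcpu, 0x11223344, 4) == 0x44332211 for the
 * device model; vcpu_data_host_to_guest() applies the inverse swab
 * before the result of a load is written back to the guest register.
 */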

static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
{
	WARN_ON(vcpu_get_flag(vcpu, PENDING_EXCEPTION));
	vcpu_set_flag(vcpu, INCREMENT_PC);
}

#define kvm_pend_exception(v, e)					\
	do {								\
		WARN_ON(vcpu_get_flag((v), INCREMENT_PC));		\
		vcpu_set_flag((v), PENDING_EXCEPTION);			\
		vcpu_set_flag((v), e);					\
	} while (0)
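
/*
 * Note on the WARN_ONs above: INCREMENT_PC and PENDING_EXCEPTION are
 * mutually exclusive for a given exit. An emulated instruction either
 * completes (the PC moves past it) or is superseded by an injected
 * exception (the PC is redirected to a vector), never both.
 */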

#define __build_check_all_or_none(r, bits)				\
	BUILD_BUG_ON(((r) & (bits)) && ((r) & (bits)) != (bits))

#define __cpacr_to_cptr_clr(clr, set)					\
	({								\
		u64 cptr = 0;						\
									\
		if ((set) & CPACR_EL1_FPEN)				\
			cptr |= CPTR_EL2_TFP;				\
		if ((set) & CPACR_EL1_ZEN)				\
			cptr |= CPTR_EL2_TZ;				\
		if ((set) & CPACR_EL1_SMEN)				\
			cptr |= CPTR_EL2_TSM;				\
		if ((clr) & CPACR_EL1_TTA)				\
			cptr |= CPTR_EL2_TTA;				\
		if ((clr) & CPTR_EL2_TAM)				\
			cptr |= CPTR_EL2_TAM;				\
		if ((clr) & CPTR_EL2_TCPAC)				\
			cptr |= CPTR_EL2_TCPAC;				\
									\
		cptr;							\
	})

#define __cpacr_to_cptr_set(clr, set)					\
	({								\
		u64 cptr = 0;						\
									\
		if ((clr) & CPACR_EL1_FPEN)				\
			cptr |= CPTR_EL2_TFP;				\
		if ((clr) & CPACR_EL1_ZEN)				\
			cptr |= CPTR_EL2_TZ;				\
		if ((clr) & CPACR_EL1_SMEN)				\
			cptr |= CPTR_EL2_TSM;				\
		if ((set) & CPACR_EL1_TTA)				\
			cptr |= CPTR_EL2_TTA;				\
		if ((set) & CPTR_EL2_TAM)				\
			cptr |= CPTR_EL2_TAM;				\
		if ((set) & CPTR_EL2_TCPAC)				\
			cptr |= CPTR_EL2_TCPAC;				\
									\
		cptr;							\
	})

#define cpacr_clear_set(clr, set)					\
	do {								\
		BUILD_BUG_ON((set) & CPTR_VHE_EL2_RES0);		\
		BUILD_BUG_ON((clr) & CPACR_EL1_E0POE);			\
		__build_check_all_or_none((clr), CPACR_EL1_FPEN);	\
		__build_check_all_or_none((set), CPACR_EL1_FPEN);	\
		__build_check_all_or_none((clr), CPACR_EL1_ZEN);	\
		__build_check_all_or_none((set), CPACR_EL1_ZEN);	\
		__build_check_all_or_none((clr), CPACR_EL1_SMEN);	\
		__build_check_all_or_none((set), CPACR_EL1_SMEN);	\
									\
		if (has_vhe() || has_hvhe())				\
			sysreg_clear_set(cpacr_el1, clr, set);		\
		else							\
			sysreg_clear_set(cptr_el2,			\
					 __cpacr_to_cptr_clr(clr, set),	\
					 __cpacr_to_cptr_set(clr, set));\
	} while (0)
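
/*
 * Usage sketch (illustrative): enabling FP/SIMD accesses for the guest is
 * expressed in the VHE (CPACR_EL1) format and translated automatically:
 *
 *	cpacr_clear_set(0, CPACR_EL1_FPEN);
 *
 * On VHE/hVHE this sets CPACR_EL1.FPEN; on nVHE it clears CPTR_EL2.TFP,
 * the equivalent "don't trap" encoding.
 */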

/*
 * Returns a 'sanitised' view of CPTR_EL2, translating from nVHE to the VHE
 * format if E2H isn't set.
 */
static inline u64 vcpu_sanitised_cptr_el2(const struct kvm_vcpu *vcpu)
{
	u64 cptr = __vcpu_sys_reg(vcpu, CPTR_EL2);

	if (!vcpu_el2_e2h_is_set(vcpu))
		cptr = translate_cptr_el2_to_cpacr_el1(cptr);

	return cptr;
}

static inline bool ____cptr_xen_trap_enabled(const struct kvm_vcpu *vcpu,
					     unsigned int xen)
{
	switch (xen) {
	case 0b00:
	case 0b10:
		return true;
	case 0b01:
		return vcpu_el2_tge_is_set(vcpu) && !vcpu_is_el2(vcpu);
	case 0b11:
	default:
		return false;
	}
}

#define __guest_hyp_cptr_xen_trap_enabled(vcpu, xen)				\
	(!vcpu_has_nv(vcpu) ? false :						\
	 ____cptr_xen_trap_enabled(vcpu,					\
				   SYS_FIELD_GET(CPACR_EL1, xen,		\
						 vcpu_sanitised_cptr_el2(vcpu))))

static inline bool guest_hyp_fpsimd_traps_enabled(const struct kvm_vcpu *vcpu)
{
	return __guest_hyp_cptr_xen_trap_enabled(vcpu, FPEN);
}

static inline bool guest_hyp_sve_traps_enabled(const struct kvm_vcpu *vcpu)
{
	return __guest_hyp_cptr_xen_trap_enabled(vcpu, ZEN);
}
#endif /* __ARM64_KVM_EMULATE_H__ */