/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/linkage.h>
#include <asm/asm.h>
#include <asm/asm-offsets.h>
#include <asm/bitsperlong.h>
#include <asm/frame.h>
#include <asm/kvm_vcpu_regs.h>
#include <asm/nospec-branch.h>
#include "kvm-asm-offsets.h"

#define WORD_SIZE (BITS_PER_LONG / 8)

/* Intentionally omit RAX as it's context switched by hardware */
#define VCPU_RCX	(SVM_vcpu_arch_regs + __VCPU_REGS_RCX * WORD_SIZE)
#define VCPU_RDX	(SVM_vcpu_arch_regs + __VCPU_REGS_RDX * WORD_SIZE)
#define VCPU_RBX	(SVM_vcpu_arch_regs + __VCPU_REGS_RBX * WORD_SIZE)
/* Intentionally omit RSP as it's context switched by hardware */
#define VCPU_RBP	(SVM_vcpu_arch_regs + __VCPU_REGS_RBP * WORD_SIZE)
#define VCPU_RSI	(SVM_vcpu_arch_regs + __VCPU_REGS_RSI * WORD_SIZE)
#define VCPU_RDI	(SVM_vcpu_arch_regs + __VCPU_REGS_RDI * WORD_SIZE)

#ifdef CONFIG_X86_64
#define VCPU_R8		(SVM_vcpu_arch_regs + __VCPU_REGS_R8  * WORD_SIZE)
#define VCPU_R9		(SVM_vcpu_arch_regs + __VCPU_REGS_R9  * WORD_SIZE)
#define VCPU_R10	(SVM_vcpu_arch_regs + __VCPU_REGS_R10 * WORD_SIZE)
#define VCPU_R11	(SVM_vcpu_arch_regs + __VCPU_REGS_R11 * WORD_SIZE)
#define VCPU_R12	(SVM_vcpu_arch_regs + __VCPU_REGS_R12 * WORD_SIZE)
#define VCPU_R13	(SVM_vcpu_arch_regs + __VCPU_REGS_R13 * WORD_SIZE)
#define VCPU_R14	(SVM_vcpu_arch_regs + __VCPU_REGS_R14 * WORD_SIZE)
#define VCPU_R15	(SVM_vcpu_arch_regs + __VCPU_REGS_R15 * WORD_SIZE)
#endif
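
/*
 * Worked example of the macros above (a sketch; the exact values come
 * from asm/kvm_vcpu_regs.h and kvm-asm-offsets.h): on 64-bit,
 * WORD_SIZE is 64 / 8 = 8, and with __VCPU_REGS_RCX == 1, VCPU_RCX
 * resolves to SVM_vcpu_arch_regs + 8, i.e. the second u64 slot of the
 * vCPU's GPR array inside struct vcpu_svm.
 */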

#define SVM_vmcb01_pa	(SVM_vmcb01 + KVM_VMCB_pa)

.section .noinstr.text, "ax"

.macro RESTORE_GUEST_SPEC_CTRL
	/* No need to do anything if SPEC_CTRL is unset or V_SPEC_CTRL is set */
	ALTERNATIVE_2 "", \
		"jmp 800f", X86_FEATURE_MSR_SPEC_CTRL, \
		"", X86_FEATURE_V_SPEC_CTRL
801:
.endm
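
/*
 * Sketch of how the ALTERNATIVE_2 above behaves at runtime (assuming
 * the usual x86 alternatives patching rules, where the second feature
 * takes precedence): without X86_FEATURE_MSR_SPEC_CTRL the macro is a
 * nop; with it, "jmp 800f" branches to the out-of-line body below; and
 * if X86_FEATURE_V_SPEC_CTRL is also set, hardware context switches
 * SPEC_CTRL, so the macro is patched back to a nop.
 */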
.macro RESTORE_GUEST_SPEC_CTRL_BODY
800:
	/*
	 * SPEC_CTRL handling: if the guest's SPEC_CTRL value differs from the
	 * host's, write the MSR.  This is kept out-of-line so that the common
	 * case does not have to jump.
	 *
	 * IMPORTANT: To avoid RSB underflow attacks and any other nastiness,
	 * there must not be any returns or indirect branches between this code
	 * and vmentry.
	 */
	movl SVM_spec_ctrl(%_ASM_DI), %eax
	cmp PER_CPU_VAR(x86_spec_ctrl_current), %eax
	je 801b
	mov $MSR_IA32_SPEC_CTRL, %ecx
	xor %edx, %edx
	wrmsr
	jmp 801b
.endm
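
/*
 * Rough C equivalent of the body above (a sketch, not the real
 * implementation):
 *
 *	if (svm->spec_ctrl != this_cpu_read(x86_spec_ctrl_current))
 *		native_wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
 */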

.macro RESTORE_HOST_SPEC_CTRL
	/* No need to do anything if SPEC_CTRL is unset or V_SPEC_CTRL is set */
	ALTERNATIVE_2 "", \
		"jmp 900f", X86_FEATURE_MSR_SPEC_CTRL, \
		"", X86_FEATURE_V_SPEC_CTRL
901:
.endm
.macro RESTORE_HOST_SPEC_CTRL_BODY spec_ctrl_intercepted:req
900:
	/* Same for after vmexit.  */
	mov $MSR_IA32_SPEC_CTRL, %ecx

	/*
	 * Load the value that the guest had written into MSR_IA32_SPEC_CTRL,
	 * if it was not intercepted during guest execution.
	 */
	cmpb $0, \spec_ctrl_intercepted
	jnz 998f
	rdmsr
	movl %eax, SVM_spec_ctrl(%_ASM_DI)
998:

	/* Now restore the host value of the MSR if different from the guest's.  */
	movl PER_CPU_VAR(x86_spec_ctrl_current), %eax
	cmp SVM_spec_ctrl(%_ASM_DI), %eax
	je 901b
	xor %edx, %edx
	wrmsr
	jmp 901b
.endm
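
/*
 * Rough C equivalent of the body above (a sketch):
 *
 *	if (!spec_ctrl_intercepted)
 *		svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
 *	if (svm->spec_ctrl != this_cpu_read(x86_spec_ctrl_current))
 *		native_wrmsrl(MSR_IA32_SPEC_CTRL,
 *			      this_cpu_read(x86_spec_ctrl_current));
 */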


/**
 * __svm_vcpu_run - Run a vCPU via a transition to SVM guest mode
 * @svm:	struct vcpu_svm *
 * @spec_ctrl_intercepted: bool
 */
SYM_FUNC_START(__svm_vcpu_run)
	push %_ASM_BP
	mov  %_ASM_SP, %_ASM_BP
#ifdef CONFIG_X86_64
	push %r15
	push %r14
	push %r13
	push %r12
#else
	push %edi
	push %esi
#endif
	push %_ASM_BX

	/*
	 * Save variables needed after vmexit on the stack, in inverse
	 * order compared to when they are needed.
	 */

	/* Accessed directly from the stack in RESTORE_HOST_SPEC_CTRL.  */
	push %_ASM_ARG2

	/* Needed to restore access to percpu variables.  */
	__ASM_SIZE(push) PER_CPU_VAR(svm_data + SD_save_area_pa)

	/* Finally save @svm. */
	push %_ASM_ARG1
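
	/*
	 * Resulting stack layout, top of stack first (a sketch):
	 *
	 *	@svm					<- RSP
	 *	physical address of the per-CPU save area
	 *	@spec_ctrl_intercepted
	 *	callee-saved registers pushed above
	 */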

.ifnc _ASM_ARG1, _ASM_DI
	/*
	 * Stash @svm in RDI early. On 32-bit, arguments are in RAX, RCX
	 * and RDX which are clobbered by RESTORE_GUEST_SPEC_CTRL.
	 */
	mov %_ASM_ARG1, %_ASM_DI
.endif

	/* Clobbers RAX, RCX, RDX.  */
	RESTORE_GUEST_SPEC_CTRL

	/*
	 * Use a single vmcb (vmcb01 because it's always valid) for
	 * context switching guest state via VMLOAD/VMSAVE, that way
	 * the state doesn't need to be copied between vmcb01 and
	 * vmcb02 when switching vmcbs for nested virtualization.
	 */
	mov SVM_vmcb01_pa(%_ASM_DI), %_ASM_AX
1:	vmload %_ASM_AX
2:

	/* Get svm->current_vmcb->pa into RAX. */
	mov SVM_current_vmcb(%_ASM_DI), %_ASM_AX
	mov KVM_VMCB_pa(%_ASM_AX), %_ASM_AX

	/* Load guest registers. */
	mov VCPU_RCX(%_ASM_DI), %_ASM_CX
	mov VCPU_RDX(%_ASM_DI), %_ASM_DX
	mov VCPU_RBX(%_ASM_DI), %_ASM_BX
	mov VCPU_RBP(%_ASM_DI), %_ASM_BP
	mov VCPU_RSI(%_ASM_DI), %_ASM_SI
#ifdef CONFIG_X86_64
	mov VCPU_R8 (%_ASM_DI),  %r8
	mov VCPU_R9 (%_ASM_DI),  %r9
	mov VCPU_R10(%_ASM_DI), %r10
	mov VCPU_R11(%_ASM_DI), %r11
	mov VCPU_R12(%_ASM_DI), %r12
	mov VCPU_R13(%_ASM_DI), %r13
	mov VCPU_R14(%_ASM_DI), %r14
	mov VCPU_R15(%_ASM_DI), %r15
#endif
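	/* RDI is loaded last as it holds the @svm pointer used above. */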
	mov VCPU_RDI(%_ASM_DI), %_ASM_DI

	/* Enter guest mode */
3:	vmrun %_ASM_AX
4:
	/* Pop @svm to RAX while it's the only available register. */
	pop %_ASM_AX

	/* Save all guest registers.  */
	mov %_ASM_CX,   VCPU_RCX(%_ASM_AX)
	mov %_ASM_DX,   VCPU_RDX(%_ASM_AX)
	mov %_ASM_BX,   VCPU_RBX(%_ASM_AX)
	mov %_ASM_BP,   VCPU_RBP(%_ASM_AX)
	mov %_ASM_SI,   VCPU_RSI(%_ASM_AX)
	mov %_ASM_DI,   VCPU_RDI(%_ASM_AX)
#ifdef CONFIG_X86_64
	mov %r8,  VCPU_R8 (%_ASM_AX)
	mov %r9,  VCPU_R9 (%_ASM_AX)
	mov %r10, VCPU_R10(%_ASM_AX)
	mov %r11, VCPU_R11(%_ASM_AX)
	mov %r12, VCPU_R12(%_ASM_AX)
	mov %r13, VCPU_R13(%_ASM_AX)
	mov %r14, VCPU_R14(%_ASM_AX)
	mov %r15, VCPU_R15(%_ASM_AX)
#endif

	/* @svm can stay in RDI from now on.  */
	mov %_ASM_AX, %_ASM_DI

	mov SVM_vmcb01_pa(%_ASM_DI), %_ASM_AX
5:	vmsave %_ASM_AX
6:

	/* Restores GSBASE among other things, allowing access to percpu data.  */
	pop %_ASM_AX
7:	vmload %_ASM_AX
8:

	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %_ASM_AX, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT

	/* Clobbers RAX, RCX, RDX.  */
	RESTORE_HOST_SPEC_CTRL

	/*
	 * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
	 * untrained as soon as we exit the VM and are back to the
	 * kernel. This should be done before re-enabling interrupts
	 * because interrupt handlers won't sanitize RET if the return is
	 * from the kernel.
	 */
	UNTRAIN_RET_VM

	/*
	 * Clear all general purpose registers except RSP and RAX to prevent
	 * speculative use of the guest's values, even those that are reloaded
	 * via the stack.  In theory, an L1 cache miss when restoring registers
	 * could lead to speculative execution with the guest's values.
	 * Zeroing XORs are dirt cheap, i.e. the extra paranoia is essentially
	 * free.  RSP and RAX are exempt as they are restored by hardware
	 * during VM-Exit.
	 */
	xor %ecx, %ecx
	xor %edx, %edx
	xor %ebx, %ebx
	xor %ebp, %ebp
	xor %esi, %esi
	xor %edi, %edi
#ifdef CONFIG_X86_64
	xor %r8d,  %r8d
	xor %r9d,  %r9d
	xor %r10d, %r10d
	xor %r11d, %r11d
	xor %r12d, %r12d
	xor %r13d, %r13d
	xor %r14d, %r14d
	xor %r15d, %r15d
#endif
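
	/*
	 * Note the 32-bit XOR forms above: on x86-64, writing a 32-bit
	 * register implicitly zero-extends into the full 64-bit register,
	 * so the 32-bit form clears the whole register and is never longer
	 * than the 64-bit encoding.
	 */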

	/* "Pop" @spec_ctrl_intercepted.  */
	pop %_ASM_BX

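	/* Restore the callee-saved RBX that was pushed in the prologue. */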
	pop %_ASM_BX

#ifdef CONFIG_X86_64
	pop %r12
	pop %r13
	pop %r14
	pop %r15
#else
	pop %esi
	pop %edi
#endif
	pop %_ASM_BP
	RET

	RESTORE_GUEST_SPEC_CTRL_BODY
	RESTORE_HOST_SPEC_CTRL_BODY (%_ASM_SP)

10:	cmpb $0, _ASM_RIP(kvm_rebooting)
	jne 2b
	ud2
30:	cmpb $0, _ASM_RIP(kvm_rebooting)
	jne 4b
	ud2
50:	cmpb $0, _ASM_RIP(kvm_rebooting)
	jne 6b
	ud2
70:	cmpb $0, _ASM_RIP(kvm_rebooting)
	jne 8b
	ud2

	_ASM_EXTABLE(1b, 10b)
	_ASM_EXTABLE(3b, 30b)
	_ASM_EXTABLE(5b, 50b)
	_ASM_EXTABLE(7b, 70b)
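
	/*
	 * The _ASM_EXTABLE entries above wire each SVM instruction to its
	 * fixup: a fault on VMLOAD (1), VMRUN (3), VMSAVE (5) or the host
	 * VMLOAD (7) lands on the matching handler (10, 30, 50, 70), which
	 * resumes past the faulting instruction if kvm_rebooting is set
	 * and otherwise hits ud2.
	 */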

SYM_FUNC_END(__svm_vcpu_run)

#ifdef CONFIG_KVM_AMD_SEV

#ifdef CONFIG_X86_64
#define SEV_ES_GPRS_BASE 0x300
#define SEV_ES_RBX	(SEV_ES_GPRS_BASE + __VCPU_REGS_RBX * WORD_SIZE)
#define SEV_ES_RBP	(SEV_ES_GPRS_BASE + __VCPU_REGS_RBP * WORD_SIZE)
#define SEV_ES_RSI	(SEV_ES_GPRS_BASE + __VCPU_REGS_RSI * WORD_SIZE)
#define SEV_ES_RDI	(SEV_ES_GPRS_BASE + __VCPU_REGS_RDI * WORD_SIZE)
#define SEV_ES_R12	(SEV_ES_GPRS_BASE + __VCPU_REGS_R12 * WORD_SIZE)
#define SEV_ES_R13	(SEV_ES_GPRS_BASE + __VCPU_REGS_R13 * WORD_SIZE)
#define SEV_ES_R14	(SEV_ES_GPRS_BASE + __VCPU_REGS_R14 * WORD_SIZE)
#define SEV_ES_R15	(SEV_ES_GPRS_BASE + __VCPU_REGS_R15 * WORD_SIZE)
#endif
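
/*
 * Note: assuming the VMSA layout from the APM, 0x300 is the offset of
 * the GPR block (starting at RAX) within struct sev_es_save_area, so
 * each SEV_ES_* macro above addresses the matching GPR slot in the
 * host save area.
 */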

/**
 * __svm_sev_es_vcpu_run - Run a SEV-ES vCPU via a transition to SVM guest mode
 * @svm:	struct vcpu_svm *
 * @spec_ctrl_intercepted: bool
 * @hostsa:	struct sev_es_save_area * (the host save area)
 */
SYM_FUNC_START(__svm_sev_es_vcpu_run)
	FRAME_BEGIN

	/*
	 * Save non-volatile (callee-saved) registers to the host save area.
	 * Except for RAX and RSP, all GPRs are restored on #VMEXIT, but not
	 * saved on VMRUN.
	 */
	mov %rbp, SEV_ES_RBP (%rdx)
	mov %r15, SEV_ES_R15 (%rdx)
	mov %r14, SEV_ES_R14 (%rdx)
	mov %r13, SEV_ES_R13 (%rdx)
	mov %r12, SEV_ES_R12 (%rdx)
	mov %rbx, SEV_ES_RBX (%rdx)

	/*
	 * Save volatile registers that hold arguments that are needed after
	 * #VMEXIT (RDI=@svm and RSI=@spec_ctrl_intercepted).
	 */
	mov %rdi, SEV_ES_RDI (%rdx)
	mov %rsi, SEV_ES_RSI (%rdx)
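
	/*
	 * These two slots are what make @svm and @spec_ctrl_intercepted
	 * usable again after the guest runs: hardware reloads the GPRs
	 * from the host save area on #VMEXIT, so RDI and RSI come back
	 * holding the values stored here.
	 */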

	/* Clobbers RAX, RCX, RDX (@hostsa). */
	RESTORE_GUEST_SPEC_CTRL

	/* Get svm->current_vmcb->pa into RAX. */
	mov SVM_current_vmcb(%rdi), %rax
	mov KVM_VMCB_pa(%rax), %rax

	/* Enter guest mode */
1:	vmrun %rax
2:
	/* IMPORTANT: Stuff the RSB immediately after VM-Exit, before RET! */
	FILL_RETURN_BUFFER %rax, RSB_CLEAR_LOOPS, X86_FEATURE_RSB_VMEXIT

	/* Clobbers RAX, RCX, RDX, consumes RDI (@svm) and RSI (@spec_ctrl_intercepted). */
	RESTORE_HOST_SPEC_CTRL

	/*
	 * Mitigate RETBleed for AMD/Hygon Zen uarch. RET should be
	 * untrained as soon as we exit the VM and are back to the
	 * kernel. This should be done before re-enabling interrupts
	 * because interrupt handlers won't sanitize RET if the return is
	 * from the kernel.
	 */
	UNTRAIN_RET_VM

	FRAME_END
	RET

	RESTORE_GUEST_SPEC_CTRL_BODY
	RESTORE_HOST_SPEC_CTRL_BODY %sil

3:	cmpb $0, kvm_rebooting(%rip)
	jne 2b
	ud2

	_ASM_EXTABLE(1b, 3b)

SYM_FUNC_END(__svm_sev_es_vcpu_run)
#endif /* CONFIG_KVM_AMD_SEV */