// SPDX-License-Identifier: GPL-2.0-only
/*
 * Helpers used for nested SVM testing
 * Largely inspired by KVM unit test svm.c
 *
 * Copyright (C) 2020, Red Hat, Inc.
 */

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "svm_util.h"

#define SEV_DEV_PATH "/dev/sev"

struct gpr64_regs guest_regs;
u64 rflags;

/*
 * Allocate memory regions for nested SVM tests.
 *
 * Input Args:
 *   vm - The VM to allocate guest-virtual addresses in.
 *
 * Output Args:
 *   p_svm_gva - The guest virtual address for the struct svm_test_data.
 *
 * Return:
 *   Pointer to structure with the addresses of the SVM areas.
 */
struct svm_test_data *
vcpu_alloc_svm(struct kvm_vm *vm, vm_vaddr_t *p_svm_gva)
{
	vm_vaddr_t svm_gva = vm_vaddr_alloc_page(vm);
	struct svm_test_data *svm = addr_gva2hva(vm, svm_gva);

	svm->vmcb = (void *)vm_vaddr_alloc_page(vm);
	svm->vmcb_hva = addr_gva2hva(vm, (uintptr_t)svm->vmcb);
	svm->vmcb_gpa = addr_gva2gpa(vm, (uintptr_t)svm->vmcb);

	svm->save_area = (void *)vm_vaddr_alloc_page(vm);
	svm->save_area_hva = addr_gva2hva(vm, (uintptr_t)svm->save_area);
	svm->save_area_gpa = addr_gva2gpa(vm, (uintptr_t)svm->save_area);

	svm->msr = (void *)vm_vaddr_alloc_page(vm);
	svm->msr_hva = addr_gva2hva(vm, (uintptr_t)svm->msr);
	svm->msr_gpa = addr_gva2gpa(vm, (uintptr_t)svm->msr);
	/* Zero the MSR permissions map so no MSR access is intercepted. */
	memset(svm->msr_hva, 0, getpagesize());

	*p_svm_gva = svm_gva;
	return svm;
}
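
/*
 * Typical host-side use (a sketch: the vm/vcpu handles and the guest entry
 * point l1_guest_code are hypothetical, and vcpu_args_set()'s exact
 * signature varies across selftests versions):
 *
 *	vm_vaddr_t svm_gva;
 *
 *	vcpu_alloc_svm(vm, &svm_gva);
 *	vcpu_args_set(vcpu, 1, svm_gva);
 *
 * which hands the struct's GVA to l1_guest_code() as its first argument.
 */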

static void vmcb_set_seg(struct vmcb_seg *seg, u16 selector,
			 u64 base, u32 limit, u32 attr)
{
	seg->selector = selector;
	seg->attrib = attr;
	seg->limit = limit;
	seg->base = base;
}

void generic_svm_setup(struct svm_test_data *svm, void *guest_rip, void *guest_rsp)
{
	struct vmcb *vmcb = svm->vmcb;
	uint64_t vmcb_gpa = svm->vmcb_gpa;
	struct vmcb_save_area *save = &vmcb->save;
	struct vmcb_control_area *ctrl = &vmcb->control;
	/* Seg type 3 = read/write data, accessed; type 9 = execute-only code, accessed. */
	u32 data_seg_attr = 3 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
		| SVM_SELECTOR_DB_MASK | SVM_SELECTOR_G_MASK;
	u32 code_seg_attr = 9 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
		| SVM_SELECTOR_L_MASK | SVM_SELECTOR_G_MASK;
	uint64_t efer;

	/* Enable SVM and register the host state-save area. */
	efer = rdmsr(MSR_EFER);
	wrmsr(MSR_EFER, efer | EFER_SVME);
	wrmsr(MSR_VM_HSAVE_PA, svm->save_area_gpa);

	memset(vmcb, 0, sizeof(*vmcb));
	/* VMSAVE seeds the VMCB with the current FS/GS/TR/LDTR and syscall MSR state. */
	asm volatile ("vmsave %0\n\t" : : "a" (vmcb_gpa) : "memory");
	vmcb_set_seg(&save->es, get_es(), 0, -1U, data_seg_attr);
	vmcb_set_seg(&save->cs, get_cs(), 0, -1U, code_seg_attr);
	vmcb_set_seg(&save->ss, get_ss(), 0, -1U, data_seg_attr);
	vmcb_set_seg(&save->ds, get_ds(), 0, -1U, data_seg_attr);
	vmcb_set_seg(&save->gdtr, 0, get_gdt().address, get_gdt().size, 0);
	vmcb_set_seg(&save->idtr, 0, get_idt().address, get_idt().size, 0);

	ctrl->asid = 1;	/* ASID 0 is reserved for the host. */
	save->cpl = 0;
	save->efer = rdmsr(MSR_EFER);
	asm volatile ("mov %%cr4, %0" : "=r"(save->cr4) : : "memory");
	asm volatile ("mov %%cr3, %0" : "=r"(save->cr3) : : "memory");
	asm volatile ("mov %%cr0, %0" : "=r"(save->cr0) : : "memory");
	asm volatile ("mov %%dr7, %0" : "=r"(save->dr7) : : "memory");
	asm volatile ("mov %%dr6, %0" : "=r"(save->dr6) : : "memory");
	asm volatile ("mov %%cr2, %0" : "=r"(save->cr2) : : "memory");
	save->g_pat = rdmsr(MSR_IA32_CR_PAT);
	save->dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
	/* Intercept only VMRUN and VMMCALL from the nested guest. */
	ctrl->intercept = (1ULL << INTERCEPT_VMRUN) |
			  (1ULL << INTERCEPT_VMMCALL);
	ctrl->msrpm_base_pa = svm->msr_gpa;

	vmcb->save.rip = (u64)guest_rip;
	vmcb->save.rsp = (u64)guest_rsp;
	/* rdi carries the first guest argument: the svm_test_data pointer. */
	guest_regs.rdi = (u64)svm;
}
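
/*
 * Typical use from L1 guest code (a sketch: l2_guest_code and
 * L2_GUEST_STACK_SIZE are hypothetical; the pattern follows the existing
 * SVM selftests). Note that guest_rsp points one past the top of the
 * stack array, since the stack grows down:
 *
 *	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
 *	struct vmcb *vmcb = svm->vmcb;
 *
 *	generic_svm_setup(svm, l2_guest_code,
 *			  &l2_guest_stack[L2_GUEST_STACK_SIZE]);
 *	run_guest(vmcb, svm->vmcb_gpa);
 *	GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
 */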

/*
 * Save/restore 64-bit general-purpose registers except rax, rip and rsp,
 * which are handled directly through the VMCB guest processor state.
 */
#define SAVE_GPR_C				\
	"xchg %%rbx, guest_regs+0x20\n\t"	\
	"xchg %%rcx, guest_regs+0x10\n\t"	\
	"xchg %%rdx, guest_regs+0x18\n\t"	\
	"xchg %%rbp, guest_regs+0x30\n\t"	\
	"xchg %%rsi, guest_regs+0x38\n\t"	\
	"xchg %%rdi, guest_regs+0x40\n\t"	\
	"xchg %%r8,  guest_regs+0x48\n\t"	\
	"xchg %%r9,  guest_regs+0x50\n\t"	\
	"xchg %%r10, guest_regs+0x58\n\t"	\
	"xchg %%r11, guest_regs+0x60\n\t"	\
	"xchg %%r12, guest_regs+0x68\n\t"	\
	"xchg %%r13, guest_regs+0x70\n\t"	\
	"xchg %%r14, guest_regs+0x78\n\t"	\
	"xchg %%r15, guest_regs+0x80\n\t"

/*
 * xchg swaps each register with its guest_regs slot, so running the same
 * sequence a second time restores the values it replaced: loading guest
 * state and saving host state share one macro.
 */
#define LOAD_GPR_C SAVE_GPR_C
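
/*
 * The hard-coded offsets above encode the layout of struct gpr64_regs
 * (processor.h). Compile-time checks along these lines (a sketch, asserting
 * only the offsets the asm already relies on) would catch layout drift:
 *
 *	_Static_assert(offsetof(struct gpr64_regs, rcx) == 0x10, "rcx slot");
 *	_Static_assert(offsetof(struct gpr64_regs, r15) == 0x80, "r15 slot");
 */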

/*
 * Selftests do not use interrupts, so clgi/sti/cli/stgi are omitted for
 * now. The registers touched by LOAD_GPR_C/SAVE_GPR_C end up with their
 * original values, so they do not need to be in the clobber list.
 */
void run_guest(struct vmcb *vmcb, uint64_t vmcb_gpa)
{
	/* vmload/vmrun/vmsave take the VMCB's physical address in rax ("a"). */
	asm volatile (
		"vmload %[vmcb_gpa]\n\t"
		"mov rflags, %%r15\n\t"	// rflags
		"mov %%r15, 0x170(%[vmcb])\n\t"
		"mov guest_regs, %%r15\n\t"	// rax
		"mov %%r15, 0x1f8(%[vmcb])\n\t"
		LOAD_GPR_C
		"vmrun %[vmcb_gpa]\n\t"
		SAVE_GPR_C
		"mov 0x170(%[vmcb]), %%r15\n\t"	// rflags
		"mov %%r15, rflags\n\t"
		"mov 0x1f8(%[vmcb]), %%r15\n\t"	// rax
		"mov %%r15, guest_regs\n\t"
		"vmsave %[vmcb_gpa]\n\t"
		: : [vmcb] "r" (vmcb), [vmcb_gpa] "a" (vmcb_gpa)
		: "r15", "memory");
}
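
/*
 * After run_guest() returns, the exit reason is in vmcb->control.exit_code.
 * To resume L2 past an intercepted VMMCALL, L1 advances the saved rip over
 * the 3-byte instruction before re-entering (a sketch):
 *
 *	run_guest(svm->vmcb, svm->vmcb_gpa);
 *	if (svm->vmcb->control.exit_code == SVM_EXIT_VMMCALL) {
 *		svm->vmcb->save.rip += 3;
 *		run_guest(svm->vmcb, svm->vmcb_gpa);
 *	}
 */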

/*
 * Open SEV_DEV_PATH if available, otherwise exit the entire program.
 *
 * Return:
 *   The opened file descriptor of /dev/sev.
 */
int open_sev_dev_path_or_exit(void)
{
	return open_path_or_exit(SEV_DEV_PATH, 0);
}
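
/*
 * Usage (a sketch): SEV tests grab the fd once and reuse it for their
 * SEV ioctls:
 *
 *	int sev_fd = open_sev_dev_path_or_exit();
 */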