// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2023 SUSE LLC <[email protected]>
 */

/*\
 * [Description]
 *
 * Functional test for the VMSAVE/VMLOAD instructions in a KVM environment.
 * Verify that both instructions save and load the CPU state according to
 * the CPU documentation.
 */

#include "kvm_test.h"

#ifdef COMPILE_PAYLOAD
#if defined(__i386__) || defined(__x86_64__)

#include "kvm_x86_svm.h"

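/*
 * src_vmcb holds the state to be loaded by VMLOAD, dest_vmcb receives the
 * state stored by VMSAVE, and msr_vmcb collects the register values read
 * back directly through RDMSR for cross-checking.
 */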
static struct kvm_vmcb *src_vmcb, *dest_vmcb, *msr_vmcb;
static struct kvm_sregs sregs_buf;

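/*
 * Compare a segment descriptor saved by VMSAVE against the expected values
 * and report a TFAIL for every field that does not match.
 */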
static int check_descriptor(const char *name,
	const struct kvm_vmcb_descriptor *data,
	const struct kvm_vmcb_descriptor *exp)
{
	int ret = 0;

	if (data->selector != exp->selector) {
		tst_res(TFAIL, "%s.selector = %hx (expected %hx)",
			name, data->selector, exp->selector);
		ret = 1;
	}

	if (data->attrib != exp->attrib) {
		tst_res(TFAIL, "%s.attrib = 0x%hx (expected 0x%hx)",
			name, data->attrib, exp->attrib);
		ret = 1;
	}

	if (data->limit != exp->limit) {
		tst_res(TFAIL, "%s.limit = 0x%x (expected 0x%x)",
			name, data->limit, exp->limit);
		ret = 1;
	}

	if (data->base != exp->base) {
		tst_res(TFAIL, "%s.base = 0x%llx (expected 0x%llx)",
			name, data->base, exp->base);
		ret = 1;
	}

	return ret;
}

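/*
 * Cross-check one piece of state from five sources: the value saved by
 * VMSAVE (val), the expected value programmed into the source VMCB (exp),
 * a backup copy taken before the test (backup), the value observed in the
 * actual CPU register after VMLOAD (reg), and the value saved inside the
 * nested guest (nested_val).
 */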
static int check_value(const char *name, uint64_t val, uint64_t exp,
	uint64_t backup, uint64_t reg, uint64_t nested_val)
{
	int ret = 0;

	if (exp != backup) {
		tst_res(TFAIL, "%s source was modified (0x%llx != 0x%llx)",
			name, exp, backup);
		ret = 1;
	}

	if (reg != exp) {
		tst_res(TFAIL, "%s was not loaded (0x%llx != 0x%llx)",
			name, reg, exp);
		ret = 1;
	}

	if (val != exp) {
		tst_res(TFAIL, "%s was not saved (0x%llx != 0x%llx)",
			name, val, exp);
		ret = 1;
	}

	if (val != nested_val) {
		tst_res(TFAIL, "Inconsistent %s on VM exit (0x%llx != 0x%llx)",
			name, val, nested_val);
		ret = 1;
	}

	if (!ret)
		tst_res(TPASS, "%s has correct value 0x%llx", name, val);

	return ret;
}

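/*
 * Load the CPU state from src_vmcb with VMLOAD, capture the affected
 * registers directly (via kvm_read_sregs() and RDMSR) for later
 * comparison, then store the state into dest_vmcb with VMSAVE.
 */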
static int vmsave_copy(void)
{
	kvm_svm_vmload(src_vmcb);
	kvm_read_sregs(&sregs_buf);
	msr_vmcb->star = kvm_rdmsr(MSR_STAR);
	msr_vmcb->lstar = kvm_rdmsr(MSR_LSTAR);
	msr_vmcb->cstar = kvm_rdmsr(MSR_CSTAR);
	msr_vmcb->sfmask = kvm_rdmsr(MSR_SFMASK);
	msr_vmcb->fs.base = kvm_rdmsr(MSR_FS_BASE);
	msr_vmcb->gs.base = kvm_rdmsr(MSR_GS_BASE);
	msr_vmcb->kernel_gs_base = kvm_rdmsr(MSR_KERNEL_GS_BASE);
	msr_vmcb->sysenter_cs = kvm_rdmsr(MSR_SYSENTER_CS);
	msr_vmcb->sysenter_esp = kvm_rdmsr(MSR_SYSENTER_ESP);
	msr_vmcb->sysenter_eip = kvm_rdmsr(MSR_SYSENTER_EIP);
	kvm_svm_vmsave(dest_vmcb);
	return 0;
}

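/*
 * Verify all state handled by VMLOAD/VMSAVE: the FS, GS, LDTR and TR
 * segment registers, the SYSCALL MSRs (STAR, LSTAR, CSTAR, SFMASK),
 * FS.base, GS.base, KernelGSBase and the SYSENTER MSRs.
 */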
static int check_vmsave_result(struct kvm_vmcb *copy_vmcb,
	struct kvm_vmcb *nested_vmcb)
{
	int ret = 0;

	/*
	 * When there is no nested VMCB to compare against, alias it to
	 * dest_vmcb so that the nested-value checks trivially pass.
	 */
	if (!nested_vmcb)
		nested_vmcb = dest_vmcb;

	ret = check_descriptor("FS", &dest_vmcb->fs, &src_vmcb->fs);
	ret = check_value("FS.selector", dest_vmcb->fs.selector,
		src_vmcb->fs.selector, copy_vmcb->fs.selector,
		sregs_buf.fs, nested_vmcb->fs.selector) || ret;
	ret = check_descriptor("GS", &dest_vmcb->gs, &src_vmcb->gs) || ret;
	ret = check_value("GS.selector", dest_vmcb->gs.selector,
		src_vmcb->gs.selector, copy_vmcb->gs.selector,
		sregs_buf.gs, nested_vmcb->gs.selector) || ret;
	ret = check_descriptor("LDTR", &dest_vmcb->ldtr, &src_vmcb->ldtr) ||
		ret;
	ret = check_descriptor("TR", &dest_vmcb->tr, &src_vmcb->tr) || ret;
	ret = check_value("STAR", dest_vmcb->star, src_vmcb->star,
		copy_vmcb->star, msr_vmcb->star, nested_vmcb->star) || ret;
	ret = check_value("LSTAR", dest_vmcb->lstar, src_vmcb->lstar,
		copy_vmcb->lstar, msr_vmcb->lstar, nested_vmcb->lstar) || ret;
	ret = check_value("CSTAR", dest_vmcb->cstar, src_vmcb->cstar,
		copy_vmcb->cstar, msr_vmcb->cstar, nested_vmcb->cstar) || ret;
	ret = check_value("SFMASK", dest_vmcb->sfmask, src_vmcb->sfmask,
		copy_vmcb->sfmask, msr_vmcb->sfmask, nested_vmcb->sfmask) ||
		ret;
	ret = check_value("FS.base", dest_vmcb->fs.base, src_vmcb->fs.base,
		copy_vmcb->fs.base, msr_vmcb->fs.base, nested_vmcb->fs.base) ||
		ret;
	ret = check_value("GS.base", dest_vmcb->gs.base, src_vmcb->gs.base,
		copy_vmcb->gs.base, msr_vmcb->gs.base, nested_vmcb->gs.base) ||
		ret;
	ret = check_value("KernelGSBase", dest_vmcb->kernel_gs_base,
		src_vmcb->kernel_gs_base, copy_vmcb->kernel_gs_base,
		msr_vmcb->kernel_gs_base, nested_vmcb->kernel_gs_base) || ret;
	ret = check_value("Sysenter_CS", dest_vmcb->sysenter_cs,
		src_vmcb->sysenter_cs, copy_vmcb->sysenter_cs,
		msr_vmcb->sysenter_cs, nested_vmcb->sysenter_cs) || ret;
	ret = check_value("Sysenter_ESP", dest_vmcb->sysenter_esp,
		src_vmcb->sysenter_esp, copy_vmcb->sysenter_esp,
		msr_vmcb->sysenter_esp, nested_vmcb->sysenter_esp) || ret;
	ret = check_value("Sysenter_EIP", dest_vmcb->sysenter_eip,
		src_vmcb->sysenter_eip, copy_vmcb->sysenter_eip,
		msr_vmcb->sysenter_eip, nested_vmcb->sysenter_eip) || ret;

	return ret;
}

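/*
 * Install a new segment descriptor in the first free GDT slot and return
 * its index.
 */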
static int create_segment_descriptor(uint64_t baseaddr, uint32_t limit,
	unsigned int flags)
{
	int ret = kvm_find_free_descriptor(kvm_gdt, KVM_GDT_SIZE);

	if (ret < 0)
		tst_brk(TBROK, "Descriptor table is full");

	kvm_set_segment_descriptor(kvm_gdt + ret, baseaddr, limit, flags);
	return ret;
}

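/*
 * Fill the VMSAVE-relevant fields of a VMCB with dummy non-zero values.
 * Any field that a subsequent VMLOAD or VMSAVE fails to overwrite will
 * then be caught by the value checks.
 */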
static void dirty_vmcb(struct kvm_vmcb *buf)
{
	buf->fs.selector = 0x60;
	buf->fs.attrib = SEGTYPE_RWDATA | SEGFLAG_PRESENT;
	buf->fs.limit = 0xffff;
	buf->fs.base = 0xfff000;
	buf->gs.selector = 0x68;
	buf->gs.attrib = SEGTYPE_RWDATA | SEGFLAG_PRESENT;
	buf->gs.limit = 0xffff;
	buf->gs.base = 0xfff000;
	buf->ldtr.selector = 0x70;
	buf->ldtr.attrib = SEGTYPE_LDT | SEGFLAG_PRESENT;
	buf->ldtr.limit = 0xffff;
	buf->ldtr.base = 0xfff000;
	buf->tr.selector = 0x78;
	buf->tr.attrib = SEGTYPE_TSS | SEGFLAG_PRESENT;
	buf->tr.limit = 0xffff;
	buf->tr.base = 0xfff000;
	buf->star = 0xffff;
	buf->lstar = 0xffff;
	buf->cstar = 0xffff;
	buf->sfmask = 0xffff;
	buf->fs.base = 0xffff;
	buf->gs.base = 0xffff;
	buf->kernel_gs_base = 0xffff;
	buf->sysenter_cs = 0xffff;
	buf->sysenter_esp = 0xffff;
	buf->sysenter_eip = 0xffff;
}

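/*
 * Run vmsave_copy() four times: directly with non-zero and zero source
 * values, then again inside a nested guest, and verify the saved state
 * after each run.
 */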
void main(void)
{
	uint16_t ss;
	uint64_t rsp;
	struct kvm_svm_vcpu *vcpu;
	int data_seg1, data_seg2, ldt_seg, task_seg;
	struct segment_descriptor *ldt;
	struct kvm_vmcb *backup_vmcb, *zero_vmcb;
	unsigned int ldt_size = KVM_GDT_SIZE * sizeof(struct segment_descriptor);

	kvm_init_svm();

	src_vmcb = kvm_alloc_vmcb();
	dest_vmcb = kvm_alloc_vmcb();
	msr_vmcb = kvm_alloc_vmcb();
	backup_vmcb = kvm_alloc_vmcb();
	zero_vmcb = kvm_alloc_vmcb();

	vcpu = kvm_create_svm_vcpu(vmsave_copy, 1);
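	/*
	 * Disable VMLOAD/VMSAVE interception so the nested guest can
	 * execute both instructions directly without triggering a VM exit.
	 */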
	kvm_vmcb_set_intercept(vcpu->vmcb, SVM_INTERCEPT_VMLOAD, 0);
	kvm_vmcb_set_intercept(vcpu->vmcb, SVM_INTERCEPT_VMSAVE, 0);
	/* Save allocated stack for later VM reinit */
	ss = vcpu->vmcb->ss.selector >> 3;
	rsp = vcpu->vmcb->rsp;

	ldt = tst_heap_alloc_aligned(ldt_size, 8);
	memset(ldt, 0, ldt_size);
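	/*
	 * Create test segments in the GDT with distinctive base addresses,
	 * limits and types, then point the source VMCB's segment registers
	 * at them.
	 */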
	data_seg1 = create_segment_descriptor(0xda7a1000, 0x1000,
		SEGTYPE_RODATA | SEGFLAG_PRESENT);
	data_seg2 = create_segment_descriptor(0xda7a2000, 2,
		SEGTYPE_RWDATA | SEGFLAG_PRESENT | SEGFLAG_PAGE_LIMIT);
	ldt_seg = create_segment_descriptor((uintptr_t)ldt, ldt_size,
		SEGTYPE_LDT | SEGFLAG_PRESENT);
	task_seg = create_segment_descriptor(0x7a53000, 0x1000,
		SEGTYPE_TSS | SEGFLAG_PRESENT);
	kvm_vmcb_copy_gdt_descriptor(&src_vmcb->fs, data_seg1);
	kvm_vmcb_copy_gdt_descriptor(&src_vmcb->gs, data_seg2);
	kvm_vmcb_copy_gdt_descriptor(&src_vmcb->ldtr, ldt_seg);
	kvm_vmcb_copy_gdt_descriptor(&src_vmcb->tr, task_seg);

	src_vmcb->star = 0x5742;
	src_vmcb->lstar = 0x15742;
	src_vmcb->cstar = 0xc5742;
	src_vmcb->sfmask = 0xf731;
	src_vmcb->fs.base = 0xf000;
	src_vmcb->gs.base = 0x10000;
	src_vmcb->kernel_gs_base = 0x20000;
	src_vmcb->sysenter_cs = 0x595c5;
	src_vmcb->sysenter_esp = 0x595e50;
	src_vmcb->sysenter_eip = 0x595e10;

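	/*
	 * Back up src_vmcb so check_vmsave_result() can detect whether
	 * VMLOAD/VMSAVE modified the source state.
	 */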
	memcpy(backup_vmcb, src_vmcb, sizeof(struct kvm_vmcb));
	tst_res(TINFO, "VMLOAD/VMSAVE non-zero values");
	vmsave_copy();
	check_vmsave_result(backup_vmcb, NULL);

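	/*
	 * Pre-fill dest_vmcb with dummy values so that saving all-zero
	 * state must actually overwrite every field.
	 */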
	memset(src_vmcb, 0, sizeof(struct kvm_vmcb));
	tst_res(TINFO, "VMLOAD/VMSAVE zero values");
	dirty_vmcb(dest_vmcb);
	vmsave_copy();
	check_vmsave_result(zero_vmcb, NULL);

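	/*
	 * Dirty the guest register state so the nested VMLOAD/VMSAVE pass
	 * cannot succeed by merely inheriting the host values.
	 */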
	memcpy(src_vmcb, backup_vmcb, sizeof(struct kvm_vmcb));
	tst_res(TINFO, "Nested VMLOAD/VMSAVE non-zero values");
	dirty_vmcb(vcpu->vmcb);
	memset(dest_vmcb, 0, sizeof(struct kvm_vmcb));
	kvm_svm_vmrun(vcpu);

	if (vcpu->vmcb->exitcode != SVM_EXIT_HLT)
		tst_brk(TBROK, "Nested VM exited unexpectedly");

	check_vmsave_result(backup_vmcb, vcpu->vmcb);

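	/*
	 * Reinitialize the guest VMCB for the second nested run, reusing
	 * the stack saved earlier, and disable the intercepts again.
	 */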
	memset(src_vmcb, 0, sizeof(struct kvm_vmcb));
	tst_res(TINFO, "Nested VMLOAD/VMSAVE zero values");
	kvm_init_guest_vmcb(vcpu->vmcb, 1, ss, (void *)rsp, vmsave_copy);
	kvm_vmcb_set_intercept(vcpu->vmcb, SVM_INTERCEPT_VMLOAD, 0);
	kvm_vmcb_set_intercept(vcpu->vmcb, SVM_INTERCEPT_VMSAVE, 0);
	dirty_vmcb(vcpu->vmcb);
	kvm_svm_vmrun(vcpu);

	if (vcpu->vmcb->exitcode != SVM_EXIT_HLT)
		tst_brk(TBROK, "Nested VM exited unexpectedly");

	check_vmsave_result(zero_vmcb, vcpu->vmcb);
}

#else /* defined(__i386__) || defined(__x86_64__) */
TST_TEST_TCONF("Test supported only on x86");
#endif /* defined(__i386__) || defined(__x86_64__) */

#else /* COMPILE_PAYLOAD */

static struct tst_test test = {
	.test_all = tst_kvm_run,
	.setup = tst_kvm_setup,
	.cleanup = tst_kvm_cleanup,
	.supported_archs = (const char *const []) {
		"x86_64",
		"x86",
		NULL
	},
};

#endif /* COMPILE_PAYLOAD */