1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3 * Copyright (C) 2018, Google LLC.
4 */
5
6 #ifndef SELFTEST_KVM_VMX_H
7 #define SELFTEST_KVM_VMX_H
8
9 #include <asm/vmx.h>
10
11 #include <stdint.h>
12 #include "processor.h"
13 #include "apic.h"
14
15 /*
16 * Definitions of Primary Processor-Based VM-Execution Controls.
17 */
18 #define CPU_BASED_INTR_WINDOW_EXITING 0x00000004
19 #define CPU_BASED_USE_TSC_OFFSETTING 0x00000008
20 #define CPU_BASED_HLT_EXITING 0x00000080
21 #define CPU_BASED_INVLPG_EXITING 0x00000200
22 #define CPU_BASED_MWAIT_EXITING 0x00000400
23 #define CPU_BASED_RDPMC_EXITING 0x00000800
24 #define CPU_BASED_RDTSC_EXITING 0x00001000
25 #define CPU_BASED_CR3_LOAD_EXITING 0x00008000
26 #define CPU_BASED_CR3_STORE_EXITING 0x00010000
27 #define CPU_BASED_CR8_LOAD_EXITING 0x00080000
28 #define CPU_BASED_CR8_STORE_EXITING 0x00100000
29 #define CPU_BASED_TPR_SHADOW 0x00200000
30 #define CPU_BASED_NMI_WINDOW_EXITING 0x00400000
31 #define CPU_BASED_MOV_DR_EXITING 0x00800000
32 #define CPU_BASED_UNCOND_IO_EXITING 0x01000000
33 #define CPU_BASED_USE_IO_BITMAPS 0x02000000
34 #define CPU_BASED_MONITOR_TRAP 0x08000000
35 #define CPU_BASED_USE_MSR_BITMAPS 0x10000000
36 #define CPU_BASED_MONITOR_EXITING 0x20000000
37 #define CPU_BASED_PAUSE_EXITING 0x40000000
38 #define CPU_BASED_ACTIVATE_SECONDARY_CONTROLS 0x80000000
39
40 #define CPU_BASED_ALWAYSON_WITHOUT_TRUE_MSR 0x0401e172
41
42 /*
43 * Definitions of Secondary Processor-Based VM-Execution Controls.
44 */
45 #define SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001
46 #define SECONDARY_EXEC_ENABLE_EPT 0x00000002
47 #define SECONDARY_EXEC_DESC 0x00000004
48 #define SECONDARY_EXEC_ENABLE_RDTSCP 0x00000008
49 #define SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE 0x00000010
50 #define SECONDARY_EXEC_ENABLE_VPID 0x00000020
51 #define SECONDARY_EXEC_WBINVD_EXITING 0x00000040
52 #define SECONDARY_EXEC_UNRESTRICTED_GUEST 0x00000080
53 #define SECONDARY_EXEC_APIC_REGISTER_VIRT 0x00000100
54 #define SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY 0x00000200
55 #define SECONDARY_EXEC_PAUSE_LOOP_EXITING 0x00000400
56 #define SECONDARY_EXEC_RDRAND_EXITING 0x00000800
57 #define SECONDARY_EXEC_ENABLE_INVPCID 0x00001000
58 #define SECONDARY_EXEC_ENABLE_VMFUNC 0x00002000
59 #define SECONDARY_EXEC_SHADOW_VMCS 0x00004000
60 #define SECONDARY_EXEC_RDSEED_EXITING 0x00010000
61 #define SECONDARY_EXEC_ENABLE_PML 0x00020000
62 #define SECONDARY_EPT_VE 0x00040000
63 #define SECONDARY_ENABLE_XSAV_RESTORE 0x00100000
64 #define SECONDARY_EXEC_TSC_SCALING 0x02000000
65
66 #define PIN_BASED_EXT_INTR_MASK 0x00000001
67 #define PIN_BASED_NMI_EXITING 0x00000008
68 #define PIN_BASED_VIRTUAL_NMIS 0x00000020
69 #define PIN_BASED_VMX_PREEMPTION_TIMER 0x00000040
70 #define PIN_BASED_POSTED_INTR 0x00000080
71
72 #define PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR 0x00000016
73
74 #define VM_EXIT_SAVE_DEBUG_CONTROLS 0x00000004
75 #define VM_EXIT_HOST_ADDR_SPACE_SIZE 0x00000200
76 #define VM_EXIT_LOAD_IA32_PERF_GLOBAL_CTRL 0x00001000
77 #define VM_EXIT_ACK_INTR_ON_EXIT 0x00008000
78 #define VM_EXIT_SAVE_IA32_PAT 0x00040000
79 #define VM_EXIT_LOAD_IA32_PAT 0x00080000
80 #define VM_EXIT_SAVE_IA32_EFER 0x00100000
81 #define VM_EXIT_LOAD_IA32_EFER 0x00200000
82 #define VM_EXIT_SAVE_VMX_PREEMPTION_TIMER 0x00400000
83
84 #define VM_EXIT_ALWAYSON_WITHOUT_TRUE_MSR 0x00036dff
85
86 #define VM_ENTRY_LOAD_DEBUG_CONTROLS 0x00000004
87 #define VM_ENTRY_IA32E_MODE 0x00000200
88 #define VM_ENTRY_SMM 0x00000400
89 #define VM_ENTRY_DEACT_DUAL_MONITOR 0x00000800
90 #define VM_ENTRY_LOAD_IA32_PERF_GLOBAL_CTRL 0x00002000
91 #define VM_ENTRY_LOAD_IA32_PAT 0x00004000
92 #define VM_ENTRY_LOAD_IA32_EFER 0x00008000
93
94 #define VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR 0x000011ff
95
96 #define VMX_MISC_PREEMPTION_TIMER_RATE_MASK 0x0000001f
97 #define VMX_MISC_SAVE_EFER_LMA 0x00000020
98
99 #define VMX_EPT_VPID_CAP_1G_PAGES 0x00020000
100 #define VMX_EPT_VPID_CAP_AD_BITS 0x00200000
101
102 #define EXIT_REASON_FAILED_VMENTRY 0x80000000
103
/*
 * VMCS field encodings (Intel SDM, "VMCS Field Encoding").  Grouped by
 * field width and by guest/host ownership; a *_HIGH encoding accesses
 * the upper 32 bits of the corresponding 64-bit field.
 *
 * All literals use a consistent lowercase "0x"/lowercase-digit style
 * (the original mixed 0X/0x prefixes and upper/lower hex digits).
 */
enum vmcs_field {
	/* 16-bit control fields. */
	VIRTUAL_PROCESSOR_ID		= 0x00000000,
	POSTED_INTR_NV			= 0x00000002,
	/* 16-bit guest-state fields. */
	GUEST_ES_SELECTOR		= 0x00000800,
	GUEST_CS_SELECTOR		= 0x00000802,
	GUEST_SS_SELECTOR		= 0x00000804,
	GUEST_DS_SELECTOR		= 0x00000806,
	GUEST_FS_SELECTOR		= 0x00000808,
	GUEST_GS_SELECTOR		= 0x0000080a,
	GUEST_LDTR_SELECTOR		= 0x0000080c,
	GUEST_TR_SELECTOR		= 0x0000080e,
	GUEST_INTR_STATUS		= 0x00000810,
	GUEST_PML_INDEX			= 0x00000812,
	/* 16-bit host-state fields. */
	HOST_ES_SELECTOR		= 0x00000c00,
	HOST_CS_SELECTOR		= 0x00000c02,
	HOST_SS_SELECTOR		= 0x00000c04,
	HOST_DS_SELECTOR		= 0x00000c06,
	HOST_FS_SELECTOR		= 0x00000c08,
	HOST_GS_SELECTOR		= 0x00000c0a,
	HOST_TR_SELECTOR		= 0x00000c0c,
	/* 64-bit control fields. */
	IO_BITMAP_A			= 0x00002000,
	IO_BITMAP_A_HIGH		= 0x00002001,
	IO_BITMAP_B			= 0x00002002,
	IO_BITMAP_B_HIGH		= 0x00002003,
	MSR_BITMAP			= 0x00002004,
	MSR_BITMAP_HIGH			= 0x00002005,
	VM_EXIT_MSR_STORE_ADDR		= 0x00002006,
	VM_EXIT_MSR_STORE_ADDR_HIGH	= 0x00002007,
	VM_EXIT_MSR_LOAD_ADDR		= 0x00002008,
	VM_EXIT_MSR_LOAD_ADDR_HIGH	= 0x00002009,
	VM_ENTRY_MSR_LOAD_ADDR		= 0x0000200a,
	VM_ENTRY_MSR_LOAD_ADDR_HIGH	= 0x0000200b,
	PML_ADDRESS			= 0x0000200e,
	PML_ADDRESS_HIGH		= 0x0000200f,
	TSC_OFFSET			= 0x00002010,
	TSC_OFFSET_HIGH			= 0x00002011,
	VIRTUAL_APIC_PAGE_ADDR		= 0x00002012,
	VIRTUAL_APIC_PAGE_ADDR_HIGH	= 0x00002013,
	APIC_ACCESS_ADDR		= 0x00002014,
	APIC_ACCESS_ADDR_HIGH		= 0x00002015,
	POSTED_INTR_DESC_ADDR		= 0x00002016,
	POSTED_INTR_DESC_ADDR_HIGH	= 0x00002017,
	EPT_POINTER			= 0x0000201a,
	EPT_POINTER_HIGH		= 0x0000201b,
	EOI_EXIT_BITMAP0		= 0x0000201c,
	EOI_EXIT_BITMAP0_HIGH		= 0x0000201d,
	EOI_EXIT_BITMAP1		= 0x0000201e,
	EOI_EXIT_BITMAP1_HIGH		= 0x0000201f,
	EOI_EXIT_BITMAP2		= 0x00002020,
	EOI_EXIT_BITMAP2_HIGH		= 0x00002021,
	EOI_EXIT_BITMAP3		= 0x00002022,
	EOI_EXIT_BITMAP3_HIGH		= 0x00002023,
	VMREAD_BITMAP			= 0x00002026,
	VMREAD_BITMAP_HIGH		= 0x00002027,
	VMWRITE_BITMAP			= 0x00002028,
	VMWRITE_BITMAP_HIGH		= 0x00002029,
	XSS_EXIT_BITMAP			= 0x0000202c,
	XSS_EXIT_BITMAP_HIGH		= 0x0000202d,
	ENCLS_EXITING_BITMAP		= 0x0000202e,
	ENCLS_EXITING_BITMAP_HIGH	= 0x0000202f,
	TSC_MULTIPLIER			= 0x00002032,
	TSC_MULTIPLIER_HIGH		= 0x00002033,
	/* 64-bit read-only data field. */
	GUEST_PHYSICAL_ADDRESS		= 0x00002400,
	GUEST_PHYSICAL_ADDRESS_HIGH	= 0x00002401,
	/* 64-bit guest-state fields. */
	VMCS_LINK_POINTER		= 0x00002800,
	VMCS_LINK_POINTER_HIGH		= 0x00002801,
	GUEST_IA32_DEBUGCTL		= 0x00002802,
	GUEST_IA32_DEBUGCTL_HIGH	= 0x00002803,
	GUEST_IA32_PAT			= 0x00002804,
	GUEST_IA32_PAT_HIGH		= 0x00002805,
	GUEST_IA32_EFER			= 0x00002806,
	GUEST_IA32_EFER_HIGH		= 0x00002807,
	GUEST_IA32_PERF_GLOBAL_CTRL	= 0x00002808,
	GUEST_IA32_PERF_GLOBAL_CTRL_HIGH= 0x00002809,
	GUEST_PDPTR0			= 0x0000280a,
	GUEST_PDPTR0_HIGH		= 0x0000280b,
	GUEST_PDPTR1			= 0x0000280c,
	GUEST_PDPTR1_HIGH		= 0x0000280d,
	GUEST_PDPTR2			= 0x0000280e,
	GUEST_PDPTR2_HIGH		= 0x0000280f,
	GUEST_PDPTR3			= 0x00002810,
	GUEST_PDPTR3_HIGH		= 0x00002811,
	GUEST_BNDCFGS			= 0x00002812,
	GUEST_BNDCFGS_HIGH		= 0x00002813,
	/* 64-bit host-state fields. */
	HOST_IA32_PAT			= 0x00002c00,
	HOST_IA32_PAT_HIGH		= 0x00002c01,
	HOST_IA32_EFER			= 0x00002c02,
	HOST_IA32_EFER_HIGH		= 0x00002c03,
	HOST_IA32_PERF_GLOBAL_CTRL	= 0x00002c04,
	HOST_IA32_PERF_GLOBAL_CTRL_HIGH	= 0x00002c05,
	/* 32-bit control fields. */
	PIN_BASED_VM_EXEC_CONTROL	= 0x00004000,
	CPU_BASED_VM_EXEC_CONTROL	= 0x00004002,
	EXCEPTION_BITMAP		= 0x00004004,
	PAGE_FAULT_ERROR_CODE_MASK	= 0x00004006,
	PAGE_FAULT_ERROR_CODE_MATCH	= 0x00004008,
	CR3_TARGET_COUNT		= 0x0000400a,
	VM_EXIT_CONTROLS		= 0x0000400c,
	VM_EXIT_MSR_STORE_COUNT		= 0x0000400e,
	VM_EXIT_MSR_LOAD_COUNT		= 0x00004010,
	VM_ENTRY_CONTROLS		= 0x00004012,
	VM_ENTRY_MSR_LOAD_COUNT		= 0x00004014,
	VM_ENTRY_INTR_INFO_FIELD	= 0x00004016,
	VM_ENTRY_EXCEPTION_ERROR_CODE	= 0x00004018,
	VM_ENTRY_INSTRUCTION_LEN	= 0x0000401a,
	TPR_THRESHOLD			= 0x0000401c,
	SECONDARY_VM_EXEC_CONTROL	= 0x0000401e,
	PLE_GAP				= 0x00004020,
	PLE_WINDOW			= 0x00004022,
	/* 32-bit read-only data fields. */
	VM_INSTRUCTION_ERROR		= 0x00004400,
	VM_EXIT_REASON			= 0x00004402,
	VM_EXIT_INTR_INFO		= 0x00004404,
	VM_EXIT_INTR_ERROR_CODE		= 0x00004406,
	IDT_VECTORING_INFO_FIELD	= 0x00004408,
	IDT_VECTORING_ERROR_CODE	= 0x0000440a,
	VM_EXIT_INSTRUCTION_LEN		= 0x0000440c,
	VMX_INSTRUCTION_INFO		= 0x0000440e,
	/* 32-bit guest-state fields. */
	GUEST_ES_LIMIT			= 0x00004800,
	GUEST_CS_LIMIT			= 0x00004802,
	GUEST_SS_LIMIT			= 0x00004804,
	GUEST_DS_LIMIT			= 0x00004806,
	GUEST_FS_LIMIT			= 0x00004808,
	GUEST_GS_LIMIT			= 0x0000480a,
	GUEST_LDTR_LIMIT		= 0x0000480c,
	GUEST_TR_LIMIT			= 0x0000480e,
	GUEST_GDTR_LIMIT		= 0x00004810,
	GUEST_IDTR_LIMIT		= 0x00004812,
	GUEST_ES_AR_BYTES		= 0x00004814,
	GUEST_CS_AR_BYTES		= 0x00004816,
	GUEST_SS_AR_BYTES		= 0x00004818,
	GUEST_DS_AR_BYTES		= 0x0000481a,
	GUEST_FS_AR_BYTES		= 0x0000481c,
	GUEST_GS_AR_BYTES		= 0x0000481e,
	GUEST_LDTR_AR_BYTES		= 0x00004820,
	GUEST_TR_AR_BYTES		= 0x00004822,
	GUEST_INTERRUPTIBILITY_INFO	= 0x00004824,
	GUEST_ACTIVITY_STATE		= 0x00004826,
	GUEST_SYSENTER_CS		= 0x0000482a,
	VMX_PREEMPTION_TIMER_VALUE	= 0x0000482e,
	/* 32-bit host-state field. */
	HOST_IA32_SYSENTER_CS		= 0x00004c00,
	/* Natural-width control fields. */
	CR0_GUEST_HOST_MASK		= 0x00006000,
	CR4_GUEST_HOST_MASK		= 0x00006002,
	CR0_READ_SHADOW			= 0x00006004,
	CR4_READ_SHADOW			= 0x00006006,
	CR3_TARGET_VALUE0		= 0x00006008,
	CR3_TARGET_VALUE1		= 0x0000600a,
	CR3_TARGET_VALUE2		= 0x0000600c,
	CR3_TARGET_VALUE3		= 0x0000600e,
	/* Natural-width read-only data fields. */
	EXIT_QUALIFICATION		= 0x00006400,
	GUEST_LINEAR_ADDRESS		= 0x0000640a,
	/* Natural-width guest-state fields. */
	GUEST_CR0			= 0x00006800,
	GUEST_CR3			= 0x00006802,
	GUEST_CR4			= 0x00006804,
	GUEST_ES_BASE			= 0x00006806,
	GUEST_CS_BASE			= 0x00006808,
	GUEST_SS_BASE			= 0x0000680a,
	GUEST_DS_BASE			= 0x0000680c,
	GUEST_FS_BASE			= 0x0000680e,
	GUEST_GS_BASE			= 0x00006810,
	GUEST_LDTR_BASE			= 0x00006812,
	GUEST_TR_BASE			= 0x00006814,
	GUEST_GDTR_BASE			= 0x00006816,
	GUEST_IDTR_BASE			= 0x00006818,
	GUEST_DR7			= 0x0000681a,
	GUEST_RSP			= 0x0000681c,
	GUEST_RIP			= 0x0000681e,
	GUEST_RFLAGS			= 0x00006820,
	GUEST_PENDING_DBG_EXCEPTIONS	= 0x00006822,
	GUEST_SYSENTER_ESP		= 0x00006824,
	GUEST_SYSENTER_EIP		= 0x00006826,
	/* Natural-width host-state fields. */
	HOST_CR0			= 0x00006c00,
	HOST_CR3			= 0x00006c02,
	HOST_CR4			= 0x00006c04,
	HOST_FS_BASE			= 0x00006c06,
	HOST_GS_BASE			= 0x00006c08,
	HOST_TR_BASE			= 0x00006c0a,
	HOST_GDTR_BASE			= 0x00006c0c,
	HOST_IDTR_BASE			= 0x00006c0e,
	HOST_IA32_SYSENTER_ESP		= 0x00006c10,
	HOST_IA32_SYSENTER_EIP		= 0x00006c12,
	HOST_RSP			= 0x00006c14,
	HOST_RIP			= 0x00006c16,
};
286
/*
 * One entry of a VM-entry/VM-exit MSR-load/store list: 128 bits, MSR
 * index in the low dword, the next dword reserved, then the 64-bit MSR
 * value.  The 16-byte alignment matches the layout the CPU consumes
 * via the VMCS *_MSR_*_ADDR fields.
 */
struct vmx_msr_entry {
	uint32_t index;		/* MSR number */
	uint32_t reserved;	/* must be zero */
	uint64_t value;		/* MSR contents to load/store */
} __attribute__ ((aligned(16)));
292
293 #include "evmcs.h"
294
/*
 * Enter VMX root operation, using the VMXON region at physical address
 * @phys.  Returns 0 on success; non-zero if VMXON failed (setna latches
 * CF/ZF, covering both VMfailInvalid and VMfailValid).
 */
static inline int vmxon(uint64_t phys)
{
	uint8_t ret;

	__asm__ __volatile__ ("vmxon %[pa]; setna %[ret]"
		: [ret]"=rm"(ret)
		: [pa]"m"(phys)
		: "cc", "memory");

	return ret;
}
306
/* Leave VMX operation.  Any VMfail indication from VMXOFF is ignored. */
static inline void vmxoff(void)
{
	__asm__ __volatile__("vmxoff");
}
311
/*
 * Clear (deactivate and flush to memory) the VMCS at physical address
 * @vmcs_pa.  Returns 0 on success, non-zero if VMCLEAR failed.
 */
static inline int vmclear(uint64_t vmcs_pa)
{
	uint8_t ret;

	__asm__ __volatile__ ("vmclear %[pa]; setna %[ret]"
		: [ret]"=rm"(ret)
		: [pa]"m"(vmcs_pa)
		: "cc", "memory");

	return ret;
}
323
/*
 * Make the VMCS at physical address @vmcs_pa current and active.
 * Returns 0 on success, non-zero on VMPTRLD failure.  Deliberately
 * fails with -1 when enlightened VMCS is in use, as VMPTRLD must not
 * be executed in that mode.
 */
static inline int vmptrld(uint64_t vmcs_pa)
{
	uint8_t ret;

	if (enable_evmcs)
		return -1;

	__asm__ __volatile__ ("vmptrld %[pa]; setna %[ret]"
		: [ret]"=rm"(ret)
		: [pa]"m"(vmcs_pa)
		: "cc", "memory");

	return ret;
}
338
/*
 * Store the current-VMCS pointer into *@value.  Returns 0 on success,
 * non-zero on failure.  With enlightened VMCS enabled, delegates to
 * the eVMCS implementation instead of executing VMPTRST.
 */
static inline int vmptrst(uint64_t *value)
{
	uint64_t tmp;
	uint8_t ret;

	if (enable_evmcs)
		return evmcs_vmptrst(value);

	/* VMPTRST writes a local, which is copied out only afterwards. */
	__asm__ __volatile__("vmptrst %[value]; setna %[ret]"
		: [value]"=m"(tmp), [ret]"=rm"(ret)
		: : "cc", "memory");

	*value = tmp;
	return ret;
}
354
/*
 * Read the current-VMCS pointer, swallowing any error: a failed
 * vmptrst simply leaves the result at zero.
 */
static inline uint64_t vmptrstz(void)
{
	uint64_t vmcs_pa = 0;

	(void)vmptrst(&vmcs_pa);
	return vmcs_pa;
}
365
/*
 * No guest state (e.g. GPRs) is established by this vmlaunch.
 *
 * Returns 0 if VMLAUNCH succeeded (i.e. we came back via a VM-exit to
 * label 1), non-zero if the VMLAUNCH instruction itself failed.
 */
static inline int vmlaunch(void)
{
	int ret;

	if (enable_evmcs)
		return evmcs_vmlaunch();

	/*
	 * Save the GPRs the compiler expects preserved, push a zero
	 * "failure flag" slot, then point HOST_RSP/HOST_RIP at the
	 * current stack and label 1 so a VM-exit resumes right below.
	 * If VMLAUNCH fails, execution falls through and incq turns the
	 * flag into 1; either way the flag is popped into RAX (= ret).
	 */
	__asm__ __volatile__("push %%rbp;"
			     "push %%rcx;"
			     "push %%rdx;"
			     "push %%rsi;"
			     "push %%rdi;"
			     "push $0;"
			     "vmwrite %%rsp, %[host_rsp];"
			     "lea 1f(%%rip), %%rax;"
			     "vmwrite %%rax, %[host_rip];"
			     "vmlaunch;"
			     "incq (%%rsp);"
			     "1: pop %%rax;"
			     "pop %%rdi;"
			     "pop %%rsi;"
			     "pop %%rdx;"
			     "pop %%rcx;"
			     "pop %%rbp;"
			     : [ret]"=&a"(ret)
			     : [host_rsp]"r"((uint64_t)HOST_RSP),
			       [host_rip]"r"((uint64_t)HOST_RIP)
			     : "memory", "cc", "rbx", "r8", "r9", "r10",
			       "r11", "r12", "r13", "r14", "r15");
	return ret;
}
400
/*
 * No guest state (e.g. GPRs) is established by this vmresume.
 *
 * Identical in structure to vmlaunch() above, but executes VMRESUME to
 * re-enter an already-launched VMCS.  Returns 0 on a successful entry
 * (control came back via VM-exit), non-zero if VMRESUME itself failed.
 */
static inline int vmresume(void)
{
	int ret;

	if (enable_evmcs)
		return evmcs_vmresume();

	/* See vmlaunch() for an explanation of the stack protocol. */
	__asm__ __volatile__("push %%rbp;"
			     "push %%rcx;"
			     "push %%rdx;"
			     "push %%rsi;"
			     "push %%rdi;"
			     "push $0;"
			     "vmwrite %%rsp, %[host_rsp];"
			     "lea 1f(%%rip), %%rax;"
			     "vmwrite %%rax, %[host_rip];"
			     "vmresume;"
			     "incq (%%rsp);"
			     "1: pop %%rax;"
			     "pop %%rdi;"
			     "pop %%rsi;"
			     "pop %%rdx;"
			     "pop %%rcx;"
			     "pop %%rbp;"
			     : [ret]"=&a"(ret)
			     : [host_rsp]"r"((uint64_t)HOST_RSP),
			       [host_rip]"r"((uint64_t)HOST_RIP)
			     : "memory", "cc", "rbx", "r8", "r9", "r10",
			       "r11", "r12", "r13", "r14", "r15");
	return ret;
}
435
/* Execute VMCALL from L2 to force a VM-exit to L1. */
static inline void vmcall(void)
{
	/*
	 * Stuff RAX and RCX with "safe" values to make sure L0 doesn't handle
	 * it as a valid hypercall (e.g. Hyper-V L2 TLB flush) as the intended
	 * use of this function is to exit to L1 from L2.  Clobber all other
	 * GPRs as L1 doesn't correctly preserve them during vmexits.
	 */
	__asm__ __volatile__("push %%rbp; vmcall; pop %%rbp"
			     : : "a"(0xdeadbeef), "c"(0xbeefdead)
			     : "rbx", "rdx", "rsi", "rdi", "r8", "r9",
			       "r10", "r11", "r12", "r13", "r14", "r15");
}
449
/*
 * Read the VMCS field identified by @encoding into *@value.  Returns 0
 * on success, non-zero on VMREAD failure.  With enlightened VMCS
 * enabled, reads from the eVMCS page instead.
 */
static inline int vmread(uint64_t encoding, uint64_t *value)
{
	uint64_t tmp;
	uint8_t ret;

	if (enable_evmcs)
		return evmcs_vmread(encoding, value);

	/* VMREAD targets a local; *value is only updated afterwards. */
	__asm__ __volatile__("vmread %[encoding], %[value]; setna %[ret]"
		: [value]"=rm"(tmp), [ret]"=rm"(ret)
		: [encoding]"r"(encoding)
		: "cc", "memory");

	*value = tmp;
	return ret;
}
466
/*
 * Like vmread(), but discards the error indication: if the read fails,
 * the caller simply gets zero back.
 */
static inline uint64_t vmreadz(uint64_t encoding)
{
	uint64_t field_val = 0;

	(void)vmread(encoding, &field_val);
	return field_val;
}
477
/*
 * Write @value to the VMCS field identified by @encoding.  Returns 0
 * on success, non-zero on VMWRITE failure.  With enlightened VMCS
 * enabled, writes to the eVMCS page instead.
 */
static inline int vmwrite(uint64_t encoding, uint64_t value)
{
	uint8_t ret;

	if (enable_evmcs)
		return evmcs_vmwrite(encoding, value);

	__asm__ __volatile__ ("vmwrite %[value], %[encoding]; setna %[ret]"
		: [ret]"=rm"(ret)
		: [value]"rm"(value), [encoding]"r"(encoding)
		: "cc", "memory");

	return ret;
}
492
/*
 * VMCS revision identifier: the low 32 bits of IA32_VMX_BASIC (the
 * 64-bit rdmsr() result is truncated by the uint32_t return type).
 */
static inline uint32_t vmcs_revision(void)
{
	return rdmsr(MSR_IA32_VMX_BASIC);
}
497
/*
 * Backing pages for nested-VMX state.  Each page is tracked three
 * ways: *_hva is the host-userspace mapping, *_gpa the guest-physical
 * address, and the bare pointer is presumably the guest-virtual
 * address used by guest code (NOTE(review): inferred from
 * vcpu_alloc_vmx() returning this struct via a vm_vaddr_t — confirm).
 */
struct vmx_pages {
	/* VMXON region handed to vmxon(). */
	void *vmxon_hva;
	uint64_t vmxon_gpa;
	void *vmxon;

	/* VMCS for the L2 guest. */
	void *vmcs_hva;
	uint64_t vmcs_gpa;
	void *vmcs;

	/* MSR bitmap page. */
	void *msr_hva;
	uint64_t msr_gpa;
	void *msr;

	/* Shadow VMCS, for VMCS-shadowing tests. */
	void *shadow_vmcs_hva;
	uint64_t shadow_vmcs_gpa;
	void *shadow_vmcs;

	/* VMREAD bitmap page. */
	void *vmread_hva;
	uint64_t vmread_gpa;
	void *vmread;

	/* VMWRITE bitmap page. */
	void *vmwrite_hva;
	uint64_t vmwrite_gpa;
	void *vmwrite;

	/* EPT PML4 root, see prepare_eptp(). */
	void *eptp_hva;
	uint64_t eptp_gpa;
	void *eptp;

	/* APIC-access page, see prepare_virtualize_apic_accesses(). */
	void *apic_access_hva;
	uint64_t apic_access_gpa;
	void *apic_access;
};
531
/*
 * Decoded view of the IA32_VMX_BASIC capability MSR.  Field layout
 * follows the SDM: revision id in bits 31:0, VMXON/VMCS region size in
 * bits 44:32, then the feature bits (physical-address width, dual
 * monitor, VMCS memory type, INS/OUTS exit info, "true" controls).
 */
union vmx_basic {
	u64 val;
	struct {
		u32 revision;
		u32	size:13,
			reserved1:3,
			width:1,
			dual:1,
			type:4,
			insouts:1,
			ctrl:1,
			vm_entry_exception_ctrl:1,
			reserved2:7;
	};
};
547
/*
 * Decoded view of a VMX control capability MSR: low dword ("set") and
 * high dword ("clr").  NOTE(review): by SDM convention the low half is
 * the allowed-0 settings and the high half the allowed-1 settings —
 * confirm against the code that consumes this union.
 */
union vmx_ctrl_msr {
	u64 val;
	struct {
		u32 set, clr;
	};
};
554
/* Allocate and map the vmx_pages set; returns it, guest VA via *p_vmx_gva. */
struct vmx_pages *vcpu_alloc_vmx(struct kvm_vm *vm, vm_vaddr_t *p_vmx_gva);
/* Enable VMX (CR4 etc.) and execute VMXON; false on failure. */
bool prepare_for_vmx_operation(struct vmx_pages *vmx);
/* Initialize the current VMCS for a guest starting at guest_rip/guest_rsp. */
void prepare_vmcs(struct vmx_pages *vmx, void *guest_rip, void *guest_rsp);
/* VMCLEAR + VMPTRLD the VMCS in @vmx; false on failure. */
bool load_vmcs(struct vmx_pages *vmx);

/* True if EPT supports 1G mappings (VMX_EPT_VPID_CAP_1G_PAGES). */
bool ept_1g_pages_supported(void);

/* EPT mapping helpers: build nested (guest-physical -> host-physical)
 * translations in the tables rooted at vmx->eptp. */
void nested_pg_map(struct vmx_pages *vmx, struct kvm_vm *vm,
		   uint64_t nested_paddr, uint64_t paddr);
void nested_map(struct vmx_pages *vmx, struct kvm_vm *vm,
		uint64_t nested_paddr, uint64_t paddr, uint64_t size);
void nested_map_memslot(struct vmx_pages *vmx, struct kvm_vm *vm,
			uint32_t memslot);
void nested_identity_map_1g(struct vmx_pages *vmx, struct kvm_vm *vm,
			    uint64_t addr, uint64_t size);
/* True if KVM reports EPT support. */
bool kvm_cpu_has_ept(void);
/* Allocate the EPT root page for @vmx in @eptp_memslot. */
void prepare_eptp(struct vmx_pages *vmx, struct kvm_vm *vm,
		  uint32_t eptp_memslot);
/* Allocate/map the APIC-access page for virtualize-APIC-accesses tests. */
void prepare_virtualize_apic_accesses(struct vmx_pages *vmx, struct kvm_vm *vm);
574
575 #endif /* SELFTEST_KVM_VMX_H */
576