/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2020, Red Hat, Inc.
 */

#ifndef SELFTEST_KVM_SVM_UTILS_H
#define SELFTEST_KVM_SVM_UTILS_H

#include <asm/svm.h>

#include <stdint.h>
#include "svm.h"
#include "processor.h"

struct svm_test_data {
	/* VMCB */
	struct vmcb *vmcb; /* gva */
	void *vmcb_hva;
	uint64_t vmcb_gpa;

	/* host state-save area */
	struct vmcb_save_area *save_area; /* gva */
	void *save_area_hva;
	uint64_t save_area_gpa;

	/* MSR-Bitmap */
	void *msr; /* gva */
	void *msr_hva;
	uint64_t msr_gpa;
};

static inline void vmmcall(void)
{
	/*
	 * Stuff RAX and RCX with "safe" values to make sure L0 doesn't handle
	 * it as a valid hypercall (e.g. Hyper-V L2 TLB flush) as the intended
	 * use of this function is to exit to L1 from L2.  Clobber all other
	 * GPRs as L1 doesn't correctly preserve them during vmexits.
	 */
	__asm__ __volatile__("push %%rbp; vmmcall; pop %%rbp"
			     : : "a"(0xdeadbeef), "c"(0xbeefdead)
			     : "rbx", "rdx", "rsi", "rdi", "r8", "r9",
			       "r10", "r11", "r12", "r13", "r14", "r15");
}
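
/*
 * Illustrative sketch only (not part of this header): in a typical nested
 * SVM selftest, the L2 guest body calls vmmcall() to hand control back to
 * L1, which then observes an SVM_EXIT_VMMCALL exit code.  The function
 * name below is a hypothetical example.
 *
 *	static void l2_guest_code(void)
 *	{
 *		vmmcall();	// #VMEXIT to L1, exit_code == SVM_EXIT_VMMCALL
 *	}
 */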

#define stgi()			\
	__asm__ __volatile__(	\
		"stgi\n"	\
		)

#define clgi()			\
	__asm__ __volatile__(	\
		"clgi\n"	\
		)

struct svm_test_data *vcpu_alloc_svm(struct kvm_vm *vm, vm_vaddr_t *p_svm_gva);
void generic_svm_setup(struct svm_test_data *svm, void *guest_rip, void *guest_rsp);
void run_guest(struct vmcb *vmcb, uint64_t vmcb_gpa);
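
/*
 * Illustrative sketch only, based on the declarations above: host code
 * typically calls vcpu_alloc_svm() and passes the returned guest virtual
 * address to the L1 guest, which sets up and enters L2 and then checks the
 * exit code.  The names l1_guest_code, l2_guest_code and
 * L2_GUEST_STACK_SIZE are assumptions for the example, not part of this
 * header; GUEST_ASSERT is the selftest ucall assertion helper.
 *
 *	static void l1_guest_code(struct svm_test_data *svm)
 *	{
 *		unsigned long l2_stack[L2_GUEST_STACK_SIZE];
 *
 *		generic_svm_setup(svm, l2_guest_code,
 *				  &l2_stack[L2_GUEST_STACK_SIZE]);
 *		run_guest(svm->vmcb, svm->vmcb_gpa);
 *
 *		GUEST_ASSERT(svm->vmcb->control.exit_code == SVM_EXIT_VMMCALL);
 *	}
 */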

int open_sev_dev_path_or_exit(void);

#endif /* SELFTEST_KVM_SVM_UTILS_H */