// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020, Google LLC.
 *
 * Tests for KVM_CAP_EXIT_ON_EMULATION_FAILURE capability.
 */

#define _GNU_SOURCE /* for program_invocation_short_name */

#include "test_util.h"
#include "kvm_util.h"
#include "vmx.h"

#define MAXPHYADDR 36

#define MEM_REGION_GVA	0x0000123456789000
#define MEM_REGION_GPA	0x0000000700000000
#define MEM_REGION_SLOT	10
#define MEM_REGION_SIZE	PAGE_SIZE

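/*
 * The "flds" load from MEM_REGION_GVA forces KVM to emulate the access (see
 * the PTE setup in main()).  KVM's emulator doesn't support flds, so the
 * emulation fails and, with KVM_CAP_EXIT_ON_EMULATION_FAILURE enabled, the
 * failure (including the instruction bytes) is reported to userspace.
 */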
static void guest_code(void)
{
	__asm__ __volatile__("flds (%[addr])"
			     :: [addr]"r"(MEM_REGION_GVA));

	GUEST_DONE();
}

/*
 * Accessors to get R/M, REG, and Mod bits described in the SDM vol 2,
 * figure 2-2 "Table Interpretation of ModR/M Byte (C8H)".
 */
#define GET_RM(insn_byte) (insn_byte & 0x7)
#define GET_REG(insn_byte) ((insn_byte & 0x38) >> 3)
#define GET_MOD(insn_byte) ((insn_byte & 0xc0) >> 6)

/* Ensure we are dealing with a simple 2-byte flds instruction. */
static bool is_flds(uint8_t *insn_bytes, uint8_t insn_size)
{
	return insn_size >= 2 &&
	       insn_bytes[0] == 0xd9 &&
	       GET_REG(insn_bytes[1]) == 0x0 &&
	       GET_MOD(insn_bytes[1]) == 0x0 &&
	       /* Ensure there is no SIB byte. */
	       GET_RM(insn_bytes[1]) != 0x4 &&
	       /* Ensure there is no displacement byte. */
	       GET_RM(insn_bytes[1]) != 0x5;
}

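/*
 * Verify the emulation failure exit: the exit reason and suberror must
 * indicate an emulation error, and if KVM provided the instruction bytes,
 * they must decode to the guest's 2-byte flds.  Skip the instruction by
 * advancing RIP so the guest can make forward progress on the next KVM_RUN.
 */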
static void process_exit_on_emulation_error(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	struct kvm_regs regs;
	uint8_t *insn_bytes;
	uint8_t insn_size;
	uint64_t flags;

	TEST_ASSERT(run->exit_reason == KVM_EXIT_INTERNAL_ERROR,
		    "Unexpected exit reason: %u (%s)",
		    run->exit_reason,
		    exit_reason_str(run->exit_reason));

	TEST_ASSERT(run->emulation_failure.suberror == KVM_INTERNAL_ERROR_EMULATION,
		    "Unexpected suberror: %u",
		    run->emulation_failure.suberror);

	if (run->emulation_failure.ndata >= 1) {
		flags = run->emulation_failure.flags;
		if ((flags & KVM_INTERNAL_ERROR_EMULATION_FLAG_INSTRUCTION_BYTES) &&
		    run->emulation_failure.ndata >= 3) {
			insn_size = run->emulation_failure.insn_size;
			insn_bytes = run->emulation_failure.insn_bytes;

			TEST_ASSERT(insn_size <= 15 && insn_size > 0,
				    "Unexpected instruction size: %u",
				    insn_size);

			TEST_ASSERT(is_flds(insn_bytes, insn_size),
				    "Unexpected instruction. Expected 'flds' (0xd9 /0)");

			/*
			 * If is_flds() succeeded then the instruction bytes
			 * contained an flds instruction that is 2-bytes in
			 * length (ie: no prefix, no SIB, no displacement).
			 */
			vcpu_regs_get(vcpu, &regs);
			regs.rip += 2;
			vcpu_regs_set(vcpu, &regs);
		}
	}
}

static void do_guest_assert(struct ucall *uc)
{
	REPORT_GUEST_ASSERT(*uc);
}

static void check_for_guest_assert(struct kvm_vcpu *vcpu)
{
	struct ucall uc;

	if (vcpu->run->exit_reason == KVM_EXIT_IO &&
	    get_ucall(vcpu, &uc) == UCALL_ABORT) {
		do_guest_assert(&uc);
	}
}

static void process_ucall_done(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	struct ucall uc;

	check_for_guest_assert(vcpu);

	TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
		    "Unexpected exit reason: %u (%s)",
		    run->exit_reason,
		    exit_reason_str(run->exit_reason));

	TEST_ASSERT(get_ucall(vcpu, &uc) == UCALL_DONE,
		    "Unexpected ucall command: %lu, expected UCALL_DONE (%d)",
		    uc.cmd, UCALL_DONE);
}

static uint64_t process_ucall(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	struct ucall uc;

	TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
		    "Unexpected exit reason: %u (%s)",
		    run->exit_reason,
		    exit_reason_str(run->exit_reason));

	switch (get_ucall(vcpu, &uc)) {
	case UCALL_SYNC:
		break;
	case UCALL_ABORT:
		do_guest_assert(&uc);
		break;
	case UCALL_DONE:
		process_ucall_done(vcpu);
		break;
	default:
		TEST_ASSERT(false, "Unexpected ucall");
	}

	return uc.cmd;
}

int main(int argc, char *argv[])
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;
	uint64_t gpa, pte;
	uint64_t *hva;
	int rc;

	/* Tell stdout not to buffer its content */
	setbuf(stdout, NULL);

	TEST_REQUIRE(kvm_has_cap(KVM_CAP_SMALLER_MAXPHYADDR));

	vm = vm_create_with_one_vcpu(&vcpu, guest_code);

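	/*
	 * Shrink the guest's MAXPHYADDR to 36 bits so that physical address
	 * bit 36 is reserved from the guest's point of view, and ask KVM to
	 * exit to userspace when instruction emulation fails.
	 */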
	vcpu_set_cpuid_maxphyaddr(vcpu, MAXPHYADDR);

	rc = kvm_check_cap(KVM_CAP_EXIT_ON_EMULATION_FAILURE);
	TEST_ASSERT(rc, "KVM_CAP_EXIT_ON_EMULATION_FAILURE is unavailable");
	vm_enable_cap(vm, KVM_CAP_EXIT_ON_EMULATION_FAILURE, 1);

	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
				    MEM_REGION_GPA, MEM_REGION_SLOT,
				    MEM_REGION_SIZE / PAGE_SIZE, 0);
	gpa = vm_phy_pages_alloc(vm, MEM_REGION_SIZE / PAGE_SIZE,
				 MEM_REGION_GPA, MEM_REGION_SLOT);
	TEST_ASSERT(gpa == MEM_REGION_GPA, "Failed vm_phy_pages_alloc\n");
	virt_map(vm, MEM_REGION_GVA, MEM_REGION_GPA, 1);
	hva = addr_gpa2hva(vm, MEM_REGION_GPA);
	memset(hva, 0, PAGE_SIZE);
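	/*
	 * Set physical address bit 36 (>= guest MAXPHYADDR) in the PTE that
	 * maps MEM_REGION_GVA.  The resulting reserved-bit violation forces
	 * KVM to intercept and emulate the guest's flds, which is what
	 * ultimately triggers the emulation failure.
	 */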
	pte = vm_get_page_table_entry(vm, vcpu, MEM_REGION_GVA);
	vm_set_page_table_entry(vm, vcpu, MEM_REGION_GVA, pte | (1ull << 36));

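	/*
	 * The first KVM_RUN should exit with an emulation error, which the
	 * handler below validates before skipping the offending flds.  The
	 * second KVM_RUN should then run to GUEST_DONE().
	 */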
	vcpu_run(vcpu);
	process_exit_on_emulation_error(vcpu);
	vcpu_run(vcpu);

	TEST_ASSERT(process_ucall(vcpu) == UCALL_DONE, "Expected UCALL_DONE");

	kvm_vm_free(vm);

	return 0;
}