1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2020, Google LLC.
4 *
5 * Tests for KVM paravirtual feature disablement
6 */
7 #include <asm/kvm_para.h>
8 #include <linux/kvm_para.h>
9 #include <linux/stringify.h>
10 #include <stdint.h>
11
12 #include "kvm_test_harness.h"
13 #include "apic.h"
14 #include "test_util.h"
15 #include "kvm_util.h"
16 #include "processor.h"
17
18 /* VMCALL and VMMCALL are both 3-byte opcodes. */
19 #define HYPERCALL_INSN_SIZE 3
20
/*
 * Set by the host before running the guest to indicate whether
 * KVM_X86_QUIRK_FIX_HYPERCALL_INSN was disabled; copied into guest memory
 * via sync_global_to_guest() in test_fix_hypercall().
 */
static bool quirk_disabled;
22
guest_ud_handler(struct ex_regs * regs)23 static void guest_ud_handler(struct ex_regs *regs)
24 {
25 regs->rax = -EFAULT;
26 regs->rip += HYPERCALL_INSN_SIZE;
27 }
28
/* Raw opcodes: Intel's VMCALL (0f 01 c1) and AMD's VMMCALL (0f 01 d9). */
static const uint8_t vmx_vmcall[HYPERCALL_INSN_SIZE] = { 0x0f, 0x01, 0xc1 };
static const uint8_t svm_vmmcall[HYPERCALL_INSN_SIZE] = { 0x0f, 0x01, 0xd9 };

/* Backed by the "hypercall_insn" asm label inside do_sched_yield(). */
extern uint8_t hypercall_insn[HYPERCALL_INSN_SIZE];
/*
 * Issue a KVM_HC_SCHED_YIELD hypercall targeting @apic_id and return the
 * result (RAX).  The instruction at the "hypercall_insn" label starts life
 * as three 0xcc placeholder bytes; guest_main() overwrites them with the
 * "wrong" vendor's opcode before this runs, after which KVM either patches
 * in the native opcode (quirk enabled) or injects #UD, in which case
 * guest_ud_handler() makes this "return" -EFAULT.
 */
static uint64_t do_sched_yield(uint8_t apic_id)
{
	uint64_t ret;

	asm volatile("hypercall_insn:\n\t"
		     ".byte 0xcc,0xcc,0xcc\n\t"
		     : "=a"(ret)
		     : "a"((uint64_t)KVM_HC_SCHED_YIELD), "b"((uint64_t)apic_id)
		     : "memory");

	return ret;
}
45
guest_main(void)46 static void guest_main(void)
47 {
48 const uint8_t *native_hypercall_insn;
49 const uint8_t *other_hypercall_insn;
50 uint64_t ret;
51
52 if (host_cpu_is_intel) {
53 native_hypercall_insn = vmx_vmcall;
54 other_hypercall_insn = svm_vmmcall;
55 } else if (host_cpu_is_amd) {
56 native_hypercall_insn = svm_vmmcall;
57 other_hypercall_insn = vmx_vmcall;
58 } else {
59 GUEST_ASSERT(0);
60 /* unreachable */
61 return;
62 }
63
64 memcpy(hypercall_insn, other_hypercall_insn, HYPERCALL_INSN_SIZE);
65
66 ret = do_sched_yield(GET_APIC_ID_FIELD(xapic_read_reg(APIC_ID)));
67
68 /*
69 * If the quirk is disabled, verify that guest_ud_handler() "returned"
70 * -EFAULT and that KVM did NOT patch the hypercall. If the quirk is
71 * enabled, verify that the hypercall succeeded and that KVM patched in
72 * the "right" hypercall.
73 */
74 if (quirk_disabled) {
75 GUEST_ASSERT(ret == (uint64_t)-EFAULT);
76 GUEST_ASSERT(!memcmp(other_hypercall_insn, hypercall_insn,
77 HYPERCALL_INSN_SIZE));
78 } else {
79 GUEST_ASSERT(!ret);
80 GUEST_ASSERT(!memcmp(native_hypercall_insn, hypercall_insn,
81 HYPERCALL_INSN_SIZE));
82 }
83
84 GUEST_DONE();
85 }
86
/* Declare the suite; each KVM_ONE_VCPU_TEST below runs with a single vCPU. */
KVM_ONE_VCPU_TEST_SUITE(fix_hypercall);
88
enter_guest(struct kvm_vcpu * vcpu)89 static void enter_guest(struct kvm_vcpu *vcpu)
90 {
91 struct kvm_run *run = vcpu->run;
92 struct ucall uc;
93
94 vcpu_run(vcpu);
95 switch (get_ucall(vcpu, &uc)) {
96 case UCALL_SYNC:
97 pr_info("%s: %016lx\n", (const char *)uc.args[2], uc.args[3]);
98 break;
99 case UCALL_DONE:
100 return;
101 case UCALL_ABORT:
102 REPORT_GUEST_ASSERT(uc);
103 default:
104 TEST_FAIL("Unhandled ucall: %ld\nexit_reason: %u (%s)",
105 uc.cmd, run->exit_reason, exit_reason_str(run->exit_reason));
106 }
107 }
108
test_fix_hypercall(struct kvm_vcpu * vcpu,bool disable_quirk)109 static void test_fix_hypercall(struct kvm_vcpu *vcpu, bool disable_quirk)
110 {
111 struct kvm_vm *vm = vcpu->vm;
112
113 vm_install_exception_handler(vcpu->vm, UD_VECTOR, guest_ud_handler);
114
115 if (disable_quirk)
116 vm_enable_cap(vm, KVM_CAP_DISABLE_QUIRKS2,
117 KVM_X86_QUIRK_FIX_HYPERCALL_INSN);
118
119 quirk_disabled = disable_quirk;
120 sync_global_to_guest(vm, quirk_disabled);
121
122 virt_pg_map(vm, APIC_DEFAULT_GPA, APIC_DEFAULT_GPA);
123
124 enter_guest(vcpu);
125 }
126
/* Quirk left enabled (the default): KVM patches the bogus hypercall insn. */
KVM_ONE_VCPU_TEST(fix_hypercall, enable_quirk, guest_main)
{
	test_fix_hypercall(vcpu, false);
}
131
/* Quirk disabled: KVM injects #UD and leaves the guest's insn untouched. */
KVM_ONE_VCPU_TEST(fix_hypercall, disable_quirk, guest_main)
{
	test_fix_hypercall(vcpu, true);
}
136
int main(int argc, char *argv[])
{
	/* The test requires that KVM support toggling this specific quirk. */
	TEST_REQUIRE(kvm_check_cap(KVM_CAP_DISABLE_QUIRKS2) & KVM_X86_QUIRK_FIX_HYPERCALL_INSN);

	return test_harness_run(argc, argv);
}
143