1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * x86_64-specific extensions to perf_test_util.c.
4 *
5 * Copyright (C) 2022, Google, Inc.
6 */
7 #include <stdio.h>
8 #include <stdlib.h>
9 #include <linux/bitmap.h>
10 #include <linux/bitops.h>
11
12 #include "test_util.h"
13 #include "kvm_util.h"
14 #include "perf_test_util.h"
15 #include "processor.h"
16 #include "vmx.h"
17
/*
 * L2 (nested) guest entry point: run the common perf test workload for this
 * vCPU, then VMCALL to exit back to the L1 guest.
 */
void perf_test_l2_guest_code(uint64_t vcpu_id)
{
	perf_test_guest_code(vcpu_id);
	vmcall();
}
23
/*
 * Bare assembly stub used as the L2 entry point.  perf_test_l1_guest_code()
 * stores the vCPU ID at the top of the L2 stack; load it into RDI (the first
 * argument register) and call the C entry point.  UD2 traps if the call ever
 * returns, which it should not (the L2 guest exits via VMCALL).
 */
extern char perf_test_l2_guest_entry[];
__asm__(
"perf_test_l2_guest_entry:"
"	mov (%rsp), %rdi;"
"	call perf_test_l2_guest_code;"
"	ud2;"
);
31
/*
 * L1 guest code: enter VMX operation, load the VMCS, and launch the L2 guest
 * at perf_test_l2_guest_entry with the vCPU ID staged on the L2 stack.
 * Requires 1G EPT pages (the EPT is built with 1G mappings by
 * perf_test_setup_ept()).  Completes once L2 exits via VMCALL.
 */
static void perf_test_l1_guest_code(struct vmx_pages *vmx, uint64_t vcpu_id)
{
#define L2_GUEST_STACK_SIZE 64
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
	unsigned long *rsp;

	GUEST_ASSERT(vmx->vmcs_gpa);
	GUEST_ASSERT(prepare_for_vmx_operation(vmx));
	GUEST_ASSERT(load_vmcs(vmx));
	GUEST_ASSERT(ept_1g_pages_supported());

	/*
	 * Stash the vCPU ID at the top of the L2 stack; the asm entry stub
	 * pops it into RDI before calling perf_test_l2_guest_code().
	 */
	rsp = &l2_guest_stack[L2_GUEST_STACK_SIZE - 1];
	*rsp = vcpu_id;
	prepare_vmcs(vmx, perf_test_l2_guest_entry, rsp);

	GUEST_ASSERT(!vmlaunch());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
	GUEST_DONE();
}
51
/*
 * Return the number of extra guest pages needed to run the test in a nested
 * (L1) guest, as a function of the number of vCPUs.
 */
uint64_t perf_test_nested_pages(int nr_vcpus)
{
	/*
	 * 513 page tables is enough to identity-map 256 TiB of L2 with 1G
	 * pages and 4-level paging, plus a few pages per-vCPU for data
	 * structures such as the VMCS.
	 */
	return 513 + 10 * nr_vcpus;
}
61
perf_test_setup_ept(struct vmx_pages * vmx,struct kvm_vm * vm)62 void perf_test_setup_ept(struct vmx_pages *vmx, struct kvm_vm *vm)
63 {
64 uint64_t start, end;
65
66 prepare_eptp(vmx, vm, 0);
67
68 /*
69 * Identity map the first 4G and the test region with 1G pages so that
70 * KVM can shadow the EPT12 with the maximum huge page size supported
71 * by the backing source.
72 */
73 nested_identity_map_1g(vmx, vm, 0, 0x100000000ULL);
74
75 start = align_down(perf_test_args.gpa, PG_SIZE_1G);
76 end = align_up(perf_test_args.gpa + perf_test_args.size, PG_SIZE_1G);
77 nested_identity_map_1g(vmx, vm, start, end - start);
78 }
79
perf_test_setup_nested(struct kvm_vm * vm,int nr_vcpus,struct kvm_vcpu * vcpus[])80 void perf_test_setup_nested(struct kvm_vm *vm, int nr_vcpus, struct kvm_vcpu *vcpus[])
81 {
82 struct vmx_pages *vmx, *vmx0 = NULL;
83 struct kvm_regs regs;
84 vm_vaddr_t vmx_gva;
85 int vcpu_id;
86
87 TEST_REQUIRE(kvm_cpu_has(X86_FEATURE_VMX));
88
89 for (vcpu_id = 0; vcpu_id < nr_vcpus; vcpu_id++) {
90 vmx = vcpu_alloc_vmx(vm, &vmx_gva);
91
92 if (vcpu_id == 0) {
93 perf_test_setup_ept(vmx, vm);
94 vmx0 = vmx;
95 } else {
96 /* Share the same EPT table across all vCPUs. */
97 vmx->eptp = vmx0->eptp;
98 vmx->eptp_hva = vmx0->eptp_hva;
99 vmx->eptp_gpa = vmx0->eptp_gpa;
100 }
101
102 /*
103 * Override the vCPU to run perf_test_l1_guest_code() which will
104 * bounce it into L2 before calling perf_test_guest_code().
105 */
106 vcpu_regs_get(vcpus[vcpu_id], ®s);
107 regs.rip = (unsigned long) perf_test_l1_guest_code;
108 vcpu_regs_set(vcpus[vcpu_id], ®s);
109 vcpu_args_set(vcpus[vcpu_id], 2, vmx_gva, vcpu_id);
110 }
111 }
112