// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM selftest s390x library code - CPU-related functions (page tables...)
 *
 * Copyright (C) 2019, Red Hat, Inc.
 */

#include "processor.h"
#include "kvm_util.h"

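/*
 * An s390 region or segment table has 2048 eight-byte entries, i.e. it
 * occupies 16 KiB = four 4 KiB pages. (Page tables are smaller: 256
 * entries, so a single page suffices for one of those.)
 */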
#define PAGES_PER_REGION 4

void virt_arch_pgd_alloc(struct kvm_vm *vm)
{
	vm_paddr_t paddr;

	TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",
		    vm->page_size);

	if (vm->pgd_created)
		return;

	paddr = vm_phy_pages_alloc(vm, PAGES_PER_REGION,
				   KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
	/* 0xff sets the INVALID bit in every region/segment table entry */
	memset(addr_gpa2hva(vm, paddr), 0xff, PAGES_PER_REGION * vm->page_size);

	vm->pgd = paddr;
	vm->pgd_created = true;
}

/*
 * Allocate 4 pages for a region/segment table (ri < 4), or one page for
 * a page table (ri == 4). Returns a suitable region/segment table entry
 * which points to the freshly allocated pages.
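 *
 * For example, for ri == 1 the returned value is a region-first table
 * entry: type (4 - 1) << 2 = 0xc and length PAGES_PER_REGION - 1 = 3,
 * i.e. all four pages of the new table are valid. For ri == 4 it is a
 * segment table entry (type and length 0) pointing at a one-page page
 * table.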
 */
static uint64_t virt_alloc_region(struct kvm_vm *vm, int ri)
{
	uint64_t taddr;

	taddr = vm_phy_pages_alloc(vm, ri < 4 ? PAGES_PER_REGION : 1,
				   KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
	/* Mark every entry invalid; only touch the pages actually allocated */
	memset(addr_gpa2hva(vm, taddr), 0xff,
	       (ri < 4 ? PAGES_PER_REGION : 1) * vm->page_size);

	return (taddr & REGION_ENTRY_ORIGIN)
		| (((4 - ri) << 2) & REGION_ENTRY_TYPE)
		| ((ri < 4 ? (PAGES_PER_REGION - 1) : 0) & REGION_ENTRY_LENGTH);
}

void virt_arch_pg_map(struct kvm_vm *vm, uint64_t gva, uint64_t gpa)
{
	int ri, idx;
	uint64_t *entry;

	TEST_ASSERT((gva % vm->page_size) == 0,
		"Virtual address not on page boundary,\n"
		"  vaddr: 0x%lx vm->page_size: 0x%x",
		gva, vm->page_size);
	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
		(gva >> vm->page_shift)),
		"Invalid virtual address, vaddr: 0x%lx",
		gva);
	TEST_ASSERT((gpa % vm->page_size) == 0,
		"Physical address not on page boundary,\n"
		"  paddr: 0x%lx vm->page_size: 0x%x",
		gpa, vm->page_size);
	TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn,
		"Physical address beyond maximum supported,\n"
		"  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
		gpa, vm->max_gfn, vm->page_size);

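	/*
	 * A 64-bit guest virtual address is carved up as follows (the
	 * standard s390 5-level DAT layout used here): four 11-bit
	 * region/segment indices (bits 0-43, extracted below as
	 * (gva >> (64 - 11 * ri)) & 0x7ff for ri = 1..4), an 8-bit page
	 * index (bits 44-51, i.e. (gva >> 12) & 0xff) and a 12-bit byte
	 * offset within the page.
	 */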
	/* Walk through region and segment tables */
	entry = addr_gpa2hva(vm, vm->pgd);
	for (ri = 1; ri <= 4; ri++) {
		idx = (gva >> (64 - 11 * ri)) & 0x7ffu;
		if (entry[idx] & REGION_ENTRY_INVALID)
			entry[idx] = virt_alloc_region(vm, ri);
		entry = addr_gpa2hva(vm, entry[idx] & REGION_ENTRY_ORIGIN);
	}

	/* Fill in page table entry */
	idx = (gva >> 12) & 0x0ffu;		/* page index */
	if (!(entry[idx] & PAGE_INVALID))
		fprintf(stderr,
			"WARNING: PTE for gpa=0x%"PRIx64" already set!\n", gpa);
	entry[idx] = gpa;
}

vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
	int ri, idx;
	uint64_t *entry;

	TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",
		    vm->page_size);

	entry = addr_gpa2hva(vm, vm->pgd);
	for (ri = 1; ri <= 4; ri++) {
		idx = (gva >> (64 - 11 * ri)) & 0x7ffu;
		TEST_ASSERT(!(entry[idx] & REGION_ENTRY_INVALID),
			"No region mapping for vm virtual address 0x%lx",
			gva);
		entry = addr_gpa2hva(vm, entry[idx] & REGION_ENTRY_ORIGIN);
	}

	idx = (gva >> 12) & 0x0ffu;		/* page index */

	TEST_ASSERT(!(entry[idx] & PAGE_INVALID),
		"No page mapping for vm virtual address 0x%lx", gva);

	/* Combine the page frame address with the byte offset */
	return (entry[idx] & ~0xffful) + (gva & 0xffful);
}

static void virt_dump_ptes(FILE *stream, struct kvm_vm *vm, uint8_t indent,
			   uint64_t ptea_start)
{
	uint64_t *pte, ptea;

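	/* A page table has 256 eight-byte entries (2 KiB) */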
	for (ptea = ptea_start; ptea < ptea_start + 0x100 * 8; ptea += 8) {
		pte = addr_gpa2hva(vm, ptea);
		if (*pte & PAGE_INVALID)
			continue;
		fprintf(stream, "%*spte @ 0x%lx: 0x%016lx\n",
			indent, "", ptea, *pte);
	}
}

static void virt_dump_region(FILE *stream, struct kvm_vm *vm, uint8_t indent,
			     uint64_t reg_tab_addr)
{
	uint64_t addr, *entry;

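	/* A region/segment table has 2048 eight-byte entries (16 KiB) */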
	for (addr = reg_tab_addr; addr < reg_tab_addr + 0x800 * 8; addr += 8) {
		entry = addr_gpa2hva(vm, addr);
		if (*entry & REGION_ENTRY_INVALID)
			continue;
		fprintf(stream, "%*srt%lde @ 0x%lx: 0x%016lx\n",
			indent, "", 4 - ((*entry & REGION_ENTRY_TYPE) >> 2),
			addr, *entry);
		if (*entry & REGION_ENTRY_TYPE) {
			virt_dump_region(stream, vm, indent + 2,
					 *entry & REGION_ENTRY_ORIGIN);
		} else {
			virt_dump_ptes(stream, vm, indent + 2,
				       *entry & REGION_ENTRY_ORIGIN);
		}
	}
}

void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
	if (!vm->pgd_created)
		return;

	virt_dump_region(stream, vm, indent, vm->pgd);
}

struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id,
				  void *guest_code)
{
	size_t stack_size = DEFAULT_STACK_PGS * getpagesize();
	uint64_t stack_vaddr;
	struct kvm_regs regs;
	struct kvm_sregs sregs;
	struct kvm_vcpu *vcpu;
	struct kvm_run *run;

	TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",
		    vm->page_size);

	stack_vaddr = vm_vaddr_alloc(vm, stack_size,
				     DEFAULT_GUEST_STACK_VADDR_MIN);

	vcpu = __vm_vcpu_add(vm, vcpu_id);

	/* Setup guest registers */
	vcpu_regs_get(vcpu, &regs);
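	/*
	 * Point r15 at the top of the stack; the 160 bytes subtracted below
	 * are the register save area that the s390x ELF ABI requires the
	 * caller to provide for the callee.
	 */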
	regs.gprs[15] = stack_vaddr + stack_size - 160;
	vcpu_regs_set(vcpu, &regs);

	vcpu_sregs_get(vcpu, &sregs);
	sregs.crs[0] |= 0x00040000;		/* Enable floating point regs */
	sregs.crs[1] = vm->pgd | 0xf;		/* Primary region table */
	vcpu_sregs_set(vcpu, &sregs);

	run = vcpu->run;
	run->psw_mask = 0x0400000180000000ULL;  /* DAT enabled + 64 bit mode */
	run->psw_addr = (uintptr_t)guest_code;

	return vcpu;
}

void vcpu_args_set(struct kvm_vcpu *vcpu, unsigned int num, ...)
{
	va_list ap;
	struct kvm_regs regs;
	int i;

	TEST_ASSERT(num >= 1 && num <= 5, "Unsupported number of args,\n"
		    "  num: %u\n",
		    num);

	va_start(ap, num);
	vcpu_regs_get(vcpu, &regs);

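	/*
	 * The s390x ELF ABI passes the first integer arguments in r2-r6,
	 * which is why at most five arguments are supported here.
	 */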
	for (i = 0; i < num; i++)
		regs.gprs[i + 2] = va_arg(ap, uint64_t);

	vcpu_regs_set(vcpu, &regs);
	va_end(ap);
}

void vcpu_arch_dump(FILE *stream, struct kvm_vcpu *vcpu, uint8_t indent)
{
	fprintf(stream, "%*spstate: psw: 0x%.16llx:0x%.16llx\n",
		indent, "", vcpu->run->psw_mask, vcpu->run->psw_addr);
}

void assert_on_unhandled_exception(struct kvm_vcpu *vcpu)
{
	/* Unhandled exceptions are not tracked for s390x, nothing to check */
}