// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <semaphore.h>
#include <sys/types.h>
#include <signal.h>
#include <errno.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/atomic.h>
#include <linux/sizes.h>

#include "kvm_util.h"
#include "test_util.h"
#include "guest_modes.h"
#include "processor.h"
#include "ucall_common.h"

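/*
 * mprotect_ro_done is set by the boss thread once mprotect(PROT_READ) has
 * completed; all_vcpus_hit_ro_fault is set once every vCPU has observed
 * -EFAULT on a write to the now read-only region.  Both are synced into the
 * guest so guest_code() knows when it can stop hammering writes.
 */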
static bool mprotect_ro_done;
static bool all_vcpus_hit_ro_fault;

static void guest_code(uint64_t start_gpa, uint64_t end_gpa, uint64_t stride)
{
	uint64_t gpa;
	int i;

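	/*
	 * Stages 0 and 1: write every page in the assigned range, syncing
	 * with the host after each full pass.
	 */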
	for (i = 0; i < 2; i++) {
		for (gpa = start_gpa; gpa < end_gpa; gpa += stride)
			vcpu_arch_put_guest(*((volatile uint64_t *)gpa), gpa);
		GUEST_SYNC(i);
	}

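	/* Stage 2: read every page back. */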
	for (gpa = start_gpa; gpa < end_gpa; gpa += stride)
		*((volatile uint64_t *)gpa);
	GUEST_SYNC(2);

	/*
	 * Write to the region while mprotect(PROT_READ) is underway.  Keep
	 * looping until the memory is guaranteed to be read-only and a fault
	 * has occurred, otherwise vCPUs may complete their writes and advance
	 * to the next stage prematurely.
	 *
	 * For architectures that support skipping the faulting instruction,
	 * generate the store via inline assembly to ensure the exact length
	 * of the instruction is known and stable (vcpu_arch_put_guest() on
	 * fixed-length architectures should work, but the cost of paranoia
	 * is low in this case).  For x86, hand-code the exact opcode so that
	 * there is no room for variability in the generated instruction.
	 */
	do {
		for (gpa = start_gpa; gpa < end_gpa; gpa += stride)
#ifdef __x86_64__
			asm volatile(".byte 0x48,0x89,0x00" :: "a"(gpa) : "memory"); /* mov %rax, (%rax) */
#elif defined(__aarch64__)
			asm volatile("str %0, [%0]" :: "r" (gpa) : "memory");
#else
			vcpu_arch_put_guest(*((volatile uint64_t *)gpa), gpa);
#endif
	} while (!READ_ONCE(mprotect_ro_done) || !READ_ONCE(all_vcpus_hit_ro_fault));

	/*
	 * Only architectures that write the entire range can explicitly sync,
	 * as other architectures will be stuck on the write fault.
	 */
#if defined(__x86_64__) || defined(__aarch64__)
	GUEST_SYNC(3);
#endif

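	/*
	 * Stage 4: write every page again; the host retries the vCPU on
	 * -EFAULT until mprotect(PROT_WRITE) makes the memory writable.
	 */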
	for (gpa = start_gpa; gpa < end_gpa; gpa += stride)
		vcpu_arch_put_guest(*((volatile uint64_t *)gpa), gpa);
	GUEST_SYNC(4);

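	/* The host never re-enters the guest after stage 4. */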
	GUEST_ASSERT(0);
}

struct vcpu_info {
	struct kvm_vcpu *vcpu;
	uint64_t start_gpa;
	uint64_t end_gpa;
};

static int nr_vcpus;
static atomic_t rendezvous;
static atomic_t nr_ro_faults;

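/*
 * vCPU half of the rendezvous.  The boss seeds "rendezvous" with nr_vcpus + 1
 * (or the negated equivalent); each vCPU moves the count one step toward zero
 * and then spins until the boss flips the sign, which releases all vCPUs for
 * the next stage.
 */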
static void rendezvous_with_boss(void)
{
	int orig = atomic_read(&rendezvous);

	if (orig > 0) {
		atomic_dec_and_test(&rendezvous);
		while (atomic_read(&rendezvous) > 0)
			cpu_relax();
	} else {
		atomic_inc(&rendezvous);
		while (atomic_read(&rendezvous) < 0)
			cpu_relax();
	}
}

static void assert_sync_stage(struct kvm_vcpu *vcpu, int stage)
{
	struct ucall uc;

	TEST_ASSERT_EQ(get_ucall(vcpu, &uc), UCALL_SYNC);
	TEST_ASSERT_EQ(uc.args[1], stage);
}

static void run_vcpu(struct kvm_vcpu *vcpu, int stage)
{
	vcpu_run(vcpu);
	assert_sync_stage(vcpu, stage);
}

static void *vcpu_worker(void *data)
{
	struct kvm_sregs __maybe_unused sregs;
	struct vcpu_info *info = data;
	struct kvm_vcpu *vcpu = info->vcpu;
	struct kvm_vm *vm = vcpu->vm;
	int r;

	vcpu_args_set(vcpu, 3, info->start_gpa, info->end_gpa, vm->page_size);

	rendezvous_with_boss();

	/* Stage 0, write all of guest memory. */
	run_vcpu(vcpu, 0);
	rendezvous_with_boss();
#ifdef __x86_64__
	vcpu_sregs_get(vcpu, &sregs);
	/* Toggle CR0.WP to trigger an MMU context reset. */
	sregs.cr0 ^= X86_CR0_WP;
	vcpu_sregs_set(vcpu, &sregs);
#endif
	rendezvous_with_boss();

	/* Stage 1, re-write all of guest memory. */
	run_vcpu(vcpu, 1);
	rendezvous_with_boss();

	/* Stage 2, read all of guest memory, which is now read-only. */
	run_vcpu(vcpu, 2);

	/*
	 * Stage 3, write guest memory and verify KVM returns -EFAULT once the
	 * mprotect(PROT_READ) lands.  Only architectures that can validate
	 * *all* of guest memory sync for this stage, as vCPUs on other
	 * architectures will be stuck on the faulting instruction.  Go to
	 * stage 3 without a rendezvous.
	 */
	r = _vcpu_run(vcpu);
	TEST_ASSERT(r == -1 && errno == EFAULT,
		    "Expected EFAULT on write to RO memory, got r = %d, errno = %d", r, errno);

	atomic_inc(&nr_ro_faults);
	if (atomic_read(&nr_ro_faults) == nr_vcpus) {
		WRITE_ONCE(all_vcpus_hit_ro_fault, true);
		sync_global_to_guest(vm, all_vcpus_hit_ro_fault);
	}

#if defined(__x86_64__) || defined(__aarch64__)
	/*
	 * Verify *all* writes from the guest hit EFAULT due to the VMA now
	 * being read-only.  x86 and arm64 only at this time as skipping the
	 * instruction that hits the EFAULT requires advancing the program
	 * counter, which is arch specific and relies on inline assembly.
	 */
#ifdef __x86_64__
	vcpu->run->kvm_valid_regs = KVM_SYNC_X86_REGS;
#endif
	for (;;) {
		r = _vcpu_run(vcpu);
		if (!r)
			break;
		TEST_ASSERT_EQ(errno, EFAULT);
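		/*
		 * Skip the faulting store: the hand-coded x86 mov is exactly
		 * 3 bytes, and the arm64 str is a fixed 4 bytes.
		 */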
#if defined(__x86_64__)
		WRITE_ONCE(vcpu->run->kvm_dirty_regs, KVM_SYNC_X86_REGS);
		vcpu->run->s.regs.regs.rip += 3;
#elif defined(__aarch64__)
		vcpu_set_reg(vcpu, ARM64_CORE_REG(regs.pc),
			     vcpu_get_reg(vcpu, ARM64_CORE_REG(regs.pc)) + 4);
#endif
	}
	assert_sync_stage(vcpu, 3);
#endif /* __x86_64__ || __aarch64__ */
	rendezvous_with_boss();

	/*
	 * Stage 4.  Run to completion, waiting for mprotect(PROT_WRITE) to
	 * make the memory writable again.
	 */
	do {
		r = _vcpu_run(vcpu);
	} while (r && errno == EFAULT);
	TEST_ASSERT_EQ(r, 0);
	assert_sync_stage(vcpu, 4);
	rendezvous_with_boss();

	return NULL;
}

static pthread_t *spawn_workers(struct kvm_vm *vm, struct kvm_vcpu **vcpus,
				uint64_t start_gpa, uint64_t end_gpa)
{
	struct vcpu_info *info;
	uint64_t gpa, nr_bytes;
	pthread_t *threads;
	int i;

	threads = malloc(nr_vcpus * sizeof(*threads));
	TEST_ASSERT(threads, "Failed to allocate vCPU threads");

	info = malloc(nr_vcpus * sizeof(*info));
	TEST_ASSERT(info, "Failed to allocate vCPU gpa ranges");

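	/* Carve the GPA range into equal, page-aligned chunks, one per vCPU. */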
	nr_bytes = ((end_gpa - start_gpa) / nr_vcpus) &
			~((uint64_t)vm->page_size - 1);
	TEST_ASSERT(nr_bytes, "C'mon, no way you have %d CPUs", nr_vcpus);

	for (i = 0, gpa = start_gpa; i < nr_vcpus; i++, gpa += nr_bytes) {
		info[i].vcpu = vcpus[i];
		info[i].start_gpa = gpa;
		info[i].end_gpa = gpa + nr_bytes;
		pthread_create(&threads[i], NULL, vcpu_worker, &info[i]);
	}
	return threads;
}

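/*
 * Boss half of the rendezvous: wait for the count to hit +/-1, i.e. for every
 * vCPU to check in, then flip the sign to release the vCPUs for the next
 * stage.
 */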
static void rendezvous_with_vcpus(struct timespec *time, const char *name)
{
	int i, rendezvoused;

	pr_info("Waiting for vCPUs to finish %s...\n", name);

	rendezvoused = atomic_read(&rendezvous);
	for (i = 0; abs(rendezvoused) != 1; i++) {
		usleep(100);
		if (!(i & 0x3f))
			pr_info("\r%d vCPUs haven't rendezvoused...",
				abs(rendezvoused) - 1);
		rendezvoused = atomic_read(&rendezvous);
	}

	clock_gettime(CLOCK_MONOTONIC, time);

	/* Release the vCPUs after getting the time of the previous action. */
	pr_info("\rAll vCPUs finished %s, releasing...\n", name);
	if (rendezvoused > 0)
		atomic_set(&rendezvous, -nr_vcpus - 1);
	else
		atomic_set(&rendezvous, nr_vcpus + 1);
}

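/* Default to 3/4 of the CPUs in this task's affinity mask. */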
static void calc_default_nr_vcpus(void)
{
	cpu_set_t possible_mask;
	int r;

	r = sched_getaffinity(0, sizeof(possible_mask), &possible_mask);
	TEST_ASSERT(!r, "sched_getaffinity failed, errno = %d (%s)",
		    errno, strerror(errno));

	nr_vcpus = CPU_COUNT(&possible_mask) * 3/4;
	TEST_ASSERT(nr_vcpus > 0, "Uh, no CPUs?");
}

int main(int argc, char *argv[])
{
	/*
	 * Skip the first 4gb and slot0.  slot0 maps <1gb and is used to back
	 * the guest's code, stack, and page tables.  Because selftests creates
	 * an IRQCHIP, a.k.a. a local APIC, KVM creates an internal memslot
	 * just below the 4gb boundary.  This test could create memory at
	 * 1gb-3gb, but it's simpler to skip straight to 4gb.
	 */
	const uint64_t start_gpa = SZ_4G;
	const int first_slot = 1;

	struct timespec time_start, time_run1, time_reset, time_run2, time_ro, time_rw;
	uint64_t max_gpa, gpa, slot_size, max_mem, i;
	int max_slots, slot, opt, fd;
	bool hugepages = false;
	struct kvm_vcpu **vcpus;
	pthread_t *threads;
	struct kvm_vm *vm;
	void *mem;

	/*
	 * Default to 2gb so that maxing out systems with MAXPHYADDR=46, which
	 * are quite common for x86, requires changing only max_mem (KVM allows
	 * 32k memslots, 32k * 2gb == ~64tb of guest memory).
	 */
	slot_size = SZ_2G;

	max_slots = kvm_check_cap(KVM_CAP_NR_MEMSLOTS);
	TEST_ASSERT(max_slots > first_slot, "KVM is broken");

	/* All KVM MMUs should be able to survive a 128gb guest. */
	max_mem = 128ull * SZ_1G;

	calc_default_nr_vcpus();

	while ((opt = getopt(argc, argv, "c:h:m:s:H")) != -1) {
		switch (opt) {
		case 'c':
			nr_vcpus = atoi_positive("Number of vCPUs", optarg);
			break;
		case 'm':
			max_mem = 1ull * atoi_positive("Memory size", optarg) * SZ_1G;
			break;
		case 's':
			slot_size = 1ull * atoi_positive("Slot size", optarg) * SZ_1G;
			break;
		case 'H':
			hugepages = true;
			break;
		case 'h':
		default:
			printf("usage: %s [-c nr_vcpus] [-m max_mem_in_gb] [-s slot_size_in_gb] [-H]\n", argv[0]);
			exit(1);
		}
	}

	vcpus = malloc(nr_vcpus * sizeof(*vcpus));
	TEST_ASSERT(vcpus, "Failed to allocate vCPU array");

	vm = __vm_create_with_vcpus(VM_SHAPE_DEFAULT, nr_vcpus,
#ifdef __x86_64__
				    max_mem / SZ_1G,
#else
				    max_mem / vm_guest_mode_params[VM_MODE_DEFAULT].page_size,
#endif
				    guest_code, vcpus);

	max_gpa = vm->max_gfn << vm->page_shift;
	TEST_ASSERT(max_gpa > (4 * slot_size), "MAXPHYADDR < 4gb");

	fd = kvm_memfd_alloc(slot_size, hugepages);
	mem = mmap(NULL, slot_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	TEST_ASSERT(mem != MAP_FAILED, "mmap() failed");

	TEST_ASSERT(!madvise(mem, slot_size, MADV_NOHUGEPAGE), "madvise() failed");

	/* Pre-fault the memory to avoid taking mmap_sem on guest page faults. */
	for (i = 0; i < slot_size; i += vm->page_size)
		((uint8_t *)mem)[i] = 0xaa;

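	/*
	 * Fill the guest with memslots that all alias the same host backing,
	 * stopping at max_mem or the guest's maximum GPA, whichever comes
	 * first.
	 */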
	gpa = 0;
	for (slot = first_slot; slot < max_slots; slot++) {
		gpa = start_gpa + ((slot - first_slot) * slot_size);
		if (gpa + slot_size > max_gpa)
			break;

		if ((gpa - start_gpa) >= max_mem)
			break;

		vm_set_user_memory_region(vm, slot, 0, gpa, slot_size, mem);

#ifdef __x86_64__
		/* Identity map memory in the guest using 1gb pages. */
		for (i = 0; i < slot_size; i += SZ_1G)
			__virt_pg_map(vm, gpa + i, gpa + i, PG_LEVEL_1G);
#else
		for (i = 0; i < slot_size; i += vm->page_size)
			virt_pg_map(vm, gpa + i, gpa + i);
#endif
	}

	atomic_set(&rendezvous, nr_vcpus + 1);
	threads = spawn_workers(vm, vcpus, start_gpa, gpa);

	free(vcpus);
	vcpus = NULL;

	pr_info("Running with %lugb of guest memory and %u vCPUs\n",
		(gpa - start_gpa) / SZ_1G, nr_vcpus);

	rendezvous_with_vcpus(&time_start, "spawning");
	rendezvous_with_vcpus(&time_run1, "run 1");
	rendezvous_with_vcpus(&time_reset, "reset");
	rendezvous_with_vcpus(&time_run2, "run 2");

	mprotect(mem, slot_size, PROT_READ);
	mprotect_ro_done = true;
	sync_global_to_guest(vm, mprotect_ro_done);

	rendezvous_with_vcpus(&time_ro, "mprotect RO");
	mprotect(mem, slot_size, PROT_READ | PROT_WRITE);
	rendezvous_with_vcpus(&time_rw, "mprotect RW");

	time_rw    = timespec_sub(time_rw,     time_ro);
	time_ro    = timespec_sub(time_ro,     time_run2);
	time_run2  = timespec_sub(time_run2,   time_reset);
	time_reset = timespec_sub(time_reset,  time_run1);
	time_run1  = timespec_sub(time_run1,   time_start);

	pr_info("run1 = %ld.%.9lds, reset = %ld.%.9lds, run2 = %ld.%.9lds, "
		"ro = %ld.%.9lds, rw = %ld.%.9lds\n",
		time_run1.tv_sec, time_run1.tv_nsec,
		time_reset.tv_sec, time_reset.tv_nsec,
		time_run2.tv_sec, time_run2.tv_nsec,
		time_ro.tv_sec, time_ro.tv_nsec,
		time_rw.tv_sec, time_rw.tv_nsec);

	/*
	 * Delete even numbered slots (arbitrary) and unmap the first half of
	 * the backing (also arbitrary) to verify KVM correctly drops all
	 * references to the removed regions.
	 */
	for (slot = (slot - 1) & ~1ull; slot >= first_slot; slot -= 2)
		vm_set_user_memory_region(vm, slot, 0, 0, 0, NULL);

	munmap(mem, slot_size / 2);

	/* Sanity check that the vCPUs actually ran. */
	for (i = 0; i < nr_vcpus; i++)
		pthread_join(threads[i], NULL);

	/*
	 * Deliberately exit without deleting the remaining memslots or closing
	 * kvm_fd to test cleanup via mmu_notifier.release.
	 */
}