Lines matching +full:time +full:- +full:slot
1 // SPDX-License-Identifier: GPL-2.0
40 * looping until the memory is guaranteed to be read-only and a fault in guest_code()
47 * fixed-length architectures should work, but the cost of paranoia in guest_code()
48 * is low in this case). For x86, hand-code the exact opcode so that in guest_code()
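For context, a sketch of the store loop this comment appears to describe (the variable names, the mprotect_ro_done flag, and the loop shape are assumptions pieced together from the fragments; the point is that the x86 store is emitted as fixed opcode bytes so its length is known):

    do {
        for (gpa = start_gpa; gpa < end_gpa; gpa += stride)
#ifdef __x86_64__
            /* Hand-coded "mov %rax, (%rax)": exactly 3 bytes long. */
            asm volatile(".byte 0x48,0x89,0x00" :: "a"(gpa) : "memory");
#else
            /* Fallback store; the instruction length may vary. */
            *((volatile uint64_t *)gpa) = gpa;
#endif
    } while (!READ_ONCE(mprotect_ro_done));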
120 struct kvm_vcpu *vcpu = info->vcpu; in vcpu_worker()
121 struct kvm_vm *vm = vcpu->vm; in vcpu_worker()
124 vcpu_args_set(vcpu, 3, info->start_gpa, info->end_gpa, vm->page_size); in vcpu_worker()
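Passing three arguments here implies the guest entry point takes a matching triple; presumably something like this (parameter names are assumptions, chosen to line up with the call above):

    static void guest_code(uint64_t start_gpa, uint64_t end_gpa, uint64_t stride);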
139 /* Stage 1, re-write all of guest memory. */ in vcpu_worker()
143 /* Stage 2, read all of guest memory, which is now read-only. */ in vcpu_worker()
147 * Stage 3, write guest memory and verify KVM returns -EFAULT for once in vcpu_worker()
154 TEST_ASSERT(r == -1 && errno == EFAULT, in vcpu_worker()
166 * being read-only. x86 and arm64 only at this time as skipping the in vcpu_worker()
171 vcpu->run->kvm_valid_regs = KVM_SYNC_X86_REGS; in vcpu_worker()
179 WRITE_ONCE(vcpu->run->kvm_dirty_regs, KVM_SYNC_X86_REGS); in vcpu_worker()
180 vcpu->run->s.regs.regs.rip += 3; in vcpu_worker()
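Reading the stage-3 fragments together, the host side presumably re-runs the vCPU until KVM_RUN succeeds, asserting that every failure is the expected EFAULT and manually skipping the faulting store. A sketch under those assumptions (note that rip += 3 matches the 3-byte hand-coded mov above):

    for (;;) {
        r = _vcpu_run(vcpu);
        if (!r)
            break;              /* KVM_RUN succeeded, guest moved on */

        TEST_ASSERT(r == -1 && errno == EFAULT,
                    "Expected EFAULT on write to read-only memory");

        /* Skip the 3-byte store and let the guest retry the loop. */
        WRITE_ONCE(vcpu->run->kvm_dirty_regs, KVM_SYNC_X86_REGS);
        vcpu->run->s.regs.regs.rip += 3;
    }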
219 nr_bytes = ((end_gpa - start_gpa) / nr_vcpus) & in spawn_workers()
220 ~((uint64_t)vm->page_size - 1); in spawn_workers()
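The mask rounds each vCPU's share down to a page boundary. A standalone check of the arithmetic with illustrative numbers (10 GiB split across 3 vCPUs with 4 KiB pages; none of these values come from the fragments):

    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t span = 10ull << 30;    /* end_gpa - start_gpa */
        uint64_t page_size = 4096, nr_vcpus = 3;
        uint64_t nr_bytes = (span / nr_vcpus) & ~(page_size - 1);

        /* Prints 3579138048: 10 GiB / 3, rounded down to 4 KiB. */
        printf("%" PRIu64 "\n", nr_bytes);
        return 0;
    }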
232 static void rendezvous_with_vcpus(struct timespec *time, const char *name) in rendezvous_with_vcpus() argument
243 abs(rendezvoused) - 1); in rendezvous_with_vcpus()
247 clock_gettime(CLOCK_MONOTONIC, time); in rendezvous_with_vcpus()
249 /* Release the vCPUs after getting the time of the previous action. */ in rendezvous_with_vcpus()
252 atomic_set(&rendezvous, -nr_vcpus - 1); in rendezvous_with_vcpus()
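These fragments show the boss's half of a sign-flipping barrier: the counter is armed at ±(nr_vcpus + 1), each worker steps it one toward ±1, and flipping the sign both releases the workers and arms the next rendezvous. The worker side would presumably look something like this sketch (the function name and cpu_relax() are assumptions):

    static void rendezvous_with_boss(void)
    {
        int orig = atomic_read(&rendezvous);

        if (orig > 0) {
            /* Arrive, then spin until the boss flips the sign. */
            atomic_dec(&rendezvous);
            while (atomic_read(&rendezvous) > 0)
                cpu_relax();
        } else {
            atomic_inc(&rendezvous);
            while (atomic_read(&rendezvous) < 0)
                cpu_relax();
        }
    }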
277 * 1gb-3gb, but it's simpler to skip straight to 4gb. in main()
284 int max_slots, slot, opt, fd; in main() local
306 while ((opt = getopt(argc, argv, "c:h:m:s:H")) != -1) { in main()
315 slot_size = 1ull * atoi_positive("Slot size", optarg) * SZ_1G; in main()
322 printf("usage: %s [-c nr_vcpus] [-m max_mem_in_gb] [-s slot_size_in_gb] [-H]\n", argv[0]); in main()
338 max_gpa = vm->max_gfn << vm->page_shift; in main()
347 /* Pre-fault the memory to avoid taking mmap_sem on guest page faults. */ in main()
348 for (i = 0; i < slot_size; i += vm->page_size) in main()
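The body of this pre-fault loop is not part of the match; touching one byte per page is presumably all it takes, e.g. (the fill value is an assumption):

        ((uint8_t *)mem)[i] = 0xaa;     /* fault in each page up front */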
352 for (slot = first_slot; slot < max_slots; slot++) { in main()
353 gpa = start_gpa + ((slot - first_slot) * slot_size); in main()
357 if ((gpa - start_gpa) >= max_mem) in main()
360 vm_set_user_memory_region(vm, slot, 0, gpa, slot_size, mem); in main()
367 for (i = 0; i < slot_size; i += vm->page_size) in main()
379 (gpa - start_gpa) / SZ_1G, nr_vcpus); in main()
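Plugging illustrative values into the gpa expression above makes the layout easier to follow (start_gpa = 4 GiB, slot_size = 2 GiB, first_slot = 1; numbers assumed, not from the fragments):

    /*
     *   slot 1 -> gpa 0x100000000 (4 GiB)
     *   slot 2 -> gpa 0x180000000 (6 GiB)
     *   slot 3 -> gpa 0x200000000 (8 GiB)
     *
     * Every slot is backed by the same host mapping 'mem', so host memory
     * is allocated once and aliased across all slots.
     */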
413 for (slot = (slot - 1) & ~1ull; slot >= first_slot; slot -= 2) in main()
414 vm_set_user_memory_region(vm, slot, 0, 0, 0, NULL); in main()
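A quick trace of the start expression with assumed values: if the setup loop exited with slot == 13 or 14, (slot - 1) & ~1ull yields 12, so the loop deletes slots 12, 10, 8, ... down to first_slot:

    /*
     * Passing a size of 0 (and a NULL host address) is the
     * KVM_SET_USER_MEMORY_REGION idiom for deleting a memslot, hence the
     * 0/NULL arguments to vm_set_user_memory_region().
     */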