Lines Matching full:vm (tools/testing/selftests/kvm/lib/kvm_util.c)

182 void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size)  in vm_enable_dirty_ring()  argument
184 if (vm_check_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL)) in vm_enable_dirty_ring()
185 vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING_ACQ_REL, ring_size); in vm_enable_dirty_ring()
187 vm_enable_cap(vm, KVM_CAP_DIRTY_LOG_RING, ring_size); in vm_enable_dirty_ring()
188 vm->dirty_ring_size = ring_size; in vm_enable_dirty_ring()
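
vm_enable_dirty_ring() prefers the ACQ_REL flavor of the capability, falls back to the plain dirty ring, and records the size on the VM. A minimal usage sketch, assuming a test that sizes the per-vCPU ring before adding any vCPUs (the byte count below is only an illustrative value):

#include "kvm_util.h"

/* Sketch only: enable dirty-ring harvesting before any vCPU exists.
 * 0x10000 bytes is an arbitrary, page-aligned, power-of-two example size. */
static void enable_dirty_ring_example(struct kvm_vm *vm)
{
	vm_enable_dirty_ring(vm, 0x10000);
}
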
191 static void vm_open(struct kvm_vm *vm) in vm_open() argument
193 vm->kvm_fd = _open_kvm_dev_path_or_exit(O_RDWR); in vm_open()
197 vm->fd = __kvm_ioctl(vm->kvm_fd, KVM_CREATE_VM, (void *)vm->type); in vm_open()
198 TEST_ASSERT(vm->fd >= 0, KVM_IOCTL_ERROR(KVM_CREATE_VM, vm->fd)); in vm_open()
251 * Initializes vm->vpages_valid to match the canonical VA space of the
259 __weak void vm_vaddr_populate_bitmap(struct kvm_vm *vm) in vm_vaddr_populate_bitmap() argument
261 sparsebit_set_num(vm->vpages_valid, in vm_vaddr_populate_bitmap()
262 0, (1ULL << (vm->va_bits - 1)) >> vm->page_shift); in vm_vaddr_populate_bitmap()
263 sparsebit_set_num(vm->vpages_valid, in vm_vaddr_populate_bitmap()
264 (~((1ULL << (vm->va_bits - 1)) - 1)) >> vm->page_shift, in vm_vaddr_populate_bitmap()
265 (1ULL << (vm->va_bits - 1)) >> vm->page_shift); in vm_vaddr_populate_bitmap()
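
For orientation, the arithmetic below works through the two canonical halves the default bitmap marks valid, assuming va_bits = 48 and 4 KiB pages (the common x86/arm64 case; other modes differ):

#include <stdint.h>

/* Worked example, not library code: page-index ranges marked valid. */
uint64_t half      = 1ULL << (48 - 1);      /* 0x0000800000000000 */
uint64_t nr_pages  = half >> 12;            /* 2^35 pages per half */
uint64_t low_base  = 0;                     /* VAs 0x0 .. 0x00007fffffffffff */
uint64_t high_base = (~(half - 1)) >> 12;   /* first page of 0xffff800000000000 */
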
270 struct kvm_vm *vm; in ____vm_create() local
272 vm = calloc(1, sizeof(*vm)); in ____vm_create()
273 TEST_ASSERT(vm != NULL, "Insufficient Memory"); in ____vm_create()
275 INIT_LIST_HEAD(&vm->vcpus); in ____vm_create()
276 vm->regions.gpa_tree = RB_ROOT; in ____vm_create()
277 vm->regions.hva_tree = RB_ROOT; in ____vm_create()
278 hash_init(vm->regions.slot_hash); in ____vm_create()
280 vm->mode = shape.mode; in ____vm_create()
281 vm->type = shape.type; in ____vm_create()
283 vm->pa_bits = vm_guest_mode_params[vm->mode].pa_bits; in ____vm_create()
284 vm->va_bits = vm_guest_mode_params[vm->mode].va_bits; in ____vm_create()
285 vm->page_size = vm_guest_mode_params[vm->mode].page_size; in ____vm_create()
286 vm->page_shift = vm_guest_mode_params[vm->mode].page_shift; in ____vm_create()
289 switch (vm->mode) { in ____vm_create()
291 vm->pgtable_levels = 4; in ____vm_create()
294 vm->pgtable_levels = 3; in ____vm_create()
297 vm->pgtable_levels = 4; in ____vm_create()
300 vm->pgtable_levels = 3; in ____vm_create()
304 vm->pgtable_levels = 4; in ____vm_create()
308 vm->pgtable_levels = 3; in ____vm_create()
314 vm->pgtable_levels = 4; in ____vm_create()
317 vm->pgtable_levels = 3; in ____vm_create()
321 kvm_get_cpu_address_width(&vm->pa_bits, &vm->va_bits); in ____vm_create()
322 kvm_init_vm_address_properties(vm); in ____vm_create()
324 * Ignore KVM support for 5-level paging (vm->va_bits == 57), in ____vm_create()
328 TEST_ASSERT(vm->va_bits == 48 || vm->va_bits == 57, in ____vm_create()
330 vm->va_bits); in ____vm_create()
332 vm->pa_bits); in ____vm_create()
333 vm->pgtable_levels = 4; in ____vm_create()
334 vm->va_bits = 48; in ____vm_create()
340 vm->pgtable_levels = 5; in ____vm_create()
343 vm->pgtable_levels = 5; in ____vm_create()
346 TEST_FAIL("Unknown guest mode: 0x%x", vm->mode); in ____vm_create()
350 TEST_ASSERT(!vm->type, "ARM doesn't support test-provided types"); in ____vm_create()
351 if (vm->pa_bits != 40) in ____vm_create()
352 vm->type = KVM_VM_TYPE_ARM_IPA_SIZE(vm->pa_bits); in ____vm_create()
355 vm_open(vm); in ____vm_create()
358 vm->vpages_valid = sparsebit_alloc(); in ____vm_create()
359 vm_vaddr_populate_bitmap(vm); in ____vm_create()
362 vm->max_gfn = vm_compute_max_gfn(vm); in ____vm_create()
365 vm->vpages_mapped = sparsebit_alloc(); in ____vm_create()
367 return vm; in ____vm_create()
386 * test code and other per-VM assets that will be loaded into memslot0. in vm_nr_pages_required()
415 struct kvm_vm *vm; in __vm_create() local
421 vm = ____vm_create(shape); in __vm_create()
423 vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS, 0, 0, nr_pages, 0); in __vm_create()
425 vm->memslots[i] = 0; in __vm_create()
427 kvm_vm_elf_load(vm, program_invocation_name); in __vm_create()
435 slot0 = memslot2region(vm, 0); in __vm_create()
436 ucall_init(vm, slot0->region.guest_phys_addr + slot0->region.memory_size); in __vm_create()
443 sync_global_to_guest(vm, guest_rng); in __vm_create()
445 kvm_arch_vm_post_create(vm); in __vm_create()
447 return vm; in __vm_create()
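
Tests normally reach __vm_create() through the public wrappers. A minimal sketch of the common pattern, assuming a test that only needs one vCPU and the default memslot layout:

#include "kvm_util.h"
#include "ucall_common.h"

static void guest_code(void)
{
	GUEST_DONE();
}

int main(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;

	/* Creates the VM, loads the test ELF, sets up ucall, adds vCPU 0. */
	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
	vcpu_run(vcpu);
	kvm_vm_free(vm);
	return 0;
}
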
451 * VM Create with customized parameters
454 * mode - VM Mode (e.g. VM_MODE_P52V48_4K)
463 * Pointer to opaque structure that describes the created VM.
465 * Creates a VM with the mode specified by mode (e.g. VM_MODE_P52V48_4K).
473 struct kvm_vm *vm; in __vm_create_with_vcpus() local
478 vm = __vm_create(shape, nr_vcpus, extra_mem_pages); in __vm_create_with_vcpus()
481 vcpus[i] = vm_vcpu_add(vm, i, guest_code); in __vm_create_with_vcpus()
483 return vm; in __vm_create_with_vcpus()
492 struct kvm_vm *vm; in __vm_create_shape_with_one_vcpu() local
494 vm = __vm_create_with_vcpus(shape, 1, extra_mem_pages, guest_code, vcpus); in __vm_create_shape_with_one_vcpu()
497 return vm; in __vm_create_shape_with_one_vcpu()
501 * VM Restart
504 * vm - VM that has been released before
508 * Reopens the file descriptors associated to the VM and reinstates the
535 __weak struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, in vm_arch_vcpu_recreate() argument
538 return __vm_vcpu_add(vm, vcpu_id); in vm_arch_vcpu_recreate()
541 struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm) in vm_recreate_with_one_vcpu() argument
543 kvm_vm_restart(vm); in vm_recreate_with_one_vcpu()
545 return vm_vcpu_recreate(vm, 0); in vm_recreate_with_one_vcpu()
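
vm_recreate_with_one_vcpu() exists for save/restore style tests. A hedged sketch of that flow, using the x86-only vcpu_save_state()/vcpu_load_state() helpers as the example (other architectures manage state differently):

#include "kvm_util.h"
#include "processor.h"

/* Sketch of an x86 save/restore round trip around kvm_vm_release(). */
static struct kvm_vcpu *save_and_recreate(struct kvm_vm *vm, struct kvm_vcpu *vcpu)
{
	struct kvm_x86_state *state = vcpu_save_state(vcpu);

	kvm_vm_release(vm);                   /* close the vCPU and VM fds */
	vcpu = vm_recreate_with_one_vcpu(vm); /* reopen them via kvm_vm_restart() */
	vcpu_load_state(vcpu, state);
	kvm_x86_state_cleanup(state);
	return vcpu;
}
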
622 * vm - Virtual Machine
623 * start - Starting VM physical address
624 * end - Ending VM physical address, inclusive.
638 userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end) in userspace_mem_region_find() argument
642 for (node = vm->regions.gpa_tree.rb_node; node; ) { in userspace_mem_region_find()
666 * VM VCPU Remove
675 * Removes a vCPU from a VM and frees its resources.
677 static void vm_vcpu_rm(struct kvm_vm *vm, struct kvm_vcpu *vcpu) in vm_vcpu_rm() argument
682 ret = munmap(vcpu->dirty_gfns, vm->dirty_ring_size); in vm_vcpu_rm()
714 static void __vm_mem_region_delete(struct kvm_vm *vm, in __vm_mem_region_delete() argument
719 rb_erase(&region->gpa_node, &vm->regions.gpa_tree); in __vm_mem_region_delete()
720 rb_erase(&region->hva_node, &vm->regions.hva_tree); in __vm_mem_region_delete()
740 * Destroys and frees the VM pointed to by vmp.
767 /* Free the structure describing the VM. */ in kvm_vm_free()
843 int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags, in __vm_set_user_memory_region() argument
854 return ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region); in __vm_set_user_memory_region()
857 void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags, in vm_set_user_memory_region() argument
860 int ret = __vm_set_user_memory_region(vm, slot, flags, gpa, size, hva); in vm_set_user_memory_region()
870 int __vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags, in __vm_set_user_memory_region2() argument
886 return ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION2, &region); in __vm_set_user_memory_region2()
889 void vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags, in vm_set_user_memory_region2() argument
893 int ret = __vm_set_user_memory_region2(vm, slot, flags, gpa, size, hva, in vm_set_user_memory_region2()
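
A sketch of the memslot wrapper from a test's point of view; the slot number, GPA and one-page size are arbitrary example values (tests reach for the "__" variant when they want the raw ioctl result, e.g. to probe invalid configurations):

#include <errno.h>
#include <sys/mman.h>
#include <unistd.h>
#include "kvm_util.h"
#include "test_util.h"

static void add_raw_slot_example(struct kvm_vm *vm)
{
	size_t size = getpagesize();
	void *hva = mmap(NULL, size, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	TEST_ASSERT(hva != MAP_FAILED, "mmap() failed");
	TEST_ASSERT(!__vm_set_user_memory_region(vm, 1, 0, 0x10000000, size, hva),
		    "KVM_SET_USER_MEMORY_REGION failed, errno %d", errno);
}
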
902 void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type, in vm_mem_add() argument
909 size_t mem_size = npages * vm->page_size; in vm_mem_add()
914 TEST_ASSERT(vm_adjust_num_guest_pages(vm->mode, npages) == npages, in vm_mem_add()
916 "Try npages=%d", vm_adjust_num_guest_pages(vm->mode, npages)); in vm_mem_add()
918 TEST_ASSERT((guest_paddr % vm->page_size) == 0, "Guest physical " in vm_mem_add()
920 " guest_paddr: 0x%lx vm->page_size: 0x%x", in vm_mem_add()
921 guest_paddr, vm->page_size); in vm_mem_add()
922 TEST_ASSERT((((guest_paddr >> vm->page_shift) + npages) - 1) in vm_mem_add()
923 <= vm->max_gfn, "Physical range beyond maximum " in vm_mem_add()
926 " vm->max_gfn: 0x%lx vm->page_size: 0x%x", in vm_mem_add()
927 guest_paddr, npages, vm->max_gfn, vm->page_size); in vm_mem_add()
934 vm, guest_paddr, (guest_paddr + npages * vm->page_size) - 1); in vm_mem_add()
941 guest_paddr, npages, vm->page_size, in vm_mem_add()
946 hash_for_each_possible(vm->regions.slot_hash, region, slot_node, in vm_mem_add()
1025 guest_memfd = vm_create_guest_memfd(vm, mem_size, guest_memfd_flags); in vm_mem_add()
1044 if (vm_arch_has_protected_memory(vm)) in vm_mem_add()
1047 guest_paddr >> vm->page_shift, npages); in vm_mem_add()
1051 region->region.memory_size = npages * vm->page_size; in vm_mem_add()
1053 ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region); in vm_mem_add()
1063 vm_userspace_mem_region_gpa_insert(&vm->regions.gpa_tree, region); in vm_mem_add()
1064 vm_userspace_mem_region_hva_insert(&vm->regions.hva_tree, region); in vm_mem_add()
1065 hash_add(vm->regions.slot_hash, &region->slot_node, slot); in vm_mem_add()
1081 void vm_userspace_mem_region_add(struct kvm_vm *vm, in vm_userspace_mem_region_add() argument
1086 vm_mem_add(vm, src_type, guest_paddr, slot, npages, flags, -1, 0); in vm_userspace_mem_region_add()
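
For the common case tests call vm_userspace_mem_region_add() directly; a sketch with illustrative numbers (anonymous backing, slot 1, 512 pages at GPA 0x10000000):

#include "kvm_util.h"

/* Sketch only: slot, GPA and page count are arbitrary example values. */
static void add_test_data_slot(struct kvm_vm *vm)
{
	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
				    0x10000000, 1, 512, 0);
}
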
1093 * vm - Virtual Machine
1105 memslot2region(struct kvm_vm *vm, uint32_t memslot) in memslot2region() argument
1109 hash_for_each_possible(vm->regions.slot_hash, region, slot_node, in memslot2region()
1116 fputs("---- vm dump ----\n", stderr); in memslot2region()
1117 vm_dump(stderr, vm, 2); in memslot2region()
1123 * VM Memory Region Flags Set
1126 * vm - Virtual Machine
1136 void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags) in vm_mem_region_set_flags() argument
1141 region = memslot2region(vm, slot); in vm_mem_region_set_flags()
1145 ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region); in vm_mem_region_set_flags()
1153 * VM Memory Region Move
1156 * vm - Virtual Machine
1166 void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa) in vm_mem_region_move() argument
1171 region = memslot2region(vm, slot); in vm_mem_region_move()
1175 ret = __vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region); in vm_mem_region_move()
1183 * VM Memory Region Delete
1186 * vm - Virtual Machine
1195 void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot) in vm_mem_region_delete() argument
1197 struct userspace_mem_region *region = memslot2region(vm, slot); in vm_mem_region_delete()
1200 vm_ioctl(vm, KVM_SET_USER_MEMORY_REGION2, &region->region); in vm_mem_region_delete()
1202 __vm_mem_region_delete(vm, region); in vm_mem_region_delete()
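
The three memslot-manipulation helpers are typically exercised like this (a sketch; slot 1 and the new GPA are arbitrary, and each call issues its own KVM_SET_USER_MEMORY_REGION2):

#include "kvm_util.h"

static void reshape_slot_example(struct kvm_vm *vm)
{
	vm_mem_region_set_flags(vm, 1, KVM_MEM_LOG_DIRTY_PAGES); /* enable dirty logging */
	vm_mem_region_move(vm, 1, 0x20000000);                    /* relocate the slot */
	vm_mem_region_delete(vm, 1);                              /* finally remove it */
}
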
1205 void vm_guest_mem_fallocate(struct kvm_vm *vm, uint64_t base, uint64_t size, in vm_guest_mem_fallocate() argument
1218 region = userspace_mem_region_find(vm, gpa, gpa); in vm_guest_mem_fallocate()
1249 static bool vcpu_exists(struct kvm_vm *vm, uint32_t vcpu_id) in vcpu_exists() argument
1253 list_for_each_entry(vcpu, &vm->vcpus, list) { in vcpu_exists()
1262 * Adds a virtual CPU to the VM specified by vm with the ID given by vcpu_id.
1265 struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id) in __vm_vcpu_add() argument
1270 TEST_ASSERT(!vcpu_exists(vm, vcpu_id), "vCPU%d already exists", vcpu_id); in __vm_vcpu_add()
1276 vcpu->vm = vm; in __vm_vcpu_add()
1278 vcpu->fd = __vm_ioctl(vm, KVM_CREATE_VCPU, (void *)(unsigned long)vcpu_id); in __vm_vcpu_add()
1279 TEST_ASSERT_VM_VCPU_IOCTL(vcpu->fd >= 0, KVM_CREATE_VCPU, vcpu->fd, vm); in __vm_vcpu_add()
1290 list_add(&vcpu->list, &vm->vcpus); in __vm_vcpu_add()
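
__vm_vcpu_add() only creates and maps the vCPU; tests that want runnable vCPUs normally go through the arch wrapper vm_vcpu_add(), which also points each vCPU at guest code. A sketch with an illustrative vCPU count:

#include "kvm_util.h"

/* Sketch: populate a bare VM with four runnable vCPUs. */
static void add_vcpus_example(struct kvm_vm *vm, void *guest_code)
{
	int i;

	for (i = 0; i < 4; i++)
		vm_vcpu_add(vm, i, guest_code);
}
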
1296 * VM Virtual Address Unused Gap
1299 * vm - Virtual Machine
1310 * Within the VM specified by vm, locates the lowest starting virtual
1315 vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, in vm_vaddr_unused_gap() argument
1318 uint64_t pages = (sz + vm->page_size - 1) >> vm->page_shift; in vm_vaddr_unused_gap()
1321 uint64_t pgidx_start = (vaddr_min + vm->page_size - 1) >> vm->page_shift; in vm_vaddr_unused_gap()
1322 if ((pgidx_start * vm->page_size) < vaddr_min) in vm_vaddr_unused_gap()
1326 if (!sparsebit_is_set_num(vm->vpages_valid, in vm_vaddr_unused_gap()
1328 pgidx_start = sparsebit_next_set_num(vm->vpages_valid, in vm_vaddr_unused_gap()
1337 if (sparsebit_is_clear_num(vm->vpages_mapped, in vm_vaddr_unused_gap()
1340 pgidx_start = sparsebit_next_clear_num(vm->vpages_mapped, in vm_vaddr_unused_gap()
1349 if (!sparsebit_is_set_num(vm->vpages_valid, in vm_vaddr_unused_gap()
1352 vm->vpages_valid, pgidx_start, pages); in vm_vaddr_unused_gap()
1365 TEST_ASSERT(sparsebit_is_set_num(vm->vpages_valid, in vm_vaddr_unused_gap()
1371 TEST_ASSERT(sparsebit_is_clear_num(vm->vpages_mapped, in vm_vaddr_unused_gap()
1378 return pgidx_start * vm->page_size; in vm_vaddr_unused_gap()
1381 static vm_vaddr_t ____vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, in ____vm_vaddr_alloc() argument
1386 uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0); in ____vm_vaddr_alloc()
1388 virt_pgd_alloc(vm); in ____vm_vaddr_alloc()
1389 vm_paddr_t paddr = __vm_phy_pages_alloc(vm, pages, in ____vm_vaddr_alloc()
1390 KVM_UTIL_MIN_PFN * vm->page_size, in ____vm_vaddr_alloc()
1391 vm->memslots[type], protected); in ____vm_vaddr_alloc()
1397 vm_vaddr_t vaddr_start = vm_vaddr_unused_gap(vm, sz, vaddr_min); in ____vm_vaddr_alloc()
1401 pages--, vaddr += vm->page_size, paddr += vm->page_size) { in ____vm_vaddr_alloc()
1403 virt_pg_map(vm, vaddr, paddr); in ____vm_vaddr_alloc()
1405 sparsebit_set(vm->vpages_mapped, vaddr >> vm->page_shift); in ____vm_vaddr_alloc()
1411 vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min, in __vm_vaddr_alloc() argument
1414 return ____vm_vaddr_alloc(vm, sz, vaddr_min, type, in __vm_vaddr_alloc()
1415 vm_arch_has_protected_memory(vm)); in __vm_vaddr_alloc()
1418 vm_vaddr_t vm_vaddr_alloc_shared(struct kvm_vm *vm, size_t sz, in vm_vaddr_alloc_shared() argument
1422 return ____vm_vaddr_alloc(vm, sz, vaddr_min, type, false); in vm_vaddr_alloc_shared()
1426 * VM Virtual Address Allocate
1429 * vm - Virtual Machine
1438 * Allocates at least sz bytes within the virtual address space of the vm
1439 * given by vm. The allocated bytes are mapped to a virtual address >=
1444 vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min) in vm_vaddr_alloc() argument
1446 return __vm_vaddr_alloc(vm, sz, vaddr_min, MEM_REGION_TEST_DATA); in vm_vaddr_alloc()
1450 * VM Virtual Address Allocate Pages
1453 * vm - Virtual Machine
1461 * space of the vm.
1463 vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages) in vm_vaddr_alloc_pages() argument
1465 return vm_vaddr_alloc(vm, nr_pages * getpagesize(), KVM_UTIL_MIN_VADDR); in vm_vaddr_alloc_pages()
1468 vm_vaddr_t __vm_vaddr_alloc_page(struct kvm_vm *vm, enum kvm_mem_region_type type) in __vm_vaddr_alloc_page() argument
1470 return __vm_vaddr_alloc(vm, getpagesize(), KVM_UTIL_MIN_VADDR, type); in __vm_vaddr_alloc_page()
1474 * VM Virtual Address Allocate Page
1477 * vm - Virtual Machine
1485 * space of the vm.
1487 vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm) in vm_vaddr_alloc_page() argument
1489 return vm_vaddr_alloc_pages(vm, 1); in vm_vaddr_alloc_page()
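
A sketch tying the allocator to the host-side view of guest memory: allocate one page of test data in the guest's address space and initialize it from the host (the 0xaa fill pattern is arbitrary):

#include <string.h>
#include <unistd.h>
#include "kvm_util.h"

static vm_vaddr_t alloc_scratch_page(struct kvm_vm *vm)
{
	vm_vaddr_t gva = vm_vaddr_alloc_page(vm);

	/* Write through the host alias of the freshly mapped page. */
	memset(addr_gva2hva(vm, gva), 0xaa, getpagesize());
	return gva;
}
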
1493 * Map a range of VM virtual address to the VM's physical address
1496 * vm - Virtual Machine
1498 * paddr - VM Physical Address
1505 * Within the VM given by @vm, creates a virtual translation for
1508 void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr, in virt_map() argument
1511 size_t page_size = vm->page_size; in virt_map()
1518 virt_pg_map(vm, vaddr, paddr); in virt_map()
1519 sparsebit_set(vm->vpages_mapped, vaddr >> vm->page_shift); in virt_map()
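
virt_map() only installs guest page tables; the physical pages must already belong to a memslot. A sketch identity-mapping part of a slot added earlier (addresses and page count are illustrative and must be page aligned):

#include "kvm_util.h"

/* Sketch: make GVA 0x10000000 translate to the same GPA for 4 pages. */
static void identity_map_example(struct kvm_vm *vm)
{
	virt_map(vm, 0x10000000, 0x10000000, 4);
}
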
1527 * Address VM Physical to Host Virtual
1530 * vm - Virtual Machine
1531 * gpa - VM physical address
1538 * Locates the memory region containing the VM physical address given
1539 * by gpa, within the VM given by vm. When found, the host virtual
1540 * address providing the memory to the vm physical address is returned.
1543 void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa) in addr_gpa2hva() argument
1547 gpa = vm_untag_gpa(vm, gpa); in addr_gpa2hva()
1549 region = userspace_mem_region_find(vm, gpa, gpa); in addr_gpa2hva()
1551 TEST_FAIL("No vm physical memory at 0x%lx", gpa); in addr_gpa2hva()
1560 * Address Host Virtual to VM Physical
1563 * vm - Virtual Machine
1569 * Equivalent VM physical address
1572 * by hva, within the VM given by vm. When found, the equivalent
1573 * VM physical address is returned. A TEST_ASSERT failure occurs if no
1576 vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva) in addr_hva2gpa() argument
1580 for (node = vm->regions.hva_tree.rb_node; node; ) { in addr_hva2gpa()
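
For an untagged GPA inside a registered memslot the two translations are inverses; a sketch of the round trip:

#include "kvm_util.h"
#include "test_util.h"

static void round_trip_example(struct kvm_vm *vm, vm_paddr_t gpa)
{
	void *hva = addr_gpa2hva(vm, gpa);

	TEST_ASSERT(addr_hva2gpa(vm, hva) == gpa,
		    "GPA -> HVA -> GPA round trip mismatch");
}
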
1601 * Address VM physical to Host Virtual *alias*.
1604 * vm - Virtual Machine
1605 * gpa - VM physical address
1619 void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa) in addr_gpa2alias() argument
1624 region = userspace_mem_region_find(vm, gpa, gpa); in addr_gpa2alias()
1635 /* Create an interrupt controller chip for the specified VM. */
1636 void vm_create_irqchip(struct kvm_vm *vm) in vm_create_irqchip() argument
1638 vm_ioctl(vm, KVM_CREATE_IRQCHIP, NULL); in vm_create_irqchip()
1640 vm->has_irqchip = true; in vm_create_irqchip()
1703 uint32_t size = vcpu->vm->dirty_ring_size; in vcpu_map_dirty_ring()
1744 int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type) in __kvm_test_create_device() argument
1751 return __vm_ioctl(vm, KVM_CREATE_DEVICE, &create_dev); in __kvm_test_create_device()
1754 int __kvm_create_device(struct kvm_vm *vm, uint64_t type) in __kvm_create_device() argument
1763 err = __vm_ioctl(vm, KVM_CREATE_DEVICE, &create_dev); in __kvm_create_device()
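
A sketch of the probe-then-create pattern around these wrappers; the device type is illustrative (the arm64 vgic tests use KVM_DEV_TYPE_ARM_VGIC_V3, other tests use other types):

#include "kvm_util.h"

/* Sketch: only create the device if KVM reports the type as supported. */
static int create_device_example(struct kvm_vm *vm)
{
	if (__kvm_test_create_device(vm, KVM_DEV_TYPE_ARM_VGIC_V3))
		return -1;              /* type not supported on this host */

	return kvm_create_device(vm, KVM_DEV_TYPE_ARM_VGIC_V3);
}
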
1796 int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level) in _kvm_irq_line() argument
1803 return __vm_ioctl(vm, KVM_IRQ_LINE, &irq_level); in _kvm_irq_line()
1806 void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level) in kvm_irq_line() argument
1808 int ret = _kvm_irq_line(vm, irq, level); in kvm_irq_line()
1844 int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing) in _kvm_gsi_routing_write() argument
1849 ret = __vm_ioctl(vm, KVM_SET_GSI_ROUTING, routing); in _kvm_gsi_routing_write()
1855 void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing) in kvm_gsi_routing_write() argument
1859 ret = _kvm_gsi_routing_write(vm, routing); in kvm_gsi_routing_write()
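
A sketch of driving an interrupt line once an in-kernel irqchip exists; GSI 10 is an arbitrary example and the meaning of the number is architecture specific:

#include "kvm_util.h"

static void pulse_irq_example(struct kvm_vm *vm)
{
	vm_create_irqchip(vm);   /* KVM_CREATE_IRQCHIP, once per VM */
	kvm_irq_line(vm, 10, 1); /* assert the line */
	kvm_irq_line(vm, 10, 0); /* deassert it */
}
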
1864 * VM Dump
1867 * vm - Virtual Machine
1875 * Dumps the current state of the VM given by vm, to the FILE stream
1878 void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) in vm_dump() argument
1884 fprintf(stream, "%*smode: 0x%x\n", indent, "", vm->mode); in vm_dump()
1885 fprintf(stream, "%*sfd: %i\n", indent, "", vm->fd); in vm_dump()
1886 fprintf(stream, "%*spage_size: 0x%x\n", indent, "", vm->page_size); in vm_dump()
1888 hash_for_each(vm->regions.slot_hash, ctr, region, slot_node) { in vm_dump()
1902 sparsebit_dump(stream, vm->vpages_mapped, indent + 2); in vm_dump()
1904 vm->pgd_created); in vm_dump()
1905 if (vm->pgd_created) { in vm_dump()
1908 virt_dump(stream, vm, indent + 4); in vm_dump()
1912 list_for_each_entry(vcpu, &vm->vcpus, list) in vm_dump()
1997 * vm - Virtual Machine
2008 * Within the VM specified by vm, locates a range of available physical
2013 vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, in __vm_phy_pages_alloc() argument
2022 TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address " in __vm_phy_pages_alloc()
2025 paddr_min, vm->page_size); in __vm_phy_pages_alloc()
2027 region = memslot2region(vm, memslot); in __vm_phy_pages_alloc()
2031 base = pg = paddr_min >> vm->page_shift; in __vm_phy_pages_alloc()
2044 paddr_min, vm->page_size, memslot); in __vm_phy_pages_alloc()
2045 fputs("---- vm dump ----\n", stderr); in __vm_phy_pages_alloc()
2046 vm_dump(stderr, vm, 2); in __vm_phy_pages_alloc()
2056 return base * vm->page_size; in __vm_phy_pages_alloc()
2059 vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min, in vm_phy_page_alloc() argument
2062 return vm_phy_pages_alloc(vm, 1, paddr_min, memslot); in vm_phy_page_alloc()
2065 vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm) in vm_alloc_page_table() argument
2067 return vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, in vm_alloc_page_table()
2068 vm->memslots[MEM_REGION_PT]); in vm_alloc_page_table()
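
A sketch of carving guest-physical scratch memory out of an existing memslot; memslot 0 and KVM_UTIL_MIN_PFN follow the library's usual defaults, but both are the caller's choice:

#include "kvm_util.h"

static vm_paddr_t alloc_scratch_gpa(struct kvm_vm *vm)
{
	return vm_phy_page_alloc(vm, KVM_UTIL_MIN_PFN * vm->page_size, 0);
}
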
2075 * vm - Virtual Machine
2076 * gva - VM virtual address
2083 void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva) in addr_gva2hva() argument
2085 return addr_gpa2hva(vm, addr_gva2gpa(vm, gva)); in addr_gva2hva()
2088 unsigned long __weak vm_compute_max_gfn(struct kvm_vm *vm) in vm_compute_max_gfn() argument
2090 return ((1ULL << vm->pa_bits) >> vm->page_shift) - 1; in vm_compute_max_gfn()
2205 * vm - the VM for which the stat should be read
2214 void __vm_get_stat(struct kvm_vm *vm, const char *stat_name, uint64_t *data, in __vm_get_stat() argument
2221 if (!vm->stats_fd) { in __vm_get_stat()
2222 vm->stats_fd = vm_get_stats_fd(vm); in __vm_get_stat()
2223 read_stats_header(vm->stats_fd, &vm->stats_header); in __vm_get_stat()
2224 vm->stats_desc = read_stats_descriptors(vm->stats_fd, in __vm_get_stat()
2225 &vm->stats_header); in __vm_get_stat()
2228 size_desc = get_stats_descriptor_size(&vm->stats_header); in __vm_get_stat()
2230 for (i = 0; i < vm->stats_header.num_desc; ++i) { in __vm_get_stat()
2231 desc = (void *)vm->stats_desc + (i * size_desc); in __vm_get_stat()
2236 read_stat_data(vm->stats_fd, &vm->stats_header, desc, in __vm_get_stat()
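
A sketch of reading one value through the binary-stats interface; "remote_tlb_flush" is only an example stat name and not every architecture exposes it:

#include "kvm_util.h"

static uint64_t read_vm_stat_example(struct kvm_vm *vm)
{
	uint64_t val;

	__vm_get_stat(vm, "remote_tlb_flush", &val, 1);
	return val;
}
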
2243 __weak void kvm_arch_vm_post_create(struct kvm_vm *vm) in kvm_arch_vm_post_create() argument
2262 bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr) in vm_is_gpa_protected() argument
2267 if (!vm_arch_has_protected_memory(vm)) in vm_is_gpa_protected()
2270 region = userspace_mem_region_find(vm, paddr, paddr); in vm_is_gpa_protected()
2271 TEST_ASSERT(region, "No vm physical memory at 0x%lx", paddr); in vm_is_gpa_protected()
2273 pg = paddr >> vm->page_shift; in vm_is_gpa_protected()
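
A sketch of how a test can guard host-side accesses when the VM may have protected (e.g. encrypted) guest memory:

#include <string.h>
#include "kvm_util.h"

/* Sketch: only touch the page from the host if it is not protected. */
static void maybe_clear_page(struct kvm_vm *vm, vm_paddr_t gpa)
{
	if (!vm_is_gpa_protected(vm, gpa))
		memset(addr_gpa2hva(vm, gpa), 0, vm->page_size);
}
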