Lines Matching full:vm

53 	struct kvm_vm *vm;  member
137 #define kvm_for_each_vcpu(vm, i, vcpu) \ argument
138 for ((i) = 0; (i) <= (vm)->last_vcpu_id; (i)++) \
139 if (!((vcpu) = vm->vcpus[i])) \
144 memslot2region(struct kvm_vm *vm, uint32_t memslot);
146 static inline struct userspace_mem_region *vm_get_mem_region(struct kvm_vm *vm, in vm_get_mem_region() argument
150 return memslot2region(vm, vm->memslots[type]); in vm_get_mem_region()
293 static __always_inline void static_assert_is_vm(struct kvm_vm *vm) { } in static_assert_is_vm() argument
295 #define __vm_ioctl(vm, cmd, arg) \ argument
297 static_assert_is_vm(vm); \
298 kvm_do_ioctl((vm)->fd, cmd, arg); \
302 * Assert that a VM or vCPU ioctl() succeeded, with extra magic to detect if
303 * the ioctl() failed because KVM killed/bugged the VM. To detect a dead VM,
306 * return 0 or 1. If KVM kills a VM, KVM returns -EIO for all ioctl()s for the
307 * VM and its vCPUs, including KVM_CHECK_EXTENSION.
309 #define __TEST_ASSERT_VM_VCPU_IOCTL(cond, name, ret, vm) \ argument
313 static_assert_is_vm(vm); \
319 __vm_ioctl(vm, KVM_CHECK_EXTENSION, (void *)KVM_CAP_USER_MEMORY) < 0) { \
320 TEST_ASSERT(errno == EIO, "KVM killed the VM, should return -EIO"); \
321 TEST_FAIL("KVM killed/bugged the VM, check the kernel log for clues"); \
327 #define TEST_ASSERT_VM_VCPU_IOCTL(cond, cmd, ret, vm) \ argument
328 __TEST_ASSERT_VM_VCPU_IOCTL(cond, #cmd, ret, vm)
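
A minimal sketch of the dead-VM probe the comment above describes, written against the raw KVM uAPI rather than the selftests macros; the helper name is hypothetical. KVM_CHECK_EXTENSION on KVM_CAP_USER_MEMORY normally returns 0 or 1, but once KVM has killed/bugged the VM, every ioctl() on the VM fd fails with -EIO.

#include <errno.h>
#include <stdbool.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical helper, not part of the selftests library. */
static bool vm_fd_is_dead(int vm_fd)
{
	/* A healthy VM answers 0 or 1; a killed/bugged VM answers -EIO. */
	int r = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_USER_MEMORY);

	return r < 0 && errno == EIO;
}
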
330 #define vm_ioctl(vm, cmd, arg) \ argument
332 int ret = __vm_ioctl(vm, cmd, arg); \
334 __TEST_ASSERT_VM_VCPU_IOCTL(!ret, #cmd, ret, vm); \
349 __TEST_ASSERT_VM_VCPU_IOCTL(!ret, #cmd, ret, (vcpu)->vm); \
356 static inline int vm_check_cap(struct kvm_vm *vm, long cap) in vm_check_cap() argument
358 int ret = __vm_ioctl(vm, KVM_CHECK_EXTENSION, (void *)cap); in vm_check_cap()
360 TEST_ASSERT_VM_VCPU_IOCTL(ret >= 0, KVM_CHECK_EXTENSION, ret, vm); in vm_check_cap()
364 static inline int __vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0) in __vm_enable_cap() argument
368 return __vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap); in __vm_enable_cap()
370 static inline void vm_enable_cap(struct kvm_vm *vm, uint32_t cap, uint64_t arg0) in vm_enable_cap() argument
374 vm_ioctl(vm, KVM_ENABLE_CAP, &enable_cap); in vm_enable_cap()
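
The listing elides how the kvm_enable_cap argument is built before the KVM_ENABLE_CAP call; a sketch against the raw uAPI, assuming a plain VM fd instead of the selftests wrappers (helper name is hypothetical).

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical helper: enable a VM capability with a single argument. */
static int raw_vm_enable_cap(int vm_fd, uint32_t cap, uint64_t arg0)
{
	struct kvm_enable_cap enable_cap = {
		.cap  = cap,
		.args = { arg0 },	/* args[1..3] stay zero */
	};

	return ioctl(vm_fd, KVM_ENABLE_CAP, &enable_cap);
}
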
377 static inline void vm_set_memory_attributes(struct kvm_vm *vm, uint64_t gpa, in vm_set_memory_attributes() argument
394 vm_ioctl(vm, KVM_SET_MEMORY_ATTRIBUTES, &attr); in vm_set_memory_attributes()
398 static inline void vm_mem_set_private(struct kvm_vm *vm, uint64_t gpa, in vm_mem_set_private() argument
401 vm_set_memory_attributes(vm, gpa, size, KVM_MEMORY_ATTRIBUTE_PRIVATE); in vm_mem_set_private()
404 static inline void vm_mem_set_shared(struct kvm_vm *vm, uint64_t gpa, in vm_mem_set_shared() argument
407 vm_set_memory_attributes(vm, gpa, size, 0); in vm_mem_set_shared()
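
vm_mem_set_private()/vm_mem_set_shared() both reduce to one attributes ioctl; a sketch of the raw call, using the kvm_memory_attributes uAPI from <linux/kvm.h> (helper name is hypothetical).

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical helper: attrs = KVM_MEMORY_ATTRIBUTE_PRIVATE marks the range
 * private, attrs = 0 makes it shared again.
 */
static int raw_set_memory_attributes(int vm_fd, uint64_t gpa, uint64_t size,
				     uint64_t attrs)
{
	struct kvm_memory_attributes attr = {
		.address    = gpa,
		.size       = size,
		.attributes = attrs,
		.flags      = 0,
	};

	return ioctl(vm_fd, KVM_SET_MEMORY_ATTRIBUTES, &attr);
}
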
410 void vm_guest_mem_fallocate(struct kvm_vm *vm, uint64_t gpa, uint64_t size,
413 static inline void vm_guest_mem_punch_hole(struct kvm_vm *vm, uint64_t gpa, in vm_guest_mem_punch_hole() argument
416 vm_guest_mem_fallocate(vm, gpa, size, true); in vm_guest_mem_punch_hole()
419 static inline void vm_guest_mem_allocate(struct kvm_vm *vm, uint64_t gpa, in vm_guest_mem_allocate() argument
422 vm_guest_mem_fallocate(vm, gpa, size, false); in vm_guest_mem_allocate()
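
vm_guest_mem_punch_hole()/vm_guest_mem_allocate() operate on the guest_memfd backing the GPA range via fallocate(); a simplified sketch that takes the file offset directly, whereas the library helper derives it from the memslot (not shown here). Helper name is hypothetical.

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdbool.h>
#include <stdint.h>

/* Hypothetical helper acting directly on a guest_memfd. */
static int guest_memfd_fallocate(int guest_memfd, uint64_t offset,
				 uint64_t len, bool punch_hole)
{
	/* Punching a hole requires keeping the file size unchanged. */
	int mode = punch_hole ? FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE : 0;

	return fallocate(guest_memfd, mode, offset, len);
}
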
425 void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size);
431 void kvm_vm_elf_load(struct kvm_vm *vm, const char *filename);
434 void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);
436 static inline void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log) in kvm_vm_get_dirty_log() argument
440 vm_ioctl(vm, KVM_GET_DIRTY_LOG, &args); in kvm_vm_get_dirty_log()
443 static inline void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log, in kvm_vm_clear_dirty_log() argument
453 vm_ioctl(vm, KVM_CLEAR_DIRTY_LOG, &args); in kvm_vm_clear_dirty_log()
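
The elided body of kvm_vm_get_dirty_log() fills a kvm_dirty_log struct; a sketch of the raw ioctl, assuming a plain VM fd (helper name is hypothetical).

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical helper: fetch the dirty bitmap for one memslot.  The caller
 * must size 'bitmap' to one bit per page in the slot.
 */
static int raw_get_dirty_log(int vm_fd, int slot, void *bitmap)
{
	struct kvm_dirty_log args = {
		.slot         = slot,
		.dirty_bitmap = bitmap,
	};

	return ioctl(vm_fd, KVM_GET_DIRTY_LOG, &args);
}
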
456 static inline uint32_t kvm_vm_reset_dirty_ring(struct kvm_vm *vm) in kvm_vm_reset_dirty_ring() argument
458 return __vm_ioctl(vm, KVM_RESET_DIRTY_RINGS, NULL); in kvm_vm_reset_dirty_ring()
461 static inline void kvm_vm_register_coalesced_io(struct kvm_vm *vm, in kvm_vm_register_coalesced_io() argument
471 vm_ioctl(vm, KVM_REGISTER_COALESCED_MMIO, &zone); in kvm_vm_register_coalesced_io()
474 static inline void kvm_vm_unregister_coalesced_io(struct kvm_vm *vm, in kvm_vm_unregister_coalesced_io() argument
484 vm_ioctl(vm, KVM_UNREGISTER_COALESCED_MMIO, &zone); in kvm_vm_unregister_coalesced_io()
487 static inline int vm_get_stats_fd(struct kvm_vm *vm) in vm_get_stats_fd() argument
489 int fd = __vm_ioctl(vm, KVM_GET_STATS_FD, NULL); in vm_get_stats_fd()
491 TEST_ASSERT_VM_VCPU_IOCTL(fd >= 0, KVM_GET_STATS_FD, fd, vm); in vm_get_stats_fd()
534 void __vm_get_stat(struct kvm_vm *vm, const char *stat_name, uint64_t *data,
537 static inline uint64_t vm_get_stat(struct kvm_vm *vm, const char *stat_name) in vm_get_stat() argument
541 __vm_get_stat(vm, stat_name, &data, 1); in vm_get_stat()
545 void vm_create_irqchip(struct kvm_vm *vm);
547 static inline int __vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size, in __vm_create_guest_memfd() argument
555 return __vm_ioctl(vm, KVM_CREATE_GUEST_MEMFD, &guest_memfd); in __vm_create_guest_memfd()
558 static inline int vm_create_guest_memfd(struct kvm_vm *vm, uint64_t size, in vm_create_guest_memfd() argument
561 int fd = __vm_create_guest_memfd(vm, size, flags); in vm_create_guest_memfd()
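
__vm_create_guest_memfd() wraps KVM_CREATE_GUEST_MEMFD; a sketch of the raw ioctl with the kvm_create_guest_memfd uAPI struct (helper name is hypothetical).

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical helper: returns the new guest_memfd on success, < 0 on error. */
static int raw_create_guest_memfd(int vm_fd, uint64_t size, uint64_t flags)
{
	struct kvm_create_guest_memfd guest_memfd = {
		.size  = size,
		.flags = flags,
	};

	return ioctl(vm_fd, KVM_CREATE_GUEST_MEMFD, &guest_memfd);
}
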
567 void vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
569 int __vm_set_user_memory_region(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
571 void vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
574 int __vm_set_user_memory_region2(struct kvm_vm *vm, uint32_t slot, uint32_t flags,
578 void vm_userspace_mem_region_add(struct kvm_vm *vm,
582 void vm_mem_add(struct kvm_vm *vm, enum vm_mem_backing_src_type src_type,
587 static inline bool vm_arch_has_protected_memory(struct kvm_vm *vm) in vm_arch_has_protected_memory() argument
593 void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags);
594 void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa);
595 void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot);
596 struct kvm_vcpu *__vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
597 void vm_populate_vaddr_bitmap(struct kvm_vm *vm);
598 vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
599 vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min);
600 vm_vaddr_t __vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
602 vm_vaddr_t vm_vaddr_alloc_shared(struct kvm_vm *vm, size_t sz,
605 vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages);
606 vm_vaddr_t __vm_vaddr_alloc_page(struct kvm_vm *vm,
608 vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm);
610 void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
612 void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa);
613 void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva);
614 vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva);
615 void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa);
621 static inline vm_paddr_t vm_untag_gpa(struct kvm_vm *vm, vm_paddr_t gpa) in vm_untag_gpa() argument
623 return gpa & ~vm->gpa_tag_mask; in vm_untag_gpa()
758 TEST_ASSERT_VM_VCPU_IOCTL(fd >= 0, KVM_CHECK_EXTENSION, fd, vcpu->vm); in vcpu_get_stats_fd()
827 int __kvm_test_create_device(struct kvm_vm *vm, uint64_t type);
828 int __kvm_create_device(struct kvm_vm *vm, uint64_t type);
830 static inline int kvm_create_device(struct kvm_vm *vm, uint64_t type) in kvm_create_device() argument
832 int fd = __kvm_create_device(vm, type); in kvm_create_device()
841 * VM VCPU Args Set
844 * vm - Virtual Machine
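
The "VM VCPU Args Set" block appears to document the helper that loads guest function arguments into the vCPU's argument registers (vcpu_args_set() in current selftests); a hedged usage sketch, assuming the selftests include path and a hypothetical two-argument guest entry point.

#include <stdint.h>
#include "kvm_util.h"

/* Hypothetical guest entry point taking two uint64_t arguments. */
extern void guest_main(uint64_t a, uint64_t b);

static void setup_vcpu_args(struct kvm_vcpu *vcpu)
{
	/* Load both guest arguments per the arch's C calling convention. */
	vcpu_args_set(vcpu, 2, 0xcafeull, 0xbeefull);
}
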
859 void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);
860 int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level);
867 int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);
868 void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing);
872 vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
874 vm_paddr_t __vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
877 vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm);
879 static inline vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num, in vm_phy_pages_alloc() argument
887 return __vm_phy_pages_alloc(vm, num, paddr_min, memslot, in vm_phy_pages_alloc()
888 vm_arch_has_protected_memory(vm)); in vm_phy_pages_alloc()
940 * Create a VM with a single vCPU with reasonable defaults and @extra_mem_pages
941 * additional pages of guest memory. Returns the VM and vCPU (via out param).
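
A hedged usage sketch of the vm_create_with_one_vcpu() helper this comment documents, assuming the selftests headers kvm_util.h and ucall_common.h are on the include path.

#include <stdint.h>
#include "kvm_util.h"
#include "ucall_common.h"

/* Hypothetical guest code: report completion and stop. */
static void guest_code(void)
{
	GUEST_DONE();
}

static void run_one_vcpu_test(void)
{
	struct kvm_vcpu *vcpu;
	struct kvm_vm *vm;

	/* VM with reasonable defaults plus one vCPU running guest_code. */
	vm = vm_create_with_one_vcpu(&vcpu, guest_code);
	vcpu_run(vcpu);
	kvm_vm_free(vm);
}
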
964 struct kvm_vcpu *vm_recreate_with_one_vcpu(struct kvm_vm *vm);
971 unsigned long vm_compute_max_gfn(struct kvm_vm *vm);
987 #define sync_global_to_guest(vm, g) ({ \ argument
988 typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g)); \
992 #define sync_global_from_guest(vm, g) ({ \ argument
993 typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g)); \
998 * Write a global value, but only in the VM's (guest's) domain. Primarily used
999 * for "globals" that hold per-VM values (VMs always duplicate code and global
1003 #define write_guest_global(vm, g, val) ({ \ argument
1004 typeof(g) *_p = addr_gva2hva(vm, (vm_vaddr_t)&(g)); \
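
A hedged usage sketch of the three global-sync macros above; nr_iterations is a hypothetical global shared between host and guest code, and the include path is assumed.

#include <stdint.h>
#include "kvm_util.h"

/* Hypothetical global shared between host and guest code. */
static uint64_t nr_iterations;

static void propagate_globals(struct kvm_vm *vm)
{
	nr_iterations = 100;

	/* Copy the host's value into this VM's copy of the global. */
	sync_global_to_guest(vm, nr_iterations);

	/* Update only the guest's copy, leaving the host value untouched. */
	write_guest_global(vm, nr_iterations, 200);

	/* Pull the guest's copy back into the host global. */
	sync_global_from_guest(vm, nr_iterations);
}
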
1025 * vm - Virtual Machine
1026 * vcpu_id - The id of the VCPU to add to the VM.
1028 struct kvm_vcpu *vm_arch_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id);
1031 static inline struct kvm_vcpu *vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpu_id, in vm_vcpu_add() argument
1034 struct kvm_vcpu *vcpu = vm_arch_vcpu_add(vm, vcpu_id); in vm_vcpu_add()
1041 /* Re-create a vCPU after restarting a VM, e.g. for state save/restore tests. */
1042 struct kvm_vcpu *vm_arch_vcpu_recreate(struct kvm_vm *vm, uint32_t vcpu_id);
1044 static inline struct kvm_vcpu *vm_vcpu_recreate(struct kvm_vm *vm, in vm_vcpu_recreate() argument
1047 return vm_arch_vcpu_recreate(vm, vcpu_id); in vm_vcpu_recreate()
1052 void virt_arch_pgd_alloc(struct kvm_vm *vm);
1054 static inline void virt_pgd_alloc(struct kvm_vm *vm) in virt_pgd_alloc() argument
1056 virt_arch_pgd_alloc(vm); in virt_pgd_alloc()
1060 * VM Virtual Page Map
1063 * vm - Virtual Machine
1064 * vaddr - VM Virtual Address
1065 * paddr - VM Physical Address
1072 * Within @vm, creates a virtual translation for the page starting
1075 void virt_arch_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr);
1077 static inline void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr) in virt_pg_map() argument
1079 virt_arch_pg_map(vm, vaddr, paddr); in virt_pg_map()
1087 * vm - Virtual Machine
1088 * gva - VM virtual address
1093 * Equivalent VM physical address
1095 * Returns the VM physical address of the translated VM virtual
1098 vm_paddr_t addr_arch_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva);
1100 static inline vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva) in addr_gva2gpa() argument
1102 return addr_arch_gva2gpa(vm, gva); in addr_gva2gpa()
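
Putting the translation helpers above together, a hedged sketch: vm_vaddr_alloc_page() installs the guest translation (via virt_pg_map() internally), addr_gva2gpa() resolves it back, and addr_gpa2hva() gives the host alias of the same page. Include path and helper name are assumptions.

#include <string.h>
#include "kvm_util.h"

/* Hypothetical helper exercising the virtual translation APIs. */
static void alloc_and_translate(struct kvm_vm *vm)
{
	/* Allocate and map one page of guest virtual memory. */
	vm_vaddr_t gva = vm_vaddr_alloc_page(vm);

	/* The translation the library just installed must resolve. */
	vm_paddr_t gpa = addr_gva2gpa(vm, gva);

	/* Touch the page through its host-visible alias. */
	memset(addr_gpa2hva(vm, gpa), 0, vm->page_size);
}
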
1110 * vm - Virtual Machine
1118 * virtual translation tables for the VM given by @vm.
1120 void virt_arch_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent);
1122 static inline void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent) in virt_dump() argument
1124 virt_arch_dump(stream, vm, indent); in virt_dump()
1128 static inline int __vm_disable_nx_huge_pages(struct kvm_vm *vm) in __vm_disable_nx_huge_pages() argument
1130 return __vm_enable_cap(vm, KVM_CAP_VM_DISABLE_NX_HUGE_PAGES, 0); in __vm_disable_nx_huge_pages()
1140 void kvm_arch_vm_post_create(struct kvm_vm *vm);
1142 bool vm_is_gpa_protected(struct kvm_vm *vm, vm_paddr_t paddr);