Lines Matching +full:fiq +full:- +full:index

1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
9 #include <linux/entry-kvm.h>
86 int r = -EINVAL; in kvm_vm_ioctl_enable_cap()
88 if (cap->flags) in kvm_vm_ioctl_enable_cap()
89 return -EINVAL; in kvm_vm_ioctl_enable_cap()
91 if (kvm_vm_is_protected(kvm) && !kvm_pvm_ext_allowed(cap->cap)) in kvm_vm_ioctl_enable_cap()
92 return -EINVAL; in kvm_vm_ioctl_enable_cap()
94 switch (cap->cap) { in kvm_vm_ioctl_enable_cap()
98 &kvm->arch.flags); in kvm_vm_ioctl_enable_cap()
101 mutex_lock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
102 if (system_supports_mte() && !kvm->created_vcpus) { in kvm_vm_ioctl_enable_cap()
104 set_bit(KVM_ARCH_FLAG_MTE_ENABLED, &kvm->arch.flags); in kvm_vm_ioctl_enable_cap()
106 mutex_unlock(&kvm->lock); in kvm_vm_ioctl_enable_cap()
110 set_bit(KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED, &kvm->arch.flags); in kvm_vm_ioctl_enable_cap()
113 mutex_lock(&kvm->slots_lock); in kvm_vm_ioctl_enable_cap()
119 u64 new_cap = cap->args[0]; in kvm_vm_ioctl_enable_cap()
123 kvm->arch.mmu.split_page_chunk_size = new_cap; in kvm_vm_ioctl_enable_cap()
126 mutex_unlock(&kvm->slots_lock); in kvm_vm_ioctl_enable_cap()
141 * kvm_arch_init_vm - initializes a VM data structure
149 mutex_init(&kvm->arch.config_lock); in kvm_arch_init_vm()
152 /* Clue in lockdep that the config_lock must be taken inside kvm->lock */ in kvm_arch_init_vm()
153 mutex_lock(&kvm->lock); in kvm_arch_init_vm()
154 mutex_lock(&kvm->arch.config_lock); in kvm_arch_init_vm()
155 mutex_unlock(&kvm->arch.config_lock); in kvm_arch_init_vm()
156 mutex_unlock(&kvm->lock); in kvm_arch_init_vm()
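
The lock/unlock pair above (lines 152-156) never protects any data: it is taken once at VM creation purely so lockdep records that kvm->arch.config_lock nests inside kvm->lock. A minimal sketch of the same idiom, with illustrative lock names (outer_lock/inner_lock are not from arm.c):

#include <linux/mutex.h>

static DEFINE_MUTEX(outer_lock);	/* stands in for kvm->lock */
static DEFINE_MUTEX(inner_lock);	/* stands in for kvm->arch.config_lock */

static void teach_lockdep_ordering(void)
{
	/*
	 * Acquire both locks once in the legal order and drop them again.
	 * Nothing is protected here; the only effect is that lockdep now
	 * knows "inner_lock is taken inside outer_lock" and will warn if
	 * any later code path tries the opposite order.
	 */
	mutex_lock(&outer_lock);
	mutex_lock(&inner_lock);
	mutex_unlock(&inner_lock);
	mutex_unlock(&outer_lock);
}
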
169 if (!zalloc_cpumask_var(&kvm->arch.supported_cpus, GFP_KERNEL_ACCOUNT)) { in kvm_arch_init_vm()
170 ret = -ENOMEM; in kvm_arch_init_vm()
173 cpumask_copy(kvm->arch.supported_cpus, cpu_possible_mask); in kvm_arch_init_vm()
175 ret = kvm_init_stage2_mmu(kvm, &kvm->arch.mmu, type); in kvm_arch_init_vm()
184 kvm->max_vcpus = kvm_arm_default_max_vcpus(); in kvm_arch_init_vm()
188 bitmap_zero(kvm->arch.vcpu_features, KVM_VCPU_MAX_FEATURES); in kvm_arch_init_vm()
193 free_cpumask_var(kvm->arch.supported_cpus); in kvm_arch_init_vm()
214 mutex_lock(&kvm->arch.config_lock); in kvm_destroy_mpidr_data()
216 data = rcu_dereference_protected(kvm->arch.mpidr_data, in kvm_destroy_mpidr_data()
217 lockdep_is_held(&kvm->arch.config_lock)); in kvm_destroy_mpidr_data()
219 rcu_assign_pointer(kvm->arch.mpidr_data, NULL); in kvm_destroy_mpidr_data()
224 mutex_unlock(&kvm->arch.config_lock); in kvm_destroy_mpidr_data()
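
kvm_destroy_mpidr_data() above follows the standard pattern for retiring an RCU-protected pointer: dereference it under the writer-side lock, publish NULL, wait for readers to drain, then free. A hedged sketch of that pattern with illustrative names (demo_ptr/demo_lock are not from arm.c; the synchronize_rcu()/kfree() step is assumed to sit in the lines elided between 219 and 224):

#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct demo_data { int payload; };

static struct demo_data __rcu *demo_ptr;	/* analogue of kvm->arch.mpidr_data */
static DEFINE_MUTEX(demo_lock);			/* analogue of kvm->arch.config_lock */

static void demo_destroy(void)
{
	struct demo_data *data;

	mutex_lock(&demo_lock);

	/* Writers hold demo_lock, so a lockdep-checked plain dereference is fine. */
	data = rcu_dereference_protected(demo_ptr, lockdep_is_held(&demo_lock));
	if (data) {
		/* Unpublish first so new readers observe NULL... */
		rcu_assign_pointer(demo_ptr, NULL);
		/* ...then wait out existing readers before freeing. */
		synchronize_rcu();
		kfree(data);
	}

	mutex_unlock(&demo_lock);
}
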
228 * kvm_arch_destroy_vm - destroy the VM data structure
233 bitmap_free(kvm->arch.pmu_filter); in kvm_arch_destroy_vm()
234 free_cpumask_var(kvm->arch.supported_cpus); in kvm_arch_destroy_vm()
243 kfree(kvm->arch.sysreg_masks); in kvm_arch_destroy_vm()
259 * - both Address and Generic auth are implemented for a given in kvm_has_full_ptr_auth()
261 * - only a single algorithm is implemented. in kvm_has_full_ptr_auth()
336 r = kvm->max_vcpus; in kvm_vm_ioctl_check_extension()
342 r = -EINVAL; in kvm_vm_ioctl_check_extension()
344 r = kvm->arch.vgic.msis_require_devid; in kvm_vm_ioctl_check_extension()
386 r = kvm->arch.mmu.split_page_chunk_size; in kvm_vm_ioctl_check_extension()
406 return -EINVAL; in kvm_arch_dev_ioctl()
422 return -EBUSY; in kvm_arch_vcpu_precreate()
424 if (id >= kvm->max_vcpus) in kvm_arch_vcpu_precreate()
425 return -EINVAL; in kvm_arch_vcpu_precreate()
434 spin_lock_init(&vcpu->arch.mp_state_lock); in kvm_arch_vcpu_create()
437 /* Inform lockdep that the config_lock is acquired after vcpu->mutex */ in kvm_arch_vcpu_create()
438 mutex_lock(&vcpu->mutex); in kvm_arch_vcpu_create()
439 mutex_lock(&vcpu->kvm->arch.config_lock); in kvm_arch_vcpu_create()
440 mutex_unlock(&vcpu->kvm->arch.config_lock); in kvm_arch_vcpu_create()
441 mutex_unlock(&vcpu->mutex); in kvm_arch_vcpu_create()
447 vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO; in kvm_arch_vcpu_create()
454 kvm_arm_pvtime_vcpu_init(&vcpu->arch); in kvm_arch_vcpu_create()
456 vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu; in kvm_arch_vcpu_create()
460 * Throw out the pre-computed mappings if that is the case which forces in kvm_arch_vcpu_create()
463 kvm_destroy_mpidr_data(vcpu->kvm); in kvm_arch_vcpu_create()
483 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); in kvm_arch_vcpu_destroy()
485 free_hyp_memcache(&vcpu->arch.pkvm_memcache); in kvm_arch_vcpu_destroy()
514 vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK); in vcpu_set_pauth_traps()
515 vcpu->arch.hcr_el2 |= val; in vcpu_set_pauth_traps()
517 vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK); in vcpu_set_pauth_traps()
525 if (vcpu->arch.hcr_el2 & (HCR_API | HCR_APK)) { in vcpu_set_pauth_traps()
540 (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count) || in kvm_vcpu_should_clear_twi()
541 vcpu->kvm->arch.vgic.nassgireq); in kvm_vcpu_should_clear_twi()
563 mmu = vcpu->arch.hw_mmu; in kvm_arch_vcpu_load()
564 last_ran = this_cpu_ptr(mmu->last_vcpu_ran); in kvm_arch_vcpu_load()
574 kvm_arm_vmid_update(&mmu->vmid); in kvm_arch_vcpu_load()
577 * We guarantee that both TLBs and I-cache are private to each in kvm_arch_vcpu_load()
583 * over-invalidation doesn't affect correctness. in kvm_arch_vcpu_load()
585 if (*last_ran != vcpu->vcpu_idx) { in kvm_arch_vcpu_load()
587 *last_ran = vcpu->vcpu_idx; in kvm_arch_vcpu_load()
591 vcpu->cpu = cpu; in kvm_arch_vcpu_load()
600 if (kvm_arm_is_pvtime_enabled(&vcpu->arch)) in kvm_arch_vcpu_load()
604 vcpu->arch.hcr_el2 &= ~HCR_TWE; in kvm_arch_vcpu_load()
606 vcpu->arch.hcr_el2 |= HCR_TWE; in kvm_arch_vcpu_load()
609 vcpu->arch.hcr_el2 &= ~HCR_TWI; in kvm_arch_vcpu_load()
611 vcpu->arch.hcr_el2 |= HCR_TWI; in kvm_arch_vcpu_load()
617 vcpu->kvm->arch.pkvm.handle, in kvm_arch_vcpu_load()
618 vcpu->vcpu_idx, vcpu->arch.hcr_el2); in kvm_arch_vcpu_load()
620 &vcpu->arch.vgic_cpu.vgic_v3); in kvm_arch_vcpu_load()
623 if (!cpumask_test_cpu(cpu, vcpu->kvm->arch.supported_cpus)) in kvm_arch_vcpu_load()
631 &vcpu->arch.vgic_cpu.vgic_v3); in kvm_arch_vcpu_put()
647 vcpu->cpu = -1; in kvm_arch_vcpu_put()
652 WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED); in __kvm_arm_vcpu_power_off()
659 spin_lock(&vcpu->arch.mp_state_lock); in kvm_arm_vcpu_power_off()
661 spin_unlock(&vcpu->arch.mp_state_lock); in kvm_arm_vcpu_power_off()
666 return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_STOPPED; in kvm_arm_vcpu_stopped()
671 WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_SUSPENDED); in kvm_arm_vcpu_suspend()
678 return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_SUSPENDED; in kvm_arm_vcpu_suspended()
684 *mp_state = READ_ONCE(vcpu->arch.mp_state); in kvm_arch_vcpu_ioctl_get_mpstate()
694 spin_lock(&vcpu->arch.mp_state_lock); in kvm_arch_vcpu_ioctl_set_mpstate()
696 switch (mp_state->mp_state) { in kvm_arch_vcpu_ioctl_set_mpstate()
698 WRITE_ONCE(vcpu->arch.mp_state, *mp_state); in kvm_arch_vcpu_ioctl_set_mpstate()
707 ret = -EINVAL; in kvm_arch_vcpu_ioctl_set_mpstate()
710 spin_unlock(&vcpu->arch.mp_state_lock); in kvm_arch_vcpu_ioctl_set_mpstate()
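
The mp_state accessors above are driven from userspace through the KVM_GET_MP_STATE/KVM_SET_MP_STATE vCPU ioctls. A minimal userspace sketch (vcpu_fd comes from KVM_CREATE_VCPU; error handling trimmed to the essentials):

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <stdio.h>

/* Stop a vCPU, then read the state back to confirm it. */
static int stop_vcpu(int vcpu_fd)
{
	struct kvm_mp_state mp = { .mp_state = KVM_MP_STATE_STOPPED };

	if (ioctl(vcpu_fd, KVM_SET_MP_STATE, &mp) < 0)
		return -1;

	if (ioctl(vcpu_fd, KVM_GET_MP_STATE, &mp) < 0)
		return -1;

	printf("mp_state is now %u\n", mp.mp_state);	/* expect KVM_MP_STATE_STOPPED */
	return 0;
}
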
716 * kvm_arch_vcpu_runnable - determine if the vcpu can be scheduled
726 && !kvm_arm_vcpu_stopped(v) && !v->arch.pause); in kvm_arch_vcpu_runnable()
748 mutex_lock(&kvm->arch.config_lock); in kvm_init_mpidr_data()
750 if (rcu_access_pointer(kvm->arch.mpidr_data) || in kvm_init_mpidr_data()
751 atomic_read(&kvm->online_vcpus) == 1) in kvm_init_mpidr_data()
779 data->mpidr_mask = mask; in kvm_init_mpidr_data()
783 u16 index = kvm_mpidr_index(data, aff); in kvm_init_mpidr_data() local
785 data->cmpidr_to_idx[index] = c; in kvm_init_mpidr_data()
788 rcu_assign_pointer(kvm->arch.mpidr_data, data); in kvm_init_mpidr_data()
790 mutex_unlock(&kvm->arch.config_lock); in kvm_init_mpidr_data()
800 struct kvm *kvm = vcpu->kvm; in kvm_arch_vcpu_run_pid_change()
804 return -ENOEXEC; in kvm_arch_vcpu_run_pid_change()
807 return -EPERM; in kvm_arch_vcpu_run_pid_change()
852 mutex_lock(&kvm->arch.config_lock); in kvm_arch_vcpu_run_pid_change()
853 set_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags); in kvm_arch_vcpu_run_pid_change()
854 mutex_unlock(&kvm->arch.config_lock); in kvm_arch_vcpu_run_pid_change()
870 vcpu->arch.pause = true; in kvm_arm_halt_guest()
880 vcpu->arch.pause = false; in kvm_arm_resume_guest()
890 (!kvm_arm_vcpu_stopped(vcpu)) && (!vcpu->arch.pause), in kvm_vcpu_sleep()
893 if (kvm_arm_vcpu_stopped(vcpu) || vcpu->arch.pause) { in kvm_vcpu_sleep()
907 * kvm_vcpu_wfi - emulate Wait-For-Interrupt behavior
919 * kvm_arch_vcpu_runnable has up-to-date data to decide whether in kvm_vcpu_wfi()
959 memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event)); in kvm_vcpu_suspend()
960 vcpu->run->system_event.type = KVM_SYSTEM_EVENT_WAKEUP; in kvm_vcpu_suspend()
961 vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT; in kvm_vcpu_suspend()
974 * check_vcpu_requests - check and handle pending vCPU requests
986 return -EIO; in check_vcpu_requests()
1041 * kvm_vcpu_exit_request - returns true if the VCPU should *not* enter the guest
1052 * for pending work and re-enter), return true without writing to ret.
1056 struct kvm_run *run = vcpu->run; in kvm_vcpu_exit_request()
1065 if (unlikely(!irqchip_in_kernel(vcpu->kvm))) { in kvm_vcpu_exit_request()
1068 *ret = -EINTR; in kvm_vcpu_exit_request()
1069 run->exit_reason = KVM_EXIT_INTR; in kvm_vcpu_exit_request()
1075 run->exit_reason = KVM_EXIT_FAIL_ENTRY; in kvm_vcpu_exit_request()
1076 run->fail_entry.hardware_entry_failure_reason = KVM_EXIT_FAIL_ENTRY_CPU_UNSUPPORTED; in kvm_vcpu_exit_request()
1077 run->fail_entry.cpu = smp_processor_id(); in kvm_vcpu_exit_request()
1105 * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
1116 struct kvm_run *run = vcpu->run; in kvm_arch_vcpu_ioctl_run()
1119 if (run->exit_reason == KVM_EXIT_MMIO) { in kvm_arch_vcpu_ioctl_run()
1127 if (!vcpu->wants_to_run) { in kvm_arch_vcpu_ioctl_run()
1128 ret = -EINTR; in kvm_arch_vcpu_ioctl_run()
1135 run->exit_reason = KVM_EXIT_UNKNOWN; in kvm_arch_vcpu_ioctl_run()
1136 run->flags = 0; in kvm_arch_vcpu_ioctl_run()
1151 * non-preemptible context. in kvm_arch_vcpu_ioctl_run()
1167 * Documentation/virt/kvm/vcpu-requests.rst in kvm_arch_vcpu_ioctl_run()
1169 smp_store_mb(vcpu->mode, IN_GUEST_MODE); in kvm_arch_vcpu_ioctl_run()
1172 vcpu->mode = OUTSIDE_GUEST_MODE; in kvm_arch_vcpu_ioctl_run()
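
The smp_store_mb()/OUTSIDE_GUEST_MODE pair above implements the vCPU side of the request/kick protocol described in Documentation/virt/kvm/vcpu-requests.rst: the vCPU publishes IN_GUEST_MODE before its final checks, while a requester sets a request bit and then kicks the vCPU if it may already be in guest mode. A simplified sketch of both sides (demo_kick()/demo_try_enter() are illustrative names, not functions in arm.c):

#include <linux/kvm_host.h>

/* Requester side: make the request visible, then kick the target vCPU. */
static void demo_kick(struct kvm_vcpu *vcpu, int req)
{
	kvm_make_request(req, vcpu);	/* sets the request bit with ordering */
	kvm_vcpu_kick(vcpu);		/* IPIs the CPU if the vCPU is in guest mode */
}

/* vCPU side: publish IN_GUEST_MODE, then re-check for pending requests. */
static bool demo_try_enter(struct kvm_vcpu *vcpu)
{
	smp_store_mb(vcpu->mode, IN_GUEST_MODE);

	if (kvm_request_pending(vcpu)) {
		/* A request raced in: back out and handle it before entry. */
		vcpu->mode = OUTSIDE_GUEST_MODE;
		return false;
	}
	return true;	/* safe to enter the guest; any later request will IPI us */
}
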
1175 if (unlikely(!irqchip_in_kernel(vcpu->kvm))) in kvm_arch_vcpu_ioctl_run()
1193 vcpu->mode = OUTSIDE_GUEST_MODE; in kvm_arch_vcpu_ioctl_run()
1194 vcpu->stat.exits++; in kvm_arch_vcpu_ioctl_run()
1218 if (unlikely(!irqchip_in_kernel(vcpu->kvm))) in kvm_arch_vcpu_ioctl_run()
1263 * As we have caught the guest red-handed, decide that in kvm_arch_vcpu_ioctl_run()
1275 /* Tell userspace about in-kernel device output levels */ in kvm_arch_vcpu_ioctl_run()
1276 if (unlikely(!irqchip_in_kernel(vcpu->kvm))) { in kvm_arch_vcpu_ioctl_run()
1289 * being preempt-safe on VHE. in kvm_arch_vcpu_ioctl_run()
1324 * trigger a world-switch round on the running physical CPU to set the in vcpu_interrupt_line()
1325 * virtual IRQ/FIQ fields in the HCR appropriately. in vcpu_interrupt_line()
1336 u32 irq = irq_level->irq; in kvm_vm_ioctl_irq_line()
1339 bool level = irq_level->level; in kvm_vm_ioctl_irq_line()
1346 trace_kvm_irq_line(irq_type, vcpu_id, irq_num, irq_level->level); in kvm_vm_ioctl_irq_line()
1351 return -ENXIO; in kvm_vm_ioctl_irq_line()
1355 return -EINVAL; in kvm_vm_ioctl_irq_line()
1358 return -EINVAL; in kvm_vm_ioctl_irq_line()
1363 return -ENXIO; in kvm_vm_ioctl_irq_line()
1367 return -EINVAL; in kvm_vm_ioctl_irq_line()
1370 return -EINVAL; in kvm_vm_ioctl_irq_line()
1375 return -ENXIO; in kvm_vm_ioctl_irq_line()
1378 return -EINVAL; in kvm_vm_ioctl_irq_line()
1383 return -EINVAL; in kvm_vm_ioctl_irq_line()
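
The decoding above is the kernel half of KVM_IRQ_LINE; userspace packs the target into the 32-bit irq field (type, vCPU index, interrupt number). For a VM without an in-kernel GIC, type KVM_ARM_IRQ_TYPE_CPU with number KVM_ARM_IRQ_CPU_FIQ drives the virtual FIQ line that vcpu_interrupt_line() then reflects into HCR_EL2. A hedged userspace sketch, assuming the KVM_ARM_IRQ_* constants from the arm64 uapi headers (vm_fd comes from KVM_CREATE_VM):

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Assert (level=1) or deassert (level=0) the virtual FIQ line of one vCPU. */
static int set_vcpu_fiq(int vm_fd, unsigned int vcpu_idx, __u32 level)
{
	struct kvm_irq_level irq_level = {
		.irq = (KVM_ARM_IRQ_TYPE_CPU << KVM_ARM_IRQ_TYPE_SHIFT) |
		       (vcpu_idx << KVM_ARM_IRQ_VCPU_SHIFT) |
		       KVM_ARM_IRQ_CPU_FIQ,
		.level = level,
	};

	return ioctl(vm_fd, KVM_IRQ_LINE, &irq_level);
}
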
1413 unsigned long features = init->features[0]; in kvm_vcpu_init_check_features()
1417 return -ENOENT; in kvm_vcpu_init_check_features()
1419 for (i = 1; i < ARRAY_SIZE(init->features); i++) { in kvm_vcpu_init_check_features()
1420 if (init->features[i]) in kvm_vcpu_init_check_features()
1421 return -ENOENT; in kvm_vcpu_init_check_features()
1425 return -EINVAL; in kvm_vcpu_init_check_features()
1433 return -EINVAL; in kvm_vcpu_init_check_features()
1439 if (kvm_has_mte(vcpu->kvm)) in kvm_vcpu_init_check_features()
1440 return -EINVAL; in kvm_vcpu_init_check_features()
1444 return -EINVAL; in kvm_vcpu_init_check_features()
1452 unsigned long features = init->features[0]; in kvm_vcpu_init_changed()
1454 return !bitmap_equal(vcpu->kvm->arch.vcpu_features, &features, in kvm_vcpu_init_changed()
1460 struct kvm *kvm = vcpu->kvm; in kvm_setup_vcpu()
1467 if (kvm_vcpu_has_pmu(vcpu) && !kvm->arch.arm_pmu) in kvm_setup_vcpu()
1480 unsigned long features = init->features[0]; in __kvm_vcpu_set_target()
1481 struct kvm *kvm = vcpu->kvm; in __kvm_vcpu_set_target()
1482 int ret = -EINVAL; in __kvm_vcpu_set_target()
1484 mutex_lock(&kvm->arch.config_lock); in __kvm_vcpu_set_target()
1486 if (test_bit(KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED, &kvm->arch.flags) && in __kvm_vcpu_set_target()
1490 bitmap_copy(kvm->arch.vcpu_features, &features, KVM_VCPU_MAX_FEATURES); in __kvm_vcpu_set_target()
1499 set_bit(KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED, &kvm->arch.flags); in __kvm_vcpu_set_target()
1503 mutex_unlock(&kvm->arch.config_lock); in __kvm_vcpu_set_target()
1512 if (init->target != KVM_ARM_TARGET_GENERIC_V8 && in kvm_vcpu_set_target()
1513 init->target != kvm_target_cpu()) in kvm_vcpu_set_target()
1514 return -EINVAL; in kvm_vcpu_set_target()
1524 return -EINVAL; in kvm_vcpu_set_target()
1537 * Treat the power-off vCPU feature as ephemeral. Clear the bit to avoid in kvm_arch_vcpu_ioctl_vcpu_init()
1541 if (init->features[0] & BIT(KVM_ARM_VCPU_POWER_OFF)) { in kvm_arch_vcpu_ioctl_vcpu_init()
1542 init->features[0] &= ~BIT(KVM_ARM_VCPU_POWER_OFF); in kvm_arch_vcpu_ioctl_vcpu_init()
1556 * need to invalidate the I-cache though, as FWB does *not* in kvm_arch_vcpu_ioctl_vcpu_init()
1561 stage2_unmap_vm(vcpu->kvm); in kvm_arch_vcpu_ioctl_vcpu_init()
1569 * Handle the "start in power-off" case. in kvm_arch_vcpu_ioctl_vcpu_init()
1571 spin_lock(&vcpu->arch.mp_state_lock); in kvm_arch_vcpu_ioctl_vcpu_init()
1576 WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_RUNNABLE); in kvm_arch_vcpu_ioctl_vcpu_init()
1578 spin_unlock(&vcpu->arch.mp_state_lock); in kvm_arch_vcpu_ioctl_vcpu_init()
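
The "start in power-off" handling above corresponds to the KVM_ARM_VCPU_POWER_OFF feature bit that userspace may pass to KVM_ARM_VCPU_INIT, typically for secondary vCPUs that PSCI will bring online later. A hedged sketch of the usual sequence (vm_fd/vcpu_fd obtained elsewhere; error handling trimmed):

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Initialise a secondary vCPU so it starts powered off. */
static int init_vcpu_powered_off(int vm_fd, int vcpu_fd)
{
	struct kvm_vcpu_init init;

	/* Ask the VM for its preferred target before adding features. */
	if (ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init) < 0)
		return -1;

	/* Ephemeral: the kernel clears this bit again, per the comment above. */
	init.features[0] |= 1U << KVM_ARM_VCPU_POWER_OFF;

	return ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);
}
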
1586 int ret = -ENXIO; in kvm_arm_vcpu_set_attr()
1588 switch (attr->group) { in kvm_arm_vcpu_set_attr()
1600 int ret = -ENXIO; in kvm_arm_vcpu_get_attr()
1602 switch (attr->group) { in kvm_arm_vcpu_get_attr()
1614 int ret = -ENXIO; in kvm_arm_vcpu_has_attr()
1616 switch (attr->group) { in kvm_arm_vcpu_has_attr()
1639 for (i = 0; i < ARRAY_SIZE(events->reserved); i++) in kvm_arm_vcpu_set_events()
1640 if (events->reserved[i]) in kvm_arm_vcpu_set_events()
1641 return -EINVAL; in kvm_arm_vcpu_set_events()
1644 for (i = 0; i < ARRAY_SIZE(events->exception.pad); i++) in kvm_arm_vcpu_set_events()
1645 if (events->exception.pad[i]) in kvm_arm_vcpu_set_events()
1646 return -EINVAL; in kvm_arm_vcpu_set_events()
1654 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_ioctl()
1663 r = -EFAULT; in kvm_arch_vcpu_ioctl()
1674 r = -ENOEXEC; in kvm_arch_vcpu_ioctl()
1678 r = -EFAULT; in kvm_arch_vcpu_ioctl()
1701 r = -ENOEXEC; in kvm_arch_vcpu_ioctl()
1705 r = -EPERM; in kvm_arch_vcpu_ioctl()
1709 r = -EFAULT; in kvm_arch_vcpu_ioctl()
1716 r = -E2BIG; in kvm_arch_vcpu_ioctl()
1719 r = kvm_arm_copy_reg_indices(vcpu, user_list->reg); in kvm_arch_vcpu_ioctl()
1723 r = -EFAULT; in kvm_arch_vcpu_ioctl()
1730 r = -EFAULT; in kvm_arch_vcpu_ioctl()
1737 r = -EFAULT; in kvm_arch_vcpu_ioctl()
1747 return -EINVAL; in kvm_arch_vcpu_ioctl()
1750 return -EFAULT; in kvm_arch_vcpu_ioctl()
1758 return -EFAULT; in kvm_arch_vcpu_ioctl()
1766 return -ENOEXEC; in kvm_arch_vcpu_ioctl()
1769 return -EFAULT; in kvm_arch_vcpu_ioctl()
1774 r = -EINVAL; in kvm_arch_vcpu_ioctl()
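
The -E2BIG / kvm_arm_copy_reg_indices() fragments above implement the two-pass KVM_GET_REG_LIST contract: userspace first calls with n = 0 to learn how many register indices exist, then allocates and calls again. A hedged userspace sketch:

#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <errno.h>
#include <stdlib.h>

/* Fetch the register index list for a vCPU; the caller frees the result. */
static struct kvm_reg_list *get_reg_list(int vcpu_fd)
{
	struct kvm_reg_list probe = { .n = 0 };
	struct kvm_reg_list *list;

	/* First pass: expected to fail with E2BIG while reporting the real count. */
	if (ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe) == 0 || errno != E2BIG)
		return NULL;

	list = malloc(sizeof(*list) + probe.n * sizeof(__u64));
	if (!list)
		return NULL;

	/* Second pass: the buffer is now large enough for the indices. */
	list->n = probe.n;
	if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list) < 0) {
		free(list);
		return NULL;
	}
	return list;
}
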
1788 switch (FIELD_GET(KVM_ARM_DEVICE_ID_MASK, dev_addr->id)) { in kvm_vm_ioctl_set_device_addr()
1791 return -ENXIO; in kvm_vm_ioctl_set_device_addr()
1794 return -ENODEV; in kvm_vm_ioctl_set_device_addr()
1800 switch (attr->group) { in kvm_vm_has_attr()
1804 return -ENXIO; in kvm_vm_has_attr()
1810 switch (attr->group) { in kvm_vm_set_attr()
1814 return -ENXIO; in kvm_vm_set_attr()
1820 struct kvm *kvm = filp->private_data; in kvm_arch_vm_ioctl()
1828 return -ENXIO; in kvm_arch_vm_ioctl()
1829 mutex_lock(&kvm->lock); in kvm_arch_vm_ioctl()
1831 mutex_unlock(&kvm->lock); in kvm_arch_vm_ioctl()
1838 return -EFAULT; in kvm_arch_vm_ioctl()
1847 return -EFAULT; in kvm_arch_vm_ioctl()
1855 return -EFAULT; in kvm_arch_vm_ioctl()
1862 return -EFAULT; in kvm_arch_vm_ioctl()
1867 return -EFAULT; in kvm_arch_vm_ioctl()
1873 return -EFAULT; in kvm_arch_vm_ioctl()
1881 return -EFAULT; in kvm_arch_vm_ioctl()
1885 return -EINVAL; in kvm_arch_vm_ioctl()
1894 for (; vcpu_lock_idx >= 0; vcpu_lock_idx--) { in unlock_vcpus()
1896 mutex_unlock(&tmp_vcpu->mutex); in unlock_vcpus()
1902 lockdep_assert_held(&kvm->lock); in unlock_all_vcpus()
1904 unlock_vcpus(kvm, atomic_read(&kvm->online_vcpus) - 1); in unlock_all_vcpus()
1913 lockdep_assert_held(&kvm->lock); in lock_all_vcpus()
1917 * core KVM code tries to grab the vcpu->mutex. in lock_all_vcpus()
1919 * By grabbing the vcpu->mutex of all VCPUs we ensure that no in lock_all_vcpus()
1923 if (!mutex_trylock(&tmp_vcpu->mutex)) { in lock_all_vcpus()
1924 unlock_vcpus(kvm, c - 1); in lock_all_vcpus()
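
lock_all_vcpus()/unlock_vcpus() above use a try-lock-and-roll-back idiom: if any vcpu->mutex is already held (for instance by that vCPU's run loop), everything taken so far is released in reverse order and the caller fails instead of blocking. A hedged sketch of the idiom with an illustrative lock array (demo_* names are not from arm.c):

#include <linux/mutex.h>

#define NR_DEMO_LOCKS 4

static struct mutex demo_locks[NR_DEMO_LOCKS];	/* stands in for the set of vcpu->mutex */

static void demo_locks_init(void)
{
	int c;

	for (c = 0; c < NR_DEMO_LOCKS; c++)
		mutex_init(&demo_locks[c]);
}

static void demo_unlock_upto(int last)
{
	/* Release in reverse order, mirroring unlock_vcpus(). */
	for (; last >= 0; last--)
		mutex_unlock(&demo_locks[last]);
}

static bool demo_lock_all(void)
{
	int c;

	for (c = 0; c < NR_DEMO_LOCKS; c++) {
		if (!mutex_trylock(&demo_locks[c])) {
			/* Lock c is contended: back out what we already hold. */
			demo_unlock_upto(c - 1);
			return false;
		}
	}
	return true;
}
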
1934 return (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_end) - in nvhe_percpu_size()
1988 * Calculate the raw per-cpu offset without a translation from the in cpu_prepare_hyp_mode()
1990 * so that we can use adr_l to access per-cpu variables in EL2. in cpu_prepare_hyp_mode()
1993 params->tpidr_el2 = (unsigned long)kasan_reset_tag(per_cpu_ptr_nvhe_sym(__per_cpu_start, cpu)) - in cpu_prepare_hyp_mode()
1996 params->mair_el2 = read_sysreg(mair_el1); in cpu_prepare_hyp_mode()
2011 params->tcr_el2 = tcr; in cpu_prepare_hyp_mode()
2013 params->pgd_pa = kvm_mmu_get_httbr(); in cpu_prepare_hyp_mode()
2015 params->hcr_el2 = HCR_HOST_NVHE_PROTECTED_FLAGS; in cpu_prepare_hyp_mode()
2017 params->hcr_el2 = HCR_HOST_NVHE_FLAGS; in cpu_prepare_hyp_mode()
2019 params->hcr_el2 |= HCR_E2H; in cpu_prepare_hyp_mode()
2020 params->vttbr = params->vtcr = 0; in cpu_prepare_hyp_mode()
2054 * Disabling SSBD on a non-VHE system requires us to enable SSBS in cpu_init_hyp_mode()
2073 * - If the CPU is affected by Spectre-v2, the hardening sequence is
2077 * - If the CPU also has the ARM64_SPECTRE_V3A cap, the slot
2081 * - If the CPU only has the ARM64_SPECTRE_V3A cap, then an
2086 * VHE, as we don't have hypervisor-specific mappings. If the system
2092 void *vector = hyp_spectre_vector_selector[data->slot]; in cpu_set_hyp_vector()
2097 kvm_call_hyp_nvhe(__pkvm_cpu_set_vector, data->slot); in cpu_set_hyp_vector()
2179 * PM_ENTER->PM_EXIT. It is used to indicate PM_EXIT should in hyp_init_cpu_pm_notifier()
2180 * re-enable hyp. in hyp_init_cpu_pm_notifier()
2187 * so that the hyp will be re-enabled in hyp_init_cpu_pm_notifier()
2234 * Copy the MPIDR <-> logical CPU ID mapping to hyp. in init_cpu_logical_map()
2280 * Register CPU lower-power notifier in init_subsystems()
2292 case -ENODEV: in init_subsystems()
2293 case -ENXIO: in init_subsystems()
2305 * guest on non-cooperative hardware. in init_subsystems()
2346 free_pages(per_cpu(kvm_arm_hyp_stack_base, cpu), NVHE_STACK_SHIFT - PAGE_SHIFT); in teardown_hyp_mode()
2352 sve_state = per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state; in teardown_hyp_mode()
2372 * prevent a later re-init attempt in kvm_arch_enable_virtualization_cpu(). in do_pkvm_init()
2385 * Although this is per-CPU, we make it global for simplicity, e.g., not in get_hyp_id_aa64pfr0_el1()
2388 * Unlike for non-protected VMs, userspace cannot override this for in get_hyp_id_aa64pfr0_el1()
2423 kvm_ksym_ref(__hyp_bss_end) - kvm_ksym_ref(__hyp_bss_start)); in kvm_hyp_init_symbols()
2456 return -ENOMEM; in init_pkvm_host_sve_state()
2458 per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state = page_address(page); in init_pkvm_host_sve_state()
2481 sve_state = per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state; in finalize_init_hyp_mode()
2482 per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state = in finalize_init_hyp_mode()
2495 hyp_ctxt->sys_regs[APIAKEYLO_EL1] = get_random_long(); in pkvm_hyp_init_ptrauth()
2496 hyp_ctxt->sys_regs[APIAKEYHI_EL1] = get_random_long(); in pkvm_hyp_init_ptrauth()
2497 hyp_ctxt->sys_regs[APIBKEYLO_EL1] = get_random_long(); in pkvm_hyp_init_ptrauth()
2498 hyp_ctxt->sys_regs[APIBKEYHI_EL1] = get_random_long(); in pkvm_hyp_init_ptrauth()
2499 hyp_ctxt->sys_regs[APDAKEYLO_EL1] = get_random_long(); in pkvm_hyp_init_ptrauth()
2500 hyp_ctxt->sys_regs[APDAKEYHI_EL1] = get_random_long(); in pkvm_hyp_init_ptrauth()
2501 hyp_ctxt->sys_regs[APDBKEYLO_EL1] = get_random_long(); in pkvm_hyp_init_ptrauth()
2502 hyp_ctxt->sys_regs[APDBKEYHI_EL1] = get_random_long(); in pkvm_hyp_init_ptrauth()
2503 hyp_ctxt->sys_regs[APGAKEYLO_EL1] = get_random_long(); in pkvm_hyp_init_ptrauth()
2504 hyp_ctxt->sys_regs[APGAKEYHI_EL1] = get_random_long(); in pkvm_hyp_init_ptrauth()
2508 /* Inits Hyp-mode on all online CPUs */
2513 int err = -ENOMEM; in init_hyp_mode()
2516 * The protected Hyp-mode cannot be initialized if the memory pool in init_hyp_mode()
2530 * Allocate stack pages for Hypervisor-mode in init_hyp_mode()
2535 stack_base = __get_free_pages(GFP_KERNEL, NVHE_STACK_SHIFT - PAGE_SHIFT); in init_hyp_mode()
2537 err = -ENOMEM; in init_hyp_mode()
2545 * Allocate and initialize pages for Hypervisor-mode percpu regions. in init_hyp_mode()
2553 err = -ENOMEM; in init_hyp_mode()
2563 * Map the Hyp-code called directly from the host in init_hyp_mode()
2568 kvm_err("Cannot map world-switch code\n"); in init_hyp_mode()
2612 err = create_hyp_stack(__pa(stack_base), &params->stack_hyp_va); in init_hyp_mode()
2624 params->stack_pa = __pa(stack_base); in init_hyp_mode()
2652 err = -ENODEV; in init_hyp_mode()
2684 data = rcu_dereference(kvm->arch.mpidr_data); in kvm_mpidr_to_vcpu()
2689 vcpu = kvm_get_vcpu(kvm, data->cmpidr_to_idx[idx]); in kvm_mpidr_to_vcpu()
2722 return kvm_vgic_v4_set_forwarding(irqfd->kvm, prod->irq, in kvm_arch_irq_bypass_add_producer()
2723 &irqfd->irq_entry); in kvm_arch_irq_bypass_add_producer()
2731 kvm_vgic_v4_unset_forwarding(irqfd->kvm, prod->irq, in kvm_arch_irq_bypass_del_producer()
2732 &irqfd->irq_entry); in kvm_arch_irq_bypass_del_producer()
2740 kvm_arm_halt_guest(irqfd->kvm); in kvm_arch_irq_bypass_stop()
2748 kvm_arm_resume_guest(irqfd->kvm); in kvm_arch_irq_bypass_start()
2751 /* Initialize Hyp-mode and memory mappings on all CPUs */
2759 return -ENODEV; in kvm_arm_init()
2764 return -ENODEV; in kvm_arm_init()
2848 return -EINVAL; in early_kvm_mode_cfg()
2856 pr_warn_once("KVM is not available. Ignoring kvm-arm.mode\n"); in early_kvm_mode_cfg()
2879 return -EINVAL; in early_kvm_mode_cfg()
2881 early_param("kvm-arm.mode", early_kvm_mode_cfg);
2886 return -EINVAL; in early_kvm_wfx_trap_policy_cfg()
2898 return -EINVAL; in early_kvm_wfx_trap_policy_cfg()
2905 early_param("kvm-arm.wfi_trap_policy", early_kvm_wfi_trap_policy_cfg);
2911 early_param("kvm-arm.wfe_trap_policy", early_kvm_wfe_trap_policy_cfg);