Lines Matching +full:fw +full:- +full:cfg +full:- +full:mmio
1 // SPDX-License-Identifier: GPL-2.0-only
52 return !!(v->arch.pending_exceptions) || kvm_request_pending(v); in kvm_arch_vcpu_runnable()
96 vcpu->run->exit_reason = KVM_EXIT_INTR; in kvmppc_prepare_to_enter()
97 r = -EINTR; in kvmppc_prepare_to_enter()
101 vcpu->mode = IN_GUEST_MODE; in kvmppc_prepare_to_enter()
104 * Reading vcpu->requests must happen after setting vcpu->mode, in kvmppc_prepare_to_enter()
144 struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared; in kvmppc_swab_shared()
147 shared->sprg0 = swab64(shared->sprg0); in kvmppc_swab_shared()
148 shared->sprg1 = swab64(shared->sprg1); in kvmppc_swab_shared()
149 shared->sprg2 = swab64(shared->sprg2); in kvmppc_swab_shared()
150 shared->sprg3 = swab64(shared->sprg3); in kvmppc_swab_shared()
151 shared->srr0 = swab64(shared->srr0); in kvmppc_swab_shared()
152 shared->srr1 = swab64(shared->srr1); in kvmppc_swab_shared()
153 shared->dar = swab64(shared->dar); in kvmppc_swab_shared()
154 shared->msr = swab64(shared->msr); in kvmppc_swab_shared()
155 shared->dsisr = swab32(shared->dsisr); in kvmppc_swab_shared()
156 shared->int_pending = swab32(shared->int_pending); in kvmppc_swab_shared()
157 for (i = 0; i < ARRAY_SIZE(shared->sr); i++) in kvmppc_swab_shared()
158 shared->sr[i] = swab32(shared->sr[i]); in kvmppc_swab_shared()
186 if (vcpu->arch.intr_msr & MSR_LE) in kvmppc_kvm_pv()
188 if (shared_big_endian != vcpu->arch.shared_big_endian) in kvmppc_kvm_pv()
190 vcpu->arch.shared_big_endian = shared_big_endian; in kvmppc_kvm_pv()
199 vcpu->arch.disable_kernel_nx = true; in kvmppc_kvm_pv()
203 vcpu->arch.magic_page_pa = param1 & ~0xfffULL; in kvmppc_kvm_pv()
204 vcpu->arch.magic_page_ea = param2 & ~0xfffULL; in kvmppc_kvm_pv()
211 if ((vcpu->arch.magic_page_pa & 0xf000) != in kvmppc_kvm_pv()
212 ((ulong)vcpu->arch.shared & 0xf000)) { in kvmppc_kvm_pv()
213 void *old_shared = vcpu->arch.shared; in kvmppc_kvm_pv()
214 ulong shared = (ulong)vcpu->arch.shared; in kvmppc_kvm_pv()
218 shared |= vcpu->arch.magic_page_pa & 0xf000; in kvmppc_kvm_pv()
221 vcpu->arch.shared = new_shared; in kvmppc_kvm_pv()
258 if (!vcpu->arch.pvr) in kvmppc_sanity_check()
262 if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled) in kvmppc_sanity_check()
266 if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm)) in kvmppc_sanity_check()
277 vcpu->arch.sane = r; in kvmppc_sanity_check()
278 return r ? 0 : -EINVAL; in kvmppc_sanity_check()
290 /* Future optimization: only reload non-volatiles if they were in kvmppc_emulate_mmio()
298 vcpu->run->exit_reason = KVM_EXIT_MMIO; in kvmppc_emulate_mmio()
301 /* Future optimization: only reload non-volatiles if they were in kvmppc_emulate_mmio()
321 if (vcpu->mmio_is_write) in kvmppc_emulate_mmio()
326 vcpu->arch.vaddr_accessed, dsisr); in kvmppc_emulate_mmio()
351 ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK; in kvmppc_st()
353 int r = -EINVAL; in kvmppc_st()
355 vcpu->stat.st++; in kvmppc_st()
357 if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->store_to_eaddr) in kvmppc_st()
358 r = vcpu->kvm->arch.kvm_ops->store_to_eaddr(vcpu, eaddr, ptr, in kvmppc_st()
361 if ((!r) || (r == -EAGAIN)) in kvmppc_st()
372 return -EPERM; in kvmppc_st()
378 void *magic = vcpu->arch.shared; in kvmppc_st()
384 if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size)) in kvmppc_st()
394 ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK; in kvmppc_ld()
396 int rc = -EINVAL; in kvmppc_ld()
398 vcpu->stat.ld++; in kvmppc_ld()
400 if (vcpu->kvm->arch.kvm_ops && vcpu->kvm->arch.kvm_ops->load_from_eaddr) in kvmppc_ld()
401 rc = vcpu->kvm->arch.kvm_ops->load_from_eaddr(vcpu, eaddr, ptr, in kvmppc_ld()
404 if ((!rc) || (rc == -EAGAIN)) in kvmppc_ld()
415 return -EPERM; in kvmppc_ld()
418 return -ENOEXEC; in kvmppc_ld()
424 void *magic = vcpu->arch.shared; in kvmppc_ld()
431 rc = kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size); in kvmppc_ld()
466 if (!try_module_get(kvm_ops->owner)) in kvm_arch_init_vm()
467 return -ENOENT; in kvm_arch_init_vm()
469 kvm->arch.kvm_ops = kvm_ops; in kvm_arch_init_vm()
472 module_put(kvm_ops->owner); in kvm_arch_init_vm()
475 return -EINVAL; in kvm_arch_init_vm()
492 mutex_lock(&kvm->lock); in kvm_arch_destroy_vm()
496 mutex_unlock(&kvm->lock); in kvm_arch_destroy_vm()
499 module_put(kvm->arch.kvm_ops->owner); in kvm_arch_destroy_vm()
510 * Hooray - we know which VM type we're running on. Depend on in kvm_vm_ioctl_check_extension()
591 if (kvm->arch.emul_smt_mode > 1) in kvm_vm_ioctl_check_extension()
592 r = kvm->arch.emul_smt_mode; in kvm_vm_ioctl_check_extension()
594 r = kvm->arch.smt_mode; in kvm_vm_ioctl_check_extension()
606 r = ((threads_per_subcore << 1) - 1); in kvm_vm_ioctl_check_extension()
619 r = !!(hv_enabled && kvmppc_hv_ops->hash_v3_possible && in kvm_vm_ioctl_check_extension()
620 kvmppc_hv_ops->hash_v3_possible()); in kvm_vm_ioctl_check_extension()
623 r = !!(hv_enabled && kvmppc_hv_ops->enable_nested && in kvm_vm_ioctl_check_extension()
624 !kvmppc_hv_ops->enable_nested(NULL)); in kvm_vm_ioctl_check_extension()
639 * return the number of present CPUs for -HV (since a host in kvm_vm_ioctl_check_extension()
672 r = !!(cur_cpu_spec->cpu_user_features2 & PPC_FEATURE2_HTM) || in kvm_vm_ioctl_check_extension()
678 r = hv_enabled && kvmppc_hv_ops->enable_svm && in kvm_vm_ioctl_check_extension()
679 !kvmppc_hv_ops->enable_svm(NULL); in kvm_vm_ioctl_check_extension()
682 r = !!(hv_enabled && kvmppc_hv_ops->enable_dawr1 && in kvm_vm_ioctl_check_extension()
683 !kvmppc_hv_ops->enable_dawr1(NULL)); in kvm_vm_ioctl_check_extension()
717 return -EINVAL; in kvm_arch_dev_ioctl()
766 hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS); in kvm_arch_vcpu_create()
767 vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup; in kvm_arch_vcpu_create()
770 mutex_init(&vcpu->arch.exit_timing_lock); in kvm_arch_vcpu_create()
780 rcuwait_init(&vcpu->arch.wait); in kvm_arch_vcpu_create()
781 vcpu->arch.waitp = &vcpu->arch.wait; in kvm_arch_vcpu_create()
796 hrtimer_cancel(&vcpu->arch.dec_timer); in kvm_arch_vcpu_destroy()
798 switch (vcpu->arch.irq_type) { in kvm_arch_vcpu_destroy()
800 kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu); in kvm_arch_vcpu_destroy()
830 * On non-booke this is associated with Altivec and in kvm_arch_vcpu_load()
833 mtspr(SPRN_VRSAVE, vcpu->arch.vrsave); in kvm_arch_vcpu_load()
842 vcpu->arch.vrsave = mfspr(SPRN_VRSAVE); in kvm_arch_vcpu_put()
854 return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) || in kvm_arch_has_irq_bypass()
855 (kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer)); in kvm_arch_has_irq_bypass()
863 struct kvm *kvm = irqfd->kvm; in kvm_arch_irq_bypass_add_producer()
865 if (kvm->arch.kvm_ops->irq_bypass_add_producer) in kvm_arch_irq_bypass_add_producer()
866 return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod); in kvm_arch_irq_bypass_add_producer()
876 struct kvm *kvm = irqfd->kvm; in kvm_arch_irq_bypass_del_producer()
878 if (kvm->arch.kvm_ops->irq_bypass_del_producer) in kvm_arch_irq_bypass_del_producer()
879 kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod); in kvm_arch_irq_bypass_del_producer()
888 return -1; in kvmppc_get_vsr_dword_offset()
893 offset = 1 - index; in kvmppc_get_vsr_dword_offset()
904 return -1; in kvmppc_get_vsr_word_offset()
909 offset = 3 - index; in kvmppc_get_vsr_word_offset()
918 int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset); in kvmppc_set_vsr_dword()
919 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vsr_dword()
921 if (offset == -1) in kvmppc_set_vsr_dword()
925 kvmppc_get_vsx_vr(vcpu, index - 32, &val.vval); in kvmppc_set_vsr_dword()
927 kvmppc_set_vsx_vr(vcpu, index - 32, &val.vval); in kvmppc_set_vsr_dword()
937 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vsr_dword_dump()
940 kvmppc_get_vsx_vr(vcpu, index - 32, &val.vval); in kvmppc_set_vsr_dword_dump()
943 kvmppc_set_vsx_vr(vcpu, index - 32, &val.vval); in kvmppc_set_vsr_dword_dump()
954 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vsr_word_dump()
961 kvmppc_set_vsx_vr(vcpu, index - 32, &val.vval); in kvmppc_set_vsr_word_dump()
974 int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset); in kvmppc_set_vsr_word()
975 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vsr_word()
978 if (offset == -1) in kvmppc_set_vsr_word()
982 kvmppc_get_vsx_vr(vcpu, index - 32, &val.vval); in kvmppc_set_vsr_word()
984 kvmppc_set_vsx_vr(vcpu, index - 32, &val.vval); in kvmppc_set_vsr_word()
1003 return -1; in kvmppc_get_vmx_offset_generic()
1006 offset = elts - index - 1; in kvmppc_get_vmx_offset_generic()
1043 vcpu->arch.mmio_vmx_offset); in kvmppc_set_vmx_dword()
1044 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vmx_dword()
1046 if (offset == -1) in kvmppc_set_vmx_dword()
1059 vcpu->arch.mmio_vmx_offset); in kvmppc_set_vmx_word()
1060 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vmx_word()
1062 if (offset == -1) in kvmppc_set_vmx_word()
1075 vcpu->arch.mmio_vmx_offset); in kvmppc_set_vmx_hword()
1076 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vmx_hword()
1078 if (offset == -1) in kvmppc_set_vmx_hword()
1091 vcpu->arch.mmio_vmx_offset); in kvmppc_set_vmx_byte()
1092 int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK; in kvmppc_set_vmx_byte()
1094 if (offset == -1) in kvmppc_set_vmx_byte()
1135 struct kvm_run *run = vcpu->run; in kvmppc_complete_mmio_load()
1138 if (run->mmio.len > sizeof(gpr)) in kvmppc_complete_mmio_load()
1141 if (!vcpu->arch.mmio_host_swabbed) { in kvmppc_complete_mmio_load()
1142 switch (run->mmio.len) { in kvmppc_complete_mmio_load()
1143 case 8: gpr = *(u64 *)run->mmio.data; break; in kvmppc_complete_mmio_load()
1144 case 4: gpr = *(u32 *)run->mmio.data; break; in kvmppc_complete_mmio_load()
1145 case 2: gpr = *(u16 *)run->mmio.data; break; in kvmppc_complete_mmio_load()
1146 case 1: gpr = *(u8 *)run->mmio.data; break; in kvmppc_complete_mmio_load()
1149 switch (run->mmio.len) { in kvmppc_complete_mmio_load()
1150 case 8: gpr = swab64(*(u64 *)run->mmio.data); break; in kvmppc_complete_mmio_load()
1151 case 4: gpr = swab32(*(u32 *)run->mmio.data); break; in kvmppc_complete_mmio_load()
1152 case 2: gpr = swab16(*(u16 *)run->mmio.data); break; in kvmppc_complete_mmio_load()
1153 case 1: gpr = *(u8 *)run->mmio.data; break; in kvmppc_complete_mmio_load()
1158 if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4)) in kvmppc_complete_mmio_load()
1161 if (vcpu->arch.mmio_sign_extend) { in kvmppc_complete_mmio_load()
1162 switch (run->mmio.len) { in kvmppc_complete_mmio_load()
1177 switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) { in kvmppc_complete_mmio_load()
1179 kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); in kvmppc_complete_mmio_load()
1182 if (vcpu->kvm->arch.kvm_ops->giveup_ext) in kvmppc_complete_mmio_load()
1183 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_FP); in kvmppc_complete_mmio_load()
1185 kvmppc_set_fpr(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK, gpr); in kvmppc_complete_mmio_load()
1189 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; in kvmppc_complete_mmio_load()
1192 kvmppc_set_fpr(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK, gpr); in kvmppc_complete_mmio_load()
1193 vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; in kvmppc_complete_mmio_load()
1198 if (vcpu->kvm->arch.kvm_ops->giveup_ext) in kvmppc_complete_mmio_load()
1199 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VSX); in kvmppc_complete_mmio_load()
1201 if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_DWORD) in kvmppc_complete_mmio_load()
1203 else if (vcpu->arch.mmio_copy_type == KVMPPC_VSX_COPY_WORD) in kvmppc_complete_mmio_load()
1205 else if (vcpu->arch.mmio_copy_type == in kvmppc_complete_mmio_load()
1208 else if (vcpu->arch.mmio_copy_type == in kvmppc_complete_mmio_load()
1215 if (vcpu->kvm->arch.kvm_ops->giveup_ext) in kvmppc_complete_mmio_load()
1216 vcpu->kvm->arch.kvm_ops->giveup_ext(vcpu, MSR_VEC); in kvmppc_complete_mmio_load()
1218 if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_DWORD) in kvmppc_complete_mmio_load()
1220 else if (vcpu->arch.mmio_copy_type == KVMPPC_VMX_COPY_WORD) in kvmppc_complete_mmio_load()
1222 else if (vcpu->arch.mmio_copy_type == in kvmppc_complete_mmio_load()
1225 else if (vcpu->arch.mmio_copy_type == in kvmppc_complete_mmio_load()
1234 kvm_vcpu_write_guest(vcpu, vcpu->arch.nested_io_gpr, &gpr, in kvmppc_complete_mmio_load()
1247 struct kvm_run *run = vcpu->run; in __kvmppc_handle_load()
1258 if (bytes > sizeof(run->mmio.data)) in __kvmppc_handle_load()
1261 run->mmio.phys_addr = vcpu->arch.paddr_accessed; in __kvmppc_handle_load()
1262 run->mmio.len = bytes; in __kvmppc_handle_load()
1263 run->mmio.is_write = 0; in __kvmppc_handle_load()
1265 vcpu->arch.io_gpr = rt; in __kvmppc_handle_load()
1266 vcpu->arch.mmio_host_swabbed = host_swabbed; in __kvmppc_handle_load()
1267 vcpu->mmio_needed = 1; in __kvmppc_handle_load()
1268 vcpu->mmio_is_write = 0; in __kvmppc_handle_load()
1269 vcpu->arch.mmio_sign_extend = sign_extend; in __kvmppc_handle_load()
1271 idx = srcu_read_lock(&vcpu->kvm->srcu); in __kvmppc_handle_load()
1273 ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr, in __kvmppc_handle_load()
1274 bytes, &run->mmio.data); in __kvmppc_handle_load()
1276 srcu_read_unlock(&vcpu->kvm->srcu, idx); in __kvmppc_handle_load()
1280 vcpu->mmio_needed = 0; in __kvmppc_handle_load()
1311 if (vcpu->arch.mmio_vsx_copy_nums > 4) in kvmppc_handle_vsx_load()
1314 while (vcpu->arch.mmio_vsx_copy_nums) { in kvmppc_handle_vsx_load()
1321 vcpu->arch.paddr_accessed += vcpu->run->mmio.len; in kvmppc_handle_vsx_load()
1323 vcpu->arch.mmio_vsx_copy_nums--; in kvmppc_handle_vsx_load()
1324 vcpu->arch.mmio_vsx_offset++; in kvmppc_handle_vsx_load()
1333 struct kvm_run *run = vcpu->run; in kvmppc_handle_store()
1334 void *data = run->mmio.data; in kvmppc_handle_store()
1345 if (bytes > sizeof(run->mmio.data)) in kvmppc_handle_store()
1348 run->mmio.phys_addr = vcpu->arch.paddr_accessed; in kvmppc_handle_store()
1349 run->mmio.len = bytes; in kvmppc_handle_store()
1350 run->mmio.is_write = 1; in kvmppc_handle_store()
1351 vcpu->mmio_needed = 1; in kvmppc_handle_store()
1352 vcpu->mmio_is_write = 1; in kvmppc_handle_store()
1354 if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4)) in kvmppc_handle_store()
1374 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvmppc_handle_store()
1376 ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr, in kvmppc_handle_store()
1377 bytes, &run->mmio.data); in kvmppc_handle_store()
1379 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvmppc_handle_store()
1382 vcpu->mmio_needed = 0; in kvmppc_handle_store()
1396 int copy_type = vcpu->arch.mmio_copy_type; in kvmppc_get_vsr_data()
1402 kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset); in kvmppc_get_vsr_data()
1404 if (vsx_offset == -1) { in kvmppc_get_vsr_data()
1405 result = -1; in kvmppc_get_vsr_data()
1412 kvmppc_get_vsx_vr(vcpu, rs - 32, &reg.vval); in kvmppc_get_vsr_data()
1419 kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset); in kvmppc_get_vsr_data()
1421 if (vsx_offset == -1) { in kvmppc_get_vsr_data()
1422 result = -1; in kvmppc_get_vsr_data()
1432 kvmppc_get_vsx_vr(vcpu, rs - 32, &reg.vval); in kvmppc_get_vsr_data()
1438 result = -1; in kvmppc_get_vsr_data()
1451 vcpu->arch.io_gpr = rs; in kvmppc_handle_vsx_store()
1454 if (vcpu->arch.mmio_vsx_copy_nums > 4) in kvmppc_handle_vsx_store()
1457 while (vcpu->arch.mmio_vsx_copy_nums) { in kvmppc_handle_vsx_store()
1458 if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1) in kvmppc_handle_vsx_store()
1467 vcpu->arch.paddr_accessed += vcpu->run->mmio.len; in kvmppc_handle_vsx_store()
1469 vcpu->arch.mmio_vsx_copy_nums--; in kvmppc_handle_vsx_store()
1470 vcpu->arch.mmio_vsx_offset++; in kvmppc_handle_vsx_store()
1478 struct kvm_run *run = vcpu->run; in kvmppc_emulate_mmio_vsx_loadstore()
1482 vcpu->arch.paddr_accessed += run->mmio.len; in kvmppc_emulate_mmio_vsx_loadstore()
1484 if (!vcpu->mmio_is_write) { in kvmppc_emulate_mmio_vsx_loadstore()
1485 emulated = kvmppc_handle_vsx_load(vcpu, vcpu->arch.io_gpr, in kvmppc_emulate_mmio_vsx_loadstore()
1486 run->mmio.len, 1, vcpu->arch.mmio_sign_extend); in kvmppc_emulate_mmio_vsx_loadstore()
1489 vcpu->arch.io_gpr, run->mmio.len, 1); in kvmppc_emulate_mmio_vsx_loadstore()
1494 run->exit_reason = KVM_EXIT_MMIO; in kvmppc_emulate_mmio_vsx_loadstore()
1498 pr_info("KVM: MMIO emulation failed (VSX repeat)\n"); in kvmppc_emulate_mmio_vsx_loadstore()
1499 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvmppc_emulate_mmio_vsx_loadstore()
1500 run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; in kvmppc_emulate_mmio_vsx_loadstore()
1517 if (vcpu->arch.mmio_vmx_copy_nums > 2) in kvmppc_handle_vmx_load()
1520 while (vcpu->arch.mmio_vmx_copy_nums) { in kvmppc_handle_vmx_load()
1527 vcpu->arch.paddr_accessed += vcpu->run->mmio.len; in kvmppc_handle_vmx_load()
1528 vcpu->arch.mmio_vmx_copy_nums--; in kvmppc_handle_vmx_load()
1529 vcpu->arch.mmio_vmx_offset++; in kvmppc_handle_vmx_load()
1542 kvmppc_get_vmx_dword_offset(vcpu, vcpu->arch.mmio_vmx_offset); in kvmppc_get_vmx_dword()
1544 if (vmx_offset == -1) in kvmppc_get_vmx_dword()
1545 return -1; in kvmppc_get_vmx_dword()
1560 kvmppc_get_vmx_word_offset(vcpu, vcpu->arch.mmio_vmx_offset); in kvmppc_get_vmx_word()
1562 if (vmx_offset == -1) in kvmppc_get_vmx_word()
1563 return -1; in kvmppc_get_vmx_word()
1578 kvmppc_get_vmx_hword_offset(vcpu, vcpu->arch.mmio_vmx_offset); in kvmppc_get_vmx_hword()
1580 if (vmx_offset == -1) in kvmppc_get_vmx_hword()
1581 return -1; in kvmppc_get_vmx_hword()
1596 kvmppc_get_vmx_byte_offset(vcpu, vcpu->arch.mmio_vmx_offset); in kvmppc_get_vmx_byte()
1598 if (vmx_offset == -1) in kvmppc_get_vmx_byte()
1599 return -1; in kvmppc_get_vmx_byte()
1614 if (vcpu->arch.mmio_vmx_copy_nums > 2) in kvmppc_handle_vmx_store()
1617 vcpu->arch.io_gpr = rs; in kvmppc_handle_vmx_store()
1619 while (vcpu->arch.mmio_vmx_copy_nums) { in kvmppc_handle_vmx_store()
1620 switch (vcpu->arch.mmio_copy_type) { in kvmppc_handle_vmx_store()
1622 if (kvmppc_get_vmx_dword(vcpu, index, &val) == -1) in kvmppc_handle_vmx_store()
1627 if (kvmppc_get_vmx_word(vcpu, index, &val) == -1) in kvmppc_handle_vmx_store()
1631 if (kvmppc_get_vmx_hword(vcpu, index, &val) == -1) in kvmppc_handle_vmx_store()
1635 if (kvmppc_get_vmx_byte(vcpu, index, &val) == -1) in kvmppc_handle_vmx_store()
1647 vcpu->arch.paddr_accessed += vcpu->run->mmio.len; in kvmppc_handle_vmx_store()
1648 vcpu->arch.mmio_vmx_copy_nums--; in kvmppc_handle_vmx_store()
1649 vcpu->arch.mmio_vmx_offset++; in kvmppc_handle_vmx_store()
1657 struct kvm_run *run = vcpu->run; in kvmppc_emulate_mmio_vmx_loadstore()
1661 vcpu->arch.paddr_accessed += run->mmio.len; in kvmppc_emulate_mmio_vmx_loadstore()
1663 if (!vcpu->mmio_is_write) { in kvmppc_emulate_mmio_vmx_loadstore()
1665 vcpu->arch.io_gpr, run->mmio.len, 1); in kvmppc_emulate_mmio_vmx_loadstore()
1668 vcpu->arch.io_gpr, run->mmio.len, 1); in kvmppc_emulate_mmio_vmx_loadstore()
1673 run->exit_reason = KVM_EXIT_MMIO; in kvmppc_emulate_mmio_vmx_loadstore()
1677 pr_info("KVM: MMIO emulation failed (VMX repeat)\n"); in kvmppc_emulate_mmio_vmx_loadstore()
1678 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvmppc_emulate_mmio_vmx_loadstore()
1679 run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION; in kvmppc_emulate_mmio_vmx_loadstore()
1696 size = one_reg_size(reg->id); in kvm_vcpu_ioctl_get_one_reg()
1698 return -EINVAL; in kvm_vcpu_ioctl_get_one_reg()
1700 r = kvmppc_get_one_reg(vcpu, reg->id, &val); in kvm_vcpu_ioctl_get_one_reg()
1701 if (r == -EINVAL) { in kvm_vcpu_ioctl_get_one_reg()
1703 switch (reg->id) { in kvm_vcpu_ioctl_get_one_reg()
1707 r = -ENXIO; in kvm_vcpu_ioctl_get_one_reg()
1710 kvmppc_get_vsx_vr(vcpu, reg->id - KVM_REG_PPC_VR0, &val.vval); in kvm_vcpu_ioctl_get_one_reg()
1714 r = -ENXIO; in kvm_vcpu_ioctl_get_one_reg()
1717 val = get_reg_val(reg->id, kvmppc_get_vscr(vcpu)); in kvm_vcpu_ioctl_get_one_reg()
1720 val = get_reg_val(reg->id, kvmppc_get_vrsave(vcpu)); in kvm_vcpu_ioctl_get_one_reg()
1724 r = -EINVAL; in kvm_vcpu_ioctl_get_one_reg()
1732 if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size)) in kvm_vcpu_ioctl_get_one_reg()
1733 r = -EFAULT; in kvm_vcpu_ioctl_get_one_reg()
1744 size = one_reg_size(reg->id); in kvm_vcpu_ioctl_set_one_reg()
1746 return -EINVAL; in kvm_vcpu_ioctl_set_one_reg()
1748 if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size)) in kvm_vcpu_ioctl_set_one_reg()
1749 return -EFAULT; in kvm_vcpu_ioctl_set_one_reg()
1751 r = kvmppc_set_one_reg(vcpu, reg->id, &val); in kvm_vcpu_ioctl_set_one_reg()
1752 if (r == -EINVAL) { in kvm_vcpu_ioctl_set_one_reg()
1754 switch (reg->id) { in kvm_vcpu_ioctl_set_one_reg()
1758 r = -ENXIO; in kvm_vcpu_ioctl_set_one_reg()
1761 kvmppc_set_vsx_vr(vcpu, reg->id - KVM_REG_PPC_VR0, &val.vval); in kvm_vcpu_ioctl_set_one_reg()
1765 r = -ENXIO; in kvm_vcpu_ioctl_set_one_reg()
1768 kvmppc_set_vscr(vcpu, set_reg_val(reg->id, val)); in kvm_vcpu_ioctl_set_one_reg()
1772 r = -ENXIO; in kvm_vcpu_ioctl_set_one_reg()
1775 kvmppc_set_vrsave(vcpu, set_reg_val(reg->id, val)); in kvm_vcpu_ioctl_set_one_reg()
1779 r = -EINVAL; in kvm_vcpu_ioctl_set_one_reg()
1789 struct kvm_run *run = vcpu->run; in kvm_arch_vcpu_ioctl_run()
1794 if (vcpu->mmio_needed) { in kvm_arch_vcpu_ioctl_run()
1795 vcpu->mmio_needed = 0; in kvm_arch_vcpu_ioctl_run()
1796 if (!vcpu->mmio_is_write) in kvm_arch_vcpu_ioctl_run()
1799 if (vcpu->arch.mmio_vsx_copy_nums > 0) { in kvm_arch_vcpu_ioctl_run()
1800 vcpu->arch.mmio_vsx_copy_nums--; in kvm_arch_vcpu_ioctl_run()
1801 vcpu->arch.mmio_vsx_offset++; in kvm_arch_vcpu_ioctl_run()
1804 if (vcpu->arch.mmio_vsx_copy_nums > 0) { in kvm_arch_vcpu_ioctl_run()
1807 vcpu->mmio_needed = 1; in kvm_arch_vcpu_ioctl_run()
1813 if (vcpu->arch.mmio_vmx_copy_nums > 0) { in kvm_arch_vcpu_ioctl_run()
1814 vcpu->arch.mmio_vmx_copy_nums--; in kvm_arch_vcpu_ioctl_run()
1815 vcpu->arch.mmio_vmx_offset++; in kvm_arch_vcpu_ioctl_run()
1818 if (vcpu->arch.mmio_vmx_copy_nums > 0) { in kvm_arch_vcpu_ioctl_run()
1821 vcpu->mmio_needed = 1; in kvm_arch_vcpu_ioctl_run()
1826 } else if (vcpu->arch.osi_needed) { in kvm_arch_vcpu_ioctl_run()
1827 u64 *gprs = run->osi.gprs; in kvm_arch_vcpu_ioctl_run()
1832 vcpu->arch.osi_needed = 0; in kvm_arch_vcpu_ioctl_run()
1833 } else if (vcpu->arch.hcall_needed) { in kvm_arch_vcpu_ioctl_run()
1836 kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret); in kvm_arch_vcpu_ioctl_run()
1838 kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]); in kvm_arch_vcpu_ioctl_run()
1839 vcpu->arch.hcall_needed = 0; in kvm_arch_vcpu_ioctl_run()
1841 } else if (vcpu->arch.epr_needed) { in kvm_arch_vcpu_ioctl_run()
1842 kvmppc_set_epr(vcpu, run->epr.epr); in kvm_arch_vcpu_ioctl_run()
1843 vcpu->arch.epr_needed = 0; in kvm_arch_vcpu_ioctl_run()
1849 if (!vcpu->wants_to_run) in kvm_arch_vcpu_ioctl_run()
1850 r = -EINTR; in kvm_arch_vcpu_ioctl_run()
1873 if (irq->irq == KVM_INTERRUPT_UNSET) { in kvm_vcpu_ioctl_interrupt()
1890 if (cap->flags) in kvm_vcpu_ioctl_enable_cap()
1891 return -EINVAL; in kvm_vcpu_ioctl_enable_cap()
1893 switch (cap->cap) { in kvm_vcpu_ioctl_enable_cap()
1896 vcpu->arch.osi_enabled = true; in kvm_vcpu_ioctl_enable_cap()
1900 vcpu->arch.papr_enabled = true; in kvm_vcpu_ioctl_enable_cap()
1904 if (cap->args[0]) in kvm_vcpu_ioctl_enable_cap()
1905 vcpu->arch.epr_flags |= KVMPPC_EPR_USER; in kvm_vcpu_ioctl_enable_cap()
1907 vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER; in kvm_vcpu_ioctl_enable_cap()
1912 vcpu->arch.watchdog_enabled = true; in kvm_vcpu_ioctl_enable_cap()
1917 struct kvm_config_tlb cfg; in kvm_vcpu_ioctl_enable_cap() local
1918 void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0]; in kvm_vcpu_ioctl_enable_cap()
1920 r = -EFAULT; in kvm_vcpu_ioctl_enable_cap()
1921 if (copy_from_user(&cfg, user_ptr, sizeof(cfg))) in kvm_vcpu_ioctl_enable_cap()
1924 r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg); in kvm_vcpu_ioctl_enable_cap()
1930 CLASS(fd, f)(cap->args[0]); in kvm_vcpu_ioctl_enable_cap()
1933 r = -EBADF; in kvm_vcpu_ioctl_enable_cap()
1937 r = -EPERM; in kvm_vcpu_ioctl_enable_cap()
1940 r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]); in kvm_vcpu_ioctl_enable_cap()
1947 CLASS(fd, f)(cap->args[0]); in kvm_vcpu_ioctl_enable_cap()
1950 r = -EBADF; in kvm_vcpu_ioctl_enable_cap()
1954 r = -EPERM; in kvm_vcpu_ioctl_enable_cap()
1958 r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]); in kvm_vcpu_ioctl_enable_cap()
1960 r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]); in kvm_vcpu_ioctl_enable_cap()
1967 CLASS(fd, f)(cap->args[0]); in kvm_vcpu_ioctl_enable_cap()
1970 r = -EBADF; in kvm_vcpu_ioctl_enable_cap()
1974 r = -ENXIO; in kvm_vcpu_ioctl_enable_cap()
1978 r = -EPERM; in kvm_vcpu_ioctl_enable_cap()
1982 cap->args[1]); in kvm_vcpu_ioctl_enable_cap()
1988 r = -EINVAL; in kvm_vcpu_ioctl_enable_cap()
1989 if (!is_kvmppc_hv_enabled(vcpu->kvm)) in kvm_vcpu_ioctl_enable_cap()
1992 vcpu->kvm->arch.fwnmi_enabled = true; in kvm_vcpu_ioctl_enable_cap()
1996 r = -EINVAL; in kvm_vcpu_ioctl_enable_cap()
2009 if (kvm->arch.mpic) in kvm_arch_intc_initialized()
2013 if (kvm->arch.xics || kvm->arch.xive) in kvm_arch_intc_initialized()
2022 return -EINVAL; in kvm_arch_vcpu_ioctl_get_mpstate()
2028 return -EINVAL; in kvm_arch_vcpu_ioctl_set_mpstate()
2034 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_async_ioctl()
2040 return -EFAULT; in kvm_arch_vcpu_async_ioctl()
2043 return -ENOIOCTLCMD; in kvm_arch_vcpu_async_ioctl()
2049 struct kvm_vcpu *vcpu = filp->private_data; in kvm_arch_vcpu_ioctl()
2057 r = -EFAULT; in kvm_arch_vcpu_ioctl()
2070 r = -EFAULT; in kvm_arch_vcpu_ioctl()
2083 r = -EFAULT; in kvm_arch_vcpu_ioctl()
2093 r = -EINVAL; in kvm_arch_vcpu_ioctl()
2110 pvinfo->hcall[0] = cpu_to_be32(inst_sc1); in kvm_vm_ioctl_get_pvinfo()
2111 pvinfo->hcall[1] = cpu_to_be32(inst_nop); in kvm_vm_ioctl_get_pvinfo()
2112 pvinfo->hcall[2] = cpu_to_be32(inst_nop); in kvm_vm_ioctl_get_pvinfo()
2113 pvinfo->hcall[3] = cpu_to_be32(inst_nop); in kvm_vm_ioctl_get_pvinfo()
2129 pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask)); in kvm_vm_ioctl_get_pvinfo()
2130 pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask)); in kvm_vm_ioctl_get_pvinfo()
2131 pvinfo->hcall[2] = cpu_to_be32(inst_sc); in kvm_vm_ioctl_get_pvinfo()
2132 pvinfo->hcall[3] = cpu_to_be32(inst_nop); in kvm_vm_ioctl_get_pvinfo()
2135 pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE; in kvm_vm_ioctl_get_pvinfo()
2145 ret = ret || (kvm->arch.mpic != NULL); in kvm_arch_irqchip_in_kernel()
2148 ret = ret || (kvm->arch.xics != NULL); in kvm_arch_irqchip_in_kernel()
2149 ret = ret || (kvm->arch.xive != NULL); in kvm_arch_irqchip_in_kernel()
2159 return -ENXIO; in kvm_vm_ioctl_irq_line()
2161 irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, in kvm_vm_ioctl_irq_line()
2162 irq_event->irq, irq_event->level, in kvm_vm_ioctl_irq_line()
2173 if (cap->flags) in kvm_vm_ioctl_enable_cap()
2174 return -EINVAL; in kvm_vm_ioctl_enable_cap()
2176 switch (cap->cap) { in kvm_vm_ioctl_enable_cap()
2179 unsigned long hcall = cap->args[0]; in kvm_vm_ioctl_enable_cap()
2181 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
2183 cap->args[1] > 1) in kvm_vm_ioctl_enable_cap()
2187 if (cap->args[1]) in kvm_vm_ioctl_enable_cap()
2188 set_bit(hcall / 4, kvm->arch.enabled_hcalls); in kvm_vm_ioctl_enable_cap()
2190 clear_bit(hcall / 4, kvm->arch.enabled_hcalls); in kvm_vm_ioctl_enable_cap()
2195 unsigned long mode = cap->args[0]; in kvm_vm_ioctl_enable_cap()
2196 unsigned long flags = cap->args[1]; in kvm_vm_ioctl_enable_cap()
2198 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
2199 if (kvm->arch.kvm_ops->set_smt_mode) in kvm_vm_ioctl_enable_cap()
2200 r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags); in kvm_vm_ioctl_enable_cap()
2205 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
2207 !kvm->arch.kvm_ops->enable_nested) in kvm_vm_ioctl_enable_cap()
2209 r = kvm->arch.kvm_ops->enable_nested(kvm); in kvm_vm_ioctl_enable_cap()
2214 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
2215 if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_svm) in kvm_vm_ioctl_enable_cap()
2217 r = kvm->arch.kvm_ops->enable_svm(kvm); in kvm_vm_ioctl_enable_cap()
2220 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
2221 if (!is_kvmppc_hv_enabled(kvm) || !kvm->arch.kvm_ops->enable_dawr1) in kvm_vm_ioctl_enable_cap()
2223 r = kvm->arch.kvm_ops->enable_dawr1(kvm); in kvm_vm_ioctl_enable_cap()
2227 r = -EINVAL; in kvm_vm_ioctl_enable_cap()
2250 return -ENOTTY; in pseries_get_cpu_char()
2254 cp->character = c.character; in pseries_get_cpu_char()
2255 cp->behaviour = c.behaviour; in pseries_get_cpu_char()
2256 cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 | in pseries_get_cpu_char()
2265 cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY | in pseries_get_cpu_char()
2275 return -ENOTTY; in pseries_get_cpu_char()
2300 if (r != -ENOTTY) in kvmppc_get_cpu_char()
2305 fw_features = of_get_child_by_name(np, "fw-features"); in kvmppc_get_cpu_char()
2310 "inst-spec-barrier-ori31,31,0")) in kvmppc_get_cpu_char()
2311 cp->character |= KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31; in kvmppc_get_cpu_char()
2313 "fw-bcctrl-serialized")) in kvmppc_get_cpu_char()
2314 cp->character |= KVM_PPC_CPU_CHAR_BCCTRL_SERIALISED; in kvmppc_get_cpu_char()
2316 "inst-l1d-flush-ori30,30,0")) in kvmppc_get_cpu_char()
2317 cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_ORI30; in kvmppc_get_cpu_char()
2319 "inst-l1d-flush-trig2")) in kvmppc_get_cpu_char()
2320 cp->character |= KVM_PPC_CPU_CHAR_L1D_FLUSH_TRIG2; in kvmppc_get_cpu_char()
2322 "fw-l1d-thread-split")) in kvmppc_get_cpu_char()
2323 cp->character |= KVM_PPC_CPU_CHAR_L1D_THREAD_PRIV; in kvmppc_get_cpu_char()
2325 "fw-count-cache-disabled")) in kvmppc_get_cpu_char()
2326 cp->character |= KVM_PPC_CPU_CHAR_COUNT_CACHE_DIS; in kvmppc_get_cpu_char()
2328 "fw-count-cache-flush-bcctr2,0,0")) in kvmppc_get_cpu_char()
2329 cp->character |= KVM_PPC_CPU_CHAR_BCCTR_FLUSH_ASSIST; in kvmppc_get_cpu_char()
2330 cp->character_mask = KVM_PPC_CPU_CHAR_SPEC_BAR_ORI31 | in kvmppc_get_cpu_char()
2339 "speculation-policy-favor-security")) in kvmppc_get_cpu_char()
2340 cp->behaviour |= KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY; in kvmppc_get_cpu_char()
2342 "needs-l1d-flush-msr-pr-0-to-1")) in kvmppc_get_cpu_char()
2343 cp->behaviour |= KVM_PPC_CPU_BEHAV_L1D_FLUSH_PR; in kvmppc_get_cpu_char()
2345 "needs-spec-barrier-for-bound-checks")) in kvmppc_get_cpu_char()
2346 cp->behaviour |= KVM_PPC_CPU_BEHAV_BNDS_CHK_SPEC_BAR; in kvmppc_get_cpu_char()
2348 "needs-count-cache-flush-on-context-switch")) in kvmppc_get_cpu_char()
2349 cp->behaviour |= KVM_PPC_CPU_BEHAV_FLUSH_COUNT_CACHE; in kvmppc_get_cpu_char()
2350 cp->behaviour_mask = KVM_PPC_CPU_BEHAV_FAVOUR_SECURITY | in kvmppc_get_cpu_char()
2364 struct kvm *kvm __maybe_unused = filp->private_data; in kvm_arch_vm_ioctl()
2374 r = -EFAULT; in kvm_arch_vm_ioctl()
2384 r = -EFAULT; in kvm_arch_vm_ioctl()
2388 r = -EINVAL; in kvm_arch_vm_ioctl()
2398 r = -EFAULT; in kvm_arch_vm_ioctl()
2415 struct kvm *kvm = filp->private_data; in kvm_arch_vm_ioctl()
2418 r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info); in kvm_arch_vm_ioctl()
2420 r = -EFAULT; in kvm_arch_vm_ioctl()
2424 struct kvm *kvm = filp->private_data; in kvm_arch_vm_ioctl()
2430 struct kvm *kvm = filp->private_data; in kvm_arch_vm_ioctl()
2431 struct kvm_ppc_mmuv3_cfg cfg; in kvm_arch_vm_ioctl() local
2433 r = -EINVAL; in kvm_arch_vm_ioctl()
2434 if (!kvm->arch.kvm_ops->configure_mmu) in kvm_arch_vm_ioctl()
2436 r = -EFAULT; in kvm_arch_vm_ioctl()
2437 if (copy_from_user(&cfg, argp, sizeof(cfg))) in kvm_arch_vm_ioctl()
2439 r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg); in kvm_arch_vm_ioctl()
2443 struct kvm *kvm = filp->private_data; in kvm_arch_vm_ioctl()
2446 r = -EINVAL; in kvm_arch_vm_ioctl()
2447 if (!kvm->arch.kvm_ops->get_rmmu_info) in kvm_arch_vm_ioctl()
2449 r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info); in kvm_arch_vm_ioctl()
2451 r = -EFAULT; in kvm_arch_vm_ioctl()
2459 r = -EFAULT; in kvm_arch_vm_ioctl()
2463 struct kvm *kvm = filp->private_data; in kvm_arch_vm_ioctl()
2466 if (!kvm->arch.kvm_ops->svm_off) in kvm_arch_vm_ioctl()
2469 r = kvm->arch.kvm_ops->svm_off(kvm); in kvm_arch_vm_ioctl()
2473 struct kvm *kvm = filp->private_data; in kvm_arch_vm_ioctl()
2474 r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg); in kvm_arch_vm_ioctl()
2478 r = -ENOTTY; in kvm_arch_vm_ioctl()
2493 lpid = ida_alloc_range(&lpid_inuse, 1, nr_lpids - 1, GFP_KERNEL); in kvmppc_alloc_lpid()
2495 if (lpid == -ENOMEM) in kvmppc_alloc_lpid()
2499 return -ENOMEM; in kvmppc_alloc_lpid()
2523 if (vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs) in kvm_arch_create_vcpu_debugfs()
2524 vcpu->kvm->arch.kvm_ops->create_vcpu_debugfs(vcpu, debugfs_dentry); in kvm_arch_create_vcpu_debugfs()
2529 if (kvm->arch.kvm_ops->create_vm_debugfs) in kvm_arch_create_vm_debugfs()
2530 kvm->arch.kvm_ops->create_vm_debugfs(kvm); in kvm_arch_create_vm_debugfs()