Lines Matching full:arch
39 context = this_cpu_ptr(vcpu->kvm->arch.vmcs); in kvm_save_host_pmu()
54 context = this_cpu_ptr(vcpu->kvm->arch.vmcs); in kvm_restore_host_pmu()
68 struct loongarch_csrs *csr = vcpu->arch.csr; in kvm_save_guest_pmu()
82 struct loongarch_csrs *csr = vcpu->arch.csr; in kvm_restore_guest_pmu()
98 if (!kvm_guest_has_pmu(&vcpu->arch)) in kvm_own_pmu()
105 val |= (kvm_get_pmu_num(&vcpu->arch) + 1) << CSR_GCFG_GPERF_SHIFT; in kvm_own_pmu()
116 struct loongarch_csrs *csr = vcpu->arch.csr; in kvm_lose_pmu()
118 if (!(vcpu->arch.aux_inuse & KVM_LARCH_PMU)) in kvm_lose_pmu()
136 vcpu->arch.aux_inuse &= ~KVM_LARCH_PMU; in kvm_lose_pmu()
143 if ((vcpu->arch.aux_inuse & KVM_LARCH_PMU)) in kvm_restore_pmu()
151 vcpu->arch.aux_inuse |= KVM_LARCH_PMU; in kvm_check_pmu()
164 ghc = &vcpu->arch.st.cache; in kvm_update_stolen_time()
165 gpa = vcpu->arch.st.guest_addr; in kvm_update_stolen_time()
188 steal += current->sched_info.run_delay - vcpu->arch.st.last_steal; in kvm_update_stolen_time()
189 vcpu->arch.st.last_steal = current->sched_info.run_delay; in kvm_update_stolen_time()
211 vcpu->arch.vpid = 0; /* Drop vpid for this vCPU */ in kvm_check_requests()
226 if (vcpu->arch.flush_gpa != INVALID_GPA) { in kvm_late_check_requests()
227 kvm_flush_tlb_gpa(vcpu, vcpu->arch.flush_gpa); in kvm_late_check_requests()
228 vcpu->arch.flush_gpa = INVALID_GPA; in kvm_late_check_requests()
292 vcpu->arch.host_eentry = csr_read64(LOONGARCH_CSR_EENTRY); in kvm_pre_enter_guest()
294 vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST; in kvm_pre_enter_guest()
313 unsigned long estat = vcpu->arch.host_estat; in kvm_handle_exit()
353 return !!(vcpu->arch.irq_pending) && in kvm_arch_vcpu_runnable()
354 vcpu->arch.mp_state.mp_state == KVM_MP_STATE_RUNNABLE; in kvm_arch_vcpu_runnable()
396 kvm_debug("\tPC = 0x%08lx\n", vcpu->arch.pc); in kvm_arch_vcpu_dump_regs()
397 kvm_debug("\tExceptions: %08lx\n", vcpu->arch.irq_pending); in kvm_arch_vcpu_dump_regs()
401 vcpu->arch.gprs[i], vcpu->arch.gprs[i + 1], in kvm_arch_vcpu_dump_regs()
402 vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]); in kvm_arch_vcpu_dump_regs()
417 *mp_state = vcpu->arch.mp_state; in kvm_arch_vcpu_ioctl_get_mpstate()
429 vcpu->arch.mp_state = *mp_state; in kvm_arch_vcpu_ioctl_set_mpstate()
456 struct loongarch_csrs *csr = vcpu->arch.csr; in kvm_set_cpuid()
461 map = vcpu->kvm->arch.phyid_map; in kvm_set_cpuid()
464 spin_lock(&vcpu->kvm->arch.phyid_map_lock); in kvm_set_cpuid()
468 spin_unlock(&vcpu->kvm->arch.phyid_map_lock); in kvm_set_cpuid()
476 spin_unlock(&vcpu->kvm->arch.phyid_map_lock); in kvm_set_cpuid()
483 spin_unlock(&vcpu->kvm->arch.phyid_map_lock); in kvm_set_cpuid()
491 spin_unlock(&vcpu->kvm->arch.phyid_map_lock); in kvm_set_cpuid()
498 spin_unlock(&vcpu->kvm->arch.phyid_map_lock); in kvm_set_cpuid()
507 struct loongarch_csrs *csr = vcpu->arch.csr; in kvm_drop_cpuid()
509 map = vcpu->kvm->arch.phyid_map; in kvm_drop_cpuid()
515 spin_lock(&vcpu->kvm->arch.phyid_map_lock); in kvm_drop_cpuid()
521 spin_unlock(&vcpu->kvm->arch.phyid_map_lock); in kvm_drop_cpuid()
531 map = kvm->arch.phyid_map; in kvm_get_vcpu_by_cpuid()
541 struct loongarch_csrs *csr = vcpu->arch.csr; in _kvm_getcsr()
554 vcpu->arch.aux_inuse &= ~KVM_LARCH_SWCSR_LATEST; in _kvm_getcsr()
576 struct loongarch_csrs *csr = vcpu->arch.csr; in _kvm_setcsr()
741 *v = vcpu->arch.cpucfg[id]; in kvm_get_one_reg()
746 if (!kvm_guest_has_lbt(&vcpu->arch)) in kvm_get_one_reg()
751 *v = vcpu->arch.lbt.scr0; in kvm_get_one_reg()
754 *v = vcpu->arch.lbt.scr1; in kvm_get_one_reg()
757 *v = vcpu->arch.lbt.scr2; in kvm_get_one_reg()
760 *v = vcpu->arch.lbt.scr3; in kvm_get_one_reg()
763 *v = vcpu->arch.lbt.eflags; in kvm_get_one_reg()
766 *v = vcpu->arch.fpu.ftop; in kvm_get_one_reg()
776 *v = drdtime() + vcpu->kvm->arch.time_offset; in kvm_get_one_reg()
830 vcpu->arch.cpucfg[id] = (u32)v; in kvm_set_one_reg()
832 vcpu->arch.max_pmu_csrid = in kvm_set_one_reg()
833 LOONGARCH_CSR_PERFCTRL0 + 2 * kvm_get_pmu_num(&vcpu->arch) + 1; in kvm_set_one_reg()
836 if (!kvm_guest_has_lbt(&vcpu->arch)) in kvm_set_one_reg()
841 vcpu->arch.lbt.scr0 = v; in kvm_set_one_reg()
844 vcpu->arch.lbt.scr1 = v; in kvm_set_one_reg()
847 vcpu->arch.lbt.scr2 = v; in kvm_set_one_reg()
850 vcpu->arch.lbt.scr3 = v; in kvm_set_one_reg()
853 vcpu->arch.lbt.eflags = v; in kvm_set_one_reg()
856 vcpu->arch.fpu.ftop = v; in kvm_set_one_reg()
871 vcpu->kvm->arch.time_offset = (signed long)(v - drdtime()); in kvm_set_one_reg()
874 vcpu->arch.st.guest_addr = 0; in kvm_set_one_reg()
875 memset(&vcpu->arch.irq_pending, 0, sizeof(vcpu->arch.irq_pending)); in kvm_set_one_reg()
876 memset(&vcpu->arch.irq_clear, 0, sizeof(vcpu->arch.irq_clear)); in kvm_set_one_reg()
923 for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++) in kvm_arch_vcpu_ioctl_get_regs()
924 regs->gpr[i] = vcpu->arch.gprs[i]; in kvm_arch_vcpu_ioctl_get_regs()
926 regs->pc = vcpu->arch.pc; in kvm_arch_vcpu_ioctl_get_regs()
935 for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++) in kvm_arch_vcpu_ioctl_set_regs()
936 vcpu->arch.gprs[i] = regs->gpr[i]; in kvm_arch_vcpu_ioctl_set_regs()
938 vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */ in kvm_arch_vcpu_ioctl_set_regs()
939 vcpu->arch.pc = regs->pc; in kvm_arch_vcpu_ioctl_set_regs()
1010 val = vcpu->kvm->arch.pv_features & LOONGARCH_PV_FEAT_MASK; in kvm_loongarch_cpucfg_get_attr()
1031 gpa = vcpu->arch.st.guest_addr; in kvm_loongarch_pvtime_get_attr()
1074 if ((kvm->arch.pv_features & LOONGARCH_PV_FEAT_UPDATED) in kvm_loongarch_cpucfg_set_attr()
1075 && ((kvm->arch.pv_features & valid) != val)) in kvm_loongarch_cpucfg_set_attr()
1077 kvm->arch.pv_features = val | LOONGARCH_PV_FEAT_UPDATED; in kvm_loongarch_cpucfg_set_attr()
1102 vcpu->arch.st.guest_addr = gpa; in kvm_loongarch_pvtime_set_attr()
1113 vcpu->arch.st.guest_addr = gpa; in kvm_loongarch_pvtime_set_attr()
1114 vcpu->arch.st.last_steal = current->sched_info.run_delay; in kvm_loongarch_pvtime_set_attr()
1156 * be clear in vcpu->arch.aux_inuse, and vcpu_load will check in kvm_arch_vcpu_ioctl()
1170 vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE; in kvm_arch_vcpu_ioctl()
1217 fpu->fcc = vcpu->arch.fpu.fcc; in kvm_arch_vcpu_ioctl_get_fpu()
1218 fpu->fcsr = vcpu->arch.fpu.fcsr; in kvm_arch_vcpu_ioctl_get_fpu()
1220 memcpy(&fpu->fpr[i], &vcpu->arch.fpu.fpr[i], FPU_REG_WIDTH / 64); in kvm_arch_vcpu_ioctl_get_fpu()
1229 vcpu->arch.fpu.fcc = fpu->fcc; in kvm_arch_vcpu_ioctl_set_fpu()
1230 vcpu->arch.fpu.fcsr = fpu->fcsr; in kvm_arch_vcpu_ioctl_set_fpu()
1232 memcpy(&vcpu->arch.fpu.fpr[i], &fpu->fpr[i], FPU_REG_WIDTH / 64); in kvm_arch_vcpu_ioctl_set_fpu()
1240 if (!kvm_guest_has_lbt(&vcpu->arch)) in kvm_own_lbt()
1245 _restore_lbt(&vcpu->arch.lbt); in kvm_own_lbt()
1246 vcpu->arch.aux_inuse |= KVM_LARCH_LBT; in kvm_own_lbt()
1255 if (vcpu->arch.aux_inuse & KVM_LARCH_LBT) { in kvm_lose_lbt()
1256 _save_lbt(&vcpu->arch.lbt); in kvm_lose_lbt()
1258 vcpu->arch.aux_inuse &= ~KVM_LARCH_LBT; in kvm_lose_lbt()
1275 if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) { in kvm_check_fcsr_alive()
1276 if (vcpu->arch.aux_inuse & KVM_LARCH_LBT) in kvm_check_fcsr_alive()
1296 kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr); in kvm_own_fpu()
1299 kvm_restore_fpu(&vcpu->arch.fpu); in kvm_own_fpu()
1300 vcpu->arch.aux_inuse |= KVM_LARCH_FPU; in kvm_own_fpu()
1310 if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch)) in kvm_own_lsx()
1316 kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr); in kvm_own_lsx()
1318 switch (vcpu->arch.aux_inuse & KVM_LARCH_FPU) { in kvm_own_lsx()
1324 _restore_lsx_upper(&vcpu->arch.fpu); in kvm_own_lsx()
1330 kvm_restore_lsx(&vcpu->arch.fpu); in kvm_own_lsx()
1335 vcpu->arch.aux_inuse |= KVM_LARCH_LSX | KVM_LARCH_FPU; in kvm_own_lsx()
1346 if (!kvm_guest_has_fpu(&vcpu->arch) || !kvm_guest_has_lsx(&vcpu->arch) || !kvm_guest_has_lasx(&vcpu->arch)) in kvm_own_lasx()
1351 kvm_check_fcsr(vcpu, vcpu->arch.fpu.fcsr); in kvm_own_lasx()
1353 switch (vcpu->arch.aux_inuse & (KVM_LARCH_FPU | KVM_LARCH_LSX)) { in kvm_own_lasx()
1357 _restore_lasx_upper(&vcpu->arch.fpu); in kvm_own_lasx()
1361 _restore_lsx_upper(&vcpu->arch.fpu); in kvm_own_lasx()
1362 _restore_lasx_upper(&vcpu->arch.fpu); in kvm_own_lasx()
1366 kvm_restore_lasx(&vcpu->arch.fpu); in kvm_own_lasx()
1371 vcpu->arch.aux_inuse |= KVM_LARCH_LASX | KVM_LARCH_LSX | KVM_LARCH_FPU; in kvm_own_lasx()
1384 if (vcpu->arch.aux_inuse & KVM_LARCH_LASX) { in kvm_lose_fpu()
1385 kvm_save_lasx(&vcpu->arch.fpu); in kvm_lose_fpu()
1386 vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU | KVM_LARCH_LASX); in kvm_lose_fpu()
1391 } else if (vcpu->arch.aux_inuse & KVM_LARCH_LSX) { in kvm_lose_fpu()
1392 kvm_save_lsx(&vcpu->arch.fpu); in kvm_lose_fpu()
1393 vcpu->arch.aux_inuse &= ~(KVM_LARCH_LSX | KVM_LARCH_FPU); in kvm_lose_fpu()
1398 } else if (vcpu->arch.aux_inuse & KVM_LARCH_FPU) { in kvm_lose_fpu()
1399 kvm_save_fpu(&vcpu->arch.fpu); in kvm_lose_fpu()
1400 vcpu->arch.aux_inuse &= ~KVM_LARCH_FPU; in kvm_lose_fpu()
1459 vcpu->arch.vpid = 0; in kvm_arch_vcpu_create()
1460 vcpu->arch.flush_gpa = INVALID_GPA; in kvm_arch_vcpu_create()
1462 hrtimer_init(&vcpu->arch.swtimer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED_HARD); in kvm_arch_vcpu_create()
1463 vcpu->arch.swtimer.function = kvm_swtimer_wakeup; in kvm_arch_vcpu_create()
1465 vcpu->arch.handle_exit = kvm_handle_exit; in kvm_arch_vcpu_create()
1466 vcpu->arch.guest_eentry = (unsigned long)kvm_loongarch_ops->exc_entry; in kvm_arch_vcpu_create()
1467 vcpu->arch.csr = kzalloc(sizeof(struct loongarch_csrs), GFP_KERNEL); in kvm_arch_vcpu_create()
1468 if (!vcpu->arch.csr) in kvm_arch_vcpu_create()
1475 vcpu->arch.host_ecfg = (read_csr_ecfg() & CSR_ECFG_VS); in kvm_arch_vcpu_create()
1478 vcpu->arch.last_sched_cpu = -1; in kvm_arch_vcpu_create()
1481 spin_lock_init(&vcpu->arch.ipi_state.lock); in kvm_arch_vcpu_create()
1490 csr = vcpu->arch.csr; in kvm_arch_vcpu_create()
1512 hrtimer_cancel(&vcpu->arch.swtimer); in kvm_arch_vcpu_destroy()
1513 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); in kvm_arch_vcpu_destroy()
1515 kfree(vcpu->arch.csr); in kvm_arch_vcpu_destroy()
1522 context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu); in kvm_arch_vcpu_destroy()
1532 struct loongarch_csrs *csr = vcpu->arch.csr; in _kvm_vcpu_load()
1538 migrated = (vcpu->arch.last_sched_cpu != cpu); in _kvm_vcpu_load()
1544 context = per_cpu_ptr(vcpu->kvm->arch.vmcs, cpu); in _kvm_vcpu_load()
1546 vcpu->arch.aux_inuse &= ~KVM_LARCH_HWCSR_USABLE; in _kvm_vcpu_load()
1557 if (vcpu->arch.aux_inuse & KVM_LARCH_HWCSR_USABLE) in _kvm_vcpu_load()
1560 write_csr_gcntc((ulong)vcpu->kvm->arch.time_offset); in _kvm_vcpu_load()
1619 vcpu->arch.aux_inuse |= KVM_LARCH_HWCSR_USABLE; in _kvm_vcpu_load()
1636 struct loongarch_csrs *csr = vcpu->arch.csr; in _kvm_vcpu_put()
1646 if (vcpu->arch.aux_inuse & KVM_LARCH_SWCSR_LATEST) in _kvm_vcpu_put()
1697 vcpu->arch.aux_inuse |= KVM_LARCH_SWCSR_LATEST; in _kvm_vcpu_put()
1714 vcpu->arch.last_sched_cpu = cpu; in kvm_arch_vcpu_put()