Lines Matching full:arch
14 * This file is derived from arch/powerpc/kvm/book3s.c,
243 cpu = READ_ONCE(vcpu->arch.thread_cpu); in kvmppc_fast_vcpu_kick_hv()
280 * Updates to busy_stolen are protected by arch.tbacct_lock;
320 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_core_vcpu_load_hv()
325 if (vcpu->arch.busy_preempt != TB_NIL) { in kvmppc_core_vcpu_load_hv()
326 WARN_ON_ONCE(vcpu->arch.state != KVMPPC_VCPU_BUSY_IN_HOST); in kvmppc_core_vcpu_load_hv()
327 vc->stolen_tb += mftb() - vcpu->arch.busy_preempt; in kvmppc_core_vcpu_load_hv()
328 vcpu->arch.busy_preempt = TB_NIL; in kvmppc_core_vcpu_load_hv()
344 spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); in kvmppc_core_vcpu_load_hv()
345 if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST && in kvmppc_core_vcpu_load_hv()
346 vcpu->arch.busy_preempt != TB_NIL) { in kvmppc_core_vcpu_load_hv()
347 vcpu->arch.busy_stolen += now - vcpu->arch.busy_preempt; in kvmppc_core_vcpu_load_hv()
348 vcpu->arch.busy_preempt = TB_NIL; in kvmppc_core_vcpu_load_hv()
350 spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); in kvmppc_core_vcpu_load_hv()
355 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_core_vcpu_put_hv()
364 WARN_ON_ONCE(vcpu->arch.state == KVMPPC_VCPU_RUNNABLE); in kvmppc_core_vcpu_put_hv()
370 vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST) in kvmppc_core_vcpu_put_hv()
371 vcpu->arch.busy_preempt = mftb(); in kvmppc_core_vcpu_put_hv()
380 spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); in kvmppc_core_vcpu_put_hv()
381 if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST) in kvmppc_core_vcpu_put_hv()
382 vcpu->arch.busy_preempt = now; in kvmppc_core_vcpu_put_hv()
383 spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); in kvmppc_core_vcpu_put_hv()
388 vcpu->arch.pvr = pvr; in kvmppc_set_pvr_hv()
418 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_set_arch_compat()
492 vcpu->arch.regs.nip, vcpu->arch.shregs.msr, vcpu->arch.trap); in kvmppc_dump_regs()
498 vcpu->arch.regs.ctr, vcpu->arch.regs.link); in kvmppc_dump_regs()
500 vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1); in kvmppc_dump_regs()
502 vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1); in kvmppc_dump_regs()
504 vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3); in kvmppc_dump_regs()
506 vcpu->arch.regs.ccr, vcpu->arch.regs.xer, vcpu->arch.shregs.dsisr); in kvmppc_dump_regs()
507 pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar); in kvmppc_dump_regs()
509 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); in kvmppc_dump_regs()
510 pr_err("SLB (%d entries):\n", vcpu->arch.slb_max); in kvmppc_dump_regs()
511 for (r = 0; r < vcpu->arch.slb_max; ++r) in kvmppc_dump_regs()
513 vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv); in kvmppc_dump_regs()
515 vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1, in kvmppc_dump_regs()
516 vcpu->arch.last_inst); in kvmppc_dump_regs()
536 spin_lock(&vcpu->arch.vpa_update_lock); in set_vpa()
542 spin_unlock(&vcpu->arch.vpa_update_lock); in set_vpa()
605 spin_lock(&tvcpu->arch.vpa_update_lock); in do_h_register_vpa()
618 vpap = &tvcpu->arch.vpa; in do_h_register_vpa()
629 if (!vpa_is_registered(&tvcpu->arch.vpa)) in do_h_register_vpa()
632 vpap = &tvcpu->arch.dtl; in do_h_register_vpa()
639 if (!vpa_is_registered(&tvcpu->arch.vpa)) in do_h_register_vpa()
642 vpap = &tvcpu->arch.slb_shadow; in do_h_register_vpa()
649 if (vpa_is_registered(&tvcpu->arch.dtl) || in do_h_register_vpa()
650 vpa_is_registered(&tvcpu->arch.slb_shadow)) in do_h_register_vpa()
653 vpap = &tvcpu->arch.vpa; in do_h_register_vpa()
658 vpap = &tvcpu->arch.dtl; in do_h_register_vpa()
663 vpap = &tvcpu->arch.slb_shadow; in do_h_register_vpa()
674 spin_unlock(&tvcpu->arch.vpa_update_lock); in do_h_register_vpa()
697 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_update_vpa()
702 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_update_vpa()
734 if (!(vcpu->arch.vpa.update_pending || in kvmppc_update_vpas()
735 vcpu->arch.slb_shadow.update_pending || in kvmppc_update_vpas()
736 vcpu->arch.dtl.update_pending)) in kvmppc_update_vpas()
739 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_update_vpas()
740 if (vcpu->arch.vpa.update_pending) { in kvmppc_update_vpas()
741 kvmppc_update_vpa(vcpu, &vcpu->arch.vpa, &old_vpa); in kvmppc_update_vpas()
748 if (vcpu->arch.vpa.pinned_addr) { in kvmppc_update_vpas()
749 init_vpa(vcpu, vcpu->arch.vpa.pinned_addr); in kvmppc_update_vpas()
751 kvmhv_nestedv2_set_vpa(vcpu, __pa(vcpu->arch.vpa.pinned_addr)); in kvmppc_update_vpas()
754 if (vcpu->arch.dtl.update_pending) { in kvmppc_update_vpas()
755 kvmppc_update_vpa(vcpu, &vcpu->arch.dtl, &old_vpa); in kvmppc_update_vpas()
759 vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr; in kvmppc_update_vpas()
760 vcpu->arch.dtl_index = 0; in kvmppc_update_vpas()
762 if (vcpu->arch.slb_shadow.update_pending) { in kvmppc_update_vpas()
763 kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow, &old_vpa); in kvmppc_update_vpas()
769 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_update_vpas()
799 dt = vcpu->arch.dtl_ptr; in __kvmppc_create_dtl_entry()
806 dt->processor_id = cpu_to_be16(pcpu + vcpu->arch.ptid); in __kvmppc_create_dtl_entry()
813 dt->srr1 = cpu_to_be64(vcpu->arch.shregs.msr); in __kvmppc_create_dtl_entry()
816 if (dt == vcpu->arch.dtl.pinned_end) in __kvmppc_create_dtl_entry()
817 dt = vcpu->arch.dtl.pinned_addr; in __kvmppc_create_dtl_entry()
818 vcpu->arch.dtl_ptr = dt; in __kvmppc_create_dtl_entry()
821 vpa->dtl_idx = cpu_to_be64(++vcpu->arch.dtl_index); in __kvmppc_create_dtl_entry()
823 /* vcpu->arch.dtl.dirty is set by the caller */ in __kvmppc_create_dtl_entry()
835 vpa = vcpu->arch.vpa.pinned_addr; in kvmppc_update_vpa_dispatch()
842 stolen = core_stolen - vcpu->arch.stolen_logged; in kvmppc_update_vpa_dispatch()
843 vcpu->arch.stolen_logged = core_stolen; in kvmppc_update_vpa_dispatch()
844 spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); in kvmppc_update_vpa_dispatch()
845 stolen += vcpu->arch.busy_stolen; in kvmppc_update_vpa_dispatch()
846 vcpu->arch.busy_stolen = 0; in kvmppc_update_vpa_dispatch()
847 spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); in kvmppc_update_vpa_dispatch()
853 vcpu->arch.vpa.dirty = true; in kvmppc_update_vpa_dispatch()
864 vpa = vcpu->arch.vpa.pinned_addr; in kvmppc_update_vpa_dispatch_p9()
869 stolen_delta = stolen - vcpu->arch.stolen_logged; in kvmppc_update_vpa_dispatch_p9()
870 vcpu->arch.stolen_logged = stolen; in kvmppc_update_vpa_dispatch_p9()
876 vcpu->arch.vpa.dirty = true; in kvmppc_update_vpa_dispatch_p9()
885 if (vcpu->arch.doorbell_request) in kvmppc_doorbell_pending()
895 vc = vcpu->arch.vcore; in kvmppc_doorbell_pending()
946 if (!vcpu->kvm->arch.dawr1_enabled) in kvmppc_h_set_mode()
1045 struct kvmppc_vcore *vcore = target->arch.vcore; in kvm_arch_vcpu_yield_to()
1060 if (target->arch.state == KVMPPC_VCPU_RUNNABLE && in kvm_arch_vcpu_yield_to()
1075 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_get_yield_count()
1076 lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr; in kvmppc_get_yield_count()
1079 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_get_yield_count()
1104 do_h_rpt_invalidate_prt(pid, vcpu->arch.nested->shadow_lpid, in kvmppc_nested_h_rpt_invalidate()
1140 do_h_rpt_invalidate_prt(id, vcpu->kvm->arch.lpid, in kvmppc_h_rpt_invalidate()
1155 !test_bit(req/4, vcpu->kvm->arch.enabled_hcalls)) in kvmppc_pseries_do_hcall()
1214 tvcpu->arch.prodded = 1; in kvmppc_pseries_do_hcall()
1216 if (tvcpu->arch.ceded) in kvmppc_pseries_do_hcall()
1239 if (list_empty(&kvm->arch.rtas_tokens)) in kvmppc_pseries_do_hcall()
1353 vcpu->arch.hcall_needed = 0; in kvmppc_pseries_do_hcall()
1357 vcpu->arch.hcall_needed = 0; in kvmppc_pseries_do_hcall()
1407 * Instead the kvm->arch.secure_guest flag is checked inside in kvmppc_pseries_do_hcall()
1418 vcpu->arch.hcall_needed = 0; in kvmppc_pseries_do_hcall()
1432 vcpu->arch.ceded = 1; in kvmppc_cede()
1434 if (vcpu->arch.prodded) { in kvmppc_cede()
1435 vcpu->arch.prodded = 0; in kvmppc_cede()
1437 vcpu->arch.ceded = 0; in kvmppc_cede()
1489 vcpu->run->debug.arch.address = kvmppc_get_pc(vcpu); in kvmppc_emulate_debug_inst()
1508 nthreads = vcpu->kvm->arch.emul_smt_mode; in kvmppc_read_dpdes()
1549 thr = vcpu->vcpu_id & (kvm->arch.emul_smt_mode - 1); in kvmppc_emulate_doorbell_instr()
1556 if (arg >= kvm->arch.emul_smt_mode) in kvmppc_emulate_doorbell_instr()
1561 if (!tvcpu->arch.doorbell_request) { in kvmppc_emulate_doorbell_instr()
1562 tvcpu->arch.doorbell_request = 1; in kvmppc_emulate_doorbell_instr()
1570 vcpu->arch.vcore->dpdes = 0; in kvmppc_emulate_doorbell_instr()
1571 vcpu->arch.doorbell_request = 0; in kvmppc_emulate_doorbell_instr()
1602 if (!(vcpu->arch.hfscr_permitted & HFSCR_PM)) in kvmppc_pmu_unavailable()
1612 if (!(vcpu->arch.hfscr_permitted & HFSCR_EBB)) in kvmppc_ebb_unavailable()
1622 if (!(vcpu->arch.hfscr_permitted & HFSCR_TM)) in kvmppc_tm_unavailable()
1649 vcpu->arch.trap, kvmppc_get_pc(vcpu), in kvmppc_handle_exit_hv()
1650 vcpu->arch.shregs.msr); in kvmppc_handle_exit_hv()
1653 run->hw.hardware_exit_reason = vcpu->arch.trap; in kvmppc_handle_exit_hv()
1658 switch (vcpu->arch.trap) { in kvmppc_handle_exit_hv()
1662 vcpu->arch.trap = BOOK3S_INTERRUPT_HV_DECREMENTER; in kvmppc_handle_exit_hv()
1688 machine_check_print_event_info(&vcpu->arch.mce_evt, false, true); in kvmppc_handle_exit_hv()
1696 if (!vcpu->kvm->arch.fwnmi_enabled) { in kvmppc_handle_exit_hv()
1706 run->hw.hardware_exit_reason = vcpu->arch.trap; in kvmppc_handle_exit_hv()
1710 if (vcpu->arch.mce_evt.disposition == MCE_DISPOSITION_RECOVERED) in kvmppc_handle_exit_hv()
1772 vcpu->arch.hcall_needed = 1; in kvmppc_handle_exit_hv()
1791 unlikely(vcpu->arch.fault_dsisr == HDSISR_CANARY)) { in kvmppc_handle_exit_hv()
1807 if (!(vcpu->arch.fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT))) { in kvmppc_handle_exit_hv()
1810 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); in kvmppc_handle_exit_hv()
1816 vsid = vcpu->kvm->arch.vrma_slb_v; in kvmppc_handle_exit_hv()
1818 vsid = vcpu->arch.fault_gpa; in kvmppc_handle_exit_hv()
1820 err = kvmppc_hpte_hv_fault(vcpu, vcpu->arch.fault_dar, in kvmppc_handle_exit_hv()
1821 vsid, vcpu->arch.fault_dsisr, true); in kvmppc_handle_exit_hv()
1829 vcpu->arch.fault_dar, err); in kvmppc_handle_exit_hv()
1838 vcpu->arch.fault_dar = kvmppc_get_pc(vcpu); in kvmppc_handle_exit_hv()
1839 vcpu->arch.fault_dsisr = __kvmppc_get_msr_hv(vcpu) & in kvmppc_handle_exit_hv()
1849 vcpu->arch.fault_dsisr |= DSISR_ISSTORE; in kvmppc_handle_exit_hv()
1854 if (!(vcpu->arch.fault_dsisr & SRR1_ISI_NOPT)) { in kvmppc_handle_exit_hv()
1856 vcpu->arch.fault_dsisr | in kvmppc_handle_exit_hv()
1863 vsid = vcpu->kvm->arch.vrma_slb_v; in kvmppc_handle_exit_hv()
1865 vsid = vcpu->arch.fault_gpa; in kvmppc_handle_exit_hv()
1867 err = kvmppc_hpte_hv_fault(vcpu, vcpu->arch.fault_dar, in kvmppc_handle_exit_hv()
1868 vsid, vcpu->arch.fault_dsisr, false); in kvmppc_handle_exit_hv()
1889 if (vcpu->arch.emul_inst != KVM_INST_FETCH_FAILED) in kvmppc_handle_exit_hv()
1890 vcpu->arch.last_inst = kvmppc_need_byteswap(vcpu) ? in kvmppc_handle_exit_hv()
1891 swab32(vcpu->arch.emul_inst) : in kvmppc_handle_exit_hv()
1892 vcpu->arch.emul_inst; in kvmppc_handle_exit_hv()
1959 vcpu->arch.trap, kvmppc_get_pc(vcpu), in kvmppc_handle_exit_hv()
1961 run->hw.hardware_exit_reason = vcpu->arch.trap; in kvmppc_handle_exit_hv()
1987 vcpu->arch.trap, kvmppc_get_pc(vcpu), in kvmppc_handle_nested_exit()
1992 switch (vcpu->arch.trap) { in kvmppc_handle_nested_exit()
2009 vcpu->arch.trap = BOOK3S_INTERRUPT_HV_DECREMENTER; in kvmppc_handle_nested_exit()
2027 machine_check_print_event_info(&vcpu->arch.mce_evt, false, true); in kvmppc_handle_nested_exit()
2042 vcpu->arch.fault_dar = kvmppc_get_pc(vcpu); in kvmppc_handle_nested_exit()
2043 vcpu->arch.fault_dsisr = kvmppc_get_msr(vcpu) & in kvmppc_handle_nested_exit()
2046 vcpu->arch.fault_dsisr |= DSISR_ISSTORE; in kvmppc_handle_nested_exit()
2071 vcpu->arch.trap = 0; in kvmppc_handle_nested_exit()
2107 sregs->pvr = vcpu->arch.pvr; in kvm_arch_vcpu_ioctl_get_sregs_hv()
2108 for (i = 0; i < vcpu->arch.slb_max; i++) { in kvm_arch_vcpu_ioctl_get_sregs_hv()
2109 sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige; in kvm_arch_vcpu_ioctl_get_sregs_hv()
2110 sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv; in kvm_arch_vcpu_ioctl_get_sregs_hv()
2122 if (sregs->pvr != vcpu->arch.pvr) in kvm_arch_vcpu_ioctl_set_sregs_hv()
2126 for (i = 0; i < vcpu->arch.slb_nr; i++) { in kvm_arch_vcpu_ioctl_set_sregs_hv()
2128 vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe; in kvm_arch_vcpu_ioctl_set_sregs_hv()
2129 vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv; in kvm_arch_vcpu_ioctl_set_sregs_hv()
2133 vcpu->arch.slb_max = j; in kvm_arch_vcpu_ioctl_set_sregs_hv()
2185 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_set_lpcr()
2215 if (vcpu->arch.vcore != vc) in kvmppc_set_lpcr()
2218 vcpu->arch.intr_msr |= MSR_LE; in kvmppc_set_lpcr()
2220 vcpu->arch.intr_msr &= ~MSR_LE; in kvmppc_set_lpcr()
2244 *val = get_reg_val(id, vcpu->arch.dabr); in kvmppc_get_one_reg_hv()
2247 *val = get_reg_val(id, vcpu->arch.dabrx); in kvmppc_get_one_reg_hv()
2275 *val = get_reg_val(id, vcpu->arch.mmcrs); in kvmppc_get_one_reg_hv()
2286 *val = get_reg_val(id, vcpu->arch.spmc[i]); in kvmppc_get_one_reg_hv()
2317 *val = get_reg_val(id, vcpu->arch.doorbell_request); in kvmppc_get_one_reg_hv()
2319 *val = get_reg_val(id, vcpu->arch.vcore->dpdes); in kvmppc_get_one_reg_hv()
2349 *val = get_reg_val(id, vcpu->arch.csigr); in kvmppc_get_one_reg_hv()
2352 *val = get_reg_val(id, vcpu->arch.tacr); in kvmppc_get_one_reg_hv()
2355 *val = get_reg_val(id, vcpu->arch.tcscr); in kvmppc_get_one_reg_hv()
2361 *val = get_reg_val(id, vcpu->arch.acop); in kvmppc_get_one_reg_hv()
2367 *val = get_reg_val(id, vcpu->arch.tid); in kvmppc_get_one_reg_hv()
2370 *val = get_reg_val(id, vcpu->arch.psscr); in kvmppc_get_one_reg_hv()
2373 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
2374 *val = get_reg_val(id, vcpu->arch.vpa.next_gpa); in kvmppc_get_one_reg_hv()
2375 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
2378 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
2379 val->vpaval.addr = vcpu->arch.slb_shadow.next_gpa; in kvmppc_get_one_reg_hv()
2380 val->vpaval.length = vcpu->arch.slb_shadow.len; in kvmppc_get_one_reg_hv()
2381 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
2384 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
2385 val->vpaval.addr = vcpu->arch.dtl.next_gpa; in kvmppc_get_one_reg_hv()
2386 val->vpaval.length = vcpu->arch.dtl.len; in kvmppc_get_one_reg_hv()
2387 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
2401 *val = get_reg_val(id, vcpu->arch.tfhar); in kvmppc_get_one_reg_hv()
2404 *val = get_reg_val(id, vcpu->arch.tfiar); in kvmppc_get_one_reg_hv()
2407 *val = get_reg_val(id, vcpu->arch.texasr); in kvmppc_get_one_reg_hv()
2411 *val = get_reg_val(id, vcpu->arch.gpr_tm[i]); in kvmppc_get_one_reg_hv()
2419 val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j]; in kvmppc_get_one_reg_hv()
2422 val->vval = vcpu->arch.vr_tm.vr[i-32]; in kvmppc_get_one_reg_hv()
2429 *val = get_reg_val(id, vcpu->arch.cr_tm); in kvmppc_get_one_reg_hv()
2432 *val = get_reg_val(id, vcpu->arch.xer_tm); in kvmppc_get_one_reg_hv()
2435 *val = get_reg_val(id, vcpu->arch.lr_tm); in kvmppc_get_one_reg_hv()
2438 *val = get_reg_val(id, vcpu->arch.ctr_tm); in kvmppc_get_one_reg_hv()
2441 *val = get_reg_val(id, vcpu->arch.fp_tm.fpscr); in kvmppc_get_one_reg_hv()
2444 *val = get_reg_val(id, vcpu->arch.amr_tm); in kvmppc_get_one_reg_hv()
2447 *val = get_reg_val(id, vcpu->arch.ppr_tm); in kvmppc_get_one_reg_hv()
2450 *val = get_reg_val(id, vcpu->arch.vrsave_tm); in kvmppc_get_one_reg_hv()
2454 *val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]); in kvmppc_get_one_reg_hv()
2459 *val = get_reg_val(id, vcpu->arch.dscr_tm); in kvmppc_get_one_reg_hv()
2462 *val = get_reg_val(id, vcpu->arch.tar_tm); in kvmppc_get_one_reg_hv()
2472 *val = get_reg_val(id, vcpu->arch.online); in kvmppc_get_one_reg_hv()
2475 *val = get_reg_val(id, vcpu->kvm->arch.l1_ptcr); in kvmppc_get_one_reg_hv()
2502 vcpu->arch.dabr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2505 vcpu->arch.dabrx = set_reg_val(id, *val) & ~DABRX_HYP; in kvmppc_set_one_reg_hv()
2533 vcpu->arch.mmcrs = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2544 vcpu->arch.spmc[i] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2569 vcpu->arch.doorbell_request = set_reg_val(id, *val) & 1; in kvmppc_set_one_reg_hv()
2571 vcpu->arch.vcore->dpdes = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2604 vcpu->arch.csigr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2607 vcpu->arch.tacr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2610 vcpu->arch.tcscr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2616 vcpu->arch.acop = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2622 vcpu->arch.tid = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2625 vcpu->arch.psscr = set_reg_val(id, *val) & PSSCR_GUEST_VIS; in kvmppc_set_one_reg_hv()
2630 if (!addr && (vcpu->arch.slb_shadow.next_gpa || in kvmppc_set_one_reg_hv()
2631 vcpu->arch.dtl.next_gpa)) in kvmppc_set_one_reg_hv()
2633 r = set_vpa(vcpu, &vcpu->arch.vpa, addr, sizeof(struct lppaca)); in kvmppc_set_one_reg_hv()
2639 if (addr && !vcpu->arch.vpa.next_gpa) in kvmppc_set_one_reg_hv()
2641 r = set_vpa(vcpu, &vcpu->arch.slb_shadow, addr, len); in kvmppc_set_one_reg_hv()
2648 !vcpu->arch.vpa.next_gpa)) in kvmppc_set_one_reg_hv()
2651 r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len); in kvmppc_set_one_reg_hv()
2684 vcpu->arch.tfhar = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2687 vcpu->arch.tfiar = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2690 vcpu->arch.texasr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2694 vcpu->arch.gpr_tm[i] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2702 vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j]; in kvmppc_set_one_reg_hv()
2705 vcpu->arch.vr_tm.vr[i-32] = val->vval; in kvmppc_set_one_reg_hv()
2711 vcpu->arch.cr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2714 vcpu->arch.xer_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2717 vcpu->arch.lr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2720 vcpu->arch.ctr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2723 vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2726 vcpu->arch.amr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2729 vcpu->arch.ppr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2732 vcpu->arch.vrsave_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2736 vcpu->arch.vr.vscr.u[3] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2741 vcpu->arch.dscr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2744 vcpu->arch.tar_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2755 if (i && !vcpu->arch.online) in kvmppc_set_one_reg_hv()
2756 atomic_inc(&vcpu->arch.vcore->online_count); in kvmppc_set_one_reg_hv()
2757 else if (!i && vcpu->arch.online) in kvmppc_set_one_reg_hv()
2758 atomic_dec(&vcpu->arch.vcore->online_count); in kvmppc_set_one_reg_hv()
2759 vcpu->arch.online = i; in kvmppc_set_one_reg_hv()
2762 vcpu->kvm->arch.l1_ptcr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2802 vcore->lpcr = kvm->arch.lpcr; in kvmppc_vcore_create()
2816 {"vcpu_entry", offsetof(struct kvm_vcpu, arch.vcpu_entry)},
2817 {"guest_entry", offsetof(struct kvm_vcpu, arch.guest_entry)},
2818 {"in_guest", offsetof(struct kvm_vcpu, arch.in_guest)},
2819 {"guest_exit", offsetof(struct kvm_vcpu, arch.guest_exit)},
2820 {"vcpu_exit", offsetof(struct kvm_vcpu, arch.vcpu_exit)},
2821 {"hypercall", offsetof(struct kvm_vcpu, arch.hcall)},
2822 {"page_fault", offsetof(struct kvm_vcpu, arch.pg_fault)},
2824 {"rm_entry", offsetof(struct kvm_vcpu, arch.rm_entry)},
2825 {"rm_intr", offsetof(struct kvm_vcpu, arch.rm_intr)},
2826 {"rm_exit", offsetof(struct kvm_vcpu, arch.rm_exit)},
2827 {"guest", offsetof(struct kvm_vcpu, arch.guest_time)},
2828 {"cede", offsetof(struct kvm_vcpu, arch.cede_time)},
2972 vcpu->arch.shared = &vcpu->arch.shregs; in kvmppc_core_vcpu_create_hv()
2979 vcpu->arch.shared_big_endian = true; in kvmppc_core_vcpu_create_hv()
2981 vcpu->arch.shared_big_endian = false; in kvmppc_core_vcpu_create_hv()
2986 err = kvmhv_nestedv2_vcpu_create(vcpu, &vcpu->arch.nestedv2_io); in kvmppc_core_vcpu_create_hv()
3000 spin_lock_init(&vcpu->arch.vpa_update_lock); in kvmppc_core_vcpu_create_hv()
3001 spin_lock_init(&vcpu->arch.tbacct_lock); in kvmppc_core_vcpu_create_hv()
3002 vcpu->arch.busy_preempt = TB_NIL; in kvmppc_core_vcpu_create_hv()
3004 vcpu->arch.intr_msr = MSR_SF | MSR_ME; in kvmppc_core_vcpu_create_hv()
3029 vcpu->arch.hfscr |= HFSCR_TM; in kvmppc_core_vcpu_create_hv()
3031 vcpu->arch.hfscr_permitted = kvmppc_get_hfscr_hv(vcpu); in kvmppc_core_vcpu_create_hv()
3040 vcpu->arch.state = KVMPPC_VCPU_NOTREADY; in kvmppc_core_vcpu_create_hv()
3042 init_waitqueue_head(&vcpu->arch.cpu_run); in kvmppc_core_vcpu_create_hv()
3048 if (id >= (KVM_MAX_VCPUS * kvm->arch.emul_smt_mode)) { in kvmppc_core_vcpu_create_hv()
3052 BUG_ON(kvm->arch.smt_mode != 1); in kvmppc_core_vcpu_create_hv()
3056 core = id / kvm->arch.smt_mode; in kvmppc_core_vcpu_create_hv()
3059 vcore = kvm->arch.vcores[core]; in kvmppc_core_vcpu_create_hv()
3070 id & ~(kvm->arch.smt_mode - 1)); in kvmppc_core_vcpu_create_hv()
3071 mutex_lock(&kvm->arch.mmu_setup_lock); in kvmppc_core_vcpu_create_hv()
3072 kvm->arch.vcores[core] = vcore; in kvmppc_core_vcpu_create_hv()
3073 kvm->arch.online_vcores++; in kvmppc_core_vcpu_create_hv()
3074 mutex_unlock(&kvm->arch.mmu_setup_lock); in kvmppc_core_vcpu_create_hv()
3085 vcpu->arch.vcore = vcore; in kvmppc_core_vcpu_create_hv()
3086 vcpu->arch.ptid = vcpu->vcpu_id - vcore->first_vcpuid; in kvmppc_core_vcpu_create_hv()
3087 vcpu->arch.thread_cpu = -1; in kvmppc_core_vcpu_create_hv()
3088 vcpu->arch.prev_cpu = -1; in kvmppc_core_vcpu_create_hv()
3090 vcpu->arch.cpu_type = KVM_CPU_3S_64; in kvmppc_core_vcpu_create_hv()
3123 if (!kvm->arch.online_vcores) { in kvmhv_set_smt_mode()
3124 kvm->arch.smt_mode = smt_mode; in kvmhv_set_smt_mode()
3125 kvm->arch.emul_smt_mode = esmt; in kvmhv_set_smt_mode()
3142 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_core_vcpu_free_hv()
3143 unpin_vpa(vcpu->kvm, &vcpu->arch.dtl); in kvmppc_core_vcpu_free_hv()
3144 unpin_vpa(vcpu->kvm, &vcpu->arch.slb_shadow); in kvmppc_core_vcpu_free_hv()
3145 unpin_vpa(vcpu->kvm, &vcpu->arch.vpa); in kvmppc_core_vcpu_free_hv()
3146 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_core_vcpu_free_hv()
3148 kvmhv_nestedv2_vcpu_free(vcpu, &vcpu->arch.nestedv2_io); in kvmppc_core_vcpu_free_hv()
3169 hrtimer_start(&vcpu->arch.dec_timer, dec_nsec, HRTIMER_MODE_REL); in kvmppc_set_timer()
3170 vcpu->arch.timer_running = 1; in kvmppc_set_timer()
3180 if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) in kvmppc_remove_runnable()
3182 spin_lock_irq(&vcpu->arch.tbacct_lock); in kvmppc_remove_runnable()
3184 vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) - in kvmppc_remove_runnable()
3185 vcpu->arch.stolen_logged; in kvmppc_remove_runnable()
3186 vcpu->arch.busy_preempt = now; in kvmppc_remove_runnable()
3187 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; in kvmppc_remove_runnable()
3188 spin_unlock_irq(&vcpu->arch.tbacct_lock); in kvmppc_remove_runnable()
3190 WRITE_ONCE(vc->runnable_threads[vcpu->arch.ptid], NULL); in kvmppc_remove_runnable()
3242 struct kvm_nested_guest *nested = vcpu->arch.nested; in radix_flush_cpu()
3249 need_tlb_flush = &kvm->arch.need_tlb_flush; in radix_flush_cpu()
3287 if (kvm->arch.lpcr & LPCR_GTSE) in do_migrate_away_vcpu()
3295 struct kvm_nested_guest *nested = vcpu->arch.nested; in kvmppc_prepare_radix_vcpu()
3303 prev_cpu = nested->prev_cpu[vcpu->arch.nested_vcpu_id]; in kvmppc_prepare_radix_vcpu()
3305 prev_cpu = vcpu->arch.prev_cpu; in kvmppc_prepare_radix_vcpu()
3328 nested->prev_cpu[vcpu->arch.nested_vcpu_id] = pcpu; in kvmppc_prepare_radix_vcpu()
3330 vcpu->arch.prev_cpu = pcpu; in kvmppc_prepare_radix_vcpu()
3341 if (vcpu->arch.timer_running) { in kvmppc_start_thread()
3342 hrtimer_try_to_cancel(&vcpu->arch.dec_timer); in kvmppc_start_thread()
3343 vcpu->arch.timer_running = 0; in kvmppc_start_thread()
3345 cpu += vcpu->arch.ptid; in kvmppc_start_thread()
3347 vcpu->arch.thread_cpu = cpu; in kvmppc_start_thread()
3587 if (signal_pending(vcpu->arch.run_task)) in prepare_threads()
3588 vcpu->arch.ret = -EINTR; in prepare_threads()
3589 else if (vcpu->arch.vpa.update_pending || in prepare_threads()
3590 vcpu->arch.slb_shadow.update_pending || in prepare_threads()
3591 vcpu->arch.dtl.update_pending) in prepare_threads()
3592 vcpu->arch.ret = RESUME_GUEST; in prepare_threads()
3596 wake_up(&vcpu->arch.cpu_run); in prepare_threads()
3610 if (!pvc->n_runnable || !pvc->kvm->arch.mmu_ready) { in collect_piggybacks()
3639 if (!vc->kvm->arch.mmu_ready) in recheck_signals_and_mmu()
3642 if (signal_pending(vcpu->arch.run_task)) in recheck_signals_and_mmu()
3662 * so any vcpus becoming runnable will have their arch.trap in post_guest_process()
3674 if (vcpu->arch.trap) in post_guest_process()
3676 vcpu->arch.run_task); in post_guest_process()
3678 vcpu->arch.ret = ret; in post_guest_process()
3679 vcpu->arch.trap = 0; in post_guest_process()
3682 if (is_kvmppc_resume_guest(vcpu->arch.ret)) { in post_guest_process()
3683 if (vcpu->arch.pending_exceptions) in post_guest_process()
3685 if (vcpu->arch.ceded) in post_guest_process()
3691 wake_up(&vcpu->arch.cpu_run); in post_guest_process()
3707 wake_up(&vcpu->arch.cpu_run); in post_guest_process()
3805 if (vc->runner->arch.state != KVMPPC_VCPU_RUNNABLE) in kvmppc_run_core()
3829 vcpu->arch.ret = -EBUSY; in kvmppc_run_core()
3831 wake_up(&vcpu->arch.cpu_run); in kvmppc_run_core()
3963 * It updates vcpu->cpu and vcpu->arch.thread_cpu in kvmppc_run_core()
3972 if (!vcpu->arch.ptid) in kvmppc_run_core()
3974 active |= 1 << (thr + vcpu->arch.ptid); in kvmppc_run_core()
4103 struct lppaca *lp = vcpu->arch.vpa.pinned_addr; in vcpu_vpa_increment_dispatch()
4107 vcpu->arch.vpa.dirty = 1; in vcpu_vpa_increment_dispatch()
4170 vcpu->arch.l1_to_l2_cs = l1_to_l2_ns; in do_trace_nested_cs_time()
4171 vcpu->arch.l2_to_l1_cs = l2_to_l1_ns; in do_trace_nested_cs_time()
4172 vcpu->arch.l2_runtime_agg = l2_runtime_ns; in do_trace_nested_cs_time()
4196 struct kvm_vcpu_arch *arch; in kvmhv_get_l1_to_l2_cs_time_vcpu() local
4200 arch = &vcpu->arch; in kvmhv_get_l1_to_l2_cs_time_vcpu()
4201 return arch->l1_to_l2_cs; in kvmhv_get_l1_to_l2_cs_time_vcpu()
4211 struct kvm_vcpu_arch *arch; in kvmhv_get_l2_to_l1_cs_time_vcpu() local
4215 arch = &vcpu->arch; in kvmhv_get_l2_to_l1_cs_time_vcpu()
4216 return arch->l2_to_l1_cs; in kvmhv_get_l2_to_l1_cs_time_vcpu()
4226 struct kvm_vcpu_arch *arch; in kvmhv_get_l2_runtime_agg_vcpu() local
4230 arch = &vcpu->arch; in kvmhv_get_l2_runtime_agg_vcpu()
4231 return arch->l2_runtime_agg; in kvmhv_get_l2_runtime_agg_vcpu()
4257 if (vcpu->arch.doorbell_request) { in kvmhv_vcpu_entry_nestedv2()
4258 vcpu->arch.doorbell_request = 0; in kvmhv_vcpu_entry_nestedv2()
4262 io = &vcpu->arch.nestedv2_io; in kvmhv_vcpu_entry_nestedv2()
4275 accumulate_time(vcpu, &vcpu->arch.in_guest); in kvmhv_vcpu_entry_nestedv2()
4276 rc = plpar_guest_run_vcpu(0, vcpu->kvm->arch.lpid, vcpu->vcpu_id, in kvmhv_vcpu_entry_nestedv2()
4289 accumulate_time(vcpu, &vcpu->arch.guest_exit); in kvmhv_vcpu_entry_nestedv2()
4338 if (vcpu->arch.psscr != host_psscr) in kvmhv_vcpu_entry_p9_nested()
4339 mtspr(SPRN_PSSCR_PR, vcpu->arch.psscr); in kvmhv_vcpu_entry_p9_nested()
4344 vcpu->arch.regs.msr = vcpu->arch.shregs.msr; in kvmhv_vcpu_entry_p9_nested()
4346 if (vcpu->arch.nested) { in kvmhv_vcpu_entry_p9_nested()
4347 hvregs.lpid = vcpu->arch.nested->shadow_lpid; in kvmhv_vcpu_entry_p9_nested()
4348 hvregs.vcpu_token = vcpu->arch.nested_vcpu_id; in kvmhv_vcpu_entry_p9_nested()
4350 hvregs.lpid = vcpu->kvm->arch.lpid; in kvmhv_vcpu_entry_p9_nested()
4361 if (vcpu->arch.doorbell_request) in kvmhv_vcpu_entry_p9_nested()
4362 vcpu->arch.doorbell_request = 0; in kvmhv_vcpu_entry_p9_nested()
4380 mtspr(SPRN_DAR, vcpu->arch.shregs.dar); in kvmhv_vcpu_entry_p9_nested()
4381 mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr); in kvmhv_vcpu_entry_p9_nested()
4383 accumulate_time(vcpu, &vcpu->arch.in_guest); in kvmhv_vcpu_entry_p9_nested()
4385 __pa(&vcpu->arch.regs)); in kvmhv_vcpu_entry_p9_nested()
4386 accumulate_time(vcpu, &vcpu->arch.guest_exit); in kvmhv_vcpu_entry_p9_nested()
4389 vcpu->arch.shregs.msr = vcpu->arch.regs.msr; in kvmhv_vcpu_entry_p9_nested()
4390 vcpu->arch.shregs.dar = mfspr(SPRN_DAR); in kvmhv_vcpu_entry_p9_nested()
4391 vcpu->arch.shregs.dsisr = mfspr(SPRN_DSISR); in kvmhv_vcpu_entry_p9_nested()
4392 vcpu->arch.psscr = mfspr(SPRN_PSSCR_PR); in kvmhv_vcpu_entry_p9_nested()
4400 vcpu->arch.dec_expires = dec + (*tb + kvmppc_get_tb_offset(vcpu)); in kvmhv_vcpu_entry_p9_nested()
4405 if (vcpu->arch.psscr != host_psscr) in kvmhv_vcpu_entry_p9_nested()
4418 struct kvm_nested_guest *nested = vcpu->arch.nested; in kvmhv_p9_guest_entry()
4430 vcpu->arch.ceded = 0; in kvmhv_p9_guest_entry()
4477 vcpu->arch.ceded = 0; in kvmhv_p9_guest_entry()
4509 vcpu->arch.slb_max = 0; in kvmhv_p9_guest_entry()
4526 prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state); in kvmppc_wait_for_exec()
4527 if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) { in kvmppc_wait_for_exec()
4532 finish_wait(&vcpu->arch.cpu_run, &wait); in kvmppc_wait_for_exec()
4558 return vcpu->arch.irq_pending || vcpu->arch.xive_saved_state.pipr < in xive_interrupt_pending()
4559 vcpu->arch.xive_saved_state.cppr; in xive_interrupt_pending()
4570 if (vcpu->arch.pending_exceptions || vcpu->arch.prodded || in kvmppc_vcpu_woken()
4579 if (!vcpu->arch.ceded || kvmppc_vcpu_woken(vcpu)) in kvmppc_vcpu_check_block()
4724 mutex_lock(&kvm->arch.mmu_setup_lock); in kvmhv_setup_mmu()
4725 if (!kvm->arch.mmu_ready) { in kvmhv_setup_mmu()
4731 kvm->arch.mmu_ready = 1; in kvmhv_setup_mmu()
4734 mutex_unlock(&kvm->arch.mmu_setup_lock); in kvmhv_setup_mmu()
4748 vcpu->arch.ret = RESUME_GUEST; in kvmppc_run_vcpu()
4749 vcpu->arch.trap = 0; in kvmppc_run_vcpu()
4755 vc = vcpu->arch.vcore; in kvmppc_run_vcpu()
4757 vcpu->arch.ceded = 0; in kvmppc_run_vcpu()
4758 vcpu->arch.run_task = current; in kvmppc_run_vcpu()
4759 vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb()); in kvmppc_run_vcpu()
4760 vcpu->arch.state = KVMPPC_VCPU_RUNNABLE; in kvmppc_run_vcpu()
4761 vcpu->arch.busy_preempt = TB_NIL; in kvmppc_run_vcpu()
4762 WRITE_ONCE(vc->runnable_threads[vcpu->arch.ptid], vcpu); in kvmppc_run_vcpu()
4783 while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && in kvmppc_run_vcpu()
4786 if (!vcpu->kvm->arch.mmu_ready) { in kvmppc_run_vcpu()
4794 vcpu->arch.ret = r; in kvmppc_run_vcpu()
4808 if (signal_pending(v->arch.run_task)) { in kvmppc_run_vcpu()
4812 v->arch.ret = -EINTR; in kvmppc_run_vcpu()
4813 wake_up(&v->arch.cpu_run); in kvmppc_run_vcpu()
4816 if (!vc->n_runnable || vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) in kvmppc_run_vcpu()
4821 n_ceded += v->arch.ceded; in kvmppc_run_vcpu()
4823 v->arch.ceded = 0; in kvmppc_run_vcpu()
4840 while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && in kvmppc_run_vcpu()
4849 if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) { in kvmppc_run_vcpu()
4853 vcpu->arch.ret = -EINTR; in kvmppc_run_vcpu()
4860 wake_up(&v->arch.cpu_run); in kvmppc_run_vcpu()
4865 return vcpu->arch.ret; in kvmppc_run_vcpu()
4877 struct kvm_nested_guest *nested = vcpu->arch.nested; in kvmhv_run_single_vcpu()
4884 vcpu->arch.ret = RESUME_GUEST; in kvmhv_run_single_vcpu()
4885 vcpu->arch.trap = 0; in kvmhv_run_single_vcpu()
4887 vc = vcpu->arch.vcore; in kvmhv_run_single_vcpu()
4888 vcpu->arch.ceded = 0; in kvmhv_run_single_vcpu()
4889 vcpu->arch.run_task = current; in kvmhv_run_single_vcpu()
4890 vcpu->arch.last_inst = KVM_INST_FETCH_FAILED; in kvmhv_run_single_vcpu()
4893 if (unlikely(!kvm->arch.mmu_ready)) { in kvmhv_run_single_vcpu()
4898 vcpu->arch.ret = r; in kvmhv_run_single_vcpu()
4916 vcpu->arch.state = KVMPPC_VCPU_RUNNABLE; in kvmhv_run_single_vcpu()
4920 if (need_resched() || !kvm->arch.mmu_ready) in kvmhv_run_single_vcpu()
4924 vcpu->arch.thread_cpu = pcpu; in kvmhv_run_single_vcpu()
4941 &vcpu->arch.pending_exceptions) || in kvmhv_run_single_vcpu()
4955 * L1's copy of L2's LPCR (vcpu->arch.vcore->lpcr) can get its MER bit in kvmhv_run_single_vcpu()
4966 } else if (vcpu->arch.pending_exceptions || in kvmhv_run_single_vcpu()
4968 vcpu->arch.ret = RESUME_HOST; in kvmhv_run_single_vcpu()
4972 if (vcpu->arch.timer_running) { in kvmhv_run_single_vcpu()
4973 hrtimer_try_to_cancel(&vcpu->arch.dec_timer); in kvmhv_run_single_vcpu()
4974 vcpu->arch.timer_running = 0; in kvmhv_run_single_vcpu()
4991 vcpu->arch.trap = trap; in kvmhv_run_single_vcpu()
5001 vcpu->arch.thread_cpu = -1; in kvmhv_run_single_vcpu()
5002 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; in kvmhv_run_single_vcpu()
5041 vcpu->arch.ret = r; in kvmhv_run_single_vcpu()
5052 vcpu->arch.ret = -EINTR; in kvmhv_run_single_vcpu()
5065 vcpu->arch.ceded = 0; in kvmhv_run_single_vcpu()
5070 return vcpu->arch.ret; in kvmhv_run_single_vcpu()
5075 vcpu->arch.ret = -EINTR; in kvmhv_run_single_vcpu()
5078 vcpu->arch.thread_cpu = -1; in kvmhv_run_single_vcpu()
5079 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; in kvmhv_run_single_vcpu()
5093 start_timing(vcpu, &vcpu->arch.vcpu_entry); in kvmppc_vcpu_run_hv()
5095 if (!vcpu->arch.sane) { in kvmppc_vcpu_run_hv()
5125 if (!vcpu->arch.online) { in kvmppc_vcpu_run_hv()
5126 atomic_inc(&vcpu->arch.vcore->online_count); in kvmppc_vcpu_run_hv()
5127 vcpu->arch.online = 1; in kvmppc_vcpu_run_hv()
5133 atomic_inc(&kvm->arch.vcpus_running); in kvmppc_vcpu_run_hv()
5155 vcpu->arch.waitp = &vcpu->arch.vcore->wait; in kvmppc_vcpu_run_hv()
5156 vcpu->arch.pgdir = kvm->mm->pgd; in kvmppc_vcpu_run_hv()
5157 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; in kvmppc_vcpu_run_hv()
5160 accumulate_time(vcpu, &vcpu->arch.guest_entry); in kvmppc_vcpu_run_hv()
5163 vcpu->arch.vcore->lpcr); in kvmppc_vcpu_run_hv()
5168 accumulate_time(vcpu, &vcpu->arch.hcall); in kvmppc_vcpu_run_hv()
5185 accumulate_time(vcpu, &vcpu->arch.pg_fault); in kvmppc_vcpu_run_hv()
5188 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); in kvmppc_vcpu_run_hv()
5197 accumulate_time(vcpu, &vcpu->arch.vcpu_exit); in kvmppc_vcpu_run_hv()
5199 vcpu->arch.state = KVMPPC_VCPU_NOTREADY; in kvmppc_vcpu_run_hv()
5200 atomic_dec(&kvm->arch.vcpus_running); in kvmppc_vcpu_run_hv()
5312 spin_lock(&vcpu->arch.vpa_update_lock); in kvm_vm_ioctl_get_dirty_log_hv()
5313 kvmppc_harvest_vpa_dirty(&vcpu->arch.vpa, memslot, buf); in kvm_vm_ioctl_get_dirty_log_hv()
5314 kvmppc_harvest_vpa_dirty(&vcpu->arch.dtl, memslot, buf); in kvm_vm_ioctl_get_dirty_log_hv()
5315 spin_unlock(&vcpu->arch.vpa_update_lock); in kvm_vm_ioctl_get_dirty_log_hv()
5330 vfree(slot->arch.rmap); in kvmppc_core_free_memslot_hv()
5331 slot->arch.rmap = NULL; in kvmppc_core_free_memslot_hv()
5340 unsigned long size = array_size(new->npages, sizeof(*new->arch.rmap)); in kvmppc_core_prepare_memory_region_hv()
5345 new->arch.rmap = vzalloc(size); in kvmppc_core_prepare_memory_region_hv()
5346 if (!new->arch.rmap) in kvmppc_core_prepare_memory_region_hv()
5349 new->arch.rmap = old->arch.rmap; in kvmppc_core_prepare_memory_region_hv()
5367 atomic64_inc(&kvm->arch.mmio_update); in kvmppc_core_commit_memory_region_hv()
5388 if (!kvm->arch.secure_guest) in kvmppc_core_commit_memory_region_hv()
5409 * Update LPCR values in kvm->arch and in vcores.
5410 * Caller must hold kvm->arch.mmu_setup_lock (for mutual exclusion
5411 * of kvm->arch.lpcr update).
5418 if ((kvm->arch.lpcr & mask) == lpcr) in kvmppc_update_lpcr()
5421 kvm->arch.lpcr = (kvm->arch.lpcr & ~mask) | lpcr; in kvmppc_update_lpcr()
5424 struct kvmppc_vcore *vc = kvm->arch.vcores[i]; in kvmppc_update_lpcr()
5432 if (++cores_done >= kvm->arch.online_vcores) in kvmppc_update_lpcr()
5451 dw0 = ((kvm->arch.vrma_slb_v & SLB_VSID_L) >> 1) | in kvmppc_setup_partition_table()
5452 ((kvm->arch.vrma_slb_v & SLB_VSID_LP) << 1); in kvmppc_setup_partition_table()
5454 dw0 |= kvm->arch.sdr1; in kvmppc_setup_partition_table()
5457 dw1 = kvm->arch.process_table; in kvmppc_setup_partition_table()
5460 __pa(kvm->arch.pgtable) | RADIX_PGD_INDEX_SIZE; in kvmppc_setup_partition_table()
5461 dw1 = PATB_GR | kvm->arch.process_table; in kvmppc_setup_partition_table()
5463 kvmhv_set_ptbl_entry(kvm->arch.lpid, dw0, dw1); in kvmppc_setup_partition_table()
5468 * Must be called with kvm->arch.mmu_setup_lock held.
5482 if (!kvm->arch.hpt.virt) { in kvmppc_hv_setup_htab_rma()
5531 kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T | in kvmppc_hv_setup_htab_rma()
5543 /* Order updates to kvm->arch.lpcr etc. vs. mmu_ready */ in kvmppc_hv_setup_htab_rma()
5557 * Must be called with kvm->arch.mmu_setup_lock held and
5567 kvm->arch.process_table = 0; in kvmppc_switch_mmu_to_hpt()
5570 kvm->arch.radix = 0; in kvmppc_switch_mmu_to_hpt()
5584 * Must be called with kvm->arch.mmu_setup_lock held and
5598 kvm->arch.radix = 1; in kvmppc_switch_mmu_to_radix()
5600 kvmppc_free_hpt(&kvm->arch.hpt); in kvmppc_switch_mmu_to_radix()
5607 (kvm->arch.host_lpcr & LPCR_HAIL)) in kvmppc_switch_mmu_to_radix()
5703 mutex_init(&kvm->arch.uvmem_lock); in kvmppc_core_init_vm_hv()
5704 INIT_LIST_HEAD(&kvm->arch.uvmem_pfns); in kvmppc_core_init_vm_hv()
5705 mutex_init(&kvm->arch.mmu_setup_lock); in kvmppc_core_init_vm_hv()
5713 kvm->arch.lpid = lpid; in kvmppc_core_init_vm_hv()
5742 kvm->arch.lpid = guest_id; in kvmppc_core_init_vm_hv()
5754 cpumask_setall(&kvm->arch.need_tlb_flush); in kvmppc_core_init_vm_hv()
5757 memcpy(kvm->arch.enabled_hcalls, default_enabled_hcalls, in kvmppc_core_init_vm_hv()
5758 sizeof(kvm->arch.enabled_hcalls)); in kvmppc_core_init_vm_hv()
5761 kvm->arch.host_sdr1 = mfspr(SPRN_SDR1); in kvmppc_core_init_vm_hv()
5765 kvm->arch.host_lpid = mfspr(SPRN_LPID); in kvmppc_core_init_vm_hv()
5766 kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR); in kvmppc_core_init_vm_hv()
5777 kvm->arch.vrma_slb_v = SLB_VSID_B_1T | in kvmppc_core_init_vm_hv()
5805 kvm->arch.radix = 1; in kvmppc_core_init_vm_hv()
5806 kvm->arch.mmu_ready = 1; in kvmppc_core_init_vm_hv()
5811 (kvm->arch.host_lpcr & LPCR_HAIL)) in kvmppc_core_init_vm_hv()
5816 plpar_guest_delete(0, kvm->arch.lpid); in kvmppc_core_init_vm_hv()
5818 kvmppc_free_lpid(kvm->arch.lpid); in kvmppc_core_init_vm_hv()
5825 kvm->arch.lpcr = lpcr; in kvmppc_core_init_vm_hv()
5828 kvm->arch.resize_hpt = NULL; in kvmppc_core_init_vm_hv()
5838 kvm->arch.tlb_sets = 1; in kvmppc_core_init_vm_hv()
5840 kvm->arch.tlb_sets = POWER9_TLB_SETS_RADIX; /* 128 */ in kvmppc_core_init_vm_hv()
5842 kvm->arch.tlb_sets = POWER9_TLB_SETS_HASH; /* 256 */ in kvmppc_core_init_vm_hv()
5844 kvm->arch.tlb_sets = POWER8_TLB_SETS; /* 512 */ in kvmppc_core_init_vm_hv()
5846 kvm->arch.tlb_sets = POWER7_TLB_SETS; /* 128 */ in kvmppc_core_init_vm_hv()
5863 kvm->arch.smt_mode = threads_per_subcore; in kvmppc_core_init_vm_hv()
5865 kvm->arch.smt_mode = 1; in kvmppc_core_init_vm_hv()
5866 kvm->arch.emul_smt_mode = 1; in kvmppc_core_init_vm_hv()
5884 kfree(kvm->arch.vcores[i]); in kvmppc_free_vcores()
5885 kvm->arch.online_vcores = 0; in kvmppc_free_vcores()
5899 kvmppc_free_hpt(&kvm->arch.hpt); in kvmppc_core_destroy_vm_hv()
5905 kvm->arch.process_table = 0; in kvmppc_core_destroy_vm_hv()
5906 if (kvm->arch.secure_guest) in kvmppc_core_destroy_vm_hv()
5907 uv_svm_terminate(kvm->arch.lpid); in kvmppc_core_destroy_vm_hv()
5909 kvmhv_set_ptbl_entry(kvm->arch.lpid, 0, 0); in kvmppc_core_destroy_vm_hv()
5913 kvmhv_flush_lpid(kvm->arch.lpid); in kvmppc_core_destroy_vm_hv()
5914 plpar_guest_delete(0, kvm->arch.lpid); in kvmppc_core_destroy_vm_hv()
5916 kvmppc_free_lpid(kvm->arch.lpid); in kvmppc_core_destroy_vm_hv()
5958 kfree(kvm->arch.pimap); in kvmppc_free_pimap()
5984 pimap = kvm->arch.pimap; in kvmppc_set_passthru_irq()
5992 kvm->arch.pimap = pimap; in kvmppc_set_passthru_irq()
6076 if (!kvm->arch.pimap) in kvmppc_clr_passthru_irq()
6079 pimap = kvm->arch.pimap; in kvmppc_clr_passthru_irq()
6289 mutex_lock(&kvm->arch.mmu_setup_lock); in kvmhv_configure_mmu()
6291 if (kvm->arch.mmu_ready) { in kvmhv_configure_mmu()
6292 kvm->arch.mmu_ready = 0; in kvmhv_configure_mmu()
6295 if (atomic_read(&kvm->arch.vcpus_running)) { in kvmhv_configure_mmu()
6296 kvm->arch.mmu_ready = 1; in kvmhv_configure_mmu()
6309 kvm->arch.process_table = cfg->process_table; in kvmhv_configure_mmu()
6317 mutex_unlock(&kvm->arch.mmu_setup_lock); in kvmhv_configure_mmu()
6334 kvm->arch.nested_enable = true; in kvmhv_enable_nested()
6351 if (rc && vcpu->arch.nested) in kvmhv_load_from_eaddr()
6370 if (rc && vcpu->arch.nested) in kvmhv_store_to_eaddr()
6396 kvm->arch.svm_enabled = 1; in kvmhv_enable_svm()
6416 if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)) in kvmhv_svm_off()
6419 mutex_lock(&kvm->arch.mmu_setup_lock); in kvmhv_svm_off()
6420 mmu_was_ready = kvm->arch.mmu_ready; in kvmhv_svm_off()
6421 if (kvm->arch.mmu_ready) { in kvmhv_svm_off()
6422 kvm->arch.mmu_ready = 0; in kvmhv_svm_off()
6425 if (atomic_read(&kvm->arch.vcpus_running)) { in kvmhv_svm_off()
6426 kvm->arch.mmu_ready = 1; in kvmhv_svm_off()
6443 uv_unregister_mem_slot(kvm->arch.lpid, memslot->id); in kvmhv_svm_off()
6448 ret = uv_svm_terminate(kvm->arch.lpid); in kvmhv_svm_off()
6465 spin_lock(&vcpu->arch.vpa_update_lock); in kvmhv_svm_off()
6466 unpin_vpa_reset(kvm, &vcpu->arch.dtl); in kvmhv_svm_off()
6467 unpin_vpa_reset(kvm, &vcpu->arch.slb_shadow); in kvmhv_svm_off()
6468 unpin_vpa_reset(kvm, &vcpu->arch.vpa); in kvmhv_svm_off()
6469 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmhv_svm_off()
6473 kvm->arch.secure_guest = 0; in kvmhv_svm_off()
6474 kvm->arch.mmu_ready = mmu_was_ready; in kvmhv_svm_off()
6476 mutex_unlock(&kvm->arch.mmu_setup_lock); in kvmhv_svm_off()
6487 kvm->arch.dawr1_enabled = true; in kvmhv_enable_dawr1()