Lines Matching +full:intc +full:- +full:no +full:- +full:eoi

1 // SPDX-License-Identifier: GPL-2.0-only
9 * Kevin Wolf <mail@kevin-wolf.de>
32 #include <linux/page-flags.h>
50 #include <asm/ppc-opcode.h>
51 #include <asm/asm-prototypes.h>
73 #include <asm/pnv-pci.h>
108 MODULE_PARM_DESC(dynamic_mt_modes, "Set of allowed dynamic micro-threading modes: 0 (= none), 2, 4,…
170 vcpu = READ_ONCE(vc->runnable_threads[i]); in next_runnable_thread()
181 for (i = -1; (vcpu = next_runnable_thread(vc, &i)); )
215 if (paca_ptrs[cpu]->kvm_hstate.xics_phys) { in kvmppc_ipi_thread()
241 ++vcpu->stat.generic.halt_wakeup; in kvmppc_fast_vcpu_kick_hv()
243 cpu = READ_ONCE(vcpu->arch.thread_cpu); in kvmppc_fast_vcpu_kick_hv()
248 cpu = vcpu->cpu; in kvmppc_fast_vcpu_kick_hv()
267 * a vcore using vc->stolen_tb, and the stolen time when the vcpu
281 * updates to vc->stolen_tb are protected by the vcore->stoltb_lock
299 spin_lock_irqsave(&vc->stoltb_lock, flags); in kvmppc_core_start_stolen()
300 vc->preempt_tb = tb; in kvmppc_core_start_stolen()
301 spin_unlock_irqrestore(&vc->stoltb_lock, flags); in kvmppc_core_start_stolen()
310 spin_lock_irqsave(&vc->stoltb_lock, flags); in kvmppc_core_end_stolen()
311 if (vc->preempt_tb != TB_NIL) { in kvmppc_core_end_stolen()
312 vc->stolen_tb += tb - vc->preempt_tb; in kvmppc_core_end_stolen()
313 vc->preempt_tb = TB_NIL; in kvmppc_core_end_stolen()
315 spin_unlock_irqrestore(&vc->stoltb_lock, flags); in kvmppc_core_end_stolen()
320 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_core_vcpu_load_hv()
325 if (vcpu->arch.busy_preempt != TB_NIL) { in kvmppc_core_vcpu_load_hv()
326 WARN_ON_ONCE(vcpu->arch.state != KVMPPC_VCPU_BUSY_IN_HOST); in kvmppc_core_vcpu_load_hv()
327 vc->stolen_tb += mftb() - vcpu->arch.busy_preempt; in kvmppc_core_vcpu_load_hv()
328 vcpu->arch.busy_preempt = TB_NIL; in kvmppc_core_vcpu_load_hv()
336 * We can test vc->runner without taking the vcore lock, in kvmppc_core_vcpu_load_hv()
337 * because only this task ever sets vc->runner to this in kvmppc_core_vcpu_load_hv()
341 if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING) in kvmppc_core_vcpu_load_hv()
344 spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); in kvmppc_core_vcpu_load_hv()
345 if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST && in kvmppc_core_vcpu_load_hv()
346 vcpu->arch.busy_preempt != TB_NIL) { in kvmppc_core_vcpu_load_hv()
347 vcpu->arch.busy_stolen += now - vcpu->arch.busy_preempt; in kvmppc_core_vcpu_load_hv()
348 vcpu->arch.busy_preempt = TB_NIL; in kvmppc_core_vcpu_load_hv()
350 spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); in kvmppc_core_vcpu_load_hv()
355 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_core_vcpu_put_hv()
364 WARN_ON_ONCE(vcpu->arch.state == KVMPPC_VCPU_RUNNABLE); in kvmppc_core_vcpu_put_hv()
370 vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST) in kvmppc_core_vcpu_put_hv()
371 vcpu->arch.busy_preempt = mftb(); in kvmppc_core_vcpu_put_hv()
377 if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING) in kvmppc_core_vcpu_put_hv()
380 spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); in kvmppc_core_vcpu_put_hv()
381 if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST) in kvmppc_core_vcpu_put_hv()
382 vcpu->arch.busy_preempt = now; in kvmppc_core_vcpu_put_hv()
383 spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); in kvmppc_core_vcpu_put_hv()
388 vcpu->arch.pvr = pvr; in kvmppc_set_pvr_hv()
418 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_set_arch_compat()
454 return -EINVAL; in kvmppc_set_arch_compat()
460 return -EINVAL; in kvmppc_set_arch_compat()
470 return -EINVAL; in kvmppc_set_arch_compat()
473 spin_lock(&vc->lock); in kvmppc_set_arch_compat()
474 vc->arch_compat = arch_compat; in kvmppc_set_arch_compat()
480 vc->pcr = (host_pcr_bit - guest_pcr_bit) | PCR_MASK; in kvmppc_set_arch_compat()
481 spin_unlock(&vc->lock); in kvmppc_set_arch_compat()
490 pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id); in kvmppc_dump_regs()
492 vcpu->arch.regs.nip, vcpu->arch.shregs.msr, vcpu->arch.trap); in kvmppc_dump_regs()
498 vcpu->arch.regs.ctr, vcpu->arch.regs.link); in kvmppc_dump_regs()
500 vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1); in kvmppc_dump_regs()
502 vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1); in kvmppc_dump_regs()
504 vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3); in kvmppc_dump_regs()
506 vcpu->arch.regs.ccr, vcpu->arch.regs.xer, vcpu->arch.shregs.dsisr); in kvmppc_dump_regs()
507 pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar); in kvmppc_dump_regs()
509 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); in kvmppc_dump_regs()
510 pr_err("SLB (%d entries):\n", vcpu->arch.slb_max); in kvmppc_dump_regs()
511 for (r = 0; r < vcpu->arch.slb_max; ++r) in kvmppc_dump_regs()
513 vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv); in kvmppc_dump_regs()
515 vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1, in kvmppc_dump_regs()
516 vcpu->arch.last_inst); in kvmppc_dump_regs()
526 vpa->__old_status |= LPPACA_OLD_SHARED_PROC; in init_vpa()
527 vpa->yield_count = cpu_to_be32(1); in init_vpa()
534 if (addr & (L1_CACHE_BYTES - 1)) in set_vpa()
535 return -EINVAL; in set_vpa()
536 spin_lock(&vcpu->arch.vpa_update_lock); in set_vpa()
537 if (v->next_gpa != addr || v->len != len) { in set_vpa()
538 v->next_gpa = addr; in set_vpa()
539 v->len = addr ? len : 0; in set_vpa()
540 v->update_pending = 1; in set_vpa()
542 spin_unlock(&vcpu->arch.vpa_update_lock); in set_vpa()
546 /* Length for a per-processor buffer is passed in at offset 4 in the buffer */
557 if (vpap->update_pending) in vpa_is_registered()
558 return vpap->next_gpa != 0; in vpa_is_registered()
559 return vpap->pinned_addr != NULL; in vpa_is_registered()
566 struct kvm *kvm = vcpu->kvm; in do_h_register_vpa()
581 /* Registering new area - address must be cache-line aligned */ in do_h_register_vpa()
582 if ((vpa & (L1_CACHE_BYTES - 1)) || !vpa) in do_h_register_vpa()
590 len = be16_to_cpu(((struct reg_vpa *)va)->length.hword); in do_h_register_vpa()
592 len = be32_to_cpu(((struct reg_vpa *)va)->length.word); in do_h_register_vpa()
605 spin_lock(&tvcpu->arch.vpa_update_lock); in do_h_register_vpa()
618 vpap = &tvcpu->arch.vpa; in do_h_register_vpa()
625 len -= len % sizeof(struct dtl_entry); in do_h_register_vpa()
629 if (!vpa_is_registered(&tvcpu->arch.vpa)) in do_h_register_vpa()
632 vpap = &tvcpu->arch.dtl; in do_h_register_vpa()
639 if (!vpa_is_registered(&tvcpu->arch.vpa)) in do_h_register_vpa()
642 vpap = &tvcpu->arch.slb_shadow; in do_h_register_vpa()
649 if (vpa_is_registered(&tvcpu->arch.dtl) || in do_h_register_vpa()
650 vpa_is_registered(&tvcpu->arch.slb_shadow)) in do_h_register_vpa()
653 vpap = &tvcpu->arch.vpa; in do_h_register_vpa()
658 vpap = &tvcpu->arch.dtl; in do_h_register_vpa()
663 vpap = &tvcpu->arch.slb_shadow; in do_h_register_vpa()
669 vpap->next_gpa = vpa; in do_h_register_vpa()
670 vpap->len = len; in do_h_register_vpa()
671 vpap->update_pending = 1; in do_h_register_vpa()
674 spin_unlock(&tvcpu->arch.vpa_update_lock); in do_h_register_vpa()
682 struct kvm *kvm = vcpu->kvm; in kvmppc_update_vpa()
688 * We need to pin the page pointed to by vpap->next_gpa, in kvmppc_update_vpa()
696 gpa = vpap->next_gpa; in kvmppc_update_vpa()
697 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_update_vpa()
702 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_update_vpa()
703 if (gpa == vpap->next_gpa) in kvmppc_update_vpa()
710 vpap->update_pending = 0; in kvmppc_update_vpa()
711 if (va && nb < vpap->len) { in kvmppc_update_vpa()
722 vpap->gpa = gpa; in kvmppc_update_vpa()
723 vpap->pinned_addr = va; in kvmppc_update_vpa()
724 vpap->dirty = false; in kvmppc_update_vpa()
726 vpap->pinned_end = va + vpap->len; in kvmppc_update_vpa()
731 struct kvm *kvm = vcpu->kvm; in kvmppc_update_vpas()
734 if (!(vcpu->arch.vpa.update_pending || in kvmppc_update_vpas()
735 vcpu->arch.slb_shadow.update_pending || in kvmppc_update_vpas()
736 vcpu->arch.dtl.update_pending)) in kvmppc_update_vpas()
739 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_update_vpas()
740 if (vcpu->arch.vpa.update_pending) { in kvmppc_update_vpas()
741 kvmppc_update_vpa(vcpu, &vcpu->arch.vpa, &old_vpa); in kvmppc_update_vpas()
748 if (vcpu->arch.vpa.pinned_addr) { in kvmppc_update_vpas()
749 init_vpa(vcpu, vcpu->arch.vpa.pinned_addr); in kvmppc_update_vpas()
751 kvmhv_nestedv2_set_vpa(vcpu, __pa(vcpu->arch.vpa.pinned_addr)); in kvmppc_update_vpas()
754 if (vcpu->arch.dtl.update_pending) { in kvmppc_update_vpas()
755 kvmppc_update_vpa(vcpu, &vcpu->arch.dtl, &old_vpa); in kvmppc_update_vpas()
759 vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr; in kvmppc_update_vpas()
760 vcpu->arch.dtl_index = 0; in kvmppc_update_vpas()
762 if (vcpu->arch.slb_shadow.update_pending) { in kvmppc_update_vpas()
763 kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow, &old_vpa); in kvmppc_update_vpas()
769 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_update_vpas()
783 spin_lock_irqsave(&vc->stoltb_lock, flags); in vcore_stolen_time()
784 p = vc->stolen_tb; in vcore_stolen_time()
785 if (vc->vcore_state != VCORE_INACTIVE && in vcore_stolen_time()
786 vc->preempt_tb != TB_NIL) in vcore_stolen_time()
787 p += now - vc->preempt_tb; in vcore_stolen_time()
788 spin_unlock_irqrestore(&vc->stoltb_lock, flags); in vcore_stolen_time()
799 dt = vcpu->arch.dtl_ptr; in __kvmppc_create_dtl_entry()
804 dt->dispatch_reason = 7; in __kvmppc_create_dtl_entry()
805 dt->preempt_reason = 0; in __kvmppc_create_dtl_entry()
806 dt->processor_id = cpu_to_be16(pcpu + vcpu->arch.ptid); in __kvmppc_create_dtl_entry()
807 dt->enqueue_to_dispatch_time = cpu_to_be32(stolen); in __kvmppc_create_dtl_entry()
808 dt->ready_to_enqueue_time = 0; in __kvmppc_create_dtl_entry()
809 dt->waiting_to_ready_time = 0; in __kvmppc_create_dtl_entry()
810 dt->timebase = cpu_to_be64(now); in __kvmppc_create_dtl_entry()
811 dt->fault_addr = 0; in __kvmppc_create_dtl_entry()
812 dt->srr0 = cpu_to_be64(kvmppc_get_pc(vcpu)); in __kvmppc_create_dtl_entry()
813 dt->srr1 = cpu_to_be64(vcpu->arch.shregs.msr); in __kvmppc_create_dtl_entry()
816 if (dt == vcpu->arch.dtl.pinned_end) in __kvmppc_create_dtl_entry()
817 dt = vcpu->arch.dtl.pinned_addr; in __kvmppc_create_dtl_entry()
818 vcpu->arch.dtl_ptr = dt; in __kvmppc_create_dtl_entry()
819 /* order writing *dt vs. writing vpa->dtl_idx */ in __kvmppc_create_dtl_entry()
821 vpa->dtl_idx = cpu_to_be64(++vcpu->arch.dtl_index); in __kvmppc_create_dtl_entry()
823 /* vcpu->arch.dtl.dirty is set by the caller */ in __kvmppc_create_dtl_entry()
835 vpa = vcpu->arch.vpa.pinned_addr; in kvmppc_update_vpa_dispatch()
842 stolen = core_stolen - vcpu->arch.stolen_logged; in kvmppc_update_vpa_dispatch()
843 vcpu->arch.stolen_logged = core_stolen; in kvmppc_update_vpa_dispatch()
844 spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags); in kvmppc_update_vpa_dispatch()
845 stolen += vcpu->arch.busy_stolen; in kvmppc_update_vpa_dispatch()
846 vcpu->arch.busy_stolen = 0; in kvmppc_update_vpa_dispatch()
847 spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags); in kvmppc_update_vpa_dispatch()
849 vpa->enqueue_dispatch_tb = cpu_to_be64(be64_to_cpu(vpa->enqueue_dispatch_tb) + stolen); in kvmppc_update_vpa_dispatch()
851 __kvmppc_create_dtl_entry(vcpu, vpa, vc->pcpu, now + kvmppc_get_tb_offset(vcpu), stolen); in kvmppc_update_vpa_dispatch()
853 vcpu->arch.vpa.dirty = true; in kvmppc_update_vpa_dispatch()
864 vpa = vcpu->arch.vpa.pinned_addr; in kvmppc_update_vpa_dispatch_p9()
868 stolen = vc->stolen_tb; in kvmppc_update_vpa_dispatch_p9()
869 stolen_delta = stolen - vcpu->arch.stolen_logged; in kvmppc_update_vpa_dispatch_p9()
870 vcpu->arch.stolen_logged = stolen; in kvmppc_update_vpa_dispatch_p9()
872 vpa->enqueue_dispatch_tb = cpu_to_be64(stolen); in kvmppc_update_vpa_dispatch_p9()
874 __kvmppc_create_dtl_entry(vcpu, vpa, vc->pcpu, now, stolen_delta); in kvmppc_update_vpa_dispatch_p9()
876 vcpu->arch.vpa.dirty = true; in kvmppc_update_vpa_dispatch_p9()
885 if (vcpu->arch.doorbell_request) in kvmppc_doorbell_pending()
890 * Ensure that the read of vcore->dpdes comes after the read in kvmppc_doorbell_pending()
891 * of vcpu->doorbell_request. This barrier matches the in kvmppc_doorbell_pending()
895 vc = vcpu->arch.vcore; in kvmppc_doorbell_pending()
896 thr = vcpu->vcpu_id - vc->first_vcpuid; in kvmppc_doorbell_pending()
897 return !!(vc->dpdes & (1 << thr)); in kvmppc_doorbell_pending()
946 if (!vcpu->kvm->arch.dawr1_enabled) in kvmppc_h_set_mode()
969 /* Copy guest memory in place - must reside within a single memslot */
981 return -EFAULT; in kvmppc_copy_guest()
982 if ((from + len) >= ((from_memslot->base_gfn + from_memslot->npages) in kvmppc_copy_guest()
984 return -EINVAL; in kvmppc_copy_guest()
987 return -EFAULT; in kvmppc_copy_guest()
988 from_addr |= (from & (PAGE_SIZE - 1)); in kvmppc_copy_guest()
993 return -EFAULT; in kvmppc_copy_guest()
994 if ((to + len) >= ((to_memslot->base_gfn + to_memslot->npages) in kvmppc_copy_guest()
996 return -EINVAL; in kvmppc_copy_guest()
999 return -EFAULT; in kvmppc_copy_guest()
1000 to_addr |= (to & (PAGE_SIZE - 1)); in kvmppc_copy_guest()
1006 return -EFAULT; in kvmppc_copy_guest()
1015 u64 pg_mask = SZ_4K - 1; in kvmppc_h_page_init()
1029 ret = kvmppc_copy_guest(vcpu->kvm, dest, src, pg_sz); in kvmppc_h_page_init()
1033 ret = kvm_clear_guest(vcpu->kvm, dest, pg_sz); in kvmppc_h_page_init()
1045 struct kvmppc_vcore *vcore = target->arch.vcore; in kvm_arch_vcpu_yield_to()
1055 * mode handler is not called but no other threads are in the in kvm_arch_vcpu_yield_to()
1059 spin_lock(&vcore->lock); in kvm_arch_vcpu_yield_to()
1060 if (target->arch.state == KVMPPC_VCPU_RUNNABLE && in kvm_arch_vcpu_yield_to()
1061 vcore->vcore_state != VCORE_INACTIVE && in kvm_arch_vcpu_yield_to()
1062 vcore->runner) in kvm_arch_vcpu_yield_to()
1063 target = vcore->runner; in kvm_arch_vcpu_yield_to()
1064 spin_unlock(&vcore->lock); in kvm_arch_vcpu_yield_to()
1075 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_get_yield_count()
1076 lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr; in kvmppc_get_yield_count()
1078 yield_count = be32_to_cpu(lppaca->yield_count); in kvmppc_get_yield_count()
1079 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_get_yield_count()
1086 * Handles only nested process-scoped invalidation requests in L0.
1094 * The partition-scoped invalidations aren't handled here in L0. in kvmppc_nested_h_rpt_invalidate()
1104 do_h_rpt_invalidate_prt(pid, vcpu->arch.nested->shadow_lpid, in kvmppc_nested_h_rpt_invalidate()
1116 if (!kvm_is_radix(vcpu->kvm)) in kvmppc_h_rpt_invalidate()
1123 * Partition-scoped invalidation for nested guests. in kvmppc_h_rpt_invalidate()
1126 if (!nesting_enabled(vcpu->kvm)) in kvmppc_h_rpt_invalidate()
1138 * Process-scoped invalidation for L1 guests. in kvmppc_h_rpt_invalidate()
1140 do_h_rpt_invalidate_prt(id, vcpu->kvm->arch.lpid, in kvmppc_h_rpt_invalidate()
1147 struct kvm *kvm = vcpu->kvm; in kvmppc_pseries_do_hcall()
1155 !test_bit(req/4, vcpu->kvm->arch.enabled_hcalls)) in kvmppc_pseries_do_hcall()
1214 tvcpu->arch.prodded = 1; in kvmppc_pseries_do_hcall()
1216 if (tvcpu->arch.ceded) in kvmppc_pseries_do_hcall()
1221 if (target == -1) in kvmppc_pseries_do_hcall()
1239 if (list_empty(&kvm->arch.rtas_tokens)) in kvmppc_pseries_do_hcall()
1242 idx = srcu_read_lock(&kvm->srcu); in kvmppc_pseries_do_hcall()
1244 srcu_read_unlock(&kvm->srcu, idx); in kvmppc_pseries_do_hcall()
1246 if (rc == -ENOENT) in kvmppc_pseries_do_hcall()
1353 vcpu->arch.hcall_needed = 0; in kvmppc_pseries_do_hcall()
1354 return -EINTR; in kvmppc_pseries_do_hcall()
1357 vcpu->arch.hcall_needed = 0; in kvmppc_pseries_do_hcall()
1407 * Instead the kvm->arch.secure_guest flag is checked inside in kvmppc_pseries_do_hcall()
1418 vcpu->arch.hcall_needed = 0; in kvmppc_pseries_do_hcall()
1423 * Handle H_CEDE in the P9 path where we don't call the real-mode hcall
1432 vcpu->arch.ceded = 1; in kvmppc_cede()
1434 if (vcpu->arch.prodded) { in kvmppc_cede()
1435 vcpu->arch.prodded = 0; in kvmppc_cede()
1437 vcpu->arch.ceded = 0; in kvmppc_cede()
1470 /* See if it's in the real-mode table */ in kvmppc_hcall_impl_hv()
1488 vcpu->run->exit_reason = KVM_EXIT_DEBUG; in kvmppc_emulate_debug_inst()
1489 vcpu->run->debug.arch.address = kvmppc_get_pc(vcpu); in kvmppc_emulate_debug_inst()
1508 nthreads = vcpu->kvm->arch.emul_smt_mode; in kvmppc_read_dpdes()
1510 cpu = vcpu->vcpu_id & ~(nthreads - 1); in kvmppc_read_dpdes()
1512 v = kvmppc_find_vcpu(vcpu->kvm, cpu); in kvmppc_read_dpdes()
1518 * which will update its vcore->dpdes value. in kvmppc_read_dpdes()
1520 pcpu = READ_ONCE(v->cpu); in kvmppc_read_dpdes()
1530 * On POWER9, emulate doorbell-related instructions in order to
1531 * give the guest the illusion of running on a multi-threaded core.
1539 struct kvm *kvm = vcpu->kvm; in kvmppc_emulate_doorbell_instr()
1549 thr = vcpu->vcpu_id & (kvm->arch.emul_smt_mode - 1); in kvmppc_emulate_doorbell_instr()
1556 if (arg >= kvm->arch.emul_smt_mode) in kvmppc_emulate_doorbell_instr()
1558 tvcpu = kvmppc_find_vcpu(kvm, vcpu->vcpu_id - thr + arg); in kvmppc_emulate_doorbell_instr()
1561 if (!tvcpu->arch.doorbell_request) { in kvmppc_emulate_doorbell_instr()
1562 tvcpu->arch.doorbell_request = 1; in kvmppc_emulate_doorbell_instr()
1570 vcpu->arch.vcore->dpdes = 0; in kvmppc_emulate_doorbell_instr()
1571 vcpu->arch.doorbell_request = 0; in kvmppc_emulate_doorbell_instr()
1602 if (!(vcpu->arch.hfscr_permitted & HFSCR_PM)) in kvmppc_pmu_unavailable()
1612 if (!(vcpu->arch.hfscr_permitted & HFSCR_EBB)) in kvmppc_ebb_unavailable()
1622 if (!(vcpu->arch.hfscr_permitted & HFSCR_TM)) in kvmppc_tm_unavailable()
1633 struct kvm_run *run = vcpu->run; in kvmppc_handle_exit_hv()
1636 vcpu->stat.sum_exits++; in kvmppc_handle_exit_hv()
1641 * setting paca->kvm_hstate.in_guest to KVM_GUEST_MODE_GUEST_HV in kvmppc_handle_exit_hv()
1649 vcpu->arch.trap, kvmppc_get_pc(vcpu), in kvmppc_handle_exit_hv()
1650 vcpu->arch.shregs.msr); in kvmppc_handle_exit_hv()
1652 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvmppc_handle_exit_hv()
1653 run->hw.hardware_exit_reason = vcpu->arch.trap; in kvmppc_handle_exit_hv()
1656 run->exit_reason = KVM_EXIT_UNKNOWN; in kvmppc_handle_exit_hv()
1657 run->ready_for_interrupt_injection = 1; in kvmppc_handle_exit_hv()
1658 switch (vcpu->arch.trap) { in kvmppc_handle_exit_hv()
1659 /* We're good on these - the host merely wanted to get our attention */ in kvmppc_handle_exit_hv()
1662 vcpu->arch.trap = BOOK3S_INTERRUPT_HV_DECREMENTER; in kvmppc_handle_exit_hv()
1665 vcpu->stat.dec_exits++; in kvmppc_handle_exit_hv()
1671 vcpu->stat.ext_intr_exits++; in kvmppc_handle_exit_hv()
1688 machine_check_print_event_info(&vcpu->arch.mce_evt,false, true); in kvmppc_handle_exit_hv()
1696 if (!vcpu->kvm->arch.fwnmi_enabled) { in kvmppc_handle_exit_hv()
1705 run->exit_reason = KVM_EXIT_NMI; in kvmppc_handle_exit_hv()
1706 run->hw.hardware_exit_reason = vcpu->arch.trap; in kvmppc_handle_exit_hv()
1707 /* Clear out the old NMI status from run->flags */ in kvmppc_handle_exit_hv()
1708 run->flags &= ~KVM_RUN_PPC_NMI_DISP_MASK; in kvmppc_handle_exit_hv()
1710 if (vcpu->arch.mce_evt.disposition == MCE_DISPOSITION_RECOVERED) in kvmppc_handle_exit_hv()
1711 run->flags |= KVM_RUN_PPC_NMI_DISP_FULLY_RECOV; in kvmppc_handle_exit_hv()
1713 run->flags |= KVM_RUN_PPC_NMI_DISP_NOT_RECOV; in kvmppc_handle_exit_hv()
1764 * hcall - gather args and set exit_reason. This will next be in kvmppc_handle_exit_hv()
1768 run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3); in kvmppc_handle_exit_hv()
1770 run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i); in kvmppc_handle_exit_hv()
1771 run->exit_reason = KVM_EXIT_PAPR_HCALL; in kvmppc_handle_exit_hv()
1772 vcpu->arch.hcall_needed = 1; in kvmppc_handle_exit_hv()
1791 unlikely(vcpu->arch.fault_dsisr == HDSISR_CANARY)) { in kvmppc_handle_exit_hv()
1796 if (kvm_is_radix(vcpu->kvm) || !cpu_has_feature(CPU_FTR_ARCH_300)) { in kvmppc_handle_exit_hv()
1798 * Radix doesn't require anything, and pre-ISAv3.0 hash in kvmppc_handle_exit_hv()
1807 if (!(vcpu->arch.fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT))) { in kvmppc_handle_exit_hv()
1810 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); in kvmppc_handle_exit_hv()
1816 vsid = vcpu->kvm->arch.vrma_slb_v; in kvmppc_handle_exit_hv()
1818 vsid = vcpu->arch.fault_gpa; in kvmppc_handle_exit_hv()
1820 err = kvmppc_hpte_hv_fault(vcpu, vcpu->arch.fault_dar, in kvmppc_handle_exit_hv()
1821 vsid, vcpu->arch.fault_dsisr, true); in kvmppc_handle_exit_hv()
1824 } else if (err == -1 || err == -2) { in kvmppc_handle_exit_hv()
1829 vcpu->arch.fault_dar, err); in kvmppc_handle_exit_hv()
1838 vcpu->arch.fault_dar = kvmppc_get_pc(vcpu); in kvmppc_handle_exit_hv()
1839 vcpu->arch.fault_dsisr = __kvmppc_get_msr_hv(vcpu) & in kvmppc_handle_exit_hv()
1841 if (kvm_is_radix(vcpu->kvm) || !cpu_has_feature(CPU_FTR_ARCH_300)) { in kvmppc_handle_exit_hv()
1843 * Radix doesn't require anything, and pre-ISAv3.0 hash in kvmppc_handle_exit_hv()
1849 vcpu->arch.fault_dsisr |= DSISR_ISSTORE; in kvmppc_handle_exit_hv()
1854 if (!(vcpu->arch.fault_dsisr & SRR1_ISI_NOPT)) { in kvmppc_handle_exit_hv()
1856 vcpu->arch.fault_dsisr | in kvmppc_handle_exit_hv()
1863 vsid = vcpu->kvm->arch.vrma_slb_v; in kvmppc_handle_exit_hv()
1865 vsid = vcpu->arch.fault_gpa; in kvmppc_handle_exit_hv()
1867 err = kvmppc_hpte_hv_fault(vcpu, vcpu->arch.fault_dar, in kvmppc_handle_exit_hv()
1868 vsid, vcpu->arch.fault_dsisr, false); in kvmppc_handle_exit_hv()
1871 } else if (err == -1) { in kvmppc_handle_exit_hv()
1889 if (vcpu->arch.emul_inst != KVM_INST_FETCH_FAILED) in kvmppc_handle_exit_hv()
1890 vcpu->arch.last_inst = kvmppc_need_byteswap(vcpu) ? in kvmppc_handle_exit_hv()
1891 swab32(vcpu->arch.emul_inst) : in kvmppc_handle_exit_hv()
1892 vcpu->arch.emul_inst; in kvmppc_handle_exit_hv()
1893 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) { in kvmppc_handle_exit_hv()
1905 * This occurs for various TM-related instructions that in kvmppc_handle_exit_hv()
1907 * handled the cases where the guest was in real-suspend in kvmppc_handle_exit_hv()
1911 if (r != -1) in kvmppc_handle_exit_hv()
1959 vcpu->arch.trap, kvmppc_get_pc(vcpu), in kvmppc_handle_exit_hv()
1961 run->hw.hardware_exit_reason = vcpu->arch.trap; in kvmppc_handle_exit_hv()
1974 vcpu->stat.sum_exits++; in kvmppc_handle_nested_exit()
1979 * setting paca->kvm_hstate.in_guest to KVM_GUEST_MODE_GUEST_HV in kvmppc_handle_nested_exit()
1987 vcpu->arch.trap, kvmppc_get_pc(vcpu), in kvmppc_handle_nested_exit()
1992 switch (vcpu->arch.trap) { in kvmppc_handle_nested_exit()
1993 /* We're good on these - the host merely wanted to get our attention */ in kvmppc_handle_nested_exit()
1995 vcpu->stat.dec_exits++; in kvmppc_handle_nested_exit()
1999 vcpu->stat.ext_intr_exits++; in kvmppc_handle_nested_exit()
2004 vcpu->stat.ext_intr_exits++; in kvmppc_handle_nested_exit()
2009 vcpu->arch.trap = BOOK3S_INTERRUPT_HV_DECREMENTER; in kvmppc_handle_nested_exit()
2010 vcpu->stat.dec_exits++; in kvmppc_handle_nested_exit()
2027 machine_check_print_event_info(&vcpu->arch.mce_evt, false, true); in kvmppc_handle_nested_exit()
2037 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in kvmppc_handle_nested_exit()
2039 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); in kvmppc_handle_nested_exit()
2042 vcpu->arch.fault_dar = kvmppc_get_pc(vcpu); in kvmppc_handle_nested_exit()
2043 vcpu->arch.fault_dsisr = kvmppc_get_msr(vcpu) & in kvmppc_handle_nested_exit()
2046 vcpu->arch.fault_dsisr |= DSISR_ISSTORE; in kvmppc_handle_nested_exit()
2047 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); in kvmppc_handle_nested_exit()
2049 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); in kvmppc_handle_nested_exit()
2055 * This occurs for various TM-related instructions that in kvmppc_handle_nested_exit()
2057 * handled the cases where the guest was in real-suspend in kvmppc_handle_nested_exit()
2061 if (r != -1) in kvmppc_handle_nested_exit()
2071 vcpu->arch.trap = 0; in kvmppc_handle_nested_exit()
2082 * guests for process-scoped invalidations when in kvmppc_handle_nested_exit()
2107 sregs->pvr = vcpu->arch.pvr; in kvm_arch_vcpu_ioctl_get_sregs_hv()
2108 for (i = 0; i < vcpu->arch.slb_max; i++) { in kvm_arch_vcpu_ioctl_get_sregs_hv()
2109 sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige; in kvm_arch_vcpu_ioctl_get_sregs_hv()
2110 sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv; in kvm_arch_vcpu_ioctl_get_sregs_hv()
2122 if (sregs->pvr != vcpu->arch.pvr) in kvm_arch_vcpu_ioctl_set_sregs_hv()
2123 return -EINVAL; in kvm_arch_vcpu_ioctl_set_sregs_hv()
2126 for (i = 0; i < vcpu->arch.slb_nr; i++) { in kvm_arch_vcpu_ioctl_set_sregs_hv()
2127 if (sregs->u.s.ppc64.slb[i].slbe & SLB_ESID_V) { in kvm_arch_vcpu_ioctl_set_sregs_hv()
2128 vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe; in kvm_arch_vcpu_ioctl_set_sregs_hv()
2129 vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv; in kvm_arch_vcpu_ioctl_set_sregs_hv()
2133 vcpu->arch.slb_max = j; in kvm_arch_vcpu_ioctl_set_sregs_hv()
2184 struct kvm *kvm = vcpu->kvm; in kvmppc_set_lpcr()
2185 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_set_lpcr()
2188 spin_lock(&vc->lock); in kvmppc_set_lpcr()
2192 * DPFD (default prefetch depth), ILE (interrupt little-endian), in kvmppc_set_lpcr()
2199 /* Broken 32-bit version of LPCR must not clear top bits */ in kvmppc_set_lpcr()
2204 (vc->lpcr & ~mask) | (new_lpcr & mask)); in kvmppc_set_lpcr()
2207 * If ILE (interrupt little-endian) has changed, update the in kvmppc_set_lpcr()
2210 if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) { in kvmppc_set_lpcr()
2215 if (vcpu->arch.vcore != vc) in kvmppc_set_lpcr()
2218 vcpu->arch.intr_msr |= MSR_LE; in kvmppc_set_lpcr()
2220 vcpu->arch.intr_msr &= ~MSR_LE; in kvmppc_set_lpcr()
2224 vc->lpcr = new_lpcr; in kvmppc_set_lpcr()
2227 spin_unlock(&vc->lock); in kvmppc_set_lpcr()
2244 *val = get_reg_val(id, vcpu->arch.dabr); in kvmppc_get_one_reg_hv()
2247 *val = get_reg_val(id, vcpu->arch.dabrx); in kvmppc_get_one_reg_hv()
2265 i = id - KVM_REG_PPC_MMCR0; in kvmppc_get_one_reg_hv()
2275 *val = get_reg_val(id, vcpu->arch.mmcrs); in kvmppc_get_one_reg_hv()
2281 i = id - KVM_REG_PPC_PMC1; in kvmppc_get_one_reg_hv()
2285 i = id - KVM_REG_PPC_SPMC1; in kvmppc_get_one_reg_hv()
2286 *val = get_reg_val(id, vcpu->arch.spmc[i]); in kvmppc_get_one_reg_hv()
2313 * either vcore->dpdes or doorbell_request. in kvmppc_get_one_reg_hv()
2317 *val = get_reg_val(id, vcpu->arch.doorbell_request); in kvmppc_get_one_reg_hv()
2319 *val = get_reg_val(id, vcpu->arch.vcore->dpdes); in kvmppc_get_one_reg_hv()
2349 *val = get_reg_val(id, vcpu->arch.csigr); in kvmppc_get_one_reg_hv()
2352 *val = get_reg_val(id, vcpu->arch.tacr); in kvmppc_get_one_reg_hv()
2355 *val = get_reg_val(id, vcpu->arch.tcscr); in kvmppc_get_one_reg_hv()
2361 *val = get_reg_val(id, vcpu->arch.acop); in kvmppc_get_one_reg_hv()
2367 *val = get_reg_val(id, vcpu->arch.tid); in kvmppc_get_one_reg_hv()
2370 *val = get_reg_val(id, vcpu->arch.psscr); in kvmppc_get_one_reg_hv()
2373 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
2374 *val = get_reg_val(id, vcpu->arch.vpa.next_gpa); in kvmppc_get_one_reg_hv()
2375 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
2378 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
2379 val->vpaval.addr = vcpu->arch.slb_shadow.next_gpa; in kvmppc_get_one_reg_hv()
2380 val->vpaval.length = vcpu->arch.slb_shadow.len; in kvmppc_get_one_reg_hv()
2381 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
2384 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
2385 val->vpaval.addr = vcpu->arch.dtl.next_gpa; in kvmppc_get_one_reg_hv()
2386 val->vpaval.length = vcpu->arch.dtl.len; in kvmppc_get_one_reg_hv()
2387 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_get_one_reg_hv()
2401 *val = get_reg_val(id, vcpu->arch.tfhar); in kvmppc_get_one_reg_hv()
2404 *val = get_reg_val(id, vcpu->arch.tfiar); in kvmppc_get_one_reg_hv()
2407 *val = get_reg_val(id, vcpu->arch.texasr); in kvmppc_get_one_reg_hv()
2410 i = id - KVM_REG_PPC_TM_GPR0; in kvmppc_get_one_reg_hv()
2411 *val = get_reg_val(id, vcpu->arch.gpr_tm[i]); in kvmppc_get_one_reg_hv()
2416 i = id - KVM_REG_PPC_TM_VSR0; in kvmppc_get_one_reg_hv()
2419 val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j]; in kvmppc_get_one_reg_hv()
2422 val->vval = vcpu->arch.vr_tm.vr[i-32]; in kvmppc_get_one_reg_hv()
2424 r = -ENXIO; in kvmppc_get_one_reg_hv()
2429 *val = get_reg_val(id, vcpu->arch.cr_tm); in kvmppc_get_one_reg_hv()
2432 *val = get_reg_val(id, vcpu->arch.xer_tm); in kvmppc_get_one_reg_hv()
2435 *val = get_reg_val(id, vcpu->arch.lr_tm); in kvmppc_get_one_reg_hv()
2438 *val = get_reg_val(id, vcpu->arch.ctr_tm); in kvmppc_get_one_reg_hv()
2441 *val = get_reg_val(id, vcpu->arch.fp_tm.fpscr); in kvmppc_get_one_reg_hv()
2444 *val = get_reg_val(id, vcpu->arch.amr_tm); in kvmppc_get_one_reg_hv()
2447 *val = get_reg_val(id, vcpu->arch.ppr_tm); in kvmppc_get_one_reg_hv()
2450 *val = get_reg_val(id, vcpu->arch.vrsave_tm); in kvmppc_get_one_reg_hv()
2454 *val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]); in kvmppc_get_one_reg_hv()
2456 r = -ENXIO; in kvmppc_get_one_reg_hv()
2459 *val = get_reg_val(id, vcpu->arch.dscr_tm); in kvmppc_get_one_reg_hv()
2462 *val = get_reg_val(id, vcpu->arch.tar_tm); in kvmppc_get_one_reg_hv()
2472 *val = get_reg_val(id, vcpu->arch.online); in kvmppc_get_one_reg_hv()
2475 *val = get_reg_val(id, vcpu->kvm->arch.l1_ptcr); in kvmppc_get_one_reg_hv()
2481 r = -EINVAL; in kvmppc_get_one_reg_hv()
2499 r = -EINVAL; in kvmppc_set_one_reg_hv()
2502 vcpu->arch.dabr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2505 vcpu->arch.dabrx = set_reg_val(id, *val) & ~DABRX_HYP; in kvmppc_set_one_reg_hv()
2523 i = id - KVM_REG_PPC_MMCR0; in kvmppc_set_one_reg_hv()
2533 vcpu->arch.mmcrs = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2539 i = id - KVM_REG_PPC_PMC1; in kvmppc_set_one_reg_hv()
2543 i = id - KVM_REG_PPC_SPMC1; in kvmppc_set_one_reg_hv()
2544 vcpu->arch.spmc[i] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2569 vcpu->arch.doorbell_request = set_reg_val(id, *val) & 1; in kvmppc_set_one_reg_hv()
2571 vcpu->arch.vcore->dpdes = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2604 vcpu->arch.csigr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2607 vcpu->arch.tacr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2610 vcpu->arch.tcscr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2616 vcpu->arch.acop = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2622 vcpu->arch.tid = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2625 vcpu->arch.psscr = set_reg_val(id, *val) & PSSCR_GUEST_VIS; in kvmppc_set_one_reg_hv()
2629 r = -EINVAL; in kvmppc_set_one_reg_hv()
2630 if (!addr && (vcpu->arch.slb_shadow.next_gpa || in kvmppc_set_one_reg_hv()
2631 vcpu->arch.dtl.next_gpa)) in kvmppc_set_one_reg_hv()
2633 r = set_vpa(vcpu, &vcpu->arch.vpa, addr, sizeof(struct lppaca)); in kvmppc_set_one_reg_hv()
2636 addr = val->vpaval.addr; in kvmppc_set_one_reg_hv()
2637 len = val->vpaval.length; in kvmppc_set_one_reg_hv()
2638 r = -EINVAL; in kvmppc_set_one_reg_hv()
2639 if (addr && !vcpu->arch.vpa.next_gpa) in kvmppc_set_one_reg_hv()
2641 r = set_vpa(vcpu, &vcpu->arch.slb_shadow, addr, len); in kvmppc_set_one_reg_hv()
2644 addr = val->vpaval.addr; in kvmppc_set_one_reg_hv()
2645 len = val->vpaval.length; in kvmppc_set_one_reg_hv()
2646 r = -EINVAL; in kvmppc_set_one_reg_hv()
2648 !vcpu->arch.vpa.next_gpa)) in kvmppc_set_one_reg_hv()
2650 len -= len % sizeof(struct dtl_entry); in kvmppc_set_one_reg_hv()
2651 r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len); in kvmppc_set_one_reg_hv()
2684 vcpu->arch.tfhar = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2687 vcpu->arch.tfiar = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2690 vcpu->arch.texasr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2693 i = id - KVM_REG_PPC_TM_GPR0; in kvmppc_set_one_reg_hv()
2694 vcpu->arch.gpr_tm[i] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2699 i = id - KVM_REG_PPC_TM_VSR0; in kvmppc_set_one_reg_hv()
2702 vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j]; in kvmppc_set_one_reg_hv()
2705 vcpu->arch.vr_tm.vr[i-32] = val->vval; in kvmppc_set_one_reg_hv()
2707 r = -ENXIO; in kvmppc_set_one_reg_hv()
2711 vcpu->arch.cr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2714 vcpu->arch.xer_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2717 vcpu->arch.lr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2720 vcpu->arch.ctr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2723 vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2726 vcpu->arch.amr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2729 vcpu->arch.ppr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2732 vcpu->arch.vrsave_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2736 vcpu->arch.vr.vscr.u[3] = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2738 r = - ENXIO; in kvmppc_set_one_reg_hv()
2741 vcpu->arch.dscr_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2744 vcpu->arch.tar_tm = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2755 if (i && !vcpu->arch.online) in kvmppc_set_one_reg_hv()
2756 atomic_inc(&vcpu->arch.vcore->online_count); in kvmppc_set_one_reg_hv()
2757 else if (!i && vcpu->arch.online) in kvmppc_set_one_reg_hv()
2758 atomic_dec(&vcpu->arch.vcore->online_count); in kvmppc_set_one_reg_hv()
2759 vcpu->arch.online = i; in kvmppc_set_one_reg_hv()
2762 vcpu->kvm->arch.l1_ptcr = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2768 r = -EINVAL; in kvmppc_set_one_reg_hv()
2798 spin_lock_init(&vcore->lock); in kvmppc_vcore_create()
2799 spin_lock_init(&vcore->stoltb_lock); in kvmppc_vcore_create()
2800 rcuwait_init(&vcore->wait); in kvmppc_vcore_create()
2801 vcore->preempt_tb = TB_NIL; in kvmppc_vcore_create()
2802 vcore->lpcr = kvm->arch.lpcr; in kvmppc_vcore_create()
2803 vcore->first_vcpuid = id; in kvmppc_vcore_create()
2804 vcore->kvm = kvm; in kvmppc_vcore_create()
2805 INIT_LIST_HEAD(&vcore->preempt_list); in kvmppc_vcore_create()
2842 struct kvm_vcpu *vcpu = inode->i_private; in debugfs_timings_open()
2847 return -ENOMEM; in debugfs_timings_open()
2849 kvm_get_kvm(vcpu->kvm); in debugfs_timings_open()
2850 p->vcpu = vcpu; in debugfs_timings_open()
2851 file->private_data = p; in debugfs_timings_open()
2858 struct debugfs_timings_state *p = file->private_data; in debugfs_timings_release()
2860 kvm_put_kvm(p->vcpu->kvm); in debugfs_timings_release()
2868 struct debugfs_timings_state *p = file->private_data; in debugfs_timings_read()
2869 struct kvm_vcpu *vcpu = p->vcpu; in debugfs_timings_read()
2878 if (!p->buflen) { in debugfs_timings_read()
2879 s = p->buf; in debugfs_timings_read()
2880 buf_end = s + sizeof(p->buf); in debugfs_timings_read()
2888 count = acc->seqcount; in debugfs_timings_read()
2893 if (count == acc->seqcount) { in debugfs_timings_read()
2901 snprintf(s, buf_end - s, "%s: stuck\n", in debugfs_timings_read()
2904 snprintf(s, buf_end - s, in debugfs_timings_read()
2912 p->buflen = s - p->buf; in debugfs_timings_read()
2916 if (pos >= p->buflen) in debugfs_timings_read()
2918 if (len > p->buflen - pos) in debugfs_timings_read()
2919 len = p->buflen - pos; in debugfs_timings_read()
2920 n = copy_to_user(buf, p->buf + pos, len); in debugfs_timings_read()
2923 return -EFAULT; in debugfs_timings_read()
2924 len -= n; in debugfs_timings_read()
2933 return -EACCES; in debugfs_timings_write()
2969 kvm = vcpu->kvm; in kvmppc_core_vcpu_create_hv()
2970 id = vcpu->vcpu_id; in kvmppc_core_vcpu_create_hv()
2972 vcpu->arch.shared = &vcpu->arch.shregs; in kvmppc_core_vcpu_create_hv()
2979 vcpu->arch.shared_big_endian = true; in kvmppc_core_vcpu_create_hv()
2981 vcpu->arch.shared_big_endian = false; in kvmppc_core_vcpu_create_hv()
2986 err = kvmhv_nestedv2_vcpu_create(vcpu, &vcpu->arch.nestedv2_io); in kvmppc_core_vcpu_create_hv()
3000 spin_lock_init(&vcpu->arch.vpa_update_lock); in kvmppc_core_vcpu_create_hv()
3001 spin_lock_init(&vcpu->arch.tbacct_lock); in kvmppc_core_vcpu_create_hv()
3002 vcpu->arch.busy_preempt = TB_NIL; in kvmppc_core_vcpu_create_hv()
3004 vcpu->arch.intr_msr = MSR_SF | MSR_ME; in kvmppc_core_vcpu_create_hv()
3029 vcpu->arch.hfscr |= HFSCR_TM; in kvmppc_core_vcpu_create_hv()
3031 vcpu->arch.hfscr_permitted = kvmppc_get_hfscr_hv(vcpu); in kvmppc_core_vcpu_create_hv()
3034 * PM, EBB, TM are demand-faulted so start with it clear. in kvmppc_core_vcpu_create_hv()
3040 vcpu->arch.state = KVMPPC_VCPU_NOTREADY; in kvmppc_core_vcpu_create_hv()
3042 init_waitqueue_head(&vcpu->arch.cpu_run); in kvmppc_core_vcpu_create_hv()
3044 mutex_lock(&kvm->lock); in kvmppc_core_vcpu_create_hv()
3046 err = -EINVAL; in kvmppc_core_vcpu_create_hv()
3048 if (id >= (KVM_MAX_VCPUS * kvm->arch.emul_smt_mode)) { in kvmppc_core_vcpu_create_hv()
3052 BUG_ON(kvm->arch.smt_mode != 1); in kvmppc_core_vcpu_create_hv()
3056 core = id / kvm->arch.smt_mode; in kvmppc_core_vcpu_create_hv()
3059 vcore = kvm->arch.vcores[core]; in kvmppc_core_vcpu_create_hv()
3068 err = -ENOMEM; in kvmppc_core_vcpu_create_hv()
3070 id & ~(kvm->arch.smt_mode - 1)); in kvmppc_core_vcpu_create_hv()
3071 mutex_lock(&kvm->arch.mmu_setup_lock); in kvmppc_core_vcpu_create_hv()
3072 kvm->arch.vcores[core] = vcore; in kvmppc_core_vcpu_create_hv()
3073 kvm->arch.online_vcores++; in kvmppc_core_vcpu_create_hv()
3074 mutex_unlock(&kvm->arch.mmu_setup_lock); in kvmppc_core_vcpu_create_hv()
3077 mutex_unlock(&kvm->lock); in kvmppc_core_vcpu_create_hv()
3082 spin_lock(&vcore->lock); in kvmppc_core_vcpu_create_hv()
3083 ++vcore->num_threads; in kvmppc_core_vcpu_create_hv()
3084 spin_unlock(&vcore->lock); in kvmppc_core_vcpu_create_hv()
3085 vcpu->arch.vcore = vcore; in kvmppc_core_vcpu_create_hv()
3086 vcpu->arch.ptid = vcpu->vcpu_id - vcore->first_vcpuid; in kvmppc_core_vcpu_create_hv()
3087 vcpu->arch.thread_cpu = -1; in kvmppc_core_vcpu_create_hv()
3088 vcpu->arch.prev_cpu = -1; in kvmppc_core_vcpu_create_hv()
3090 vcpu->arch.cpu_type = KVM_CPU_3S_64; in kvmppc_core_vcpu_create_hv()
3103 return -EINVAL; in kvmhv_set_smt_mode()
3105 return -EINVAL; in kvmhv_set_smt_mode()
3112 return -EINVAL; in kvmhv_set_smt_mode()
3121 mutex_lock(&kvm->lock); in kvmhv_set_smt_mode()
3122 err = -EBUSY; in kvmhv_set_smt_mode()
3123 if (!kvm->arch.online_vcores) { in kvmhv_set_smt_mode()
3124 kvm->arch.smt_mode = smt_mode; in kvmhv_set_smt_mode()
3125 kvm->arch.emul_smt_mode = esmt; in kvmhv_set_smt_mode()
3128 mutex_unlock(&kvm->lock); in kvmhv_set_smt_mode()
3135 if (vpa->pinned_addr) in unpin_vpa()
3136 kvmppc_unpin_guest_page(kvm, vpa->pinned_addr, vpa->gpa, in unpin_vpa()
3137 vpa->dirty); in unpin_vpa()
3142 spin_lock(&vcpu->arch.vpa_update_lock); in kvmppc_core_vcpu_free_hv()
3143 unpin_vpa(vcpu->kvm, &vcpu->arch.dtl); in kvmppc_core_vcpu_free_hv()
3144 unpin_vpa(vcpu->kvm, &vcpu->arch.slb_shadow); in kvmppc_core_vcpu_free_hv()
3145 unpin_vpa(vcpu->kvm, &vcpu->arch.vpa); in kvmppc_core_vcpu_free_hv()
3146 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmppc_core_vcpu_free_hv()
3148 kvmhv_nestedv2_vcpu_free(vcpu, &vcpu->arch.nestedv2_io); in kvmppc_core_vcpu_free_hv()
3168 dec_nsec = tb_to_ns(kvmppc_dec_expires_host_tb(vcpu) - now); in kvmppc_set_timer()
3169 hrtimer_start(&vcpu->arch.dec_timer, dec_nsec, HRTIMER_MODE_REL); in kvmppc_set_timer()
3170 vcpu->arch.timer_running = 1; in kvmppc_set_timer()
3180 if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) in kvmppc_remove_runnable()
3182 spin_lock_irq(&vcpu->arch.tbacct_lock); in kvmppc_remove_runnable()
3184 vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) - in kvmppc_remove_runnable()
3185 vcpu->arch.stolen_logged; in kvmppc_remove_runnable()
3186 vcpu->arch.busy_preempt = now; in kvmppc_remove_runnable()
3187 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; in kvmppc_remove_runnable()
3188 spin_unlock_irq(&vcpu->arch.tbacct_lock); in kvmppc_remove_runnable()
3189 --vc->n_runnable; in kvmppc_remove_runnable()
3190 WRITE_ONCE(vc->runnable_threads[vcpu->arch.ptid], NULL); in kvmppc_remove_runnable()
3201 tpaca->kvm_hstate.kvm_vcpu = NULL; in kvmppc_grab_hwthread()
3202 tpaca->kvm_hstate.kvm_vcore = NULL; in kvmppc_grab_hwthread()
3203 tpaca->kvm_hstate.napping = 0; in kvmppc_grab_hwthread()
3205 tpaca->kvm_hstate.hwthread_req = 1; in kvmppc_grab_hwthread()
3217 while (tpaca->kvm_hstate.hwthread_state == KVM_HWTHREAD_IN_KERNEL) { in kvmppc_grab_hwthread()
3218 if (--timeout <= 0) { in kvmppc_grab_hwthread()
3220 return -EBUSY; in kvmppc_grab_hwthread()
3232 tpaca->kvm_hstate.hwthread_req = 0; in kvmppc_release_hwthread()
3233 tpaca->kvm_hstate.kvm_vcpu = NULL; in kvmppc_release_hwthread()
3234 tpaca->kvm_hstate.kvm_vcore = NULL; in kvmppc_release_hwthread()
3235 tpaca->kvm_hstate.kvm_split_mode = NULL; in kvmppc_release_hwthread()
3242 struct kvm_nested_guest *nested = vcpu->arch.nested; in radix_flush_cpu()
3247 need_tlb_flush = &nested->need_tlb_flush; in radix_flush_cpu()
3249 need_tlb_flush = &kvm->arch.need_tlb_flush; in radix_flush_cpu()
3277 struct kvm *kvm = vcpu->kvm; in do_migrate_away_vcpu()
3287 if (kvm->arch.lpcr & LPCR_GTSE) in do_migrate_away_vcpu()
3295 struct kvm_nested_guest *nested = vcpu->arch.nested; in kvmppc_prepare_radix_vcpu()
3296 struct kvm *kvm = vcpu->kvm; in kvmppc_prepare_radix_vcpu()
3303 prev_cpu = nested->prev_cpu[vcpu->arch.nested_vcpu_id]; in kvmppc_prepare_radix_vcpu()
3305 prev_cpu = vcpu->arch.prev_cpu; in kvmppc_prepare_radix_vcpu()
3328 nested->prev_cpu[vcpu->arch.nested_vcpu_id] = pcpu; in kvmppc_prepare_radix_vcpu()
3330 vcpu->arch.prev_cpu = pcpu; in kvmppc_prepare_radix_vcpu()
3339 cpu = vc->pcpu; in kvmppc_start_thread()
3341 if (vcpu->arch.timer_running) { in kvmppc_start_thread()
3342 hrtimer_try_to_cancel(&vcpu->arch.dec_timer); in kvmppc_start_thread()
3343 vcpu->arch.timer_running = 0; in kvmppc_start_thread()
3345 cpu += vcpu->arch.ptid; in kvmppc_start_thread()
3346 vcpu->cpu = vc->pcpu; in kvmppc_start_thread()
3347 vcpu->arch.thread_cpu = cpu; in kvmppc_start_thread()
3350 tpaca->kvm_hstate.kvm_vcpu = vcpu; in kvmppc_start_thread()
3351 tpaca->kvm_hstate.ptid = cpu - vc->pcpu; in kvmppc_start_thread()
3352 tpaca->kvm_hstate.fake_suspend = 0; in kvmppc_start_thread()
3355 tpaca->kvm_hstate.kvm_vcore = vc; in kvmppc_start_thread()
3372 * for any threads that still have a non-NULL vcore ptr. in kvmppc_wait_for_nap()
3375 if (paca_ptrs[cpu + i]->kvm_hstate.kvm_vcore) in kvmppc_wait_for_nap()
3385 if (paca_ptrs[cpu + i]->kvm_hstate.kvm_vcore) in kvmppc_wait_for_nap()
3391 * this core are off-line. Then grab the threads so they can't
3414 } while (--thr > 0); in on_primary_thread()
3439 spin_lock_init(&lp->lock); in init_vcore_lists()
3440 INIT_LIST_HEAD(&lp->list); in init_vcore_lists()
3450 vc->vcore_state = VCORE_PREEMPT; in kvmppc_vcore_preempt()
3451 vc->pcpu = smp_processor_id(); in kvmppc_vcore_preempt()
3452 if (vc->num_threads < threads_per_vcore(vc->kvm)) { in kvmppc_vcore_preempt()
3453 spin_lock(&lp->lock); in kvmppc_vcore_preempt()
3454 list_add_tail(&vc->preempt_list, &lp->list); in kvmppc_vcore_preempt()
3455 spin_unlock(&lp->lock); in kvmppc_vcore_preempt()
3469 if (!list_empty(&vc->preempt_list)) { in kvmppc_vcore_end_preempt()
3470 lp = &per_cpu(preempted_vcores, vc->pcpu); in kvmppc_vcore_end_preempt()
3471 spin_lock(&lp->lock); in kvmppc_vcore_end_preempt()
3472 list_del_init(&vc->preempt_list); in kvmppc_vcore_end_preempt()
3473 spin_unlock(&lp->lock); in kvmppc_vcore_end_preempt()
3475 vc->vcore_state = VCORE_INACTIVE; in kvmppc_vcore_end_preempt()
3491 * This mapping means subcores 0 and 1 can use threads 0-3 and 4-7
3492 * respectively in 2-way micro-threading (split-core) mode on POWER8.
3499 cip->n_subcores = 1; in init_core_info()
3500 cip->max_subcore_threads = vc->num_threads; in init_core_info()
3501 cip->total_threads = vc->num_threads; in init_core_info()
3502 cip->subcore_threads[0] = vc->num_threads; in init_core_info()
3503 cip->vc[0] = vc; in init_core_info()
3509 * POWER9 "SMT4" cores are permanently in what is effectively a 4-way in subcore_config_ok()
3510 * split-core mode, with one thread per subcore. in subcore_config_ok()
3532 vc->entry_exit_map = 0; in init_vcore_to_run()
3533 vc->in_guest = 0; in init_vcore_to_run()
3534 vc->napping_threads = 0; in init_vcore_to_run()
3535 vc->conferring_threads = 0; in init_vcore_to_run()
3536 vc->tb_offset_applied = 0; in init_vcore_to_run()
3541 int n_threads = vc->num_threads; in can_dynamic_split()
3548 if (one_vm_per_core && vc->kvm != cip->vc[0]->kvm) in can_dynamic_split()
3551 if (n_threads < cip->max_subcore_threads) in can_dynamic_split()
3552 n_threads = cip->max_subcore_threads; in can_dynamic_split()
3553 if (!subcore_config_ok(cip->n_subcores + 1, n_threads)) in can_dynamic_split()
3555 cip->max_subcore_threads = n_threads; in can_dynamic_split()
3557 sub = cip->n_subcores; in can_dynamic_split()
3558 ++cip->n_subcores; in can_dynamic_split()
3559 cip->total_threads += vc->num_threads; in can_dynamic_split()
3560 cip->subcore_threads[sub] = vc->num_threads; in can_dynamic_split()
3561 cip->vc[sub] = vc; in can_dynamic_split()
3563 list_del_init(&vc->preempt_list); in can_dynamic_split()
3575 if (cip->total_threads + pvc->num_threads > target_threads) in can_piggyback()
3587 if (signal_pending(vcpu->arch.run_task)) in prepare_threads()
3588 vcpu->arch.ret = -EINTR; in prepare_threads()
3589 else if (vcpu->arch.vpa.update_pending || in prepare_threads()
3590 vcpu->arch.slb_shadow.update_pending || in prepare_threads()
3591 vcpu->arch.dtl.update_pending) in prepare_threads()
3592 vcpu->arch.ret = RESUME_GUEST; in prepare_threads()
3596 wake_up(&vcpu->arch.cpu_run); in prepare_threads()
3605 spin_lock(&lp->lock); in collect_piggybacks()
3606 list_for_each_entry_safe(pvc, vcnext, &lp->list, preempt_list) { in collect_piggybacks()
3607 if (!spin_trylock(&pvc->lock)) in collect_piggybacks()
3610 if (!pvc->n_runnable || !pvc->kvm->arch.mmu_ready) { in collect_piggybacks()
3611 list_del_init(&pvc->preempt_list); in collect_piggybacks()
3612 if (pvc->runner == NULL) { in collect_piggybacks()
3613 pvc->vcore_state = VCORE_INACTIVE; in collect_piggybacks()
3616 spin_unlock(&pvc->lock); in collect_piggybacks()
3620 spin_unlock(&pvc->lock); in collect_piggybacks()
3624 pvc->vcore_state = VCORE_PIGGYBACK; in collect_piggybacks()
3625 if (cip->total_threads >= target_threads) in collect_piggybacks()
3628 spin_unlock(&lp->lock); in collect_piggybacks()
3637 for (sub = 0; sub < cip->n_subcores; ++sub) { in recheck_signals_and_mmu()
3638 vc = cip->vc[sub]; in recheck_signals_and_mmu()
3639 if (!vc->kvm->arch.mmu_ready) in recheck_signals_and_mmu()
3642 if (signal_pending(vcpu->arch.run_task)) in recheck_signals_and_mmu()
3655 spin_lock(&vc->lock); in post_guest_process()
3665 spin_unlock(&vc->lock); in post_guest_process()
3674 if (vcpu->arch.trap) in post_guest_process()
3676 vcpu->arch.run_task); in post_guest_process()
3678 vcpu->arch.ret = ret; in post_guest_process()
3679 vcpu->arch.trap = 0; in post_guest_process()
3681 spin_lock(&vc->lock); in post_guest_process()
3682 if (is_kvmppc_resume_guest(vcpu->arch.ret)) { in post_guest_process()
3683 if (vcpu->arch.pending_exceptions) in post_guest_process()
3685 if (vcpu->arch.ceded) in post_guest_process()
3691 wake_up(&vcpu->arch.cpu_run); in post_guest_process()
3697 } else if (vc->runner) { in post_guest_process()
3698 vc->vcore_state = VCORE_PREEMPT; in post_guest_process()
3701 vc->vcore_state = VCORE_INACTIVE; in post_guest_process()
3703 if (vc->n_runnable > 0 && vc->runner == NULL) { in post_guest_process()
3705 i = -1; in post_guest_process()
3707 wake_up(&vcpu->arch.cpu_run); in post_guest_process()
3710 spin_unlock(&vc->lock); in post_guest_process()
3730 kvmppc_host_rm_ops_hv->rm_core[core].rm_state.in_host = 0; in kvmppc_clear_host_core()
3751 kvmppc_host_rm_ops_hv->rm_core[core].rm_state.in_host = 1; in kvmppc_set_host_core()
3759 local_paca->irq_happened |= PACA_IRQ_EE; in set_irq_happened()
3762 local_paca->irq_happened |= PACA_IRQ_DBELL; in set_irq_happened()
3765 local_paca->irq_happened |= PACA_IRQ_HMI; in set_irq_happened()
3775 * Called with vc->lock held.
3804 /* if the runner is no longer runnable, let the caller pick a new one */ in kvmppc_run_core()
3805 if (vc->runner->arch.state != KVMPPC_VCPU_RUNNABLE) in kvmppc_run_core()
3812 vc->preempt_tb = TB_NIL; in kvmppc_run_core()
3819 controlled_threads = threads_per_vcore(vc->kvm); in kvmppc_run_core()
3827 ((vc->num_threads > threads_per_subcore) || !on_primary_thread())) { in kvmppc_run_core()
3829 vcpu->arch.ret = -EBUSY; in kvmppc_run_core()
3831 wake_up(&vcpu->arch.cpu_run); in kvmppc_run_core()
3845 if (vc->num_threads < target_threads) in kvmppc_run_core()
3849 * Hard-disable interrupts, and check resched flag and signals. in kvmppc_run_core()
3860 vc->vcore_state = VCORE_INACTIVE; in kvmppc_run_core()
3866 spin_unlock(&pvc->lock); in kvmppc_run_core()
3875 /* Decide on micro-threading (split-core) mode */ in kvmppc_run_core()
3913 paca->kvm_hstate.napping = 0; in kvmppc_run_core()
3914 paca->kvm_hstate.kvm_split_mode = sip; in kvmppc_run_core()
3917 /* Initiate micro-threading (split-core) on POWER8 if required */ in kvmppc_run_core()
3940 int n_online = atomic_read(&vc->online_count); in kvmppc_run_core()
3943 * Use the 8-thread value if we're doing split-core in kvmppc_run_core()
3959 pvc->pcpu = pcpu + thr; in kvmppc_run_core()
3963 * It updates vcpu->cpu and vcpu->arch.thread_cpu in kvmppc_run_core()
3972 if (!vcpu->arch.ptid) in kvmppc_run_core()
3974 active |= 1 << (thr + vcpu->arch.ptid); in kvmppc_run_core()
3991 * When doing micro-threading, poke the inactive threads as well. in kvmppc_run_core()
4002 vc->vcore_state = VCORE_RUNNING; in kvmppc_run_core()
4008 spin_unlock(&core_info.vc[sub]->lock); in kvmppc_run_core()
4012 srcu_idx = srcu_read_lock(&vc->kvm->srcu); in kvmppc_run_core()
4022 srcu_read_unlock(&vc->kvm->srcu, srcu_idx); in kvmppc_run_core()
4026 spin_lock(&vc->lock); in kvmppc_run_core()
4028 vc->vcore_state = VCORE_EXITING; in kvmppc_run_core()
4033 /* Return to whole-core mode if we split the core earlier */ in kvmppc_run_core()
4072 if (sip && sip->napped[i]) in kvmppc_run_core()
4076 spin_unlock(&vc->lock); in kvmppc_run_core()
4088 spin_lock(&vc->lock); in kvmppc_run_core()
4091 vc->vcore_state = VCORE_INACTIVE; in kvmppc_run_core()
4103 struct lppaca *lp = vcpu->arch.vpa.pinned_addr; in vcpu_vpa_increment_dispatch()
4105 u32 yield_count = be32_to_cpu(lp->yield_count) + 1; in vcpu_vpa_increment_dispatch()
4106 lp->yield_count = cpu_to_be32(yield_count); in vcpu_vpa_increment_dispatch()
4107 vcpu->arch.vpa.dirty = 1; in vcpu_vpa_increment_dispatch()
4120 get_lppaca()->l2_counters_enable; in kvmhv_get_l2_counters_status()
4161 l1_to_l2_ns = tb_to_ns(be64_to_cpu(lp->l1_to_l2_cs_tb)); in do_trace_nested_cs_time()
4162 l2_to_l1_ns = tb_to_ns(be64_to_cpu(lp->l2_to_l1_cs_tb)); in do_trace_nested_cs_time()
4163 l2_runtime_ns = tb_to_ns(be64_to_cpu(lp->l2_runtime_tb)); in do_trace_nested_cs_time()
4164 trace_kvmppc_vcpu_stats(vcpu, l1_to_l2_ns - *l1_to_l2_cs_ptr, in do_trace_nested_cs_time()
4165 l2_to_l1_ns - *l2_to_l1_cs_ptr, in do_trace_nested_cs_time()
4166 l2_runtime_ns - *l2_runtime_agg_ptr); in do_trace_nested_cs_time()
4170 vcpu->arch.l1_to_l2_cs = l1_to_l2_ns; in do_trace_nested_cs_time()
4171 vcpu->arch.l2_to_l1_cs = l2_to_l1_ns; in do_trace_nested_cs_time()
4172 vcpu->arch.l2_runtime_agg = l2_runtime_ns; in do_trace_nested_cs_time()
4177 return tb_to_ns(be64_to_cpu(get_lppaca()->l1_to_l2_cs_tb)); in kvmhv_get_l1_to_l2_cs_time()
4183 return tb_to_ns(be64_to_cpu(get_lppaca()->l2_to_l1_cs_tb)); in kvmhv_get_l2_to_l1_cs_time()
4189 return tb_to_ns(be64_to_cpu(get_lppaca()->l2_runtime_tb)); in kvmhv_get_l2_runtime_agg()
4198 vcpu = local_paca->kvm_hstate.kvm_vcpu; in kvmhv_get_l1_to_l2_cs_time_vcpu()
4200 arch = &vcpu->arch; in kvmhv_get_l1_to_l2_cs_time_vcpu()
4201 return arch->l1_to_l2_cs; in kvmhv_get_l1_to_l2_cs_time_vcpu()
4213 vcpu = local_paca->kvm_hstate.kvm_vcpu; in kvmhv_get_l2_to_l1_cs_time_vcpu()
4215 arch = &vcpu->arch; in kvmhv_get_l2_to_l1_cs_time_vcpu()
4216 return arch->l2_to_l1_cs; in kvmhv_get_l2_to_l1_cs_time_vcpu()
4228 vcpu = local_paca->kvm_hstate.kvm_vcpu; in kvmhv_get_l2_runtime_agg_vcpu()
4230 arch = &vcpu->arch; in kvmhv_get_l2_runtime_agg_vcpu()
4231 return arch->l2_runtime_agg; in kvmhv_get_l2_runtime_agg_vcpu()
4257 if (vcpu->arch.doorbell_request) { in kvmhv_vcpu_entry_nestedv2()
4258 vcpu->arch.doorbell_request = 0; in kvmhv_vcpu_entry_nestedv2()
4262 io = &vcpu->arch.nestedv2_io; in kvmhv_vcpu_entry_nestedv2()
4271 return -EINVAL; in kvmhv_vcpu_entry_nestedv2()
4273 kvmppc_gse_put_u64(io->vcpu_run_input, KVMPPC_GSID_LPCR, lpcr); in kvmhv_vcpu_entry_nestedv2()
4275 accumulate_time(vcpu, &vcpu->arch.in_guest); in kvmhv_vcpu_entry_nestedv2()
4276 rc = plpar_guest_run_vcpu(0, vcpu->kvm->arch.lpid, vcpu->vcpu_id, in kvmhv_vcpu_entry_nestedv2()
4287 return -EINVAL; in kvmhv_vcpu_entry_nestedv2()
4289 accumulate_time(vcpu, &vcpu->arch.guest_exit); in kvmhv_vcpu_entry_nestedv2()
4292 kvmppc_gsm_reset(io->vcpu_message); in kvmhv_vcpu_entry_nestedv2()
4293 kvmppc_gsm_reset(io->vcore_message); in kvmhv_vcpu_entry_nestedv2()
4294 kvmppc_gsbm_zero(&io->valids); in kvmhv_vcpu_entry_nestedv2()
4298 return -EINVAL; in kvmhv_vcpu_entry_nestedv2()
4338 if (vcpu->arch.psscr != host_psscr) in kvmhv_vcpu_entry_p9_nested()
4339 mtspr(SPRN_PSSCR_PR, vcpu->arch.psscr); in kvmhv_vcpu_entry_p9_nested()
4344 vcpu->arch.regs.msr = vcpu->arch.shregs.msr; in kvmhv_vcpu_entry_p9_nested()
4346 if (vcpu->arch.nested) { in kvmhv_vcpu_entry_p9_nested()
4347 hvregs.lpid = vcpu->arch.nested->shadow_lpid; in kvmhv_vcpu_entry_p9_nested()
4348 hvregs.vcpu_token = vcpu->arch.nested_vcpu_id; in kvmhv_vcpu_entry_p9_nested()
4350 hvregs.lpid = vcpu->kvm->arch.lpid; in kvmhv_vcpu_entry_p9_nested()
4351 hvregs.vcpu_token = vcpu->vcpu_id; in kvmhv_vcpu_entry_p9_nested()
4361 if (vcpu->arch.doorbell_request) in kvmhv_vcpu_entry_p9_nested()
4362 vcpu->arch.doorbell_request = 0; in kvmhv_vcpu_entry_p9_nested()
4378 mtspr(SPRN_DEC, kvmppc_dec_expires_host_tb(vcpu) - *tb); in kvmhv_vcpu_entry_p9_nested()
4380 mtspr(SPRN_DAR, vcpu->arch.shregs.dar); in kvmhv_vcpu_entry_p9_nested()
4381 mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr); in kvmhv_vcpu_entry_p9_nested()
4383 accumulate_time(vcpu, &vcpu->arch.in_guest); in kvmhv_vcpu_entry_p9_nested()
4385 __pa(&vcpu->arch.regs)); in kvmhv_vcpu_entry_p9_nested()
4386 accumulate_time(vcpu, &vcpu->arch.guest_exit); in kvmhv_vcpu_entry_p9_nested()
4389 vcpu->arch.shregs.msr = vcpu->arch.regs.msr; in kvmhv_vcpu_entry_p9_nested()
4390 vcpu->arch.shregs.dar = mfspr(SPRN_DAR); in kvmhv_vcpu_entry_p9_nested()
4391 vcpu->arch.shregs.dsisr = mfspr(SPRN_DSISR); in kvmhv_vcpu_entry_p9_nested()
4392 vcpu->arch.psscr = mfspr(SPRN_PSSCR_PR); in kvmhv_vcpu_entry_p9_nested()
4400 vcpu->arch.dec_expires = dec + (*tb + kvmppc_get_tb_offset(vcpu)); in kvmhv_vcpu_entry_p9_nested()
4405 if (vcpu->arch.psscr != host_psscr) in kvmhv_vcpu_entry_p9_nested()
4417 struct kvm *kvm = vcpu->kvm; in kvmhv_p9_guest_entry()
4418 struct kvm_nested_guest *nested = vcpu->arch.nested; in kvmhv_p9_guest_entry()
4430 vcpu->arch.ceded = 0; in kvmhv_p9_guest_entry()
4477 vcpu->arch.ceded = 0; in kvmhv_p9_guest_entry()
4509 vcpu->arch.slb_max = 0; in kvmhv_p9_guest_entry()
4526 prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state); in kvmppc_wait_for_exec()
4527 if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) { in kvmppc_wait_for_exec()
4528 spin_unlock(&vc->lock); in kvmppc_wait_for_exec()
4530 spin_lock(&vc->lock); in kvmppc_wait_for_exec()
4532 finish_wait(&vcpu->arch.cpu_run, &wait); in kvmppc_wait_for_exec()
4540 vc->halt_poll_ns *= halt_poll_ns_grow; in grow_halt_poll_ns()
4541 if (vc->halt_poll_ns < halt_poll_ns_grow_start) in grow_halt_poll_ns()
4542 vc->halt_poll_ns = halt_poll_ns_grow_start; in grow_halt_poll_ns()
4548 vc->halt_poll_ns = 0; in shrink_halt_poll_ns()
4550 vc->halt_poll_ns /= halt_poll_ns_shrink; in shrink_halt_poll_ns()
4558 return vcpu->arch.irq_pending || vcpu->arch.xive_saved_state.pipr < in xive_interrupt_pending()
4559 vcpu->arch.xive_saved_state.cppr; in xive_interrupt_pending()
4570 if (vcpu->arch.pending_exceptions || vcpu->arch.prodded || in kvmppc_vcpu_woken()
4579 if (!vcpu->arch.ceded || kvmppc_vcpu_woken(vcpu)) in kvmppc_vcpu_check_block()
4586 * exceptions or are no longer ceded
4603 * or external interrupt to one of the vcpus. vc->lock is held.
4615 if (vc->halt_poll_ns) { in kvmppc_vcore_blocked()
4616 ktime_t stop = ktime_add_ns(start_poll, vc->halt_poll_ns); in kvmppc_vcore_blocked()
4617 ++vc->runner->stat.generic.halt_attempted_poll; in kvmppc_vcore_blocked()
4619 vc->vcore_state = VCORE_POLLING; in kvmppc_vcore_blocked()
4620 spin_unlock(&vc->lock); in kvmppc_vcore_blocked()
4630 spin_lock(&vc->lock); in kvmppc_vcore_blocked()
4631 vc->vcore_state = VCORE_INACTIVE; in kvmppc_vcore_blocked()
4634 ++vc->runner->stat.generic.halt_successful_poll; in kvmppc_vcore_blocked()
4639 prepare_to_rcuwait(&vc->wait); in kvmppc_vcore_blocked()
4642 finish_rcuwait(&vc->wait); in kvmppc_vcore_blocked()
4645 if (vc->halt_poll_ns) in kvmppc_vcore_blocked()
4646 ++vc->runner->stat.generic.halt_successful_poll; in kvmppc_vcore_blocked()
4652 vc->vcore_state = VCORE_SLEEPING; in kvmppc_vcore_blocked()
4653 trace_kvmppc_vcore_blocked(vc->runner, 0); in kvmppc_vcore_blocked()
4654 spin_unlock(&vc->lock); in kvmppc_vcore_blocked()
4656 finish_rcuwait(&vc->wait); in kvmppc_vcore_blocked()
4657 spin_lock(&vc->lock); in kvmppc_vcore_blocked()
4658 vc->vcore_state = VCORE_INACTIVE; in kvmppc_vcore_blocked()
4659 trace_kvmppc_vcore_blocked(vc->runner, 1); in kvmppc_vcore_blocked()
4660 ++vc->runner->stat.halt_successful_wait; in kvmppc_vcore_blocked()
4665 block_ns = ktime_to_ns(cur) - ktime_to_ns(start_poll); in kvmppc_vcore_blocked()
4669 vc->runner->stat.generic.halt_wait_ns += in kvmppc_vcore_blocked()
4670 ktime_to_ns(cur) - ktime_to_ns(start_wait); in kvmppc_vcore_blocked()
4672 vc->runner->stat.generic.halt_wait_hist, in kvmppc_vcore_blocked()
4673 ktime_to_ns(cur) - ktime_to_ns(start_wait)); in kvmppc_vcore_blocked()
4675 if (vc->halt_poll_ns) { in kvmppc_vcore_blocked()
4676 vc->runner->stat.generic.halt_poll_fail_ns += in kvmppc_vcore_blocked()
4677 ktime_to_ns(start_wait) - in kvmppc_vcore_blocked()
4680 vc->runner->stat.generic.halt_poll_fail_hist, in kvmppc_vcore_blocked()
4681 ktime_to_ns(start_wait) - in kvmppc_vcore_blocked()
4686 if (vc->halt_poll_ns) { in kvmppc_vcore_blocked()
4687 vc->runner->stat.generic.halt_poll_success_ns += in kvmppc_vcore_blocked()
4688 ktime_to_ns(cur) - in kvmppc_vcore_blocked()
4691 vc->runner->stat.generic.halt_poll_success_hist, in kvmppc_vcore_blocked()
4692 ktime_to_ns(cur) - ktime_to_ns(start_poll)); in kvmppc_vcore_blocked()
4698 if (block_ns <= vc->halt_poll_ns) in kvmppc_vcore_blocked()
4701 else if (vc->halt_poll_ns && block_ns > halt_poll_ns) in kvmppc_vcore_blocked()
4704 else if (vc->halt_poll_ns < halt_poll_ns && in kvmppc_vcore_blocked()
4707 if (vc->halt_poll_ns > halt_poll_ns) in kvmppc_vcore_blocked()
4708 vc->halt_poll_ns = halt_poll_ns; in kvmppc_vcore_blocked()
4710 vc->halt_poll_ns = 0; in kvmppc_vcore_blocked()
4722 struct kvm *kvm = vcpu->kvm; in kvmhv_setup_mmu()
4724 mutex_lock(&kvm->arch.mmu_setup_lock); in kvmhv_setup_mmu()
4725 if (!kvm->arch.mmu_ready) { in kvmhv_setup_mmu()
4731 kvm->arch.mmu_ready = 1; in kvmhv_setup_mmu()
4734 mutex_unlock(&kvm->arch.mmu_setup_lock); in kvmhv_setup_mmu()
4740 struct kvm_run *run = vcpu->run; in kvmppc_run_vcpu()
4747 run->exit_reason = 0; in kvmppc_run_vcpu()
4748 vcpu->arch.ret = RESUME_GUEST; in kvmppc_run_vcpu()
4749 vcpu->arch.trap = 0; in kvmppc_run_vcpu()
4755 vc = vcpu->arch.vcore; in kvmppc_run_vcpu()
4756 spin_lock(&vc->lock); in kvmppc_run_vcpu()
4757 vcpu->arch.ceded = 0; in kvmppc_run_vcpu()
4758 vcpu->arch.run_task = current; in kvmppc_run_vcpu()
4759 vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb()); in kvmppc_run_vcpu()
4760 vcpu->arch.state = KVMPPC_VCPU_RUNNABLE; in kvmppc_run_vcpu()
4761 vcpu->arch.busy_preempt = TB_NIL; in kvmppc_run_vcpu()
4762 WRITE_ONCE(vc->runnable_threads[vcpu->arch.ptid], vcpu); in kvmppc_run_vcpu()
4763 ++vc->n_runnable; in kvmppc_run_vcpu()
4771 if ((vc->vcore_state == VCORE_PIGGYBACK || in kvmppc_run_vcpu()
4772 vc->vcore_state == VCORE_RUNNING) && in kvmppc_run_vcpu()
4777 } else if (vc->vcore_state == VCORE_SLEEPING) { in kvmppc_run_vcpu()
4778 rcuwait_wake_up(&vc->wait); in kvmppc_run_vcpu()
4783 while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && in kvmppc_run_vcpu()
4786 if (!vcpu->kvm->arch.mmu_ready) { in kvmppc_run_vcpu()
4787 spin_unlock(&vc->lock); in kvmppc_run_vcpu()
4789 spin_lock(&vc->lock); in kvmppc_run_vcpu()
4791 run->exit_reason = KVM_EXIT_FAIL_ENTRY; in kvmppc_run_vcpu()
4792 run->fail_entry. in kvmppc_run_vcpu()
4794 vcpu->arch.ret = r; in kvmppc_run_vcpu()
4799 if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL) in kvmppc_run_vcpu()
4802 if (vc->vcore_state != VCORE_INACTIVE) { in kvmppc_run_vcpu()
4808 if (signal_pending(v->arch.run_task)) { in kvmppc_run_vcpu()
4810 v->stat.signal_exits++; in kvmppc_run_vcpu()
4811 v->run->exit_reason = KVM_EXIT_INTR; in kvmppc_run_vcpu()
4812 v->arch.ret = -EINTR; in kvmppc_run_vcpu()
4813 wake_up(&v->arch.cpu_run); in kvmppc_run_vcpu()
4816 if (!vc->n_runnable || vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) in kvmppc_run_vcpu()
4821 n_ceded += v->arch.ceded; in kvmppc_run_vcpu()
4823 v->arch.ceded = 0; in kvmppc_run_vcpu()
4825 vc->runner = vcpu; in kvmppc_run_vcpu()
4826 if (n_ceded == vc->n_runnable) { in kvmppc_run_vcpu()
4831 cond_resched_lock(&vc->lock); in kvmppc_run_vcpu()
4832 if (vc->vcore_state == VCORE_PREEMPT) in kvmppc_run_vcpu()
4837 vc->runner = NULL; in kvmppc_run_vcpu()
4840 while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && in kvmppc_run_vcpu()
4841 (vc->vcore_state == VCORE_RUNNING || in kvmppc_run_vcpu()
4842 vc->vcore_state == VCORE_EXITING || in kvmppc_run_vcpu()
4843 vc->vcore_state == VCORE_PIGGYBACK)) in kvmppc_run_vcpu()
4846 if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL) in kvmppc_run_vcpu()
4849 if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) { in kvmppc_run_vcpu()
4851 vcpu->stat.signal_exits++; in kvmppc_run_vcpu()
4852 run->exit_reason = KVM_EXIT_INTR; in kvmppc_run_vcpu()
4853 vcpu->arch.ret = -EINTR; in kvmppc_run_vcpu()
4856 if (vc->n_runnable && vc->vcore_state == VCORE_INACTIVE) { in kvmppc_run_vcpu()
4858 i = -1; in kvmppc_run_vcpu()
4860 wake_up(&v->arch.cpu_run); in kvmppc_run_vcpu()
4864 spin_unlock(&vc->lock); in kvmppc_run_vcpu()
4865 return vcpu->arch.ret; in kvmppc_run_vcpu()
4872 struct kvm_run *run = vcpu->run; in kvmhv_run_single_vcpu()
4876 struct kvm *kvm = vcpu->kvm; in kvmhv_run_single_vcpu()
4877 struct kvm_nested_guest *nested = vcpu->arch.nested; in kvmhv_run_single_vcpu()
4883 run->exit_reason = 0; in kvmhv_run_single_vcpu()
4884 vcpu->arch.ret = RESUME_GUEST; in kvmhv_run_single_vcpu()
4885 vcpu->arch.trap = 0; in kvmhv_run_single_vcpu()
4887 vc = vcpu->arch.vcore; in kvmhv_run_single_vcpu()
4888 vcpu->arch.ceded = 0; in kvmhv_run_single_vcpu()
4889 vcpu->arch.run_task = current; in kvmhv_run_single_vcpu()
4890 vcpu->arch.last_inst = KVM_INST_FETCH_FAILED; in kvmhv_run_single_vcpu()
4893 if (unlikely(!kvm->arch.mmu_ready)) { in kvmhv_run_single_vcpu()
4896 run->exit_reason = KVM_EXIT_FAIL_ENTRY; in kvmhv_run_single_vcpu()
4897 run->fail_entry.hardware_entry_failure_reason = 0; in kvmhv_run_single_vcpu()
4898 vcpu->arch.ret = r; in kvmhv_run_single_vcpu()
4913 /* flags save not required, but irq_pmu has no disable/enable API */ in kvmhv_run_single_vcpu()
4916 vcpu->arch.state = KVMPPC_VCPU_RUNNABLE; in kvmhv_run_single_vcpu()
4920 if (need_resched() || !kvm->arch.mmu_ready) in kvmhv_run_single_vcpu()
4923 vcpu->cpu = pcpu; in kvmhv_run_single_vcpu()
4924 vcpu->arch.thread_cpu = pcpu; in kvmhv_run_single_vcpu()
4925 vc->pcpu = pcpu; in kvmhv_run_single_vcpu()
4926 local_paca->kvm_hstate.kvm_vcpu = vcpu; in kvmhv_run_single_vcpu()
4927 local_paca->kvm_hstate.ptid = 0; in kvmhv_run_single_vcpu()
4928 local_paca->kvm_hstate.fake_suspend = 0; in kvmhv_run_single_vcpu()
4941 &vcpu->arch.pending_exceptions) || in kvmhv_run_single_vcpu()
4955 * L1's copy of L2's LPCR (vcpu->arch.vcore->lpcr) can get its MER bit in kvmhv_run_single_vcpu()
4956 * unexpectedly set - for e.g. during NMI handling when all register in kvmhv_run_single_vcpu()
4966 } else if (vcpu->arch.pending_exceptions || in kvmhv_run_single_vcpu()
4968 vcpu->arch.ret = RESUME_HOST; in kvmhv_run_single_vcpu()
4972 if (vcpu->arch.timer_running) { in kvmhv_run_single_vcpu()
4973 hrtimer_try_to_cancel(&vcpu->arch.dec_timer); in kvmhv_run_single_vcpu()
4974 vcpu->arch.timer_running = 0; in kvmhv_run_single_vcpu()
4985 srcu_idx = srcu_read_lock(&kvm->srcu); in kvmhv_run_single_vcpu()
4991 vcpu->arch.trap = trap; in kvmhv_run_single_vcpu()
4996 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvmhv_run_single_vcpu()
5000 vcpu->cpu = -1; in kvmhv_run_single_vcpu()
5001 vcpu->arch.thread_cpu = -1; in kvmhv_run_single_vcpu()
5002 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; in kvmhv_run_single_vcpu()
5041 vcpu->arch.ret = r; in kvmhv_run_single_vcpu()
5050 vcpu->stat.signal_exits++; in kvmhv_run_single_vcpu()
5051 run->exit_reason = KVM_EXIT_INTR; in kvmhv_run_single_vcpu()
5052 vcpu->arch.ret = -EINTR; in kvmhv_run_single_vcpu()
5065 vcpu->arch.ceded = 0; in kvmhv_run_single_vcpu()
5070 return vcpu->arch.ret; in kvmhv_run_single_vcpu()
5073 vcpu->stat.signal_exits++; in kvmhv_run_single_vcpu()
5074 run->exit_reason = KVM_EXIT_INTR; in kvmhv_run_single_vcpu()
5075 vcpu->arch.ret = -EINTR; in kvmhv_run_single_vcpu()
5077 vcpu->cpu = -1; in kvmhv_run_single_vcpu()
5078 vcpu->arch.thread_cpu = -1; in kvmhv_run_single_vcpu()
5079 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; in kvmhv_run_single_vcpu()
5087 struct kvm_run *run = vcpu->run; in kvmppc_vcpu_run_hv()
5093 start_timing(vcpu, &vcpu->arch.vcpu_entry); in kvmppc_vcpu_run_hv()
5095 if (!vcpu->arch.sane) { in kvmppc_vcpu_run_hv()
5096 run->exit_reason = KVM_EXIT_INTERNAL_ERROR; in kvmppc_vcpu_run_hv()
5097 return -EINVAL; in kvmppc_vcpu_run_hv()
5100 /* No need to go into the guest when all we'll do is come back out */ in kvmppc_vcpu_run_hv()
5102 run->exit_reason = KVM_EXIT_INTR; in kvmppc_vcpu_run_hv()
5103 return -EINTR; in kvmppc_vcpu_run_hv()
5111 if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs && in kvmppc_vcpu_run_hv()
5112 (current->thread.regs->msr & MSR_TM)) { in kvmppc_vcpu_run_hv()
5113 if (MSR_TM_ACTIVE(current->thread.regs->msr)) { in kvmppc_vcpu_run_hv()
5114 run->exit_reason = KVM_EXIT_FAIL_ENTRY; in kvmppc_vcpu_run_hv()
5115 run->fail_entry.hardware_entry_failure_reason = 0; in kvmppc_vcpu_run_hv()
5116 return -EINVAL; in kvmppc_vcpu_run_hv()
5125 if (!vcpu->arch.online) { in kvmppc_vcpu_run_hv()
5126 atomic_inc(&vcpu->arch.vcore->online_count); in kvmppc_vcpu_run_hv()
5127 vcpu->arch.online = 1; in kvmppc_vcpu_run_hv()
5132 kvm = vcpu->kvm; in kvmppc_vcpu_run_hv()
5133 atomic_inc(&kvm->arch.vcpus_running); in kvmppc_vcpu_run_hv()
5155 vcpu->arch.waitp = &vcpu->arch.vcore->wait; in kvmppc_vcpu_run_hv()
5156 vcpu->arch.pgdir = kvm->mm->pgd; in kvmppc_vcpu_run_hv()
5157 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; in kvmppc_vcpu_run_hv()
5160 accumulate_time(vcpu, &vcpu->arch.guest_entry); in kvmppc_vcpu_run_hv()
5163 vcpu->arch.vcore->lpcr); in kvmppc_vcpu_run_hv()
5167 if (run->exit_reason == KVM_EXIT_PAPR_HCALL) { in kvmppc_vcpu_run_hv()
5168 accumulate_time(vcpu, &vcpu->arch.hcall); in kvmppc_vcpu_run_hv()
5185 accumulate_time(vcpu, &vcpu->arch.pg_fault); in kvmppc_vcpu_run_hv()
5186 srcu_idx = srcu_read_lock(&kvm->srcu); in kvmppc_vcpu_run_hv()
5188 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); in kvmppc_vcpu_run_hv()
5189 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvmppc_vcpu_run_hv()
5197 accumulate_time(vcpu, &vcpu->arch.vcpu_exit); in kvmppc_vcpu_run_hv()
5199 vcpu->arch.state = KVMPPC_VCPU_NOTREADY; in kvmppc_vcpu_run_hv()
5200 atomic_dec(&kvm->arch.vcpus_running); in kvmppc_vcpu_run_hv()
5212 (*sps)->page_shift = shift; in kvmppc_add_seg_page_size()
5213 (*sps)->slb_enc = sllp; in kvmppc_add_seg_page_size()
5214 (*sps)->enc[0].page_shift = shift; in kvmppc_add_seg_page_size()
5215 (*sps)->enc[0].pte_enc = kvmppc_pgsize_lp_encoding(shift, shift); in kvmppc_add_seg_page_size()
5221 if (penc != -1) { in kvmppc_add_seg_page_size()
5222 (*sps)->enc[1].page_shift = 24; in kvmppc_add_seg_page_size()
5223 (*sps)->enc[1].pte_enc = penc; in kvmppc_add_seg_page_size()
5239 info->data_keys = 32; in kvm_vm_ioctl_get_smmu_info_hv()
5240 info->instr_keys = cpu_has_feature(CPU_FTR_ARCH_207S) ? 32 : 0; in kvm_vm_ioctl_get_smmu_info_hv()
5242 /* POWER7, 8 and 9 all have 1T segments and 32-entry SLB */ in kvm_vm_ioctl_get_smmu_info_hv()
5243 info->flags = KVM_PPC_PAGE_SIZES_REAL | KVM_PPC_1T_SEGMENTS; in kvm_vm_ioctl_get_smmu_info_hv()
5244 info->slb_size = 32; in kvm_vm_ioctl_get_smmu_info_hv()
5246 /* We only support these sizes for now, and no muti-size segments */ in kvm_vm_ioctl_get_smmu_info_hv()
5247 sps = &info->sps[0]; in kvm_vm_ioctl_get_smmu_info_hv()
5254 info->flags |= KVM_PPC_NO_HASH; in kvm_vm_ioctl_get_smmu_info_hv()
5272 mutex_lock(&kvm->slots_lock); in kvm_vm_ioctl_get_dirty_log_hv()
5274 r = -EINVAL; in kvm_vm_ioctl_get_dirty_log_hv()
5275 if (log->slot >= KVM_USER_MEM_SLOTS) in kvm_vm_ioctl_get_dirty_log_hv()
5279 memslot = id_to_memslot(slots, log->slot); in kvm_vm_ioctl_get_dirty_log_hv()
5280 r = -ENOENT; in kvm_vm_ioctl_get_dirty_log_hv()
5281 if (!memslot || !memslot->dirty_bitmap) in kvm_vm_ioctl_get_dirty_log_hv()
5289 buf = memslot->dirty_bitmap + n / sizeof(long); in kvm_vm_ioctl_get_dirty_log_hv()
5305 p = memslot->dirty_bitmap; in kvm_vm_ioctl_get_dirty_log_hv()
5312 spin_lock(&vcpu->arch.vpa_update_lock); in kvm_vm_ioctl_get_dirty_log_hv()
5313 kvmppc_harvest_vpa_dirty(&vcpu->arch.vpa, memslot, buf); in kvm_vm_ioctl_get_dirty_log_hv()
5314 kvmppc_harvest_vpa_dirty(&vcpu->arch.dtl, memslot, buf); in kvm_vm_ioctl_get_dirty_log_hv()
5315 spin_unlock(&vcpu->arch.vpa_update_lock); in kvm_vm_ioctl_get_dirty_log_hv()
5318 r = -EFAULT; in kvm_vm_ioctl_get_dirty_log_hv()
5319 if (copy_to_user(log->dirty_bitmap, buf, n)) in kvm_vm_ioctl_get_dirty_log_hv()
5324 mutex_unlock(&kvm->slots_lock); in kvm_vm_ioctl_get_dirty_log_hv()
5330 vfree(slot->arch.rmap); in kvmppc_core_free_memslot_hv()
5331 slot->arch.rmap = NULL; in kvmppc_core_free_memslot_hv()
5340 unsigned long size = array_size(new->npages, sizeof(*new->arch.rmap)); in kvmppc_core_prepare_memory_region_hv()
5343 return -ENOMEM; in kvmppc_core_prepare_memory_region_hv()
5345 new->arch.rmap = vzalloc(size); in kvmppc_core_prepare_memory_region_hv()
5346 if (!new->arch.rmap) in kvmppc_core_prepare_memory_region_hv()
5347 return -ENOMEM; in kvmppc_core_prepare_memory_region_hv()
5349 new->arch.rmap = old->arch.rmap; in kvmppc_core_prepare_memory_region_hv()
5363 * MMIO be no longer emulated MMIO, so invalidate in kvmppc_core_commit_memory_region_hv()
5367 atomic64_inc(&kvm->arch.mmio_update); in kvmppc_core_commit_memory_region_hv()
5372 * flush shadow mappings. For KVM_MR_CREATE we have no in kvmppc_core_commit_memory_region_hv()
5377 * to get rid of any THP PTEs in the partition-scoped page tables in kvmppc_core_commit_memory_region_hv()
5383 ((new->flags ^ old->flags) & KVM_MEM_LOG_DIRTY_PAGES)) in kvmppc_core_commit_memory_region_hv()
5388 if (!kvm->arch.secure_guest) in kvmppc_core_commit_memory_region_hv()
5409 * Update LPCR values in kvm->arch and in vcores.
5410 * Caller must hold kvm->arch.mmu_setup_lock (for mutual exclusion
5411 * of kvm->arch.lpcr update).
5418 if ((kvm->arch.lpcr & mask) == lpcr) in kvmppc_update_lpcr()
5421 kvm->arch.lpcr = (kvm->arch.lpcr & ~mask) | lpcr; in kvmppc_update_lpcr()
5424 struct kvmppc_vcore *vc = kvm->arch.vcores[i]; in kvmppc_update_lpcr()
5428 spin_lock(&vc->lock); in kvmppc_update_lpcr()
5429 vc->lpcr = (vc->lpcr & ~mask) | lpcr; in kvmppc_update_lpcr()
5430 verify_lpcr(kvm, vc->lpcr); in kvmppc_update_lpcr()
5431 spin_unlock(&vc->lock); in kvmppc_update_lpcr()
5432 if (++cores_done >= kvm->arch.online_vcores) in kvmppc_update_lpcr()
5450 /* PS field - page size for VRMA */ in kvmppc_setup_partition_table()
5451 dw0 = ((kvm->arch.vrma_slb_v & SLB_VSID_L) >> 1) | in kvmppc_setup_partition_table()
5452 ((kvm->arch.vrma_slb_v & SLB_VSID_LP) << 1); in kvmppc_setup_partition_table()
5454 dw0 |= kvm->arch.sdr1; in kvmppc_setup_partition_table()
5457 dw1 = kvm->arch.process_table; in kvmppc_setup_partition_table()
5460 __pa(kvm->arch.pgtable) | RADIX_PGD_INDEX_SIZE; in kvmppc_setup_partition_table()
5461 dw1 = PATB_GR | kvm->arch.process_table; in kvmppc_setup_partition_table()
5463 kvmhv_set_ptbl_entry(kvm->arch.lpid, dw0, dw1); in kvmppc_setup_partition_table()
5467 * Set up HPT (hashed page table) and RMA (real-mode area).
5468 * Must be called with kvm->arch.mmu_setup_lock held.
5473 struct kvm *kvm = vcpu->kvm; in kvmppc_hv_setup_htab_rma()
5482 if (!kvm->arch.hpt.virt) { in kvmppc_hv_setup_htab_rma()
5490 while ((err == -ENOMEM) && --order >= PPC_MIN_HPT_ORDER) in kvmppc_hv_setup_htab_rma()
5502 srcu_idx = srcu_read_lock(&kvm->srcu); in kvmppc_hv_setup_htab_rma()
5506 err = -EINVAL; in kvmppc_hv_setup_htab_rma()
5507 if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) in kvmppc_hv_setup_htab_rma()
5511 hva = memslot->userspace_addr; in kvmppc_hv_setup_htab_rma()
5512 mmap_read_lock(kvm->mm); in kvmppc_hv_setup_htab_rma()
5513 vma = vma_lookup(kvm->mm, hva); in kvmppc_hv_setup_htab_rma()
5514 if (!vma || (vma->vm_flags & VM_IO)) in kvmppc_hv_setup_htab_rma()
5519 mmap_read_unlock(kvm->mm); in kvmppc_hv_setup_htab_rma()
5531 kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T | in kvmppc_hv_setup_htab_rma()
5538 /* the -4 is to account for senc values starting at 0x10 */ in kvmppc_hv_setup_htab_rma()
5539 lpcr = senc << (LPCR_VRMASD_SH - 4); in kvmppc_hv_setup_htab_rma()
5543 /* Order updates to kvm->arch.lpcr etc. vs. mmu_ready */ in kvmppc_hv_setup_htab_rma()
5547 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvmppc_hv_setup_htab_rma()
5552 mmap_read_unlock(kvm->mm); in kvmppc_hv_setup_htab_rma()
5557 * Must be called with kvm->arch.mmu_setup_lock held and
5558 * mmu_ready = 0 and no vcpus running.
5567 kvm->arch.process_table = 0; in kvmppc_switch_mmu_to_hpt()
5569 spin_lock(&kvm->mmu_lock); in kvmppc_switch_mmu_to_hpt()
5570 kvm->arch.radix = 0; in kvmppc_switch_mmu_to_hpt()
5571 spin_unlock(&kvm->mmu_lock); in kvmppc_switch_mmu_to_hpt()
5584 * Must be called with kvm->arch.mmu_setup_lock held and
5585 * mmu_ready = 0 and no vcpus running.
5597 spin_lock(&kvm->mmu_lock); in kvmppc_switch_mmu_to_radix()
5598 kvm->arch.radix = 1; in kvmppc_switch_mmu_to_radix()
5599 spin_unlock(&kvm->mmu_lock); in kvmppc_switch_mmu_to_radix()
5600 kvmppc_free_hpt(&kvm->arch.hpt); in kvmppc_switch_mmu_to_radix()
5607 (kvm->arch.host_lpcr & LPCR_HAIL)) in kvmppc_switch_mmu_to_radix()
5617 * Allocate a per-core structure for managing state about which cores are
5622 * It is only freed when the kvm-hv module is unloaded.
5645 ops->rm_core = kzalloc(size, GFP_KERNEL); in kvmppc_alloc_host_rm_ops()
5647 if (!ops->rm_core) { in kvmppc_alloc_host_rm_ops()
5659 ops->rm_core[core].rm_state.in_host = 1; in kvmppc_alloc_host_rm_ops()
5662 ops->vcpu_kick = kvmppc_fast_vcpu_kick_hv; in kvmppc_alloc_host_rm_ops()
5667 * Do an atomic assignment (no locks used here), but if someone in kvmppc_alloc_host_rm_ops()
5675 kfree(ops->rm_core); in kvmppc_alloc_host_rm_ops()
5691 kfree(kvmppc_host_rm_ops_hv->rm_core); in kvmppc_free_host_rm_ops()
5703 mutex_init(&kvm->arch.uvmem_lock); in kvmppc_core_init_vm_hv()
5704 INIT_LIST_HEAD(&kvm->arch.uvmem_pfns); in kvmppc_core_init_vm_hv()
5705 mutex_init(&kvm->arch.mmu_setup_lock); in kvmppc_core_init_vm_hv()
5712 return -ENOMEM; in kvmppc_core_init_vm_hv()
5713 kvm->arch.lpid = lpid; in kvmppc_core_init_vm_hv()
5733 return -EINVAL; in kvmppc_core_init_vm_hv()
5736 return -ENOMEM; in kvmppc_core_init_vm_hv()
5738 return -EPERM; in kvmppc_core_init_vm_hv()
5740 return -EBUSY; in kvmppc_core_init_vm_hv()
5742 kvm->arch.lpid = guest_id; in kvmppc_core_init_vm_hv()
5754 cpumask_setall(&kvm->arch.need_tlb_flush); in kvmppc_core_init_vm_hv()
5757 memcpy(kvm->arch.enabled_hcalls, default_enabled_hcalls, in kvmppc_core_init_vm_hv()
5758 sizeof(kvm->arch.enabled_hcalls)); in kvmppc_core_init_vm_hv()
5761 kvm->arch.host_sdr1 = mfspr(SPRN_SDR1); in kvmppc_core_init_vm_hv()
5765 kvm->arch.host_lpid = mfspr(SPRN_LPID); in kvmppc_core_init_vm_hv()
5766 kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR); in kvmppc_core_init_vm_hv()
5777 kvm->arch.vrma_slb_v = SLB_VSID_B_1T | in kvmppc_core_init_vm_hv()
5786 * be unnecessary but better safe than sorry in case we re-enable in kvmppc_core_init_vm_hv()
5805 kvm->arch.radix = 1; in kvmppc_core_init_vm_hv()
5806 kvm->arch.mmu_ready = 1; in kvmppc_core_init_vm_hv()
5811 (kvm->arch.host_lpcr & LPCR_HAIL)) in kvmppc_core_init_vm_hv()
5816 plpar_guest_delete(0, kvm->arch.lpid); in kvmppc_core_init_vm_hv()
5818 kvmppc_free_lpid(kvm->arch.lpid); in kvmppc_core_init_vm_hv()
5825 kvm->arch.lpcr = lpcr; in kvmppc_core_init_vm_hv()
5828 kvm->arch.resize_hpt = NULL; in kvmppc_core_init_vm_hv()
5838 kvm->arch.tlb_sets = 1; in kvmppc_core_init_vm_hv()
5840 kvm->arch.tlb_sets = POWER9_TLB_SETS_RADIX; /* 128 */ in kvmppc_core_init_vm_hv()
5842 kvm->arch.tlb_sets = POWER9_TLB_SETS_HASH; /* 256 */ in kvmppc_core_init_vm_hv()
5844 kvm->arch.tlb_sets = POWER8_TLB_SETS; /* 512 */ in kvmppc_core_init_vm_hv()
5846 kvm->arch.tlb_sets = POWER7_TLB_SETS; /* 128 */ in kvmppc_core_init_vm_hv()
5863 kvm->arch.smt_mode = threads_per_subcore; in kvmppc_core_init_vm_hv()
5865 kvm->arch.smt_mode = 1; in kvmppc_core_init_vm_hv()
5866 kvm->arch.emul_smt_mode = 1; in kvmppc_core_init_vm_hv()
5884 kfree(kvm->arch.vcores[i]); in kvmppc_free_vcores()
5885 kvm->arch.online_vcores = 0; in kvmppc_free_vcores()
5899 kvmppc_free_hpt(&kvm->arch.hpt); in kvmppc_core_destroy_vm_hv()
5905 kvm->arch.process_table = 0; in kvmppc_core_destroy_vm_hv()
5906 if (kvm->arch.secure_guest) in kvmppc_core_destroy_vm_hv()
5907 uv_svm_terminate(kvm->arch.lpid); in kvmppc_core_destroy_vm_hv()
5909 kvmhv_set_ptbl_entry(kvm->arch.lpid, 0, 0); in kvmppc_core_destroy_vm_hv()
5913 kvmhv_flush_lpid(kvm->arch.lpid); in kvmppc_core_destroy_vm_hv()
5914 plpar_guest_delete(0, kvm->arch.lpid); in kvmppc_core_destroy_vm_hv()
5916 kvmppc_free_lpid(kvm->arch.lpid); in kvmppc_core_destroy_vm_hv()
5951 return -EIO; in kvmppc_core_check_processor_compat_hv()
5958 kfree(kvm->arch.pimap); in kvmppc_free_pimap()
5980 return -EIO; in kvmppc_set_passthru_irq()
5982 mutex_lock(&kvm->lock); in kvmppc_set_passthru_irq()
5984 pimap = kvm->arch.pimap; in kvmppc_set_passthru_irq()
5989 mutex_unlock(&kvm->lock); in kvmppc_set_passthru_irq()
5990 return -ENOMEM; in kvmppc_set_passthru_irq()
5992 kvm->arch.pimap = pimap; in kvmppc_set_passthru_irq()
5996 * For now, we only support interrupts for which the EOI operation in kvmppc_set_passthru_irq()
5998 * what our real-mode EOI code does, or a XIVE interrupt in kvmppc_set_passthru_irq()
6000 chip = irq_data_get_irq_chip(&desc->irq_data); in kvmppc_set_passthru_irq()
6004 mutex_unlock(&kvm->lock); in kvmppc_set_passthru_irq()
6005 return -ENOENT; in kvmppc_set_passthru_irq()
6011 * otherwise re-use this entry. in kvmppc_set_passthru_irq()
6013 for (i = 0; i < pimap->n_mapped; i++) { in kvmppc_set_passthru_irq()
6014 if (guest_gsi == pimap->mapped[i].v_hwirq) { in kvmppc_set_passthru_irq()
6015 if (pimap->mapped[i].r_hwirq) { in kvmppc_set_passthru_irq()
6016 mutex_unlock(&kvm->lock); in kvmppc_set_passthru_irq()
6017 return -EINVAL; in kvmppc_set_passthru_irq()
6024 mutex_unlock(&kvm->lock); in kvmppc_set_passthru_irq()
6025 return -EAGAIN; /* table is full */ in kvmppc_set_passthru_irq()
6028 irq_map = &pimap->mapped[i]; in kvmppc_set_passthru_irq()
6030 irq_map->v_hwirq = guest_gsi; in kvmppc_set_passthru_irq()
6031 irq_map->desc = desc; in kvmppc_set_passthru_irq()
6040 * The 'host_irq' number is mapped in the PCI-MSI domain but in kvmppc_set_passthru_irq()
6041 * the underlying calls, which will EOI the interrupt in real in kvmppc_set_passthru_irq()
6045 irq_map->r_hwirq = (unsigned int)irqd_to_hwirq(host_data); in kvmppc_set_passthru_irq()
6047 if (i == pimap->n_mapped) in kvmppc_set_passthru_irq()
6048 pimap->n_mapped++; in kvmppc_set_passthru_irq()
6053 kvmppc_xics_set_mapped(kvm, guest_gsi, irq_map->r_hwirq); in kvmppc_set_passthru_irq()
6055 irq_map->r_hwirq = 0; in kvmppc_set_passthru_irq()
6057 mutex_unlock(&kvm->lock); in kvmppc_set_passthru_irq()
6073 return -EIO; in kvmppc_clr_passthru_irq()
6075 mutex_lock(&kvm->lock); in kvmppc_clr_passthru_irq()
6076 if (!kvm->arch.pimap) in kvmppc_clr_passthru_irq()
6079 pimap = kvm->arch.pimap; in kvmppc_clr_passthru_irq()
6081 for (i = 0; i < pimap->n_mapped; i++) { in kvmppc_clr_passthru_irq()
6082 if (guest_gsi == pimap->mapped[i].v_hwirq) in kvmppc_clr_passthru_irq()
6086 if (i == pimap->n_mapped) { in kvmppc_clr_passthru_irq()
6087 mutex_unlock(&kvm->lock); in kvmppc_clr_passthru_irq()
6088 return -ENODEV; in kvmppc_clr_passthru_irq()
6094 kvmppc_xics_clr_mapped(kvm, guest_gsi, pimap->mapped[i].r_hwirq); in kvmppc_clr_passthru_irq()
6097 pimap->mapped[i].r_hwirq = 0; in kvmppc_clr_passthru_irq()
6104 mutex_unlock(&kvm->lock); in kvmppc_clr_passthru_irq()
6115 irqfd->producer = prod; in kvmppc_irq_bypass_add_producer_hv()
6117 ret = kvmppc_set_passthru_irq(irqfd->kvm, prod->irq, irqfd->gsi); in kvmppc_irq_bypass_add_producer_hv()
6120 prod->irq, irqfd->gsi, ret); in kvmppc_irq_bypass_add_producer_hv()
6132 irqfd->producer = NULL; in kvmppc_irq_bypass_del_producer_hv()
6136 * default external interrupt handling mode - KVM real mode in kvmppc_irq_bypass_del_producer_hv()
6139 ret = kvmppc_clr_passthru_irq(irqfd->kvm, prod->irq, irqfd->gsi); in kvmppc_irq_bypass_del_producer_hv()
6142 prod->irq, irqfd->gsi, ret); in kvmppc_irq_bypass_del_producer_hv()
6149 struct kvm *kvm __maybe_unused = filp->private_data; in kvm_arch_vm_ioctl_hv()
6160 r = -EOPNOTSUPP; in kvm_arch_vm_ioctl_hv()
6164 r = -EFAULT; in kvm_arch_vm_ioctl_hv()
6177 r = -EFAULT; in kvm_arch_vm_ioctl_hv()
6187 r = -EFAULT; in kvm_arch_vm_ioctl_hv()
6198 r = -EFAULT; in kvm_arch_vm_ioctl_hv()
6207 r = -ENOTTY; in kvm_arch_vm_ioctl_hv()
6216 * all hcalls that were implemented before the hcall-enabling
6266 return -ENODEV; in kvmhv_configure_mmu()
6269 if (cfg->flags & ~(KVM_PPC_MMUV3_RADIX | KVM_PPC_MMUV3_GTSE)) in kvmhv_configure_mmu()
6270 return -EINVAL; in kvmhv_configure_mmu()
6273 radix = !!(cfg->flags & KVM_PPC_MMUV3_RADIX); in kvmhv_configure_mmu()
6274 if (!!(cfg->process_table & PATB_GR) != radix) in kvmhv_configure_mmu()
6275 return -EINVAL; in kvmhv_configure_mmu()
6278 if ((cfg->process_table & PRTS_MASK) > 24) in kvmhv_configure_mmu()
6279 return -EINVAL; in kvmhv_configure_mmu()
6283 return -EINVAL; in kvmhv_configure_mmu()
6287 return -EINVAL; in kvmhv_configure_mmu()
6289 mutex_lock(&kvm->arch.mmu_setup_lock); in kvmhv_configure_mmu()
6291 if (kvm->arch.mmu_ready) { in kvmhv_configure_mmu()
6292 kvm->arch.mmu_ready = 0; in kvmhv_configure_mmu()
6295 if (atomic_read(&kvm->arch.vcpus_running)) { in kvmhv_configure_mmu()
6296 kvm->arch.mmu_ready = 1; in kvmhv_configure_mmu()
6297 err = -EBUSY; in kvmhv_configure_mmu()
6309 kvm->arch.process_table = cfg->process_table; in kvmhv_configure_mmu()
6312 lpcr = (cfg->flags & KVM_PPC_MMUV3_GTSE) ? LPCR_GTSE : 0; in kvmhv_configure_mmu()
6317 mutex_unlock(&kvm->arch.mmu_setup_lock); in kvmhv_configure_mmu()
6324 return -EPERM; in kvmhv_enable_nested()
6326 return -ENODEV; in kvmhv_enable_nested()
6328 return -ENODEV; in kvmhv_enable_nested()
6330 return -ENODEV; in kvmhv_enable_nested()
6334 kvm->arch.nested_enable = true; in kvmhv_enable_nested()
6341 int rc = -EINVAL; in kvmhv_load_from_eaddr()
6347 rc = -EINVAL; in kvmhv_load_from_eaddr()
6351 if (rc && vcpu->arch.nested) in kvmhv_load_from_eaddr()
6352 rc = -EAGAIN; in kvmhv_load_from_eaddr()
6360 int rc = -EINVAL; in kvmhv_store_to_eaddr()
6366 rc = -EINVAL; in kvmhv_store_to_eaddr()
6370 if (rc && vcpu->arch.nested) in kvmhv_store_to_eaddr()
6371 rc = -EAGAIN; in kvmhv_store_to_eaddr()
6379 vpa->gpa = 0; in unpin_vpa_reset()
6380 vpa->pinned_addr = NULL; in unpin_vpa_reset()
6381 vpa->dirty = false; in unpin_vpa_reset()
6382 vpa->update_pending = 0; in unpin_vpa_reset()
6394 return -EINVAL; in kvmhv_enable_svm()
6396 kvm->arch.svm_enabled = 1; in kvmhv_enable_svm()
6403 * - Release all device pages
6404 * - Issue ucall to terminate the guest on the UV side
6405 * - Unpin the VPA pages.
6406 * - Reinit the partition scoped page tables
6416 if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)) in kvmhv_svm_off()
6419 mutex_lock(&kvm->arch.mmu_setup_lock); in kvmhv_svm_off()
6420 mmu_was_ready = kvm->arch.mmu_ready; in kvmhv_svm_off()
6421 if (kvm->arch.mmu_ready) { in kvmhv_svm_off()
6422 kvm->arch.mmu_ready = 0; in kvmhv_svm_off()
6425 if (atomic_read(&kvm->arch.vcpus_running)) { in kvmhv_svm_off()
6426 kvm->arch.mmu_ready = 1; in kvmhv_svm_off()
6427 ret = -EBUSY; in kvmhv_svm_off()
6432 srcu_idx = srcu_read_lock(&kvm->srcu); in kvmhv_svm_off()
6443 uv_unregister_mem_slot(kvm->arch.lpid, memslot->id); in kvmhv_svm_off()
6446 srcu_read_unlock(&kvm->srcu, srcu_idx); in kvmhv_svm_off()
6448 ret = uv_svm_terminate(kvm->arch.lpid); in kvmhv_svm_off()
6450 ret = -EINVAL; in kvmhv_svm_off()
6456 * to UV via UV_PAGE_IN before the non-boot vcpus get a in kvmhv_svm_off()
6465 spin_lock(&vcpu->arch.vpa_update_lock); in kvmhv_svm_off()
6466 unpin_vpa_reset(kvm, &vcpu->arch.dtl); in kvmhv_svm_off()
6467 unpin_vpa_reset(kvm, &vcpu->arch.slb_shadow); in kvmhv_svm_off()
6468 unpin_vpa_reset(kvm, &vcpu->arch.vpa); in kvmhv_svm_off()
6469 spin_unlock(&vcpu->arch.vpa_update_lock); in kvmhv_svm_off()
6473 kvm->arch.secure_guest = 0; in kvmhv_svm_off()
6474 kvm->arch.mmu_ready = mmu_was_ready; in kvmhv_svm_off()
6476 mutex_unlock(&kvm->arch.mmu_setup_lock); in kvmhv_svm_off()
6483 return -ENODEV; in kvmhv_enable_dawr1()
6487 kvm->arch.dawr1_enabled = true; in kvmhv_enable_dawr1()
6573 if (paca_ptrs[first_cpu]->sibling_subcore_state) in kvm_init_subcore_bitmap()
6580 return -ENOMEM; in kvm_init_subcore_bitmap()
6586 paca_ptrs[cpu]->sibling_subcore_state = in kvm_init_subcore_bitmap()
6603 pr_err("KVM-HV: Host does not support TLBIE\n"); in kvmppc_book3s_init_hv()
6604 return -ENODEV; in kvmppc_book3s_init_hv()
6612 return -ENODEV; in kvmppc_book3s_init_hv()
6626 * either directly, via paca_ptrs[cpu]->kvm_hstate.xics_phys, or in kvmppc_book3s_init_hv()
6631 !local_paca->kvm_hstate.xics_phys) { in kvmppc_book3s_init_hv()
6634 np = of_find_compatible_node(NULL, NULL, "ibm,opal-intc"); in kvmppc_book3s_init_hv()
6636 pr_err("KVM-HV: Cannot determine method for accessing XICS\n"); in kvmppc_book3s_init_hv()
6637 r = -ENODEV; in kvmppc_book3s_init_hv()
6640 /* presence of intc confirmed - node can be dropped again */ in kvmppc_book3s_init_hv()
6661 pr_err("KVM-HV: kvmppc_uvmem_init failed %d\n", r); in kvmppc_book3s_init_hv()