Lines Matching full:vcore
140 * online threads in the vcore being run.
179 /* Used to traverse the list of runnable threads for a given vcore */
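The comment matched at line 179 heads the iterator that later matches (post_guest_process, the idle check at line 4585) depend on. A plausible reconstruction of the macro and its helper, assuming the vcore tracks its runnable vcpus in a fixed-size runnable_threads[] array; treat this as a sketch rather than the exact source:

	/* Used to traverse the list of runnable threads for a given vcore */
	#define for_each_runnable_thread(i, vcpu, vc) \
		for (i = -1; (vcpu = next_runnable_thread(vc, &i)); )

	/* Sketch: scan forward from slot *ip for the next populated entry */
	static struct kvm_vcpu *next_runnable_thread(struct kvmppc_vcore *vc,
						     int *ip)
	{
		int i = *ip;
		struct kvm_vcpu *vcpu;

		while (++i < MAX_SMT_THREADS) {
			vcpu = READ_ONCE(vc->runnable_threads[i]);
			if (vcpu) {
				*ip = i;
				return vcpu;
			}
		}
		return NULL;
	}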
257 * run as part of a virtual core, but the task running the vcore
262 * of running the core, and the other vcpu tasks in the vcore will
267 * a vcore using vc->stolen_tb, and the stolen time when the vcpu
270 * stolen time for a vcore when it is inactive, or for a vcpu
281 * updates to vc->stolen_tb are protected by the vcore->stoltb_lock
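The block matched at lines 257-281 lays out the stolen-time scheme: while a vcore is off the physical core, the elapsed timebase accumulates in vc->stolen_tb, with vc->preempt_tb recording when the current preemption began and TB_NIL meaning no interval is open. A minimal sketch of the two bookkeeping steps this implies (function names are illustrative; the kernel's irq-safe locking and the per-vcpu busy_stolen side are elided):

	static void vcore_start_stolen(struct kvmppc_vcore *vc, u64 tb)
	{
		spin_lock(&vc->stoltb_lock);
		vc->preempt_tb = tb;	/* vcore just lost the core */
		spin_unlock(&vc->stoltb_lock);
	}

	static void vcore_end_stolen(struct kvmppc_vcore *vc, u64 tb)
	{
		spin_lock(&vc->stoltb_lock);
		if (vc->preempt_tb != TB_NIL) {
			/* charge the closed interval to the vcore */
			vc->stolen_tb += tb - vc->preempt_tb;
			vc->preempt_tb = TB_NIL;
		}
		spin_unlock(&vc->stoltb_lock);
	}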
320 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_core_vcpu_load_hv()
336 * We can test vc->runner without taking the vcore lock, in kvmppc_core_vcpu_load_hv()
355 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_core_vcpu_put_hv()
418 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_set_arch_compat()
515 vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1, in kvmppc_dump_regs()
773 * Return the accumulated stolen time for the vcore up until `now'.
774 * The caller should hold the vcore lock.
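Per the header comment at lines 773-774, the accumulated total must also include a preemption interval that is still open. A sketch consistent with that contract and with the stoltb_lock rule quoted at line 281 (the exact state test is an assumption):

	static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now)
	{
		u64 p;

		spin_lock(&vc->stoltb_lock);
		p = vc->stolen_tb;
		if (vc->vcore_state != VCORE_INACTIVE &&
		    vc->preempt_tb != TB_NIL)
			p += now - vc->preempt_tb;	/* interval still open */
		spin_unlock(&vc->stoltb_lock);
		return p;
	}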
890 * Ensure that the read of vcore->dpdes comes after the read in kvmppc_doorbell_pending()
895 vc = vcpu->arch.vcore; in kvmppc_doorbell_pending()
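The barrier comment matched at line 890 orders two reads: the vcpu-private doorbell_request flag, then the vcore-wide dpdes bitmap. A sketch of both sides of that pairing; the writer shown here models the transfer of a pending doorbell into dpdes, and the helper names are illustrative:

	/* Writer side: publish into dpdes before retiring the flag */
	static void doorbell_commit(struct kvm_vcpu *vcpu)
	{
		struct kvmppc_vcore *vc = vcpu->arch.vcore;
		int thr = vcpu->vcpu_id - vc->first_vcpuid;

		vc->dpdes |= 1 << thr;
		smp_wmb();		/* pairs with smp_rmb() below */
		vcpu->arch.doorbell_request = 0;
	}

	/* Reader side: flag first, then the shared map (line 890) */
	static bool doorbell_pending(struct kvm_vcpu *vcpu)
	{
		struct kvmppc_vcore *vc;
		int thr;

		if (vcpu->arch.doorbell_request)
			return true;
		smp_rmb();	/* dpdes read must follow the flag read */
		vc = vcpu->arch.vcore;
		thr = vcpu->vcpu_id - vc->first_vcpuid;
		return !!(vc->dpdes & (1 << thr));
	}

If the reader misses the flag because it was already cleared, the rmb/wmb pairing guarantees it then observes the bit the writer put into dpdes first.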
1045 struct kvmppc_vcore *vcore = target->arch.vcore; in kvm_arch_vcpu_yield_to() local
1050 * H_SUCCESS if the source vcore wasn't idle (e.g. if it may in kvm_arch_vcpu_yield_to()
1054 * In the case of the P9 single vcpu per vcore case, the real in kvm_arch_vcpu_yield_to()
1056 * source vcore. in kvm_arch_vcpu_yield_to()
1059 spin_lock(&vcore->lock); in kvm_arch_vcpu_yield_to()
1061 vcore->vcore_state != VCORE_INACTIVE && in kvm_arch_vcpu_yield_to()
1062 vcore->runner) in kvm_arch_vcpu_yield_to()
1063 target = vcore->runner; in kvm_arch_vcpu_yield_to()
1064 spin_unlock(&vcore->lock); in kvm_arch_vcpu_yield_to()
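The matches at 1045-1064 show most of the yield path: when the target's vcore is actively being run, the yield is redirected to the vcore runner, since on pre-POWER9 hosts that task is the one occupying a host CPU on behalf of all the vcore's threads. A reconstruction around the matched lines (the KVMPPC_VCPU_RUNNABLE test is an assumption; the matches show only the vcore_state and runner checks):

	int kvm_arch_vcpu_yield_to(struct kvm_vcpu *target)
	{
		struct kvmppc_vcore *vcore = target->arch.vcore;

		/* Boosting the runner is what lets the target progress. */
		spin_lock(&vcore->lock);
		if (target->arch.state == KVMPPC_VCPU_RUNNABLE &&
		    vcore->vcore_state != VCORE_INACTIVE &&
		    vcore->runner)
			target = vcore->runner;
		spin_unlock(&vcore->lock);

		return kvm_vcpu_yield_to(target);
	}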
1518 * which will update its vcore->dpdes value. in kvmppc_read_dpdes()
1570 vcpu->arch.vcore->dpdes = 0; in kvmppc_emulate_doorbell_instr()
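Lines 1518 and 1570 show the two directions of doorbell emulation: sending a msgsndp to a sibling updates pending state that eventually lands in the vcore's dpdes, and msgclr wipes everything pending locally. A sketch of both sides (helper names are illustrative; only the dpdes clear at line 1570 is taken directly from the matches):

	/* msgsndp: post a doorbell to a sibling vcpu in the same vcore */
	static void emulate_msgsndp(struct kvm_vcpu *tvcpu)
	{
		tvcpu->arch.doorbell_request = 1;	/* reaches dpdes on entry */
		kvm_vcpu_kick(tvcpu);			/* wake it if napping */
	}

	/* msgclrp: drop every doorbell pending for this vcpu */
	static void emulate_msgclrp(struct kvm_vcpu *vcpu)
	{
		vcpu->arch.vcore->dpdes = 0;		/* line 1570 */
		vcpu->arch.doorbell_request = 0;
	}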
2185 struct kvmppc_vcore *vc = vcpu->arch.vcore; in kvmppc_set_lpcr()
2208 * MSR_LE bit in the intr_msr for each vcpu in this vcore. in kvmppc_set_lpcr()
2215 if (vcpu->arch.vcore != vc) in kvmppc_set_lpcr()
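The matches at 2208-2215 come from the LPCR update path: flipping the interrupt-little-endian (ILE) bit must be mirrored into the MSR_LE bit of each vcpu's interrupt MSR, and vcpus attached to a different vcore are skipped. A sketch of that loop, using the standard kvm_for_each_vcpu iterator (surrounding locking elided):

	if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) {
		struct kvm_vcpu *vcpu;
		unsigned long i;

		kvm_for_each_vcpu(i, vcpu, kvm) {
			if (vcpu->arch.vcore != vc)
				continue;	/* line 2215: other vcore */
			if (new_lpcr & LPCR_ILE)
				vcpu->arch.intr_msr |= MSR_LE;
			else
				vcpu->arch.intr_msr &= ~MSR_LE;
		}
	}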
2313 * either vcore->dpdes or doorbell_request. in kvmppc_get_one_reg_hv()
2319 *val = get_reg_val(id, vcpu->arch.vcore->dpdes); in kvmppc_get_one_reg_hv()
2571 vcpu->arch.vcore->dpdes = set_reg_val(id, *val); in kvmppc_set_one_reg_hv()
2756 atomic_inc(&vcpu->arch.vcore->online_count); in kvmppc_set_one_reg_hv()
2758 atomic_dec(&vcpu->arch.vcore->online_count); in kvmppc_set_one_reg_hv()
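Lines 2756-2758 belong to the KVM_REG_PPC_ONLINE handler, which maintains the per-vcore count of threads the guest considers online (consumed at lines 140 and 3944). A sketch of the transition logic, assuming the previous value is cached in vcpu->arch.online and i is a local:

	case KVM_REG_PPC_ONLINE:
		i = set_reg_val(id, *val);
		/* count 0->1 and 1->0 transitions only */
		if (i && !vcpu->arch.online)
			atomic_inc(&vcpu->arch.vcore->online_count);
		else if (!i && vcpu->arch.online)
			atomic_dec(&vcpu->arch.vcore->online_count);
		vcpu->arch.online = i;
		break;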
2791 struct kvmppc_vcore *vcore; in kvmppc_vcore_create() local
2793 vcore = kzalloc(sizeof(struct kvmppc_vcore), GFP_KERNEL); in kvmppc_vcore_create()
2795 if (vcore == NULL) in kvmppc_vcore_create()
2798 spin_lock_init(&vcore->lock); in kvmppc_vcore_create()
2799 spin_lock_init(&vcore->stoltb_lock); in kvmppc_vcore_create()
2800 rcuwait_init(&vcore->wait); in kvmppc_vcore_create()
2801 vcore->preempt_tb = TB_NIL; in kvmppc_vcore_create()
2802 vcore->lpcr = kvm->arch.lpcr; in kvmppc_vcore_create()
2803 vcore->first_vcpuid = id; in kvmppc_vcore_create()
2804 vcore->kvm = kvm; in kvmppc_vcore_create()
2805 INIT_LIST_HEAD(&vcore->preempt_list); in kvmppc_vcore_create()
2807 return vcore; in kvmppc_vcore_create()
2965 struct kvmppc_vcore *vcore; in kvmppc_core_vcpu_create_hv() local
3045 vcore = NULL; in kvmppc_core_vcpu_create_hv()
3059 vcore = kvm->arch.vcores[core]; in kvmppc_core_vcpu_create_hv()
3060 if (vcore && cpu_has_feature(CPU_FTR_ARCH_300)) { in kvmppc_core_vcpu_create_hv()
3062 vcore = NULL; in kvmppc_core_vcpu_create_hv()
3063 } else if (!vcore) { in kvmppc_core_vcpu_create_hv()
3069 vcore = kvmppc_vcore_create(kvm, in kvmppc_core_vcpu_create_hv()
3072 kvm->arch.vcores[core] = vcore; in kvmppc_core_vcpu_create_hv()
3079 if (!vcore) in kvmppc_core_vcpu_create_hv()
3082 spin_lock(&vcore->lock); in kvmppc_core_vcpu_create_hv()
3083 ++vcore->num_threads; in kvmppc_core_vcpu_create_hv()
3084 spin_unlock(&vcore->lock); in kvmppc_core_vcpu_create_hv()
3085 vcpu->arch.vcore = vcore; in kvmppc_core_vcpu_create_hv()
3086 vcpu->arch.ptid = vcpu->vcpu_id - vcore->first_vcpuid; in kvmppc_core_vcpu_create_hv()
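The block at 3045-3086 resolves which vcore a new vcpu joins: derive a core slot from the vcpu id, create the vcore lazily under kvm->lock, and refuse to reuse a populated slot on POWER9, where each vcpu must get its own vcore. A condensed sketch of the mapping (the smt_mode divisor and the first_vcpuid rounding are assumptions inferred from the ptid computation at line 3086; error handling and the POWER9 packed-id path are omitted):

	mutex_lock(&kvm->lock);
	vcore = NULL;
	core = id / kvm->arch.smt_mode;		/* vcore slot for this id */
	if (core < KVM_MAX_VCORES) {
		vcore = kvm->arch.vcores[core];
		if (vcore && cpu_has_feature(CPU_FTR_ARCH_300)) {
			vcore = NULL;		/* P9: one vcpu per vcore */
		} else if (!vcore) {
			vcore = kvmppc_vcore_create(kvm,
					id & ~(kvm->arch.smt_mode - 1));
			kvm->arch.vcores[core] = vcore;
		}
	}
	mutex_unlock(&kvm->lock);

	spin_lock(&vcore->lock);
	++vcore->num_threads;
	spin_unlock(&vcore->lock);
	vcpu->arch.vcore = vcore;
	/* ptid: this vcpu's thread position within its vcore */
	vcpu->arch.ptid = vcpu->vcpu_id - vcore->first_vcpuid;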
3109 * so we pack smt_mode vcpus per vcore. in kvmhv_set_smt_mode()
3116 * so each vcpu gets its own vcore. in kvmhv_set_smt_mode()
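Lines 3109-3116 contrast the two threading models the SMT-mode setter has to handle. A sketch of the decision they describe; the validation details and the emul_smt_mode bookkeeping are assumptions consistent with the comments:

	if (smt_mode > MAX_SMT_THREADS || !is_power_of_2(smt_mode))
		return -EINVAL;
	if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
		/* POWER8: strict threading, pack smt_mode vcpus per vcore */
		if (smt_mode > threads_per_subcore)
			return -EINVAL;
	} else {
		/* POWER9: loose threading, one vcpu per vcore;
		 * the guest's SMT is only emulated */
		esmt = smt_mode;
		smt_mode = 1;
	}
	kvm->arch.smt_mode = smt_mode;
	kvm->arch.emul_smt_mode = esmt;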
3370 * We set the vcore pointer when starting a thread in kvmppc_wait_for_nap()
3372 * for any threads that still have a non-NULL vcore ptr. in kvmppc_wait_for_nap()
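Lines 3370-3372 describe the handshake the runner uses to confirm its secondary threads have left the guest: each secondary clears the vcore pointer in its PACA on the way out, so the runner polls until every sibling slot is NULL. A rough sketch (the kernel bounds the loop and warns on timeout; that is elided here):

	static void wait_for_nap(int cpu, int n_threads)
	{
		int thr, still_running;

		do {
			still_running = 0;
			for (thr = 1; thr < n_threads; ++thr)
				if (paca_ptrs[cpu + thr]->kvm_hstate.kvm_vcore)
					still_running = 1;
		} while (still_running);
	}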
3570 * vcore *pvc onto the execution of the other vcores described in *cip.
3659 * It's safe to unlock the vcore in the loop here, because in post_guest_process()
3661 * the vcpu, and the vcore state is VCORE_EXITING here, in post_guest_process()
3861 /* Unlock all except the primary vcore */ in kvmppc_run_core()
3944 * or if the vcore's online count looks bogus. in kvmppc_run_core()
3986 * the vcore pointer in the PACA of the secondaries. in kvmppc_run_core()
4585 * Check to see if any of the runnable vcpus on the vcore have pending
4602 * All the vcpus in this vcore are idle, so wait for a decrementer
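The comments at 4585 and 4602 define the sleep condition: the runner naps the whole vcore only when no runnable vcpu has work. A sketch of that scan, reusing the for_each_runnable_thread iterator from line 179; vcpu_woken() is an assumed helper covering pending exceptions and doorbells:

	static int vcore_has_work(struct kvmppc_vcore *vc)
	{
		struct kvm_vcpu *vcpu;
		int i;

		for_each_runnable_thread(i, vcpu, vc) {
			/* a non-ceded or woken vcpu forbids sleeping */
			if (!vcpu->arch.ceded || vcpu_woken(vcpu))
				return 1;
		}
		return 0;	/* all idle: wait for decrementer/external irq */
	}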
4755 vc = vcpu->arch.vcore; in kvmppc_run_vcpu()
4767 * If the vcore is already running, we may be able to start in kvmppc_run_vcpu()
4887 vc = vcpu->arch.vcore; in kvmhv_run_single_vcpu()
4955 * L1's copy of L2's LPCR (vcpu->arch.vcore->lpcr) can get its MER bit in kvmhv_run_single_vcpu()
5126 atomic_inc(&vcpu->arch.vcore->online_count); in kvmppc_vcpu_run_hv()
5155 vcpu->arch.waitp = &vcpu->arch.vcore->wait; in kvmppc_vcpu_run_hv()
5163 vcpu->arch.vcore->lpcr); in kvmppc_vcpu_run_hv()
5858 * all vCPUs in a vcore have to run on the same (sub)core, in kvmppc_core_init_vm_hv()