Lines Matching full:lpcr

514 	pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.16lx\n", in kvmppc_dump_regs()
515 vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1, in kvmppc_dump_regs()
2139 * Enforce limits on guest LPCR values based on hardware availability,
2143 unsigned long kvmppc_filter_lpcr_hv(struct kvm *kvm, unsigned long lpcr) in kvmppc_filter_lpcr_hv() argument
2147 lpcr &= ~LPCR_TC; in kvmppc_filter_lpcr_hv()
2151 lpcr &= ~LPCR_AIL; in kvmppc_filter_lpcr_hv()
2152 if ((lpcr & LPCR_AIL) != LPCR_AIL_3) in kvmppc_filter_lpcr_hv()
2153 lpcr &= ~LPCR_AIL; /* LPCR[AIL]=1/2 is disallowed */ in kvmppc_filter_lpcr_hv()
2161 lpcr &= ~LPCR_AIL; in kvmppc_filter_lpcr_hv()
2168 lpcr &= ~LPCR_LD; in kvmppc_filter_lpcr_hv()
2170 return lpcr; in kvmppc_filter_lpcr_hv()
2173 static void verify_lpcr(struct kvm *kvm, unsigned long lpcr) in verify_lpcr() argument
2175 if (lpcr != kvmppc_filter_lpcr_hv(kvm, lpcr)) { in verify_lpcr()
2176 WARN_ONCE(1, "lpcr 0x%lx differs from filtered 0x%lx\n", in verify_lpcr()
2177 lpcr, kvmppc_filter_lpcr_hv(kvm, lpcr)); in verify_lpcr()
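
The kvmppc_filter_lpcr_hv() and verify_lpcr() hits above show the pattern: bits that the hardware or the guest MMU mode cannot support are masked out of a requested LPCR value, and verification is just a comparison of a value against its filtered form. A minimal, self-contained C sketch of that pattern follows; the MOCK_LPCR_* macros, the capability flags and filter_lpcr() itself are invented stand-ins for LPCR_TC/LPCR_AIL/LPCR_LD, cpu_has_feature() and the real function, not the kernel's code.

#include <stdbool.h>
#include <stdio.h>

/* Mock stand-ins for the kernel's LPCR_* bit definitions. */
#define MOCK_LPCR_TC	(1UL << 0)	/* translation control */
#define MOCK_LPCR_AIL	(3UL << 1)	/* 2-bit alternate interrupt location field */
#define MOCK_LPCR_AIL_3	(3UL << 1)	/* AIL=3, the only allowed non-zero value */
#define MOCK_LPCR_LD	(1UL << 3)	/* large decrementer */

/* Assumed capability/configuration flags for the sketch. */
static const bool guest_is_radix = true;
static const bool cpu_has_arch_207s = true;
static const bool cpu_has_arch_300 = false;

/* Clear any requested LPCR bits the hardware or MMU mode cannot honour. */
static unsigned long filter_lpcr(unsigned long lpcr)
{
	if (guest_is_radix)
		lpcr &= ~MOCK_LPCR_TC;		/* TC only applies to HPT guests */
	if (!cpu_has_arch_207s)
		lpcr &= ~MOCK_LPCR_AIL;
	if ((lpcr & MOCK_LPCR_AIL) != MOCK_LPCR_AIL_3)
		lpcr &= ~MOCK_LPCR_AIL;		/* AIL=1/2 is disallowed */
	if (!cpu_has_arch_300)
		lpcr &= ~MOCK_LPCR_LD;		/* no large decrementer */
	return lpcr;
}

int main(void)
{
	unsigned long requested = MOCK_LPCR_TC | MOCK_LPCR_AIL_3 | MOCK_LPCR_LD;
	unsigned long filtered = filter_lpcr(requested);

	/* verify_lpcr()-style check: a value is acceptable iff filtering
	 * leaves it unchanged. */
	printf("requested 0x%lx -> filtered 0x%lx%s\n", requested, filtered,
	       requested != filtered ? " (some bits were refused)" : "");
	return 0;
}
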
2199 /* Broken 32-bit version of LPCR must not clear top bits */ in kvmppc_set_lpcr()
2204 (vc->lpcr & ~mask) | (new_lpcr & mask)); in kvmppc_set_lpcr()
2210 if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) { in kvmppc_set_lpcr()
2224 vc->lpcr = new_lpcr; in kvmppc_set_lpcr()
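
The kvmppc_set_lpcr() hits show the register being updated read-modify-write under a mask, with the legacy 32-bit register interface prevented from clearing the upper word. A rough sketch of that splice (set_lpcr() and preserve_top32 are illustrative names here, not the kernel's API):

#include <stdbool.h>
#include <stdio.h>

/*
 * Splice caller-supplied LPCR bits into the current value under a mask.
 * When the value arrives via a 32-bit interface, the mask is clamped to
 * the low word so the top bits of the existing LPCR are preserved.
 */
static unsigned long set_lpcr(unsigned long cur, unsigned long new_lpcr,
			      unsigned long mask, bool preserve_top32)
{
	if (preserve_top32)
		mask &= 0xFFFFFFFFUL;
	return (cur & ~mask) | (new_lpcr & mask);
}

int main(void)
{
	unsigned long cur = 0x1234000000000004UL;

	/* a 32-bit write of 0 must not wipe out the 0x12340000 top half */
	printf("0x%016lx\n", set_lpcr(cur, 0, ~0UL, true));
	return 0;
}

Clamping the mask rather than the value keeps the splice expression identical for the 32-bit and 64-bit paths.
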
2802 vcore->lpcr = kvm->arch.lpcr; in kvmppc_vcore_create()
3287 if (kvm->arch.lpcr & LPCR_GTSE) in do_migrate_away_vcpu()
4250 unsigned long lpcr, u64 *tb) in kvmhv_vcpu_entry_nestedv2() argument
4273 kvmppc_gse_put_u64(io->vcpu_run_input, KVMPPC_GSID_LPCR, lpcr); in kvmhv_vcpu_entry_nestedv2()
4310 static int kvmhv_vcpu_entry_p9_nested(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr, u6… in kvmhv_vcpu_entry_p9_nested() argument
4342 hvregs.lpcr = lpcr; in kvmhv_vcpu_entry_p9_nested()
4371 * irq_work_raise could check a flag (or possibly LPCR[HDICE] in kvmhv_vcpu_entry_p9_nested()
4397 if (!(lpcr & LPCR_LD)) /* Sign extend if not using large decrementer */ in kvmhv_vcpu_entry_p9_nested()
4415 unsigned long lpcr, u64 *tb) in kvmhv_p9_guest_entry() argument
4436 trap = kvmhv_vcpu_entry_p9_nested(vcpu, time_limit, lpcr, tb); in kvmhv_p9_guest_entry()
4438 trap = kvmhv_vcpu_entry_nestedv2(vcpu, time_limit, lpcr, tb); in kvmhv_p9_guest_entry()
4450 trap = kvmhv_vcpu_entry_p9(vcpu, time_limit, lpcr, tb); in kvmhv_p9_guest_entry()
4457 trap = kvmhv_vcpu_entry_p9(vcpu, time_limit, lpcr, tb); in kvmhv_p9_guest_entry()
4869 unsigned long lpcr) in kvmhv_run_single_vcpu() argument
4952 lpcr |= LPCR_MER; in kvmhv_run_single_vcpu()
4955 * L1's copy of L2's LPCR (vcpu->arch.vcore->lpcr) can get its MER bit in kvmhv_run_single_vcpu()
4964 lpcr &= ~LPCR_MER; in kvmhv_run_single_vcpu()
4990 trap = kvmhv_p9_guest_entry(vcpu, time_limit, lpcr, &tb); in kvmhv_run_single_vcpu()
5163 vcpu->arch.vcore->lpcr); in kvmppc_vcpu_run_hv()
5409 * Update LPCR values in kvm->arch and in vcores.
5411 * of kvm->arch.lpcr update).
5413 void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, unsigned long mask) in kvmppc_update_lpcr() argument
5418 if ((kvm->arch.lpcr & mask) == lpcr) in kvmppc_update_lpcr()
5421 kvm->arch.lpcr = (kvm->arch.lpcr & ~mask) | lpcr; in kvmppc_update_lpcr()
5429 vc->lpcr = (vc->lpcr & ~mask) | lpcr; in kvmppc_update_lpcr()
5430 verify_lpcr(kvm, vc->lpcr); in kvmppc_update_lpcr()
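
kvmppc_update_lpcr() applies the same masked splice VM-wide: return early if the selected bits already match, otherwise update the per-VM value and fan the change out to every vcore. A small stand-alone sketch of that flow, with mock globals standing in for kvm->arch.lpcr and each vcore's lpcr:

#include <stdio.h>

#define NR_VCORES 4

/* mock stand-ins for kvm->arch.lpcr and each vcore's vc->lpcr */
static unsigned long vm_lpcr;
static unsigned long vcore_lpcr[NR_VCORES];

/* Change only the bits selected by mask, then propagate to all vcores. */
static void update_lpcr(unsigned long lpcr, unsigned long mask)
{
	int i;

	if ((vm_lpcr & mask) == lpcr)
		return;				/* nothing to do */

	vm_lpcr = (vm_lpcr & ~mask) | lpcr;
	for (i = 0; i < NR_VCORES; i++)
		vcore_lpcr[i] = (vcore_lpcr[i] & ~mask) | lpcr;
}

int main(void)
{
	update_lpcr(0x8UL, 0xCUL);		/* set a two-bit field to 0b10 */
	printf("vm 0x%lx, vcore0 0x%lx\n", vm_lpcr, vcore_lpcr[0]);
	return 0;
}

Keeping kvm->arch.lpcr and every vc->lpcr in sync is what lets the entry paths above use vcpu->arch.vcore->lpcr directly.
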
5477 unsigned long lpcr = 0, senc; in kvmppc_hv_setup_htab_rma() local
5536 /* Update VRMASD field in the LPCR */ in kvmppc_hv_setup_htab_rma()
5539 lpcr = senc << (LPCR_VRMASD_SH - 4); in kvmppc_hv_setup_htab_rma()
5540 kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD); in kvmppc_hv_setup_htab_rma()
5543 /* Order updates to kvm->arch.lpcr etc. vs. mmu_ready */ in kvmppc_hv_setup_htab_rma()
5562 unsigned long lpcr, lpcr_mask; in kvmppc_switch_mmu_to_hpt() local
5574 lpcr = LPCR_VPM1; in kvmppc_switch_mmu_to_hpt()
5578 kvmppc_update_lpcr(kvm, lpcr, lpcr_mask); in kvmppc_switch_mmu_to_hpt()
5589 unsigned long lpcr, lpcr_mask; in kvmppc_switch_mmu_to_radix() local
5602 lpcr = LPCR_UPRT | LPCR_GTSE | LPCR_HR; in kvmppc_switch_mmu_to_radix()
5608 lpcr |= LPCR_HAIL; in kvmppc_switch_mmu_to_radix()
5610 kvmppc_update_lpcr(kvm, lpcr, lpcr_mask); in kvmppc_switch_mmu_to_radix()
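
kvmppc_switch_mmu_to_hpt() and kvmppc_switch_mmu_to_radix() rewrite the same group of LPCR bits in opposite directions: VPM1 for a hash (HPT) guest, UPRT | GTSE | HR (plus HAIL where available) for a radix guest, each time going through kvmppc_update_lpcr() with a mask covering the whole group. A sketch with mock single-bit definitions in place of the real LPCR_* values:

#include <stdbool.h>
#include <stdio.h>

/* mock single-bit stand-ins for LPCR_VPM1, LPCR_UPRT, LPCR_GTSE,
 * LPCR_HR and LPCR_HAIL */
#define MOCK_VPM1	(1UL << 0)
#define MOCK_UPRT	(1UL << 1)
#define MOCK_GTSE	(1UL << 2)
#define MOCK_HR		(1UL << 3)
#define MOCK_HAIL	(1UL << 4)

/* New value for the MMU-mode group of LPCR bits. */
static unsigned long mmu_mode_lpcr(bool radix, bool have_hail)
{
	unsigned long lpcr;

	if (!radix)
		return MOCK_VPM1;		/* hash (HPT) guest */

	lpcr = MOCK_UPRT | MOCK_GTSE | MOCK_HR;	/* radix guest */
	if (have_hail)
		lpcr |= MOCK_HAIL;
	return lpcr;
}

int main(void)
{
	printf("hpt   0x%lx\n", mmu_mode_lpcr(false, false));
	printf("radix 0x%lx\n", mmu_mode_lpcr(true, true));
	return 0;
}
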
5700 unsigned long lpcr, lpid; in kvmppc_core_init_vm_hv() local
5763 /* Init LPCR for virtual RMA mode */ in kvmppc_core_init_vm_hv()
5766 kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR); in kvmppc_core_init_vm_hv()
5767 lpcr &= LPCR_PECE | LPCR_LPES; in kvmppc_core_init_vm_hv()
5773 lpcr = 0; in kvmppc_core_init_vm_hv()
5775 lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE | in kvmppc_core_init_vm_hv()
5781 lpcr |= LPCR_ONL; in kvmppc_core_init_vm_hv()
5787 * EE in HV mode with this LPCR still set) in kvmppc_core_init_vm_hv()
5790 lpcr &= ~LPCR_VPM0; in kvmppc_core_init_vm_hv()
5791 lpcr |= LPCR_HVICE | LPCR_HEIC; in kvmppc_core_init_vm_hv()
5798 lpcr |= LPCR_LPES; in kvmppc_core_init_vm_hv()
5807 lpcr &= ~LPCR_VPM1; in kvmppc_core_init_vm_hv()
5808 lpcr |= LPCR_UPRT | LPCR_GTSE | LPCR_HR; in kvmppc_core_init_vm_hv()
5812 lpcr |= LPCR_HAIL; in kvmppc_core_init_vm_hv()
5824 verify_lpcr(kvm, lpcr); in kvmppc_core_init_vm_hv()
5825 kvm->arch.lpcr = lpcr; in kvmppc_core_init_vm_hv()
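
The kvmppc_core_init_vm_hv() hits show a new VM's LPCR being composed: keep only a couple of bits inherited from the host register, OR in the VM defaults, adjust the virtualisation/interrupt bits for newer CPUs, add the radix mode bits if the guest starts out radix, then verify and store. A condensed sketch of that build-up, again with invented MOCK_* bits and with some of the real code's conditions (DPFD, the XIVE gating of LPES, the lpcr = 0 fallback) left out:

#include <stdbool.h>
#include <stdio.h>

/* mock single-bit stand-ins for the LPCR bits named in the hits above */
#define MOCK_PECE	(1UL << 0)
#define MOCK_LPES	(1UL << 1)
#define MOCK_HDICE	(1UL << 2)
#define MOCK_VPM0	(1UL << 3)
#define MOCK_VPM1	(1UL << 4)
#define MOCK_ONL	(1UL << 5)
#define MOCK_HVICE	(1UL << 6)
#define MOCK_HEIC	(1UL << 7)
#define MOCK_UPRT	(1UL << 8)
#define MOCK_GTSE	(1UL << 9)
#define MOCK_HR		(1UL << 10)
#define MOCK_HAIL	(1UL << 11)

static unsigned long init_vm_lpcr(unsigned long host_lpcr, bool power8,
				  bool power9, bool radix, bool have_hail)
{
	/* carry over only a few bits from the host's LPCR */
	unsigned long lpcr = host_lpcr & (MOCK_PECE | MOCK_LPES);

	lpcr |= MOCK_HDICE | MOCK_VPM0 | MOCK_VPM1;	/* VM defaults */
	if (power8)
		lpcr |= MOCK_ONL;
	if (power9) {
		/* drop VPM0, use HV virtualisation interrupts instead;
		 * LPES gating is more involved in the real code */
		lpcr &= ~MOCK_VPM0;
		lpcr |= MOCK_HVICE | MOCK_HEIC | MOCK_LPES;
	}
	if (radix) {
		lpcr &= ~MOCK_VPM1;
		lpcr |= MOCK_UPRT | MOCK_GTSE | MOCK_HR;
		if (have_hail)
			lpcr |= MOCK_HAIL;
	}
	return lpcr;		/* the real code verifies, then stores this */
}

int main(void)
{
	printf("new VM lpcr = 0x%lx\n",
	       init_vm_lpcr(MOCK_PECE | MOCK_LPES, true, true, true, true));
	return 0;
}
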
6260 unsigned long lpcr; in kvmhv_configure_mmu() local
6312 lpcr = (cfg->flags & KVM_PPC_MMUV3_GTSE) ? LPCR_GTSE : 0; in kvmhv_configure_mmu()
6313 kvmppc_update_lpcr(kvm, lpcr, LPCR_GTSE); in kvmhv_configure_mmu()