Lines matching the identifier "arch" (query: full:arch). All hits below are in the RISC-V KVM vCPU implementation, arch/riscv/kvm/vcpu.c; each entry shows the source line number, the matching line, and its enclosing function.
56 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; in kvm_riscv_reset_vcpu()
57 struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr; in kvm_riscv_reset_vcpu()
58 struct kvm_cpu_context *cntx = &vcpu->arch.guest_context; in kvm_riscv_reset_vcpu()
59 struct kvm_cpu_context *reset_cntx = &vcpu->arch.guest_reset_context; in kvm_riscv_reset_vcpu()
72 vcpu->arch.last_exit_cpu = -1; in kvm_riscv_reset_vcpu()
76 spin_lock(&vcpu->arch.reset_cntx_lock); in kvm_riscv_reset_vcpu()
78 spin_unlock(&vcpu->arch.reset_cntx_lock); in kvm_riscv_reset_vcpu()
88 bitmap_zero(vcpu->arch.irqs_pending, KVM_RISCV_VCPU_NR_IRQS); in kvm_riscv_reset_vcpu()
89 bitmap_zero(vcpu->arch.irqs_pending_mask, KVM_RISCV_VCPU_NR_IRQS); in kvm_riscv_reset_vcpu()
93 vcpu->arch.hfence_head = 0; in kvm_riscv_reset_vcpu()
94 vcpu->arch.hfence_tail = 0; in kvm_riscv_reset_vcpu()
95 memset(vcpu->arch.hfence_queue, 0, sizeof(vcpu->arch.hfence_queue)); in kvm_riscv_reset_vcpu()
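The matches above are from kvm_riscv_reset_vcpu(): the live guest CSRs and context are rebuilt from pristine reset copies (the context copy under reset_cntx_lock), the pending-interrupt bitmaps are cleared, and the H-fence queue is drained. Below is a minimal user-space model of the copy-under-lock part; the types and names are simplified stand-ins, not the kernel's API.

    #include <pthread.h>
    #include <string.h>

    struct cpu_context { unsigned long regs[32]; unsigned long sepc; };

    struct vcpu_model {
            struct cpu_context guest_ctx;        /* live state */
            struct cpu_context guest_reset_ctx;  /* pristine copy, seeded at create time */
            pthread_mutex_t reset_ctx_lock;      /* plays the role of reset_cntx_lock */
            int last_exit_cpu;
    };

    /* Reset = restore the live context from the pristine copy while
     * holding the lock that guards writers of that copy. */
    static void vcpu_reset(struct vcpu_model *v)
    {
            pthread_mutex_lock(&v->reset_ctx_lock);
            memcpy(&v->guest_ctx, &v->guest_reset_ctx, sizeof(v->guest_ctx));
            pthread_mutex_unlock(&v->reset_ctx_lock);
            v->last_exit_cpu = -1;   /* "never ran": no host CPU holds valid TLB state */
    }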
114 struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr; in kvm_arch_vcpu_create()
116 spin_lock_init(&vcpu->arch.mp_state_lock); in kvm_arch_vcpu_create()
119 vcpu->arch.ran_atleast_once = false; in kvm_arch_vcpu_create()
120 vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO; in kvm_arch_vcpu_create()
121 bitmap_zero(vcpu->arch.isa, RISCV_ISA_EXT_MAX); in kvm_arch_vcpu_create()
126 /* Setup vendor, arch, and implementation details */ in kvm_arch_vcpu_create()
127 vcpu->arch.mvendorid = sbi_get_mvendorid(); in kvm_arch_vcpu_create()
128 vcpu->arch.marchid = sbi_get_marchid(); in kvm_arch_vcpu_create()
129 vcpu->arch.mimpid = sbi_get_mimpid(); in kvm_arch_vcpu_create()
132 spin_lock_init(&vcpu->arch.hfence_lock); in kvm_arch_vcpu_create()
135 spin_lock_init(&vcpu->arch.reset_cntx_lock); in kvm_arch_vcpu_create()
137 spin_lock(&vcpu->arch.reset_cntx_lock); in kvm_arch_vcpu_create()
138 cntx = &vcpu->arch.guest_reset_context; in kvm_arch_vcpu_create()
144 spin_unlock(&vcpu->arch.reset_cntx_lock); in kvm_arch_vcpu_create()
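These matches are create-time setup in kvm_arch_vcpu_create(): locks are initialized before anything can take them, the ISA bitmap starts empty, the machine IDs (mvendorid/marchid/mimpid) are seeded from the host via SBI so the guest sees the host's values unless userspace overrides them, and the reset context is populated under its lock before the first reset. Continuing the model above; vcpu_create() is an illustrative stand-in.

    /* Create-time ordering: init the lock, seed the pristine copy under
     * it, then do the first reset from that copy. */
    static void vcpu_create(struct vcpu_model *v)
    {
            pthread_mutex_init(&v->reset_ctx_lock, NULL);

            pthread_mutex_lock(&v->reset_ctx_lock);
            memset(&v->guest_reset_ctx, 0, sizeof(v->guest_reset_ctx));
            /* the kernel also sets up sstatus, stack, entry point here; elided */
            pthread_mutex_unlock(&v->reset_ctx_lock);

            vcpu_reset(v);
    }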
197 kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); in kvm_arch_vcpu_destroy()
221 !kvm_riscv_vcpu_stopped(vcpu) && !vcpu->arch.pause); in kvm_arch_vcpu_runnable()
231 return (vcpu->arch.guest_context.sstatus & SR_SPP) ? true : false; in kvm_arch_vcpu_in_kernel()
237 return vcpu->arch.guest_context.sepc; in kvm_arch_vcpu_get_ip()
354 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; in kvm_riscv_vcpu_flush_interrupts()
357 if (READ_ONCE(vcpu->arch.irqs_pending_mask[0])) { in kvm_riscv_vcpu_flush_interrupts()
358 mask = xchg_acquire(&vcpu->arch.irqs_pending_mask[0], 0); in kvm_riscv_vcpu_flush_interrupts()
359 val = READ_ONCE(vcpu->arch.irqs_pending[0]) & mask; in kvm_riscv_vcpu_flush_interrupts()
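kvm_riscv_vcpu_flush_interrupts() consumes irqs_pending_mask with an acquire exchange: the mask records which bits changed since the last flush, and only those bits of irqs_pending are folded into the guest-visible hvip value. A runnable C11 model of that protocol follows; the names are simplified stand-ins.

    #include <stdatomic.h>

    static _Atomic unsigned long pending;   /* models irqs_pending[0] */
    static _Atomic unsigned long changed;   /* models irqs_pending_mask[0] */
    static unsigned long hvip;              /* models csr->hvip */

    /* Consumer: claim all published changes in one atomic step.  A racing
     * producer either lands in this flush or is caught by the next one. */
    static void flush_interrupts(void)
    {
            unsigned long mask, val;

            if (atomic_load(&changed)) {
                    mask = atomic_exchange_explicit(&changed, 0,
                                                    memory_order_acquire);
                    val = atomic_load(&pending) & mask;
                    hvip &= ~mask;   /* drop the old state of the changed bits */
                    hvip |= val;     /* install their new state */
            }
    }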
372 struct kvm_vcpu_arch *v = &vcpu->arch; in kvm_riscv_vcpu_sync_interrupts()
373 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; in kvm_riscv_vcpu_sync_interrupts()
420 set_bit(irq, vcpu->arch.irqs_pending); in kvm_riscv_vcpu_set_interrupt()
422 set_bit(irq, vcpu->arch.irqs_pending_mask); in kvm_riscv_vcpu_set_interrupt()
443 clear_bit(irq, vcpu->arch.irqs_pending); in kvm_riscv_vcpu_unset_interrupt()
445 set_bit(irq, vcpu->arch.irqs_pending_mask); in kvm_riscv_vcpu_unset_interrupt()
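Note the asymmetry in the two functions above: set_interrupt() sets the bit in irqs_pending and unset_interrupt() clears it, but both set the corresponding bit in irqs_pending_mask, because the mask means "changed, re-sync to hvip", not "pending". The producer side of the model sketched above:

    /* Producers: update the pending state, then publish the change so the
     * next flush re-syncs that bit into hvip. */
    static void set_irq(int irq)
    {
            atomic_fetch_or(&pending, 1UL << irq);
            atomic_fetch_or(&changed, 1UL << irq);    /* both paths set this */
    }

    static void unset_irq(int irq)
    {
            atomic_fetch_and(&pending, ~(1UL << irq));
            atomic_fetch_or(&changed, 1UL << irq);    /* "changed", not "raised" */
    }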
454 ie = ((vcpu->arch.guest_csr.vsie & VSIP_VALID_MASK) in kvm_riscv_vcpu_has_interrupts()
456 ie |= vcpu->arch.guest_csr.vsie & ~IRQ_LOCAL_MASK & in kvm_riscv_vcpu_has_interrupts()
458 if (READ_ONCE(vcpu->arch.irqs_pending[0]) & ie) in kvm_riscv_vcpu_has_interrupts()
467 WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED); in __kvm_riscv_vcpu_power_off()
474 spin_lock(&vcpu->arch.mp_state_lock); in kvm_riscv_vcpu_power_off()
476 spin_unlock(&vcpu->arch.mp_state_lock); in kvm_riscv_vcpu_power_off()
481 WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_RUNNABLE); in __kvm_riscv_vcpu_power_on()
487 spin_lock(&vcpu->arch.mp_state_lock); in kvm_riscv_vcpu_power_on()
489 spin_unlock(&vcpu->arch.mp_state_lock); in kvm_riscv_vcpu_power_on()
494 return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_STOPPED; in kvm_riscv_vcpu_stopped()
500 *mp_state = READ_ONCE(vcpu->arch.mp_state); in kvm_arch_vcpu_ioctl_get_mpstate()
510 spin_lock(&vcpu->arch.mp_state_lock); in kvm_arch_vcpu_ioctl_set_mpstate()
514 WRITE_ONCE(vcpu->arch.mp_state, *mp_state); in kvm_arch_vcpu_ioctl_set_mpstate()
523 spin_unlock(&vcpu->arch.mp_state_lock); in kvm_arch_vcpu_ioctl_set_mpstate()
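The MP-state matches show the locking convention: writers take mp_state_lock and store with WRITE_ONCE(), while readers such as kvm_riscv_vcpu_stopped() do a lockless READ_ONCE(). The lock serializes writers; the ONCE accessors keep the lockless read tear-free. A C11 model:

    #include <pthread.h>
    #include <stdatomic.h>

    enum mp { MP_RUNNABLE, MP_STOPPED };

    static pthread_mutex_t mp_lock = PTHREAD_MUTEX_INITIALIZER;
    static _Atomic enum mp mp_state = MP_RUNNABLE;

    /* Writer: the lock orders compound updates; the atomic store models
     * WRITE_ONCE(), so readers never observe a torn value. */
    static void vcpu_power_off(void)
    {
            pthread_mutex_lock(&mp_lock);
            atomic_store(&mp_state, MP_STOPPED);
            pthread_mutex_unlock(&mp_lock);
    }

    /* Lockless reader, as in kvm_riscv_vcpu_stopped(): models READ_ONCE(). */
    static int vcpu_stopped(void)
    {
            return atomic_load(&mp_state) == MP_STOPPED;
    }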
533 vcpu->arch.cfg.hedeleg &= ~BIT(EXC_BREAKPOINT); in kvm_arch_vcpu_ioctl_set_guest_debug()
536 vcpu->arch.cfg.hedeleg |= BIT(EXC_BREAKPOINT); in kvm_arch_vcpu_ioctl_set_guest_debug()
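The two hedeleg matches are the guest-debug toggle: enabling debug clears the breakpoint bit from the per-vCPU exception-delegation config so breakpoint traps reach KVM instead of the guest; disabling restores delegation. A sketch of the bit toggle (EXC_BREAKPOINT is RISC-V exception cause 3; the function name is illustrative):

    #define EXC_BREAKPOINT 3UL   /* RISC-V cause code for breakpoint */

    static void toggle_bp_delegation(unsigned long *hedeleg, int debug_on)
    {
            if (debug_on)
                    *hedeleg &= ~(1UL << EXC_BREAKPOINT);  /* trap to host/KVM */
            else
                    *hedeleg |= (1UL << EXC_BREAKPOINT);   /* delegate to guest */
    }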
544 const unsigned long *isa = vcpu->arch.isa; in kvm_riscv_vcpu_setup_config()
545 struct kvm_vcpu_config *cfg = &vcpu->arch.cfg; in kvm_riscv_vcpu_setup_config()
581 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; in kvm_arch_vcpu_load()
582 struct kvm_vcpu_config *cfg = &vcpu->arch.cfg; in kvm_arch_vcpu_load()
629 kvm_riscv_vcpu_host_fp_save(&vcpu->arch.host_context); in kvm_arch_vcpu_load()
630 kvm_riscv_vcpu_guest_fp_restore(&vcpu->arch.guest_context, in kvm_arch_vcpu_load()
631 vcpu->arch.isa); in kvm_arch_vcpu_load()
632 kvm_riscv_vcpu_host_vector_save(&vcpu->arch.host_context); in kvm_arch_vcpu_load()
633 kvm_riscv_vcpu_guest_vector_restore(&vcpu->arch.guest_context, in kvm_arch_vcpu_load()
634 vcpu->arch.isa); in kvm_arch_vcpu_load()
646 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; in kvm_arch_vcpu_put()
652 kvm_riscv_vcpu_guest_fp_save(&vcpu->arch.guest_context, in kvm_arch_vcpu_put()
653 vcpu->arch.isa); in kvm_arch_vcpu_put()
654 kvm_riscv_vcpu_host_fp_restore(&vcpu->arch.host_context); in kvm_arch_vcpu_put()
657 kvm_riscv_vcpu_guest_vector_save(&vcpu->arch.guest_context, in kvm_arch_vcpu_put()
658 vcpu->arch.isa); in kvm_arch_vcpu_put()
659 kvm_riscv_vcpu_host_vector_restore(&vcpu->arch.host_context); in kvm_arch_vcpu_put()
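kvm_arch_vcpu_load() and kvm_arch_vcpu_put() must mirror each other exactly: load saves host FP/vector state and restores the guest's (gated by the vCPU's ISA bitmap), and put undoes it in the opposite direction, or host state leaks into the guest and vice versa. A minimal model of that pairing with a fake register file:

    struct fp_state { unsigned long f[64]; };

    static struct fp_state live_regs;   /* stands in for the real FP registers */

    static void fp_save(struct fp_state *dst)          { *dst = live_regs; }
    static void fp_restore(const struct fp_state *src) { live_regs = *src; }

    static void model_vcpu_load(struct fp_state *host, struct fp_state *guest)
    {
            fp_save(host);      /* host registers -> host context */
            fp_restore(guest);  /* guest context  -> registers */
    }

    static void model_vcpu_put(struct fp_state *host, struct fp_state *guest)
    {
            fp_save(guest);     /* exact mirror of model_vcpu_load() */
            fp_restore(host);
    }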
693 (!kvm_riscv_vcpu_stopped(vcpu)) && (!vcpu->arch.pause), in kvm_riscv_check_vcpu_requests()
697 if (kvm_riscv_vcpu_stopped(vcpu) || vcpu->arch.pause) { in kvm_riscv_check_vcpu_requests()
735 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; in kvm_riscv_update_hvip()
743 struct kvm_vcpu_smstateen_csr *smcsr = &vcpu->arch.smstateen_csr; in kvm_riscv_vcpu_swap_in_guest_state()
744 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; in kvm_riscv_vcpu_swap_in_guest_state()
745 struct kvm_vcpu_config *cfg = &vcpu->arch.cfg; in kvm_riscv_vcpu_swap_in_guest_state()
747 vcpu->arch.host_scounteren = csr_swap(CSR_SCOUNTEREN, csr->scounteren); in kvm_riscv_vcpu_swap_in_guest_state()
748 vcpu->arch.host_senvcfg = csr_swap(CSR_SENVCFG, csr->senvcfg); in kvm_riscv_vcpu_swap_in_guest_state()
751 vcpu->arch.host_sstateen0 = csr_swap(CSR_SSTATEEN0, in kvm_riscv_vcpu_swap_in_guest_state()
757 struct kvm_vcpu_smstateen_csr *smcsr = &vcpu->arch.smstateen_csr; in kvm_riscv_vcpu_swap_in_host_state()
758 struct kvm_vcpu_csr *csr = &vcpu->arch.guest_csr; in kvm_riscv_vcpu_swap_in_host_state()
759 struct kvm_vcpu_config *cfg = &vcpu->arch.cfg; in kvm_riscv_vcpu_swap_in_host_state()
761 csr->scounteren = csr_swap(CSR_SCOUNTEREN, vcpu->arch.host_scounteren); in kvm_riscv_vcpu_swap_in_host_state()
762 csr->senvcfg = csr_swap(CSR_SENVCFG, vcpu->arch.host_senvcfg); in kvm_riscv_vcpu_swap_in_host_state()
766 vcpu->arch.host_sstateen0); in kvm_riscv_vcpu_swap_in_host_state()
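The two swap helpers rely on csr_swap(), which writes a new CSR value and returns the old one in a single read-modify-write, so one call both installs guest state and stashes host state. On VM-entry the host value is captured; on VM-exit the same swap runs in reverse, leaving the guest's (possibly modified) value back in the software copy. A model of the round trip, using scounteren as the example register:

    /* Models csr_swap(): install a new value, get the previous one back. */
    static unsigned long swap_reg(unsigned long *reg, unsigned long val)
    {
            unsigned long old = *reg;
            *reg = val;
            return old;
    }

    static unsigned long hw_scounteren;   /* stands in for CSR_SCOUNTEREN */

    static void enter_guest(unsigned long *host_copy, unsigned long *guest_copy)
    {
            *host_copy = swap_reg(&hw_scounteren, *guest_copy);   /* entry */
    }

    static void exit_guest(unsigned long *host_copy, unsigned long *guest_copy)
    {
            *guest_copy = swap_reg(&hw_scounteren, *host_copy);   /* exit: mirror */
    }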
780 struct kvm_cpu_context *gcntx = &vcpu->arch.guest_context; in kvm_riscv_vcpu_enter_exit()
781 struct kvm_cpu_context *hcntx = &vcpu->arch.host_context; in kvm_riscv_vcpu_enter_exit()
820 __kvm_riscv_nacl_switch_to(&vcpu->arch, SBI_EXT_NACL, in kvm_riscv_vcpu_enter_exit()
839 __kvm_riscv_switch_to(&vcpu->arch); in kvm_riscv_vcpu_enter_exit()
851 vcpu->arch.last_exit_cpu = vcpu->cpu; in kvm_riscv_vcpu_enter_exit()
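kvm_riscv_vcpu_enter_exit() picks the world-switch path: the SBI nested-acceleration (NACL) switcher when the host SBI provides it, otherwise the plain assembly switcher, and on return it records which physical CPU the vCPU last ran on. That record lets KVM skip the local G-stage TLB sanitize when the vCPU re-runs on the same host CPU. A sketch of the flow, reusing struct vcpu_model from the first sketch; both switch functions are stand-ins:

    static int nacl_available;   /* whether the host SBI offers NACL */

    static void nacl_switch(struct vcpu_model *v)  { (void)v; /* SBI-assisted switch */ }
    static void plain_switch(struct vcpu_model *v) { (void)v; /* bare HS<->VS switch */ }

    static void model_enter_exit(struct vcpu_model *v, int this_cpu)
    {
            if (nacl_available)
                    nacl_switch(v);    /* models __kvm_riscv_nacl_switch_to() */
            else
                    plain_switch(v);   /* models __kvm_riscv_switch_to() */

            /* Remember where we exited: a later run on the same CPU can
             * skip the G-stage TLB flush; a different CPU cannot. */
            v->last_exit_cpu = this_cpu;
    }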
862 if (!vcpu->arch.ran_atleast_once) in kvm_arch_vcpu_ioctl_run()
866 vcpu->arch.ran_atleast_once = true; in kvm_arch_vcpu_ioctl_run()
945 if (kvm_riscv_gstage_vmid_ver_changed(&vcpu->kvm->arch.vmid) || in kvm_arch_vcpu_ioctl_run()