References to struct kvm_cpu_context in linux-6.14.4

/linux-6.14.4/arch/riscv/include/asm/
kvm_vcpu_fp.h
     15  struct kvm_cpu_context;
     18  void __kvm_riscv_fp_f_save(struct kvm_cpu_context *context);
     19  void __kvm_riscv_fp_f_restore(struct kvm_cpu_context *context);
     20  void __kvm_riscv_fp_d_save(struct kvm_cpu_context *context);
     21  void __kvm_riscv_fp_d_restore(struct kvm_cpu_context *context);
     24  void kvm_riscv_vcpu_guest_fp_save(struct kvm_cpu_context *cntx,
     26  void kvm_riscv_vcpu_guest_fp_restore(struct kvm_cpu_context *cntx,
     28  void kvm_riscv_vcpu_host_fp_save(struct kvm_cpu_context *cntx);
     29  void kvm_riscv_vcpu_host_fp_restore(struct kvm_cpu_context *cntx);
     34  static inline void kvm_riscv_vcpu_guest_fp_save(struct kvm_cpu_context *cntx,    in kvm_riscv_vcpu_guest_fp_save()
     [all …]
kvm_vcpu_vector.h
     19  static __always_inline void __kvm_riscv_vector_save(struct kvm_cpu_context *context)    in __kvm_riscv_vector_save()
     24  static __always_inline void __kvm_riscv_vector_restore(struct kvm_cpu_context *context)    in __kvm_riscv_vector_restore()
     30  void kvm_riscv_vcpu_guest_vector_save(struct kvm_cpu_context *cntx,
     32  void kvm_riscv_vcpu_guest_vector_restore(struct kvm_cpu_context *cntx,
     34  void kvm_riscv_vcpu_host_vector_save(struct kvm_cpu_context *cntx);
     35  void kvm_riscv_vcpu_host_vector_restore(struct kvm_cpu_context *cntx);
     37  struct kvm_cpu_context *cntx);
     41  struct kvm_cpu_context;
     47  static inline void kvm_riscv_vcpu_guest_vector_save(struct kvm_cpu_context *cntx,    in kvm_riscv_vcpu_guest_vector_save()
     52  static inline void kvm_riscv_vcpu_guest_vector_restore(struct kvm_cpu_context *cntx,    in kvm_riscv_vcpu_guest_vector_restore()
     [all …]
kvm_host.h
    132  struct kvm_cpu_context {
    219  struct kvm_cpu_context host_context;
    222  struct kvm_cpu_context guest_context;
    231  struct kvm_cpu_context guest_reset_context;
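The three kvm_host.h fields above show the pattern: one context type, instantiated per vCPU for the host's saved state, the guest's live state, and a reset template. A minimal compilable sketch of that arrangement, with hypothetical, heavily trimmed fields (the real struct also carries CSRs and FP/vector state):

```c
#include <stdint.h>

struct kvm_cpu_context_sketch {
	uint64_t gprs[32]; /* general-purpose register file */
	uint64_t pc;       /* program counter */
};

struct kvm_vcpu_arch_sketch {
	struct kvm_cpu_context_sketch host_context;        /* saved on guest entry */
	struct kvm_cpu_context_sketch guest_context;       /* live guest state */
	struct kvm_cpu_context_sketch guest_reset_context; /* template for vCPU reset */
};

/* Resetting a vCPU reduces to copying the reset template over the live state. */
void vcpu_reset_sketch(struct kvm_vcpu_arch_sketch *arch)
{
	arch->guest_context = arch->guest_reset_context;
}
```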
/linux-6.14.4/arch/riscv/kernel/
asm-offsets.c
    219  OFFSET(KVM_ARCH_FP_F_F0, kvm_cpu_context, fp.f.f[0]);    in asm_offsets()
    220  OFFSET(KVM_ARCH_FP_F_F1, kvm_cpu_context, fp.f.f[1]);    in asm_offsets()
    221  OFFSET(KVM_ARCH_FP_F_F2, kvm_cpu_context, fp.f.f[2]);    in asm_offsets()
    222  OFFSET(KVM_ARCH_FP_F_F3, kvm_cpu_context, fp.f.f[3]);    in asm_offsets()
    223  OFFSET(KVM_ARCH_FP_F_F4, kvm_cpu_context, fp.f.f[4]);    in asm_offsets()
    224  OFFSET(KVM_ARCH_FP_F_F5, kvm_cpu_context, fp.f.f[5]);    in asm_offsets()
    225  OFFSET(KVM_ARCH_FP_F_F6, kvm_cpu_context, fp.f.f[6]);    in asm_offsets()
    226  OFFSET(KVM_ARCH_FP_F_F7, kvm_cpu_context, fp.f.f[7]);    in asm_offsets()
    227  OFFSET(KVM_ARCH_FP_F_F8, kvm_cpu_context, fp.f.f[8]);    in asm_offsets()
    228  OFFSET(KVM_ARCH_FP_F_F9, kvm_cpu_context, fp.f.f[9]);    in asm_offsets()
    [all …]
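These OFFSET() entries exist because the low-level FP save/restore routines (the __kvm_riscv_fp_* functions above) are written in assembly, which cannot evaluate offsetof() itself; asm-offsets.c turns struct layouts into plain numeric constants at build time. A userspace illustration of the underlying computation, with hypothetical, simplified types:

```c
#include <stddef.h>
#include <stdio.h>

struct fp_f_sketch {
	unsigned int f[32]; /* single-precision register file */
	unsigned int fcsr;
};

struct kvm_cpu_context_sketch {
	unsigned long zero, ra, sp; /* leading GPR fields shift the FP offsets */
	struct fp_f_sketch fp_f;
};

int main(void)
{
	/* OFFSET(KVM_ARCH_FP_F_F0, kvm_cpu_context, fp.f.f[0]) boils down to
	 * this offsetof(); the assembly save path then loads/stores at
	 * base + constant. */
	printf("KVM_ARCH_FP_F_F0 = %zu\n",
	       offsetof(struct kvm_cpu_context_sketch, fp_f.f[0]));
	return 0;
}
```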
/linux-6.14.4/arch/arm64/kvm/hyp/nvhe/
hyp-main.c
     25  void __kvm_hyp_host_forward_smc(struct kvm_cpu_context *host_ctxt);
    165  static void handle___pkvm_vcpu_load(struct kvm_cpu_context *host_ctxt)    in handle___pkvm_vcpu_load()
    186  static void handle___pkvm_vcpu_put(struct kvm_cpu_context *host_ctxt)    in handle___pkvm_vcpu_put()
    198  static void handle___kvm_vcpu_run(struct kvm_cpu_context *host_ctxt)    in handle___kvm_vcpu_run()
    248  static void handle___pkvm_host_share_guest(struct kvm_cpu_context *host_ctxt)    in handle___pkvm_host_share_guest()
    272  static void handle___pkvm_host_unshare_guest(struct kvm_cpu_context *host_ctxt)    in handle___pkvm_host_unshare_guest()
    292  static void handle___pkvm_host_relax_perms_guest(struct kvm_cpu_context *host_ctxt)    in handle___pkvm_host_relax_perms_guest()
    311  static void handle___pkvm_host_wrprotect_guest(struct kvm_cpu_context *host_ctxt)    in handle___pkvm_host_wrprotect_guest()
    331  static void handle___pkvm_host_test_clear_young_guest(struct kvm_cpu_context *host_ctxt)    in handle___pkvm_host_test_clear_young_guest()
    352  static void handle___pkvm_host_mkyoung_guest(struct kvm_cpu_context *host_ctxt)    in handle___pkvm_host_mkyoung_guest()
    [all …]
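Every handle___*() above takes the host's trapped register file as a kvm_cpu_context: the host kernel issues an HVC with arguments in its GPRs, the hypervisor saves those GPRs into the context, the handler reads its arguments out and writes the result back before the return to EL1. A simplified sketch of that convention (helper names hypothetical; the real code wraps this in DECLARE_REG()/cpu_reg()):

```c
#include <stdint.h>

struct kvm_cpu_context_sketch {
	uint64_t regs[31]; /* x0..x30 as saved on the HVC trap */
};

#define cpu_reg_sketch(ctxt, r) ((ctxt)->regs[(r)])

void handle_example_hcall(struct kvm_cpu_context_sketch *host_ctxt)
{
	/* arguments were placed in x1/x2 by the host before the HVC */
	uint64_t arg1 = cpu_reg_sketch(host_ctxt, 1);
	uint64_t arg2 = cpu_reg_sketch(host_ctxt, 2);

	/* the return value travels back to the host in a saved GPR */
	cpu_reg_sketch(host_ctxt, 1) = arg1 + arg2;
}
```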
psci-relay.c
     20  void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
     72  static unsigned long psci_forward(struct kvm_cpu_context *host_ctxt)    in psci_forward()
    107  static int psci_cpu_on(u64 func_id, struct kvm_cpu_context *host_ctxt)    in psci_cpu_on()
    151  static int psci_cpu_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt)    in psci_cpu_suspend()
    179  static int psci_system_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt)    in psci_system_suspend()
    206  struct kvm_cpu_context *host_ctxt;    in __kvm_host_psci_cpu_entry()
    227  static unsigned long psci_0_1_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)    in psci_0_1_handler()
    239  static unsigned long psci_0_2_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)    in psci_0_2_handler()
    265  static unsigned long psci_1_0_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)    in psci_1_0_handler()
    281  bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt, u32 func_id)    in kvm_host_psci_handler()
sysreg-sr.c
     21  void __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt)    in __sysreg_save_state_nvhe()
     29  void __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt)    in __sysreg_restore_state_nvhe()
switch.c
     33  DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
    106  struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;    in __activate_traps()
    285  struct kvm_cpu_context *host_ctxt;    in __kvm_vcpu_run()
    286  struct kvm_cpu_context *guest_ctxt;    in __kvm_vcpu_run()
    402  struct kvm_cpu_context *host_ctxt;    in hyp_panic()
ffa.c
     97  static void ffa_set_retval(struct kvm_cpu_context *ctxt,    in ffa_set_retval()
    192  struct kvm_cpu_context *ctxt)    in do_ffa_rxtx_map()
    271  struct kvm_cpu_context *ctxt)    in do_ffa_rxtx_unmap()
    372  struct kvm_cpu_context *ctxt)    in do_ffa_mem_frag_tx()
    431  struct kvm_cpu_context *ctxt)    in __do_ffa_mem_xfer()
    525  struct kvm_cpu_context *ctxt)    in do_ffa_mem_reclaim()
    638  struct kvm_cpu_context *ctxt)    in do_ffa_features()
    704  struct kvm_cpu_context *ctxt)    in do_ffa_version()
    744  struct kvm_cpu_context *ctxt)    in do_ffa_part_get()
    792  bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt, u32 func_id)    in kvm_host_ffa_handler()
tlb.c
     24  struct kvm_cpu_context *host_ctxt;    in enter_vmid_context()
    121  struct kvm_cpu_context *host_ctxt;    in exit_vmid_context()
/linux-6.14.4/arch/arm64/kvm/hyp/include/hyp/
sysreg-sr.h
     19  static inline bool ctxt_has_s1poe(struct kvm_cpu_context *ctxt);
     21  static inline struct kvm_vcpu *ctxt_to_vcpu(struct kvm_cpu_context *ctxt)    in ctxt_to_vcpu()
     31  static inline bool ctxt_is_guest(struct kvm_cpu_context *ctxt)    in ctxt_is_guest()
     36  static inline u64 *ctxt_mdscr_el1(struct kvm_cpu_context *ctxt)    in ctxt_mdscr_el1()
     46  static inline void __sysreg_save_common_state(struct kvm_cpu_context *ctxt)    in __sysreg_save_common_state()
     55  static inline void __sysreg_save_user_state(struct kvm_cpu_context *ctxt)    in __sysreg_save_user_state()
     61  static inline bool ctxt_has_mte(struct kvm_cpu_context *ctxt)    in ctxt_has_mte()
     68  static inline bool ctxt_has_s1pie(struct kvm_cpu_context *ctxt)    in ctxt_has_s1pie()
     79  static inline bool ctxt_has_tcrx(struct kvm_cpu_context *ctxt)    in ctxt_has_tcrx()
     90  static inline bool ctxt_has_s1poe(struct kvm_cpu_context *ctxt)    in ctxt_has_s1poe()
     [all …]
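ctxt_to_vcpu() above can recover the owning vCPU because a guest's kvm_cpu_context is embedded inside struct kvm_vcpu; pointer arithmetic walks back from the member to its container. A self-contained sketch of that trick with simplified, hypothetical types:

```c
#include <stddef.h>

#define container_of_sketch(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct kvm_cpu_context_sketch {
	unsigned long regs[31];
};

struct kvm_vcpu_sketch {
	int vcpu_id;
	struct kvm_cpu_context_sketch ctxt; /* embedded guest context */
};

struct kvm_vcpu_sketch *ctxt_to_vcpu_sketch(struct kvm_cpu_context_sketch *ctxt)
{
	/* subtract the member offset to get back to the enclosing vcpu */
	return container_of_sketch(ctxt, struct kvm_vcpu_sketch, ctxt);
}
```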
debug-sr.h
    107  struct kvm_cpu_context *ctxt)    in __debug_save_state()
    121  struct kvm_cpu_context *ctxt)    in __debug_restore_state()
    136  struct kvm_cpu_context *host_ctxt;    in __debug_switch_to_guest_common()
    137  struct kvm_cpu_context *guest_ctxt;    in __debug_switch_to_guest_common()
    155  struct kvm_cpu_context *host_ctxt;    in __debug_switch_to_host_common()
    156  struct kvm_cpu_context *guest_ctxt;    in __debug_switch_to_host_common()
switch.h
    152  struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);    in __activate_traps_hfgxtr()
    188  struct kvm_cpu_context *hctxt = host_data_ptr(host_ctxt);    in __deactivate_traps_hfgxtr()
    248  struct kvm_cpu_context *hctxt;    in __activate_traps_common()
    285  struct kvm_cpu_context *hctxt;    in __deactivate_traps_common()
/linux-6.14.4/arch/arm64/include/asm/
kvm_hyp.h
     15  DECLARE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
     93  void __sysreg_save_state_nvhe(struct kvm_cpu_context *ctxt);
     94  void __sysreg_restore_state_nvhe(struct kvm_cpu_context *ctxt);
     98  void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt);
     99  void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt);
    100  void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt);
    101  void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt);
    119  bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt, u32 func_id);
    122  void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,
    131  void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
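kvm_hyp_ctxt shows the usual per-CPU split: DECLARE_PER_CPU() in kvm_hyp.h (line 15 above) with matching DEFINE_PER_CPU() in both the VHE and nVHE switch.c files, so each physical CPU owns a private hypervisor context. A plain-C stand-in for the pattern, purely illustrative (NR_CPUS_SKETCH and the array indexing are assumptions of the sketch, not the kernel's per-CPU machinery):

```c
#include <stdint.h>

struct kvm_cpu_context_sketch {
	uint64_t regs[31];
};

enum { NR_CPUS_SKETCH = 4 };

/* header side: every user sees the declaration -> DECLARE_PER_CPU() */
extern struct kvm_cpu_context_sketch kvm_hyp_ctxt_sketch[NR_CPUS_SKETCH];

/* exactly one translation unit provides storage -> DEFINE_PER_CPU() */
struct kvm_cpu_context_sketch kvm_hyp_ctxt_sketch[NR_CPUS_SKETCH];

/* indexed lookup stands in for this_cpu_ptr() */
struct kvm_cpu_context_sketch *this_cpu_ctxt_sketch(int cpu)
{
	return &kvm_hyp_ctxt_sketch[cpu];
}
```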
/linux-6.14.4/arch/riscv/kvm/
vcpu_vector.c
     22  struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;    in kvm_riscv_vcpu_vector_reset()
     34  static void kvm_riscv_vcpu_vector_clean(struct kvm_cpu_context *cntx)    in kvm_riscv_vcpu_vector_clean()
     40  void kvm_riscv_vcpu_guest_vector_save(struct kvm_cpu_context *cntx,    in kvm_riscv_vcpu_guest_vector_save()
     50  void kvm_riscv_vcpu_guest_vector_restore(struct kvm_cpu_context *cntx,    in kvm_riscv_vcpu_guest_vector_restore()
     60  void kvm_riscv_vcpu_host_vector_save(struct kvm_cpu_context *cntx)    in kvm_riscv_vcpu_host_vector_save()
     67  void kvm_riscv_vcpu_host_vector_restore(struct kvm_cpu_context *cntx)    in kvm_riscv_vcpu_host_vector_restore()
     74  struct kvm_cpu_context *cntx)    in kvm_riscv_vcpu_alloc_vector_context()
    100  struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;    in kvm_riscv_vcpu_vreg_addr()
    181  struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;    in kvm_riscv_vcpu_set_reg_vector()
vcpu_fp.c
     19  struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;    in kvm_riscv_vcpu_fp_reset()
     29  static void kvm_riscv_vcpu_fp_clean(struct kvm_cpu_context *cntx)    in kvm_riscv_vcpu_fp_clean()
     35  void kvm_riscv_vcpu_guest_fp_save(struct kvm_cpu_context *cntx,    in kvm_riscv_vcpu_guest_fp_save()
     47  void kvm_riscv_vcpu_guest_fp_restore(struct kvm_cpu_context *cntx,    in kvm_riscv_vcpu_guest_fp_restore()
     59  void kvm_riscv_vcpu_host_fp_save(struct kvm_cpu_context *cntx)    in kvm_riscv_vcpu_host_fp_save()
     68  void kvm_riscv_vcpu_host_fp_restore(struct kvm_cpu_context *cntx)    in kvm_riscv_vcpu_host_fp_restore()
     81  struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;    in kvm_riscv_vcpu_get_reg_fp()
    126  struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;    in kvm_riscv_vcpu_set_reg_fp()
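The guest_fp_save/restore pair above is lazy: it touches the FP register file only when the sstatus.FS field reports dirty state, and picks the D- or F-extension low-level routine (the __kvm_riscv_fp_* assembly above) to match the guest's ISA. A sketch of the save-side logic, with hypothetical constants and helpers standing in for the kernel's SR_FS bits and ISA checks:

```c
#include <stdbool.h>

#define SR_FS_SKETCH       0x6000UL /* sstatus.FS field mask */
#define SR_FS_DIRTY_SKETCH 0x6000UL /* FS == Dirty */

struct kvm_cpu_context_sketch {
	unsigned long sstatus;
	/* ... FP register fields elided ... */
};

extern bool isa_has_d_sketch(void);
extern bool isa_has_f_sketch(void);
extern void fp_d_save_sketch(struct kvm_cpu_context_sketch *cntx);
extern void fp_f_save_sketch(struct kvm_cpu_context_sketch *cntx);

void guest_fp_save_sketch(struct kvm_cpu_context_sketch *cntx)
{
	/* skip the expensive register dump while the FP state is clean */
	if ((cntx->sstatus & SR_FS_SKETCH) != SR_FS_DIRTY_SKETCH)
		return;

	if (isa_has_d_sketch())
		fp_d_save_sketch(cntx); /* double-precision file */
	else if (isa_has_f_sketch())
		fp_f_save_sketch(cntx); /* single-precision file */

	cntx->sstatus &= ~SR_FS_SKETCH; /* drop FS back toward Clean/Off */
}
```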
vcpu_sbi_hsm.c
     18  struct kvm_cpu_context *reset_cntx;    in kvm_sbi_hsm_vcpu_start()
     19  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;    in kvm_sbi_hsm_vcpu_start()
     76  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;    in kvm_sbi_hsm_vcpu_get_status()
     95  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;    in kvm_sbi_ext_hsm_handler()
vcpu_sbi_replace.c
     20  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;    in kvm_sbi_ext_time_handler()
     51  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;    in kvm_sbi_ext_ipi_handler()
     95  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;    in kvm_sbi_ext_rfence_handler()
    150  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;    in kvm_sbi_ext_srst_handler()
    192  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;    in kvm_sbi_ext_dbcn_handler()
vcpu_sbi_system.c
     15  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;    in kvm_sbi_ext_susp_handler()
     16  struct kvm_cpu_context *reset_cntx;    in kvm_sbi_ext_susp_handler()
vcpu_sbi_sta.c
     83  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;    in kvm_sbi_sta_steal_time_set_shmem()
    131  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;    in kvm_sbi_ext_sta_handler()
vcpu_sbi.c
    121  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;    in kvm_riscv_vcpu_sbi_forward()
    161  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;    in kvm_riscv_vcpu_sbi_return()
    427  struct kvm_cpu_context *cp = &vcpu->arch.guest_context;    in kvm_riscv_vcpu_sbi_ecall()
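All of these SBI handlers read the guest's kvm_cpu_context directly because SBI is a register-based ABI: per the SBI specification, a7 selects the extension, a6 the function, a0..a5 carry arguments, and the call returns an error code in a0 plus a value in a1. A trimmed dispatch sketch with hypothetical names (the TIME extension ID and the NOT_SUPPORTED error code follow the SBI spec):

```c
#include <stdint.h>

struct kvm_cpu_context_sketch {
	unsigned long a0, a1, a2, a3, a4, a5, a6, a7;
};

#define SBI_EXT_TIME_SKETCH          0x54494D45UL /* "TIME" */
#define SBI_SUCCESS_SKETCH           0
#define SBI_ERR_NOT_SUPPORTED_SKETCH (-2L)

extern void timer_program_sketch(uint64_t next_tick);

void sbi_ecall_sketch(struct kvm_cpu_context_sketch *cp)
{
	switch (cp->a7) { /* extension ID selects the handler */
	case SBI_EXT_TIME_SKETCH:
		timer_program_sketch(cp->a0); /* a0 = stime_value */
		cp->a0 = SBI_SUCCESS_SKETCH;
		break;
	default:
		cp->a0 = (unsigned long)SBI_ERR_NOT_SUPPORTED_SKETCH;
		break;
	}
}
```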
/linux-6.14.4/arch/arm64/kvm/hyp/vhe/
sysreg-sr.c
    153  void sysreg_save_host_state_vhe(struct kvm_cpu_context *ctxt)    in sysreg_save_host_state_vhe()
    159  void sysreg_save_guest_state_vhe(struct kvm_cpu_context *ctxt)    in sysreg_save_guest_state_vhe()
    166  void sysreg_restore_host_state_vhe(struct kvm_cpu_context *ctxt)    in sysreg_restore_host_state_vhe()
    172  void sysreg_restore_guest_state_vhe(struct kvm_cpu_context *ctxt)    in sysreg_restore_guest_state_vhe()
    192  struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;    in __vcpu_load_switch_sysregs()
    193  struct kvm_cpu_context *host_ctxt;    in __vcpu_load_switch_sysregs()
    258  struct kvm_cpu_context *guest_ctxt = &vcpu->arch.ctxt;    in __vcpu_put_switch_sysregs()
    259  struct kvm_cpu_context *host_ctxt;    in __vcpu_put_switch_sysregs()
switch.c
     33  DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
    573  struct kvm_cpu_context *host_ctxt;    in __kvm_vcpu_run_vhe()
    574  struct kvm_cpu_context *guest_ctxt;    in __kvm_vcpu_run_vhe()
    657  struct kvm_cpu_context *host_ctxt;    in __hyp_call_panic()
/linux-6.14.4/arch/arm64/kernel/
asm-offsets.c
    107  DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_cpu_context, regs));    in main()
    108  DEFINE(CPU_ELR_EL2, offsetof(struct kvm_cpu_context, sys_regs[ELR_EL2]));    in main()
    109  DEFINE(CPU_RGSR_EL1, offsetof(struct kvm_cpu_context, sys_regs[RGSR_EL1]));    in main()
    110  DEFINE(CPU_GCR_EL1, offsetof(struct kvm_cpu_context, sys_regs[GCR_EL1]));    in main()
    111  DEFINE(CPU_APIAKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APIAKEYLO_EL1]));    in main()
    112  DEFINE(CPU_APIBKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APIBKEYLO_EL1]));    in main()
    113  DEFINE(CPU_APDAKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APDAKEYLO_EL1]));    in main()
    114  DEFINE(CPU_APDBKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APDBKEYLO_EL1]));    in main()
    115  DEFINE(CPU_APGAKEYLO_EL1, offsetof(struct kvm_cpu_context, sys_regs[APGAKEYLO_EL1]));    in main()
    116  DEFINE(HOST_CONTEXT_VCPU, offsetof(struct kvm_cpu_context, __hyp_running_vcpu));    in main()
/linux-6.14.4/arch/arm64/kvm/hyp/include/nvhe/
ffa.h
     15  bool kvm_host_ffa_handler(struct kvm_cpu_context *host_ctxt, u32 func_id);