Lines Matching defs:kvm_vcpu_arch
528 struct kvm_vcpu_arch {
529 ulong host_stack;
530 u32 host_pid;
532 struct kvmppc_slb slb[64];
533 int slb_max; /* 1 + index of last valid entry in slb[] */
534 int slb_nr; /* total number of entries in SLB */
535 struct kvmppc_mmu mmu;
536 struct kvmppc_vcpu_book3s *book3s;
539 struct kvmppc_book3s_shadow_vcpu *shadow_vcpu;
548 struct thread_fp_state fp;
551 ulong evr[32];
552 ulong spefscr;
553 ulong host_spefscr;
554 u64 acc;
557 struct thread_vr_state vr;
561 u32 host_mas4;
562 u32 host_mas6;
563 u32 shadow_epcr;
564 u32 shadow_msrp;
565 u32 eplc;
566 u32 epsc;
567 u32 oldpir;
572 u32 epcr;
578 u32 qpr[32];
582 ulong tar;
586 ulong hflags;
587 ulong guest_owned_ext;
588 ulong purr;
589 ulong spurr;
590 ulong ic;
591 ulong dscr;
592 ulong amr;
593 ulong uamor;
594 ulong iamr;
595 u32 ctrl;
596 u32 dabrx;
597 ulong dabr;
598 ulong dawr0;
599 ulong dawrx0;
600 ulong dawr1;
601 ulong dawrx1;
602 ulong dexcr;
603 ulong hashkeyr;
604 ulong hashpkeyr;
605 ulong ciabr;
606 ulong cfar;
607 ulong ppr;
608 u32 pspb;
609 u8 load_ebb;
611 u8 load_tm;
613 ulong fscr;
614 ulong shadow_fscr;
615 ulong ebbhr;
616 ulong ebbrr;
617 ulong bescr;
618 ulong csigr;
619 ulong tacr;
620 ulong tcscr;
621 ulong acop;
622 ulong wort;
623 ulong tid;
624 ulong psscr;
625 ulong hfscr;
626 ulong shadow_srr1;
628 u32 vrsave; /* also USPRG0 */
629 u32 mmucr;
631 ulong shadow_msr;
632 ulong csrr0;
633 ulong csrr1;
634 ulong dsrr0;
635 ulong dsrr1;
636 ulong mcsrr0;
637 ulong mcsrr1;
638 ulong mcsr;
639 ulong dec;
641 u32 decar;
644 u64 entry_tb;
645 u64 entry_vtb;
646 u64 entry_ic;
647 u32 tcr;
648 ulong tsr; /* we need to perform set/clr_bits() which requires ulong */
649 u32 ivor[64];
650 ulong ivpr;
651 u32 pvr;
653 u32 shadow_pid;
654 u32 shadow_pid1;
655 u32 pid;
656 u32 swap_pid;
658 u32 ccr0;
659 u32 ccr1;
660 u32 dbsr;
662 u64 mmcr[4]; /* MMCR0, MMCR1, MMCR2, MMCR3 */
663 u64 mmcra;
664 u64 mmcrs;
665 u32 pmc[8];
666 u32 spmc[2];
667 u64 siar;
668 u64 sdar;
669 u64 sier[3];
671 u64 tfhar;
672 u64 texasr;
673 u64 tfiar;
674 u64 orig_texasr;
676 u32 cr_tm;
677 u64 xer_tm;
678 u64 lr_tm;
679 u64 ctr_tm;
680 u64 amr_tm;
681 u64 ppr_tm;
682 u64 dscr_tm;
683 u64 tar_tm;
685 ulong gpr_tm[32];
687 struct thread_fp_state fp_tm;
689 struct thread_vr_state vr_tm;
690 u32 vrsave_tm; /* also USPRG0 */
694 struct mutex exit_timing_lock;
695 struct kvmppc_exit_timing timing_exit;
696 struct kvmppc_exit_timing timing_last_enter;
697 u32 last_exit_type;
698 u32 timing_count_type[__NUMBER_OF_KVM_EXIT_TYPES];
699 u64 timing_sum_duration[__NUMBER_OF_KVM_EXIT_TYPES];
700 u64 timing_sum_quad_duration[__NUMBER_OF_KVM_EXIT_TYPES];
701 u64 timing_min_duration[__NUMBER_OF_KVM_EXIT_TYPES];
702 u64 timing_max_duration[__NUMBER_OF_KVM_EXIT_TYPES];
703 u64 timing_last_exit;
707 ulong fault_dar;
708 u32 fault_dsisr;
709 unsigned long intr_msr;
715 ulong fault_gpa;
719 ulong fault_dear;
720 ulong fault_esr;
721 ulong queued_dear;
722 ulong queued_esr;
723 spinlock_t wdt_lock;
724 struct timer_list wdt_timer;
725 u32 tlbcfg[4];
726 u32 tlbps[4];
727 u32 mmucfg;
728 u32 eptcfg;
729 u32 epr;
730 u64 sprg9;
731 u32 pwrmgtcr0;
732 u32 crit_save;
734 struct debug_reg dbg_reg;
736 gpa_t paddr_accessed;
737 gva_t vaddr_accessed;
738 pgd_t *pgdir;
740 u16 io_gpr; /* GPR used as IO source/target */
741 u8 mmio_host_swabbed;
742 u8 mmio_sign_extend;
744 u8 mmio_sp64_extend;
754 u8 mmio_vsx_copy_nums;
755 u8 mmio_vsx_offset;
756 u8 mmio_vmx_copy_nums;
757 u8 mmio_vmx_offset;
758 u8 mmio_copy_type;
759 u8 osi_needed;
760 u8 osi_enabled;
761 u8 papr_enabled;
762 u8 watchdog_enabled;
763 u8 sane;
764 u8 cpu_type;
765 u8 hcall_needed;
766 u8 epr_flags; /* KVMPPC_EPR_xxx */
767 u8 epr_needed;
768 u8 external_oneshot; /* clear external irq after delivery */
770 u32 cpr0_cfgaddr; /* holds the last set cpr0_cfgaddr */
795 struct kvm_vcpu_arch_shared *shared;
818 struct kvm_vcpu_arch_shared shregs;
820 struct mmio_hpte_cache mmio_cache;
821 unsigned long pgfault_addr;
822 long pgfault_index;
823 unsigned long pgfault_hpte[2];
824 struct mmio_hpte_cache_entry *pgfault_cache;
826 struct task_struct *run_task;
828 spinlock_t vpa_update_lock;
829 struct kvmppc_vpa vpa;
830 struct kvmppc_vpa dtl;
831 struct dtl_entry *dtl_ptr;
832 unsigned long dtl_index;
833 u64 stolen_logged;
834 struct kvmppc_vpa slb_shadow;
836 spinlock_t tbacct_lock;
837 u64 busy_stolen;
838 u64 busy_preempt;
840 u64 emul_inst;
842 u32 online;
844 u64 hfscr_permitted; /* A mask of permitted HFSCR facilities */
847 struct kvm_nested_guest *nested;
848 u64 nested_hfscr; /* HFSCR that the L1 requested for the nested guest */
849 u32 nested_vcpu_id;
850 gpa_t nested_io_gpr;
852 struct kvmhv_nestedv2_io nestedv2_io;
856 struct kvmhv_tb_accumulator *cur_activity; /* What we're timing */
857 u64 cur_tb_start; /* when it started */
859 struct kvmhv_tb_accumulator vcpu_entry;
860 struct kvmhv_tb_accumulator vcpu_exit;
861 struct kvmhv_tb_accumulator in_guest;
862 struct kvmhv_tb_accumulator hcall;
863 struct kvmhv_tb_accumulator pg_fault;
864 struct kvmhv_tb_accumulator guest_entry;
865 struct kvmhv_tb_accumulator guest_exit;
867 struct kvmhv_tb_accumulator rm_entry; /* real-mode entry code */
868 struct kvmhv_tb_accumulator rm_intr; /* real-mode intr handling */
869 struct kvmhv_tb_accumulator rm_exit; /* real-mode exit code */
870 struct kvmhv_tb_accumulator guest_time; /* guest execution */
871 struct kvmhv_tb_accumulator cede_time; /* time napping inside guest */
875 u64 l1_to_l2_cs;
876 u64 l2_to_l1_cs;
877 u64 l2_runtime_agg;
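The timing_* fields listed above (header lines 694-703) hold per-exit-type statistics: a count, a running sum of durations, a sum of squared durations, and the minimum and maximum seen so far, indexed by exit type. The following is a minimal, self-contained sketch of how such accumulators are typically maintained on each guest exit. The reduced struct, the NR_EXIT_TYPES constant, and the helper name are illustrative stand-ins chosen for this sketch, not the kernel's actual types or functions; in the kernel these fields are only compiled in when exit-timing support is enabled.

#include <stdint.h>
#include <stdio.h>

#define NR_EXIT_TYPES 8   /* stand-in for __NUMBER_OF_KVM_EXIT_TYPES */

struct exit_timing_stats {
	uint32_t count[NR_EXIT_TYPES];              /* cf. timing_count_type[] */
	uint64_t sum_duration[NR_EXIT_TYPES];       /* cf. timing_sum_duration[] */
	uint64_t sum_quad_duration[NR_EXIT_TYPES];  /* cf. timing_sum_quad_duration[] */
	uint64_t min_duration[NR_EXIT_TYPES];       /* cf. timing_min_duration[] */
	uint64_t max_duration[NR_EXIT_TYPES];       /* cf. timing_max_duration[] */
	uint64_t last_exit;                         /* cf. timing_last_exit */
};

/* Fold one exit of the given type and duration (e.g. timebase ticks) into the stats. */
static void account_exit_timing(struct exit_timing_stats *s,
                                unsigned int type, uint64_t now, uint64_t duration)
{
	s->count[type]++;
	s->sum_duration[type] += duration;
	s->sum_quad_duration[type] += duration * duration;
	if (s->count[type] == 1 || duration < s->min_duration[type])
		s->min_duration[type] = duration;
	if (duration > s->max_duration[type])
		s->max_duration[type] = duration;
	s->last_exit = now;
}

int main(void)
{
	struct exit_timing_stats stats = { 0 };

	/* Hypothetical exits of type 3, 120 and 80 ticks long. */
	account_exit_timing(&stats, 3, 1000, 120);
	account_exit_timing(&stats, 3, 2000, 80);

	printf("type 3: count=%u sum=%llu min=%llu max=%llu\n",
	       stats.count[3],
	       (unsigned long long)stats.sum_duration[3],
	       (unsigned long long)stats.min_duration[3],
	       (unsigned long long)stats.max_duration[3]);
	return 0;
}

From these accumulators the mean duration per exit type is sum_duration/count, and the sum of squared durations allows a variance estimate without storing individual samples, which is why both sums are kept per exit type.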