Lines matching full:vcpu in arch/x86/kvm/kvm_cache_regs.h

#define BUILD_KVM_GPR_ACCESSORS(lname, uname)				\
static __always_inline unsigned long kvm_##lname##_read(struct kvm_vcpu *vcpu)\
{									\
	return vcpu->arch.regs[VCPU_REGS_##uname];			\
}									\
static __always_inline void kvm_##lname##_write(struct kvm_vcpu *vcpu,	\
						unsigned long val)	\
{									\
	vcpu->arch.regs[VCPU_REGS_##uname] = val;			\
}

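The macro above is instantiated once per general-purpose register; the kvm_rax_read()/kvm_rdx_read() helpers used by kvm_read_edx_eax() further down come from instantiations along these lines:

BUILD_KVM_GPR_ACCESSORS(rax, RAX)
BUILD_KVM_GPR_ACCESSORS(rdx, RDX)
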
/*
 * Using the register cache from interrupt context is generally not allowed,
 * as caching a register and marking it available/dirty can't be done
 * atomically, i.e. accesses from interrupt context may clobber state or read
 * stale data if the vCPU task is in the process of updating the cache. The
 * exception is if KVM is handling a PMI that fired while the guest was
 * running, as that flow is guaranteed not to race with the vCPU task.
 */
#define kvm_assert_register_caching_allowed(vcpu)		\
	lockdep_assert_once(in_task() || kvm_arch_pmi_in_guest(vcpu))

/*
 * avail  dirty
 *   0      0	register in VMCS/VMCB
 *   0      1	*INVALID*
 *   1      0	register in vcpu->arch
 *   1      1	register in vcpu->arch, needs to be stored back
 */

static inline bool kvm_register_is_available(struct kvm_vcpu *vcpu,
					     enum kvm_reg reg)
{
	kvm_assert_register_caching_allowed(vcpu);
	return test_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

static inline bool kvm_register_is_dirty(struct kvm_vcpu *vcpu,
					 enum kvm_reg reg)
{
	kvm_assert_register_caching_allowed(vcpu);
	return test_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}

static inline void kvm_register_mark_available(struct kvm_vcpu *vcpu,
					       enum kvm_reg reg)
{
	kvm_assert_register_caching_allowed(vcpu);
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

static inline void kvm_register_mark_dirty(struct kvm_vcpu *vcpu,
					   enum kvm_reg reg)
{
	kvm_assert_register_caching_allowed(vcpu);
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
	__set_bit(reg, (unsigned long *)&vcpu->arch.regs_dirty);
}

static __always_inline bool kvm_register_test_and_mark_available(struct kvm_vcpu *vcpu,
								 enum kvm_reg reg)
{
	kvm_assert_register_caching_allowed(vcpu);
	return arch___test_and_set_bit(reg, (unsigned long *)&vcpu->arch.regs_avail);
}

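To make the avail/dirty protocol in the table above concrete, here is a minimal sketch (not part of the header) of a vendor read path driving these helpers; vendor_read_reg_from_hw() is a hypothetical stand-in for the real VMCS/VMCB access:

static unsigned long example_read_cached_reg(struct kvm_vcpu *vcpu,
					     enum kvm_reg reg)
{
	if (!kvm_register_is_available(vcpu, reg)) {
		/* avail=0: the value still lives in hardware state, fetch it... */
		vcpu->arch.regs[reg] = vendor_read_reg_from_hw(vcpu, reg);
		/* ...then flip to avail=1, dirty=0: cached, no write-back due. */
		kvm_register_mark_available(vcpu, reg);
	}
	return vcpu->arch.regs[reg];
}
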
/*
 * The "raw" register helpers are only for cases where the full 64 bits of a
 * register are read/written irrespective of current vCPU mode. In other words,
 * odds are good you shouldn't be using the raw variants.
 */
static inline unsigned long kvm_register_read_raw(struct kvm_vcpu *vcpu, int reg)
{
	if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
		return 0;
	if (!kvm_register_is_available(vcpu, reg))
		kvm_x86_call(cache_reg)(vcpu, reg);
	return vcpu->arch.regs[reg];
}

static inline void kvm_register_write_raw(struct kvm_vcpu *vcpu, int reg,
					  unsigned long val)
{
	if (WARN_ON_ONCE((unsigned int)reg >= NR_VCPU_REGS))
		return;
	vcpu->arch.regs[reg] = val;
	kvm_register_mark_dirty(vcpu, reg);
}

static inline unsigned long kvm_rip_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read_raw(vcpu, VCPU_REGS_RIP);
}

static inline void kvm_rip_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write_raw(vcpu, VCPU_REGS_RIP, val);
}

static inline unsigned long kvm_rsp_read(struct kvm_vcpu *vcpu)
{
	return kvm_register_read_raw(vcpu, VCPU_REGS_RSP);
}

static inline void kvm_rsp_write(struct kvm_vcpu *vcpu, unsigned long val)
{
	kvm_register_write_raw(vcpu, VCPU_REGS_RSP, val);
}

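By contrast, KVM's mode-aware GPR read (kvm_register_read() in arch/x86/kvm/x86.h, sketched roughly here) truncates to 32 bits when the vCPU is not in 64-bit mode:

static inline unsigned long kvm_register_read(struct kvm_vcpu *vcpu, int reg)
{
	unsigned long val = kvm_register_read_raw(vcpu, reg);

	/* Outside 64-bit mode only the low 32 bits of a GPR are
	 * architecturally visible, so drop the upper half. */
	return is_64_bit_mode(vcpu) ? val : (u32)val;
}
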
static inline u64 kvm_pdptr_read(struct kvm_vcpu *vcpu, int index)
{
	might_sleep();  /* on svm */
	if (!kvm_register_is_available(vcpu, VCPU_EXREG_PDPTR))
		kvm_x86_call(cache_reg)(vcpu, VCPU_EXREG_PDPTR);
	return vcpu->arch.walk_mmu->pdptrs[index];
}

static inline void kvm_pdptr_write(struct kvm_vcpu *vcpu, int index, u64 value)
{
	vcpu->arch.walk_mmu->pdptrs[index] = value;
}

static inline ulong kvm_read_cr0_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR0_GUEST_BITS;
	if ((tmask & vcpu->arch.cr0_guest_owned_bits) &&
	    !kvm_register_is_available(vcpu, VCPU_EXREG_CR0))
		kvm_x86_call(cache_reg)(vcpu, VCPU_EXREG_CR0);
	return vcpu->arch.cr0 & mask;
}

static __always_inline bool kvm_is_cr0_bit_set(struct kvm_vcpu *vcpu,
					       unsigned long cr0_bit)
{
	BUILD_BUG_ON(!is_power_of_2(cr0_bit));

	return !!kvm_read_cr0_bits(vcpu, cr0_bit);
}

static inline ulong kvm_read_cr0(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, ~0UL);
}

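As a usage note (not from this header): testing a single architectural bit is the common pattern, and it stays on the cached/guest-owned fast path whenever the bit is guest-owned and already available, e.g.:

	/* True iff the guest has paging enabled (CR0.PG is a single bit). */
	bool guest_paging = kvm_is_cr0_bit_set(vcpu, X86_CR0_PG);
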
static inline ulong kvm_read_cr4_bits(struct kvm_vcpu *vcpu, ulong mask)
{
	ulong tmask = mask & KVM_POSSIBLE_CR4_GUEST_BITS;
	if ((tmask & vcpu->arch.cr4_guest_owned_bits) &&
	    !kvm_register_is_available(vcpu, VCPU_EXREG_CR4))
		kvm_x86_call(cache_reg)(vcpu, VCPU_EXREG_CR4);
	return vcpu->arch.cr4 & mask;
}

static __always_inline bool kvm_is_cr4_bit_set(struct kvm_vcpu *vcpu,
					       unsigned long cr4_bit)
{
	BUILD_BUG_ON(!is_power_of_2(cr4_bit));

	return !!kvm_read_cr4_bits(vcpu, cr4_bit);
}

static inline ulong kvm_read_cr3(struct kvm_vcpu *vcpu)
{
	if (!kvm_register_is_available(vcpu, VCPU_EXREG_CR3))
		kvm_x86_call(cache_reg)(vcpu, VCPU_EXREG_CR3);
	return vcpu->arch.cr3;
}

static inline ulong kvm_read_cr4(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, ~0UL);
}

static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
{
	return (kvm_rax_read(vcpu) & -1u)
		| ((u64)(kvm_rdx_read(vcpu) & -1u) << 32);
}

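The inverse operation, spreading a 64-bit result back across the EDX:EAX pair (as RDMSR/RDTSC emulation must), is not in this header; a minimal sketch using the GPR accessors above (example_write_edx_eax is an illustrative name, not a KVM helper):

static inline void example_write_edx_eax(struct kvm_vcpu *vcpu, u64 data)
{
	/* Low 32 bits go to RAX, high 32 bits to RDX. */
	kvm_rax_write(vcpu, data & -1u);
	kvm_rdx_write(vcpu, data >> 32);
}
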
static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags |= HF_GUEST_MASK;
	vcpu->stat.guest_mode = 1;
}

static inline void leave_guest_mode(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hflags &= ~HF_GUEST_MASK;

	if (vcpu->arch.load_eoi_exitmap_pending) {
		vcpu->arch.load_eoi_exitmap_pending = false;
		kvm_make_request(KVM_REQ_LOAD_EOI_EXITMAP, vcpu);
	}

	vcpu->stat.guest_mode = 0;
}

static inline bool is_guest_mode(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.hflags & HF_GUEST_MASK;
}

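A typical consumer, sketched here for illustration only (reflect_exit_to_l1() is a hypothetical placeholder): exit handlers branch on is_guest_mode() to decide whether an exit that occurred while running a nested (L2) guest should be forwarded to the L1 hypervisor.

	if (is_guest_mode(vcpu))
		return reflect_exit_to_l1(vcpu);	/* hypothetical helper */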