Lines matching the identifier "hv" (full-word matches), all in KVM's Hyper-V emulation code (arch/x86/kvm/hyperv.c)

111 struct kvm_hv *hv = to_kvm_hv(vcpu->kvm); in synic_update_vector() local
140 hv->synic_auto_eoi_used++; in synic_update_vector()
142 hv->synic_auto_eoi_used--; in synic_update_vector()
150 !!hv->synic_auto_eoi_used); in synic_update_vector()
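
The synic_update_vector() matches (lines 111-150) show a per-VM bookkeeping pattern: a count of SynIC vectors configured for auto-EOI, whose truth value is then handed on as a boolean (in current kernels this gates an APICv/AVIC inhibit). A standalone sketch of that pattern, with illustrative types and helper names (not the kernel's):

#include <stdbool.h>

/* Sketch: count how many vectors currently use auto-EOI; any non-zero
 * count means hardware APIC acceleration must stay inhibited. */
struct synic_accounting_sketch {
        unsigned int auto_eoi_used;
        bool apicv_inhibited;
};

static void update_auto_eoi_sketch(struct synic_accounting_sketch *s,
                                   bool was_enabled, bool now_enabled)
{
        if (!was_enabled && now_enabled)
                s->auto_eoi_used++;
        else if (was_enabled && !now_enabled)
                s->auto_eoi_used--;

        /* "!!" collapses the counter to "is any auto-EOI vector in use?" */
        s->apicv_inhibited = !!s->auto_eoi_used;
}
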
338 struct kvm_hv *hv = to_kvm_hv(vcpu->kvm); in kvm_hv_syndbg_complete_userspace() local
341 hv->hv_syndbg.control.status = in kvm_hv_syndbg_complete_userspace()
570 struct kvm_hv *hv = to_kvm_hv(kvm); in get_time_ref_counter() local
578 if (hv->hv_tsc_page_status != HV_TSC_PAGE_SET) in get_time_ref_counter()
583 return mul_u64_u64_shr(tsc, hv->tsc_ref.tsc_scale, 64) in get_time_ref_counter()
584 + hv->tsc_ref.tsc_offset; in get_time_ref_counter()
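
get_time_ref_counter() (lines 570-584) converts a guest TSC value into Hyper-V's partition reference time (100ns units) using the scale and offset kept in the emulated reference TSC page; mul_u64_u64_shr() is the kernel's 128-bit multiply-and-shift helper. A standalone sketch of the same conversion, assuming the standard Hyper-V formula and modelling the helper with unsigned __int128:

#include <stdint.h>

/* Sketch: reference_time = ((guest_tsc * tsc_scale) >> 64) + tsc_offset. */
static uint64_t ref_time_sketch(uint64_t guest_tsc, uint64_t tsc_scale,
                                int64_t tsc_offset)
{
        return (uint64_t)(((unsigned __int128)guest_tsc * tsc_scale) >> 64) +
               tsc_offset;
}
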
1035 struct kvm_hv *hv = to_kvm_hv(kvm); in kvm_hv_msr_get_crash_data() local
1036 size_t size = ARRAY_SIZE(hv->hv_crash_param); in kvm_hv_msr_get_crash_data()
1041 *pdata = hv->hv_crash_param[array_index_nospec(index, size)]; in kvm_hv_msr_get_crash_data()
1047 struct kvm_hv *hv = to_kvm_hv(kvm); in kvm_hv_msr_get_crash_ctl() local
1049 *pdata = hv->hv_crash_ctl; in kvm_hv_msr_get_crash_ctl()
1055 struct kvm_hv *hv = to_kvm_hv(kvm); in kvm_hv_msr_set_crash_ctl() local
1057 hv->hv_crash_ctl = data & HV_CRASH_CTL_CRASH_NOTIFY; in kvm_hv_msr_set_crash_ctl()
1064 struct kvm_hv *hv = to_kvm_hv(kvm); in kvm_hv_msr_set_crash_data() local
1065 size_t size = ARRAY_SIZE(hv->hv_crash_param); in kvm_hv_msr_set_crash_data()
1070 hv->hv_crash_param[array_index_nospec(index, size)] = data; in kvm_hv_msr_set_crash_data()
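
The crash-parameter accessors (lines 1035-1070) bound the MSR-supplied index with array_index_nospec(), the usual Spectre-v1 hardening: verify the index architecturally, then clamp it so a mispredicted bounds check cannot be used to read or write past hv_crash_param[] under speculation. A sketch of the read side, mirroring the listed lines (kernel context assumed; the exact error value is my assumption):

#include <linux/kernel.h>
#include <linux/nospec.h>

/* Sketch: reject an out-of-range index, then clamp it with
 * array_index_nospec() so speculation cannot bypass the check. */
static int crash_param_read_sketch(struct kvm_hv *hv, u32 index, u64 *pdata)
{
        size_t size = ARRAY_SIZE(hv->hv_crash_param);

        if (WARN_ON_ONCE(index >= size))
                return -EINVAL;

        *pdata = hv->hv_crash_param[array_index_nospec(index, size)];
        return 0;
}
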
1152 static inline bool tsc_page_update_unsafe(struct kvm_hv *hv) in tsc_page_update_unsafe() argument
1154 return (hv->hv_tsc_page_status != HV_TSC_PAGE_GUEST_CHANGED) && in tsc_page_update_unsafe()
1155 hv->hv_tsc_emulation_control; in tsc_page_update_unsafe()
1161 struct kvm_hv *hv = to_kvm_hv(kvm); in kvm_hv_setup_tsc_page() local
1165 BUILD_BUG_ON(sizeof(tsc_seq) != sizeof(hv->tsc_ref.tsc_sequence)); in kvm_hv_setup_tsc_page()
1168 mutex_lock(&hv->hv_lock); in kvm_hv_setup_tsc_page()
1170 if (hv->hv_tsc_page_status == HV_TSC_PAGE_BROKEN || in kvm_hv_setup_tsc_page()
1171 hv->hv_tsc_page_status == HV_TSC_PAGE_SET || in kvm_hv_setup_tsc_page()
1172 hv->hv_tsc_page_status == HV_TSC_PAGE_UNSET) in kvm_hv_setup_tsc_page()
1175 if (!(hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE)) in kvm_hv_setup_tsc_page()
1178 gfn = hv->hv_tsc_page >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT; in kvm_hv_setup_tsc_page()
1187 if (tsc_seq && tsc_page_update_unsafe(hv)) { in kvm_hv_setup_tsc_page()
1188 if (kvm_read_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref))) in kvm_hv_setup_tsc_page()
1191 hv->hv_tsc_page_status = HV_TSC_PAGE_SET; in kvm_hv_setup_tsc_page()
1199 hv->tsc_ref.tsc_sequence = 0; in kvm_hv_setup_tsc_page()
1201 &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence))) in kvm_hv_setup_tsc_page()
1204 if (!compute_tsc_page_parameters(hv_clock, &hv->tsc_ref)) in kvm_hv_setup_tsc_page()
1209 if (kvm_write_guest(kvm, gfn_to_gpa(gfn), &hv->tsc_ref, sizeof(hv->tsc_ref))) in kvm_hv_setup_tsc_page()
1222 hv->tsc_ref.tsc_sequence = tsc_seq; in kvm_hv_setup_tsc_page()
1224 &hv->tsc_ref, sizeof(hv->tsc_ref.tsc_sequence))) in kvm_hv_setup_tsc_page()
1227 hv->hv_tsc_page_status = HV_TSC_PAGE_SET; in kvm_hv_setup_tsc_page()
1231 hv->hv_tsc_page_status = HV_TSC_PAGE_BROKEN; in kvm_hv_setup_tsc_page()
1233 mutex_unlock(&hv->hv_lock); in kvm_hv_setup_tsc_page()
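
kvm_hv_setup_tsc_page() (lines 1161-1233) refreshes the guest-visible reference TSC page in a fixed order: the sequence word is zeroed first (guests treat 0 as "page invalid" and fall back to the time-reference MSR), the new scale/offset parameters are computed and written, and only then is a fresh non-zero sequence stored, so a reader can never combine old and new parameters. In the kernel each step is a separate kvm_write_guest() into guest memory; the standalone sketch below models the same ordering with illustrative types:

#include <stdint.h>

/* Illustrative layout; the real page layout comes from the Hyper-V TLFS. */
struct tsc_ref_sketch {
        uint32_t tsc_sequence;   /* 0 = invalid: readers must fall back   */
        uint32_t reserved;
        uint64_t tsc_scale;
        int64_t  tsc_offset;
};

/* volatile keeps the compiler from reordering these stores; in the kernel
 * the separate guest-memory writes impose the ordering by construction. */
static void tsc_page_publish_sketch(volatile struct tsc_ref_sketch *pg,
                                    uint64_t scale, int64_t offset,
                                    uint32_t new_seq)
{
        pg->tsc_sequence = 0;        /* 1: invalidate                     */
        pg->tsc_scale    = scale;    /* 2: install the new parameters     */
        pg->tsc_offset   = offset;
        pg->tsc_sequence = new_seq;  /* 3: publish with a non-zero seq    */
}
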
1238 struct kvm_hv *hv = to_kvm_hv(kvm); in kvm_hv_request_tsc_page_update() local
1240 mutex_lock(&hv->hv_lock); in kvm_hv_request_tsc_page_update()
1242 if (hv->hv_tsc_page_status == HV_TSC_PAGE_SET && in kvm_hv_request_tsc_page_update()
1243 !tsc_page_update_unsafe(hv)) in kvm_hv_request_tsc_page_update()
1244 hv->hv_tsc_page_status = HV_TSC_PAGE_HOST_CHANGED; in kvm_hv_request_tsc_page_update()
1246 mutex_unlock(&hv->hv_lock); in kvm_hv_request_tsc_page_update()
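
Across kvm_hv_setup_tsc_page() and kvm_hv_request_tsc_page_update(), the hv_tsc_page_status values behave as a small state machine. The constant names below are the ones that appear in the listing; the ordering/values are illustrative and the comments are my reading of the listed transitions:

enum {
        HV_TSC_PAGE_UNSET,          /* enable bit cleared in the reference-TSC MSR        */
        HV_TSC_PAGE_GUEST_CHANGED,  /* guest rewrote the MSR; page needs (re)setup        */
        HV_TSC_PAGE_HOST_CHANGED,   /* host requested a refresh of the published contents */
        HV_TSC_PAGE_SET,            /* contents published; get_time_ref_counter() uses them */
        HV_TSC_PAGE_BROKEN,         /* a guest-memory write failed; stop touching the page */
};
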
1338 struct kvm_hv *hv = to_kvm_hv(kvm); in __kvm_hv_xsaves_xsavec_maybe_warn() local
1341 if (hv->xsaves_xsavec_checked) in __kvm_hv_xsaves_xsavec_maybe_warn()
1344 if ((hv->hv_guest_os_id & KVM_HV_WIN2016_GUEST_ID_MASK) != in __kvm_hv_xsaves_xsavec_maybe_warn()
1348 hv->xsaves_xsavec_checked = true; in __kvm_hv_xsaves_xsavec_maybe_warn()
1364 struct kvm_hv *hv = to_kvm_hv(vcpu->kvm); in kvm_hv_xsaves_xsavec_maybe_warn() local
1367 hv->xsaves_xsavec_checked) in kvm_hv_xsaves_xsavec_maybe_warn()
1370 mutex_lock(&hv->hv_lock); in kvm_hv_xsaves_xsavec_maybe_warn()
1372 mutex_unlock(&hv->hv_lock); in kvm_hv_xsaves_xsavec_maybe_warn()
1379 struct kvm_hv *hv = to_kvm_hv(kvm); in kvm_hv_set_msr_pw() local
1386 hv->hv_guest_os_id = data; in kvm_hv_set_msr_pw()
1388 if (!hv->hv_guest_os_id) in kvm_hv_set_msr_pw()
1389 hv->hv_hypercall &= ~HV_X64_MSR_HYPERCALL_ENABLE; in kvm_hv_set_msr_pw()
1397 if (!hv->hv_guest_os_id) in kvm_hv_set_msr_pw()
1400 hv->hv_hypercall = data; in kvm_hv_set_msr_pw()
1429 hv->hv_hypercall = data; in kvm_hv_set_msr_pw()
1433 hv->hv_tsc_page = data; in kvm_hv_set_msr_pw()
1434 if (hv->hv_tsc_page & HV_X64_MSR_TSC_REFERENCE_ENABLE) { in kvm_hv_set_msr_pw()
1436 hv->hv_tsc_page_status = HV_TSC_PAGE_GUEST_CHANGED; in kvm_hv_set_msr_pw()
1438 hv->hv_tsc_page_status = HV_TSC_PAGE_HOST_CHANGED; in kvm_hv_set_msr_pw()
1441 hv->hv_tsc_page_status = HV_TSC_PAGE_UNSET; in kvm_hv_set_msr_pw()
1453 vcpu_debug(vcpu, "hv crash (0x%llx 0x%llx 0x%llx 0x%llx 0x%llx)\n", in kvm_hv_set_msr_pw()
1454 hv->hv_crash_param[0], in kvm_hv_set_msr_pw()
1455 hv->hv_crash_param[1], in kvm_hv_set_msr_pw()
1456 hv->hv_crash_param[2], in kvm_hv_set_msr_pw()
1457 hv->hv_crash_param[3], in kvm_hv_set_msr_pw()
1458 hv->hv_crash_param[4]); in kvm_hv_set_msr_pw()
1471 hv->hv_reenlightenment_control = data; in kvm_hv_set_msr_pw()
1474 hv->hv_tsc_emulation_control = data; in kvm_hv_set_msr_pw()
1480 hv->hv_tsc_emulation_status = data; in kvm_hv_set_msr_pw()
1493 if (!host && hv->hv_invtsc_control && !data) in kvm_hv_set_msr_pw()
1496 hv->hv_invtsc_control = data; in kvm_hv_set_msr_pw()
1527 struct kvm_hv *hv = to_kvm_hv(vcpu->kvm); in kvm_hv_set_msr() local
1543 atomic_inc(&hv->num_mismatched_vp_indexes); in kvm_hv_set_msr()
1545 atomic_dec(&hv->num_mismatched_vp_indexes); in kvm_hv_set_msr()
1635 struct kvm_hv *hv = to_kvm_hv(kvm); in kvm_hv_get_msr_pw() local
1642 data = hv->hv_guest_os_id; in kvm_hv_get_msr_pw()
1645 data = hv->hv_hypercall; in kvm_hv_get_msr_pw()
1651 data = hv->hv_tsc_page; in kvm_hv_get_msr_pw()
1663 data = hv->hv_reenlightenment_control; in kvm_hv_get_msr_pw()
1666 data = hv->hv_tsc_emulation_control; in kvm_hv_get_msr_pw()
1669 data = hv->hv_tsc_emulation_status; in kvm_hv_get_msr_pw()
1672 data = hv->hv_invtsc_control; in kvm_hv_get_msr_pw()
1753 struct kvm_hv *hv = to_kvm_hv(vcpu->kvm); in kvm_hv_set_msr_common() local
1764 mutex_lock(&hv->hv_lock); in kvm_hv_set_msr_common()
1766 mutex_unlock(&hv->hv_lock); in kvm_hv_set_msr_common()
1774 struct kvm_hv *hv = to_kvm_hv(vcpu->kvm); in kvm_hv_get_msr_common() local
1785 mutex_lock(&hv->hv_lock); in kvm_hv_get_msr_common()
1787 mutex_unlock(&hv->hv_lock); in kvm_hv_get_msr_common()
1796 struct kvm_hv *hv = to_kvm_hv(kvm); in sparse_set_to_vcpu_mask() local
1797 bool has_mismatch = atomic_read(&hv->num_mismatched_vp_indexes); in sparse_set_to_vcpu_mask()
2410 struct kvm_hv *hv = to_kvm_hv(vcpu->kvm); in kvm_hvcall_signal_event() local
2440 eventfd = idr_find(&hv->conn_to_evt, hc->ingpa); in kvm_hvcall_signal_event()
2695 struct kvm_hv *hv = to_kvm_hv(kvm); in kvm_hv_init_vm() local
2697 mutex_init(&hv->hv_lock); in kvm_hv_init_vm()
2698 idr_init(&hv->conn_to_evt); in kvm_hv_init_vm()
2703 struct kvm_hv *hv = to_kvm_hv(kvm); in kvm_hv_destroy_vm() local
2707 idr_for_each_entry(&hv->conn_to_evt, eventfd, i) in kvm_hv_destroy_vm()
2709 idr_destroy(&hv->conn_to_evt); in kvm_hv_destroy_vm()
2714 struct kvm_hv *hv = to_kvm_hv(kvm); in kvm_hv_eventfd_assign() local
2722 mutex_lock(&hv->hv_lock); in kvm_hv_eventfd_assign()
2723 ret = idr_alloc(&hv->conn_to_evt, eventfd, conn_id, conn_id + 1, in kvm_hv_eventfd_assign()
2725 mutex_unlock(&hv->hv_lock); in kvm_hv_eventfd_assign()
2738 struct kvm_hv *hv = to_kvm_hv(kvm); in kvm_hv_eventfd_deassign() local
2741 mutex_lock(&hv->hv_lock); in kvm_hv_eventfd_deassign()
2742 eventfd = idr_remove(&hv->conn_to_evt, conn_id); in kvm_hv_eventfd_deassign()
2743 mutex_unlock(&hv->hv_lock); in kvm_hv_eventfd_deassign()
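
The conn_to_evt matches describe an IDR keyed by Hyper-V connection id and holding eventfd contexts: initialised in kvm_hv_init_vm(), populated under hv_lock by kvm_hv_eventfd_assign(), looked up by kvm_hvcall_signal_event() when the guest issues a signal-event hypercall, and torn down by kvm_hv_eventfd_deassign() and kvm_hv_destroy_vm(). A sketch of the assign step against the kernel IDR API (the wrapper name is illustrative):

#include <linux/idr.h>
#include <linux/mutex.h>
#include <linux/eventfd.h>

/* Sketch: reserve exactly the requested connection id and map it to the
 * eventfd; idr_alloc() returns the allocated id on success or a negative
 * errno (-ENOSPC when the id is already taken). */
static int hv_conn_add_sketch(struct kvm_hv *hv, u32 conn_id,
                              struct eventfd_ctx *eventfd)
{
        int ret;

        mutex_lock(&hv->hv_lock);
        ret = idr_alloc(&hv->conn_to_evt, eventfd, conn_id, conn_id + 1,
                        GFP_KERNEL);
        mutex_unlock(&hv->hv_lock);

        return ret < 0 ? ret : 0;
}
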
2797 memcpy(signature, "Linux KVM Hv", 12); in kvm_get_hv_cpuid()
2894 memcpy(signature, "Linux KVM Hv", 12); in kvm_get_hv_cpuid()
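
Both kvm_get_hv_cpuid() matches copy the same 12-byte vendor string; Hyper-V's CPUID convention returns a vendor signature as three 32-bit words in EBX/ECX/EDX of a vendor leaf. A standalone sketch of that packing (the struct is illustrative):

#include <stdint.h>
#include <string.h>

struct cpuid_regs_sketch { uint32_t eax, ebx, ecx, edx; };

/* Sketch: split the 12-byte vendor string into three little-endian words. */
static void set_vendor_signature_sketch(struct cpuid_regs_sketch *ent)
{
        uint32_t signature[3];

        memcpy(signature, "Linux KVM Hv", 12);
        ent->ebx = signature[0];   /* "Linu" */
        ent->ecx = signature[1];   /* "x KV" */
        ent->edx = signature[2];   /* "M Hv" */
}
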