Lines matching "vm-active-channels" in arch/x86/kvm/xen.c
1 // SPDX-License-Identifier: GPL-2.0
40 struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache; in kvm_xen_shared_info_init()
46 int idx = srcu_read_lock(&kvm->srcu); in kvm_xen_shared_info_init()
48 read_lock_irq(&gpc->lock); in kvm_xen_shared_info_init()
50 read_unlock_irq(&gpc->lock); in kvm_xen_shared_info_init()
56 read_lock_irq(&gpc->lock); in kvm_xen_shared_info_init()
65 /* Paranoia checks on the 32-bit struct layout */ in kvm_xen_shared_info_init()
71 /* Paranoia checks on the 64-bit struct layout */ in kvm_xen_shared_info_init()
75 if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) { in kvm_xen_shared_info_init()
76 struct shared_info *shinfo = gpc->khva; in kvm_xen_shared_info_init()
78 wc_sec_hi = &shinfo->wc_sec_hi; in kvm_xen_shared_info_init()
79 wc = &shinfo->wc; in kvm_xen_shared_info_init()
83 struct compat_shared_info *shinfo = gpc->khva; in kvm_xen_shared_info_init()
85 wc_sec_hi = &shinfo->arch.wc_sec_hi; in kvm_xen_shared_info_init()
86 wc = &shinfo->wc; in kvm_xen_shared_info_init()
90 wc_version = wc->version = (wc->version + 1) | 1; in kvm_xen_shared_info_init()
93 wc->nsec = do_div(wall_nsec, NSEC_PER_SEC); in kvm_xen_shared_info_init()
94 wc->sec = (u32)wall_nsec; in kvm_xen_shared_info_init()
98 wc->version = wc_version + 1; in kvm_xen_shared_info_init()
99 read_unlock_irq(&gpc->lock); in kvm_xen_shared_info_init()
104 srcu_read_unlock(&kvm->srcu, idx); in kvm_xen_shared_info_init()
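
The wc->version arithmetic above is Xen's seqcount-style publication protocol for the shared wallclock: the version is forced odd while the fields are rewritten and bumped back to even when done, so a guest retries any read that overlaps an update. A minimal, self-contained model of both sides (plain C with GCC/Clang atomics; illustrative names, not the kernel code itself):

#include <stdint.h>

struct wallclock {
	volatile uint32_t version;
	uint32_t sec;
	uint32_t nsec;
};

/* Writer: odd version = update in progress (mirrors wc->version above). */
static void wc_write(struct wallclock *wc, uint32_t sec, uint32_t nsec)
{
	wc->version = (wc->version + 1) | 1;		/* make it odd */
	__atomic_thread_fence(__ATOMIC_RELEASE);	/* smp_wmb() stand-in */
	wc->sec = sec;
	wc->nsec = nsec;
	__atomic_thread_fence(__ATOMIC_RELEASE);
	wc->version++;					/* even again: done */
}

/* Reader: retry until a stable, even version brackets both reads. */
static void wc_read(struct wallclock *wc, uint32_t *sec, uint32_t *nsec)
{
	uint32_t v;

	do {
		v = wc->version;
		__atomic_thread_fence(__ATOMIC_ACQUIRE);
		*sec = wc->sec;
		*nsec = wc->nsec;
		__atomic_thread_fence(__ATOMIC_ACQUIRE);
	} while ((v & 1) || v != wc->version);
}
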
110 if (atomic_read(&vcpu->arch.xen.timer_pending) > 0) { in kvm_xen_inject_timer_irqs()
113 e.vcpu_id = vcpu->vcpu_id; in kvm_xen_inject_timer_irqs()
114 e.vcpu_idx = vcpu->vcpu_idx; in kvm_xen_inject_timer_irqs()
115 e.port = vcpu->arch.xen.timer_virq; in kvm_xen_inject_timer_irqs()
118 kvm_xen_set_evtchn(&e, vcpu->kvm); in kvm_xen_inject_timer_irqs()
120 vcpu->arch.xen.timer_expires = 0; in kvm_xen_inject_timer_irqs()
121 atomic_set(&vcpu->arch.xen.timer_pending, 0); in kvm_xen_inject_timer_irqs()
132 if (atomic_read(&vcpu->arch.xen.timer_pending)) in xen_timer_callback()
135 e.vcpu_id = vcpu->vcpu_id; in xen_timer_callback()
136 e.vcpu_idx = vcpu->vcpu_idx; in xen_timer_callback()
137 e.port = vcpu->arch.xen.timer_virq; in xen_timer_callback()
140 rc = kvm_xen_set_evtchn_fast(&e, vcpu->kvm); in xen_timer_callback()
141 if (rc != -EWOULDBLOCK) { in xen_timer_callback()
142 vcpu->arch.xen.timer_expires = 0; in xen_timer_callback()
146 atomic_inc(&vcpu->arch.xen.timer_pending); in xen_timer_callback()
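
xen_timer_callback() runs in hrtimer (hard-IRQ) context, so it may only attempt the non-sleeping fast path; on -EWOULDBLOCK it leaves timer_pending raised and the injection is retried from vCPU context by kvm_xen_inject_timer_irqs(). A rough sketch of that hand-off pattern (hypothetical helper names):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int timer_pending;

/* IRQ context: only a non-blocking delivery attempt is allowed. */
static void timer_irq(bool (*try_deliver_fast)(void))
{
	if (try_deliver_fast())
		return;				/* delivered, done */
	atomic_fetch_add(&timer_pending, 1);	/* defer to vCPU context */
	/* ...then kick the vCPU so it notices the deferred work */
}

/* vCPU context: sleeping is fine, so take the slow path and clear the flag. */
static void vcpu_inject_pending(void (*deliver_slow)(void))
{
	if (atomic_load(&timer_pending) > 0) {
		deliver_slow();
		atomic_store(&timer_pending, 0);
	}
}
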
176 if (vcpu->arch.hv_clock.version && vcpu->kvm->arch.use_master_clock && in kvm_xen_start_timer()
200 guest_now = __pvclock_read_cycles(&vcpu->arch.hv_clock, in kvm_xen_start_timer()
214 * be in the "past" by the time the VM is running again after in kvm_xen_start_timer()
217 guest_now = get_kvmclock_ns(vcpu->kvm); in kvm_xen_start_timer()
221 delta = guest_abs - guest_now; in kvm_xen_start_timer()
245 if (vcpu->arch.xen.timer_expires) in kvm_xen_start_timer()
246 hrtimer_cancel(&vcpu->arch.xen.timer); in kvm_xen_start_timer()
248 atomic_set(&vcpu->arch.xen.timer_pending, 0); in kvm_xen_start_timer()
249 vcpu->arch.xen.timer_expires = guest_abs; in kvm_xen_start_timer()
252 xen_timer_callback(&vcpu->arch.xen.timer); in kvm_xen_start_timer()
254 hrtimer_start(&vcpu->arch.xen.timer, in kvm_xen_start_timer()
261 hrtimer_cancel(&vcpu->arch.xen.timer); in kvm_xen_stop_timer()
262 vcpu->arch.xen.timer_expires = 0; in kvm_xen_stop_timer()
263 atomic_set(&vcpu->arch.xen.timer_pending, 0); in kvm_xen_stop_timer()
268 struct kvm_vcpu_xen *vx = &v->arch.xen; in kvm_xen_update_runstate_guest()
269 struct gfn_to_pfn_cache *gpc1 = &vx->runstate_cache; in kvm_xen_update_runstate_guest()
270 struct gfn_to_pfn_cache *gpc2 = &vx->runstate2_cache; in kvm_xen_update_runstate_guest()
281 * The only difference between 32-bit and 64-bit versions of the in kvm_xen_update_runstate_guest()
282 * runstate struct is the alignment of uint64_t in 32-bit, which in kvm_xen_update_runstate_guest()
283 * means that the 64-bit version has an additional 4 bytes of in kvm_xen_update_runstate_guest()
293 * The 64-bit structure has 4 bytes of padding before 'state_entry_time' in kvm_xen_update_runstate_guest()
304 * and is the same size (int) as vx->current_runstate. in kvm_xen_update_runstate_guest()
309 sizeof(vx->current_runstate)); in kvm_xen_update_runstate_guest()
311 sizeof(vx->current_runstate)); in kvm_xen_update_runstate_guest()
316 * is little-endian means that it's in the last *byte* of the word. in kvm_xen_update_runstate_guest()
326 * The time array is four 64-bit quantities in both versions, matching in kvm_xen_update_runstate_guest()
327 * the vx->runstate_times and immediately following state_entry_time. in kvm_xen_update_runstate_guest()
330 offsetof(struct vcpu_runstate_info, time) - sizeof(uint64_t)); in kvm_xen_update_runstate_guest()
332 offsetof(struct compat_vcpu_runstate_info, time) - sizeof(uint64_t)); in kvm_xen_update_runstate_guest()
336 sizeof(vx->runstate_times)); in kvm_xen_update_runstate_guest()
338 if (IS_ENABLED(CONFIG_64BIT) && v->kvm->arch.xen.long_mode) { in kvm_xen_update_runstate_guest()
351 * alignment (and the 32-bit ABI doesn't align the 64-bit integers in kvm_xen_update_runstate_guest()
352 * anyway, even if the overall struct had been 64-bit aligned). in kvm_xen_update_runstate_guest()
354 if ((gpc1->gpa & ~PAGE_MASK) + user_len >= PAGE_SIZE) { in kvm_xen_update_runstate_guest()
355 user_len1 = PAGE_SIZE - (gpc1->gpa & ~PAGE_MASK); in kvm_xen_update_runstate_guest()
356 user_len2 = user_len - user_len1; in kvm_xen_update_runstate_guest()
370 if (!read_trylock(&gpc1->lock)) { in kvm_xen_update_runstate_guest()
375 read_lock_irqsave(&gpc1->lock, flags); in kvm_xen_update_runstate_guest()
378 read_unlock_irqrestore(&gpc1->lock, flags); in kvm_xen_update_runstate_guest()
387 read_lock_irqsave(&gpc1->lock, flags); in kvm_xen_update_runstate_guest()
400 rs_state = gpc1->khva; in kvm_xen_update_runstate_guest()
401 rs_times = gpc1->khva + times_ofs; in kvm_xen_update_runstate_guest()
402 if (v->kvm->arch.xen.runstate_update_flag) in kvm_xen_update_runstate_guest()
403 update_bit = ((void *)(&rs_times[1])) - 1; in kvm_xen_update_runstate_guest()
412 lock_set_subclass(&gpc1->lock.dep_map, 1, _THIS_IP_); in kvm_xen_update_runstate_guest()
414 if (!read_trylock(&gpc2->lock)) { in kvm_xen_update_runstate_guest()
415 read_unlock_irqrestore(&gpc1->lock, flags); in kvm_xen_update_runstate_guest()
419 read_lock(&gpc2->lock); in kvm_xen_update_runstate_guest()
423 read_unlock(&gpc2->lock); in kvm_xen_update_runstate_guest()
424 read_unlock_irqrestore(&gpc1->lock, flags); in kvm_xen_update_runstate_guest()
432 * area was configured in 32-bit mode and only extends in kvm_xen_update_runstate_guest()
434 * 64-bit mode, the second GPC won't have been set up. in kvm_xen_update_runstate_guest()
436 if (kvm_gpc_activate(gpc2, gpc1->gpa + user_len1, in kvm_xen_update_runstate_guest()
458 * is the 32-bit field that the compiler thinks is padding. in kvm_xen_update_runstate_guest()
460 rs_state = ((void *)rs_times) - times_ofs; in kvm_xen_update_runstate_guest()
466 if (v->kvm->arch.xen.runstate_update_flag) { in kvm_xen_update_runstate_guest()
468 update_bit = gpc1->khva + times_ofs + in kvm_xen_update_runstate_guest()
469 sizeof(uint64_t) - 1; in kvm_xen_update_runstate_guest()
471 update_bit = gpc2->khva + times_ofs + in kvm_xen_update_runstate_guest()
472 sizeof(uint64_t) - 1 - user_len1; in kvm_xen_update_runstate_guest()
477 * Don't leak kernel memory through the padding in the 64-bit in kvm_xen_update_runstate_guest()
487 * that (and write-barrier) before writing to the rest of the in kvm_xen_update_runstate_guest()
490 * different cache line to the rest of the 64-bit word, due to in kvm_xen_update_runstate_guest()
493 entry_time = vx->runstate_entry_time; in kvm_xen_update_runstate_guest()
496 *update_bit = (vx->runstate_entry_time | XEN_RUNSTATE_UPDATE) >> 56; in kvm_xen_update_runstate_guest()
505 *rs_state = vx->current_runstate; in kvm_xen_update_runstate_guest()
507 memcpy(rs_times + 1, vx->runstate_times, sizeof(vx->runstate_times)); in kvm_xen_update_runstate_guest()
511 memcpy(gpc1->khva, rs_state, user_len1); in kvm_xen_update_runstate_guest()
512 memcpy(gpc2->khva, ((void *)rs_state) + user_len1, user_len2); in kvm_xen_update_runstate_guest()
525 read_unlock(&gpc2->lock); in kvm_xen_update_runstate_guest()
529 read_unlock_irqrestore(&gpc1->lock, flags); in kvm_xen_update_runstate_guest()
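
The runstate area only has to be contiguous in guest physical space, so when it straddles a page boundary it is tracked by two pfn caches (gpc1/gpc2) and written with two memcpy() calls. The split arithmetic is self-contained (a sketch using the usual 4 KiB page constants):

#include <stddef.h>
#include <stdint.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

/*
 * Split a 'len'-byte guest-physical area at 'gpa' into the part that fits
 * in the first page and the remainder on the following page (len2 == 0
 * when the area ends exactly at the page boundary).
 */
static void split_at_page(uint64_t gpa, size_t len, size_t *len1, size_t *len2)
{
	*len1 = len;
	*len2 = 0;
	if ((gpa & ~PAGE_MASK) + len >= PAGE_SIZE) {
		*len1 = PAGE_SIZE - (gpa & ~PAGE_MASK);
		*len2 = len - *len1;
	}
}
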
534 struct kvm_vcpu_xen *vx = &v->arch.xen; in kvm_xen_update_runstate()
535 u64 now = get_kvmclock_ns(v->kvm); in kvm_xen_update_runstate()
536 u64 delta_ns = now - vx->runstate_entry_time; in kvm_xen_update_runstate()
537 u64 run_delay = current->sched_info.run_delay; in kvm_xen_update_runstate()
539 if (unlikely(!vx->runstate_entry_time)) in kvm_xen_update_runstate()
540 vx->current_runstate = RUNSTATE_offline; in kvm_xen_update_runstate()
546 if (vx->current_runstate == RUNSTATE_running) { in kvm_xen_update_runstate()
547 u64 steal_ns = run_delay - vx->last_steal; in kvm_xen_update_runstate()
549 delta_ns -= steal_ns; in kvm_xen_update_runstate()
551 vx->runstate_times[RUNSTATE_runnable] += steal_ns; in kvm_xen_update_runstate()
553 vx->last_steal = run_delay; in kvm_xen_update_runstate()
555 vx->runstate_times[vx->current_runstate] += delta_ns; in kvm_xen_update_runstate()
556 vx->current_runstate = state; in kvm_xen_update_runstate()
557 vx->runstate_entry_time = now; in kvm_xen_update_runstate()
559 if (vx->runstate_cache.active) in kvm_xen_update_runstate()
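
kvm_xen_update_runstate() charges the wall time since the last transition to the outgoing state, except that while "running", any time the host scheduler kept the vCPU waiting (run_delay) is reclassified as "runnable", which is how Xen guests see steal time. A compact model of the bookkeeping (illustrative, not the kernel code):

#include <stdint.h>

enum { RUNSTATE_running, RUNSTATE_runnable, RUNSTATE_blocked, RUNSTATE_offline };

struct runstate {
	int current_state;
	uint64_t entry_time_ns;		/* when current_state was entered */
	uint64_t last_steal_ns;		/* run_delay seen at last update */
	uint64_t times_ns[4];		/* cumulative nanoseconds per state */
};

static void update_runstate(struct runstate *rs, int new_state,
			    uint64_t now_ns, uint64_t run_delay_ns)
{
	uint64_t delta_ns = now_ns - rs->entry_time_ns;

	if (rs->current_state == RUNSTATE_running) {
		/* Off-CPU-but-runnable time is steal, not running time. */
		uint64_t steal_ns = run_delay_ns - rs->last_steal_ns;

		delta_ns -= steal_ns;
		rs->times_ns[RUNSTATE_runnable] += steal_ns;
	}
	rs->last_steal_ns = run_delay_ns;

	rs->times_ns[rs->current_state] += delta_ns;
	rs->current_state = new_state;
	rs->entry_time_ns = now_ns;
}
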
567 irq.dest_id = v->vcpu_id; in kvm_xen_inject_vcpu_vector()
568 irq.vector = v->arch.xen.upcall_vector; in kvm_xen_inject_vcpu_vector()
574 kvm_irq_delivery_to_apic(v->kvm, NULL, &irq, NULL); in kvm_xen_inject_vcpu_vector()
579 * In that case, there are bits in vcpu->arch.xen.evtchn_pending_sel which
586 unsigned long evtchn_pending_sel = READ_ONCE(v->arch.xen.evtchn_pending_sel); in kvm_xen_inject_pending_events()
587 struct gfn_to_pfn_cache *gpc = &v->arch.xen.vcpu_info_cache; in kvm_xen_inject_pending_events()
594 * Yes, this is an open-coded loop. But that's just what put_user() in kvm_xen_inject_pending_events()
598 read_lock_irqsave(&gpc->lock, flags); in kvm_xen_inject_pending_events()
600 read_unlock_irqrestore(&gpc->lock, flags); in kvm_xen_inject_pending_events()
605 read_lock_irqsave(&gpc->lock, flags); in kvm_xen_inject_pending_events()
608 /* Now gpc->khva is a valid kernel address for the vcpu_info */ in kvm_xen_inject_pending_events()
609 if (IS_ENABLED(CONFIG_64BIT) && v->kvm->arch.xen.long_mode) { in kvm_xen_inject_pending_events()
610 struct vcpu_info *vi = gpc->khva; in kvm_xen_inject_pending_events()
616 "+m" (vi->evtchn_pending_sel), in kvm_xen_inject_pending_events()
617 "+m" (v->arch.xen.evtchn_pending_sel) in kvm_xen_inject_pending_events()
619 WRITE_ONCE(vi->evtchn_upcall_pending, 1); in kvm_xen_inject_pending_events()
622 struct compat_vcpu_info *vi = gpc->khva; in kvm_xen_inject_pending_events()
628 "+m" (vi->evtchn_pending_sel), in kvm_xen_inject_pending_events()
629 "+m" (v->arch.xen.evtchn_pending_sel) in kvm_xen_inject_pending_events()
631 WRITE_ONCE(vi->evtchn_upcall_pending, 1); in kvm_xen_inject_pending_events()
635 read_unlock_irqrestore(&gpc->lock, flags); in kvm_xen_inject_pending_events()
637 /* For the per-vCPU lapic vector, deliver it as MSI. */ in kvm_xen_inject_pending_events()
638 if (v->arch.xen.upcall_vector) in kvm_xen_inject_pending_events()
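
The inline asm above atomically transfers the kernel-side shadow of evtchn_pending_sel into the guest-visible vcpu_info word (OR into the guest word, then clear exactly those bits from the shadow). One portable way to express the same hand-off with compiler builtins (a sketch, not an instruction-for-instruction equivalent):

#include <stdint.h>

/*
 * Publish pending-selector bits from a kernel-side shadow word into the
 * guest-visible selector, atomically with respect to concurrent setters.
 */
static void flush_pending_sel(unsigned long *guest_sel, unsigned long *shadow)
{
	/* Take ownership of whatever bits are currently in the shadow... */
	unsigned long bits = __atomic_exchange_n(shadow, 0, __ATOMIC_ACQ_REL);

	/* ...and make them visible to the guest in one atomic OR. */
	if (bits)
		__atomic_fetch_or(guest_sel, bits, __ATOMIC_RELEASE);
	/* The caller then sets vi->evtchn_upcall_pending, as above. */
}
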
644 struct gfn_to_pfn_cache *gpc = &v->arch.xen.vcpu_info_cache; in __kvm_xen_has_interrupt()
661 read_lock_irqsave(&gpc->lock, flags); in __kvm_xen_has_interrupt()
663 read_unlock_irqrestore(&gpc->lock, flags); in __kvm_xen_has_interrupt()
683 read_lock_irqsave(&gpc->lock, flags); in __kvm_xen_has_interrupt()
686 rc = ((struct vcpu_info *)gpc->khva)->evtchn_upcall_pending; in __kvm_xen_has_interrupt()
687 read_unlock_irqrestore(&gpc->lock, flags); in __kvm_xen_has_interrupt()
693 int r = -ENOENT; in kvm_xen_hvm_set_attr()
696 switch (data->type) { in kvm_xen_hvm_set_attr()
698 if (!IS_ENABLED(CONFIG_64BIT) && data->u.long_mode) { in kvm_xen_hvm_set_attr()
699 r = -EINVAL; in kvm_xen_hvm_set_attr()
701 mutex_lock(&kvm->arch.xen.xen_lock); in kvm_xen_hvm_set_attr()
702 kvm->arch.xen.long_mode = !!data->u.long_mode; in kvm_xen_hvm_set_attr()
705 * Re-initialize shared_info to put the wallclock in the in kvm_xen_hvm_set_attr()
710 r = kvm->arch.xen.shinfo_cache.active ? in kvm_xen_hvm_set_attr()
712 mutex_unlock(&kvm->arch.xen.xen_lock); in kvm_xen_hvm_set_attr()
720 mutex_lock(&kvm->arch.xen.xen_lock); in kvm_xen_hvm_set_attr()
722 idx = srcu_read_lock(&kvm->srcu); in kvm_xen_hvm_set_attr()
724 if (data->type == KVM_XEN_ATTR_TYPE_SHARED_INFO) { in kvm_xen_hvm_set_attr()
725 gfn_t gfn = data->u.shared_info.gfn; in kvm_xen_hvm_set_attr()
728 kvm_gpc_deactivate(&kvm->arch.xen.shinfo_cache); in kvm_xen_hvm_set_attr()
731 r = kvm_gpc_activate(&kvm->arch.xen.shinfo_cache, in kvm_xen_hvm_set_attr()
735 void __user * hva = u64_to_user_ptr(data->u.shared_info.hva); in kvm_xen_hvm_set_attr()
738 r = -EINVAL; in kvm_xen_hvm_set_attr()
740 kvm_gpc_deactivate(&kvm->arch.xen.shinfo_cache); in kvm_xen_hvm_set_attr()
743 r = kvm_gpc_activate_hva(&kvm->arch.xen.shinfo_cache, in kvm_xen_hvm_set_attr()
748 srcu_read_unlock(&kvm->srcu, idx); in kvm_xen_hvm_set_attr()
750 if (!r && kvm->arch.xen.shinfo_cache.active) in kvm_xen_hvm_set_attr()
753 mutex_unlock(&kvm->arch.xen.xen_lock); in kvm_xen_hvm_set_attr()
757 if (data->u.vector && data->u.vector < 0x10) in kvm_xen_hvm_set_attr()
758 r = -EINVAL; in kvm_xen_hvm_set_attr()
760 mutex_lock(&kvm->arch.xen.xen_lock); in kvm_xen_hvm_set_attr()
761 kvm->arch.xen.upcall_vector = data->u.vector; in kvm_xen_hvm_set_attr()
762 mutex_unlock(&kvm->arch.xen.xen_lock); in kvm_xen_hvm_set_attr()
772 mutex_lock(&kvm->arch.xen.xen_lock); in kvm_xen_hvm_set_attr()
773 kvm->arch.xen.xen_version = data->u.xen_version; in kvm_xen_hvm_set_attr()
774 mutex_unlock(&kvm->arch.xen.xen_lock); in kvm_xen_hvm_set_attr()
780 r = -EOPNOTSUPP; in kvm_xen_hvm_set_attr()
783 mutex_lock(&kvm->arch.xen.xen_lock); in kvm_xen_hvm_set_attr()
784 kvm->arch.xen.runstate_update_flag = !!data->u.runstate_update_flag; in kvm_xen_hvm_set_attr()
785 mutex_unlock(&kvm->arch.xen.xen_lock); in kvm_xen_hvm_set_attr()
798 int r = -ENOENT; in kvm_xen_hvm_get_attr()
800 mutex_lock(&kvm->arch.xen.xen_lock); in kvm_xen_hvm_get_attr()
802 switch (data->type) { in kvm_xen_hvm_get_attr()
804 data->u.long_mode = kvm->arch.xen.long_mode; in kvm_xen_hvm_get_attr()
809 if (kvm_gpc_is_gpa_active(&kvm->arch.xen.shinfo_cache)) in kvm_xen_hvm_get_attr()
810 data->u.shared_info.gfn = gpa_to_gfn(kvm->arch.xen.shinfo_cache.gpa); in kvm_xen_hvm_get_attr()
812 data->u.shared_info.gfn = KVM_XEN_INVALID_GFN; in kvm_xen_hvm_get_attr()
817 if (kvm_gpc_is_hva_active(&kvm->arch.xen.shinfo_cache)) in kvm_xen_hvm_get_attr()
818 data->u.shared_info.hva = kvm->arch.xen.shinfo_cache.uhva; in kvm_xen_hvm_get_attr()
820 data->u.shared_info.hva = 0; in kvm_xen_hvm_get_attr()
825 data->u.vector = kvm->arch.xen.upcall_vector; in kvm_xen_hvm_get_attr()
830 data->u.xen_version = kvm->arch.xen.xen_version; in kvm_xen_hvm_get_attr()
836 r = -EOPNOTSUPP; in kvm_xen_hvm_get_attr()
839 data->u.runstate_update_flag = kvm->arch.xen.runstate_update_flag; in kvm_xen_hvm_get_attr()
847 mutex_unlock(&kvm->arch.xen.xen_lock); in kvm_xen_hvm_get_attr()
853 int idx, r = -ENOENT; in kvm_xen_vcpu_set_attr()
855 mutex_lock(&vcpu->kvm->arch.xen.xen_lock); in kvm_xen_vcpu_set_attr()
856 idx = srcu_read_lock(&vcpu->kvm->srcu); in kvm_xen_vcpu_set_attr()
858 switch (data->type) { in kvm_xen_vcpu_set_attr()
867 if (data->type == KVM_XEN_VCPU_ATTR_TYPE_VCPU_INFO) { in kvm_xen_vcpu_set_attr()
868 if (data->u.gpa == KVM_XEN_INVALID_GPA) { in kvm_xen_vcpu_set_attr()
869 kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_info_cache); in kvm_xen_vcpu_set_attr()
874 r = kvm_gpc_activate(&vcpu->arch.xen.vcpu_info_cache, in kvm_xen_vcpu_set_attr()
875 data->u.gpa, sizeof(struct vcpu_info)); in kvm_xen_vcpu_set_attr()
877 if (data->u.hva == 0) { in kvm_xen_vcpu_set_attr()
878 kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_info_cache); in kvm_xen_vcpu_set_attr()
883 r = kvm_gpc_activate_hva(&vcpu->arch.xen.vcpu_info_cache, in kvm_xen_vcpu_set_attr()
884 data->u.hva, sizeof(struct vcpu_info)); in kvm_xen_vcpu_set_attr()
893 if (data->u.gpa == KVM_XEN_INVALID_GPA) { in kvm_xen_vcpu_set_attr()
894 kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_time_info_cache); in kvm_xen_vcpu_set_attr()
899 r = kvm_gpc_activate(&vcpu->arch.xen.vcpu_time_info_cache, in kvm_xen_vcpu_set_attr()
900 data->u.gpa, in kvm_xen_vcpu_set_attr()
910 r = -EOPNOTSUPP; in kvm_xen_vcpu_set_attr()
913 if (data->u.gpa == KVM_XEN_INVALID_GPA) { in kvm_xen_vcpu_set_attr()
916 kvm_gpc_deactivate(&vcpu->arch.xen.runstate_cache); in kvm_xen_vcpu_set_attr()
917 kvm_gpc_deactivate(&vcpu->arch.xen.runstate2_cache); in kvm_xen_vcpu_set_attr()
922 * If the guest switches to 64-bit mode after setting the runstate in kvm_xen_vcpu_set_attr()
926 if (IS_ENABLED(CONFIG_64BIT) && vcpu->kvm->arch.xen.long_mode) in kvm_xen_vcpu_set_attr()
932 sz1 = PAGE_SIZE - (data->u.gpa & ~PAGE_MASK); in kvm_xen_vcpu_set_attr()
933 r = kvm_gpc_activate(&vcpu->arch.xen.runstate_cache, in kvm_xen_vcpu_set_attr()
934 data->u.gpa, sz1); in kvm_xen_vcpu_set_attr()
940 kvm_gpc_deactivate(&vcpu->arch.xen.runstate2_cache); in kvm_xen_vcpu_set_attr()
942 sz2 = sz - sz1; in kvm_xen_vcpu_set_attr()
943 BUG_ON((data->u.gpa + sz1) & ~PAGE_MASK); in kvm_xen_vcpu_set_attr()
944 r = kvm_gpc_activate(&vcpu->arch.xen.runstate2_cache, in kvm_xen_vcpu_set_attr()
945 data->u.gpa + sz1, sz2); in kvm_xen_vcpu_set_attr()
955 r = -EOPNOTSUPP; in kvm_xen_vcpu_set_attr()
958 if (data->u.runstate.state > RUNSTATE_offline) { in kvm_xen_vcpu_set_attr()
959 r = -EINVAL; in kvm_xen_vcpu_set_attr()
963 kvm_xen_update_runstate(vcpu, data->u.runstate.state); in kvm_xen_vcpu_set_attr()
969 r = -EOPNOTSUPP; in kvm_xen_vcpu_set_attr()
972 if (data->u.runstate.state > RUNSTATE_offline) { in kvm_xen_vcpu_set_attr()
973 r = -EINVAL; in kvm_xen_vcpu_set_attr()
976 if (data->u.runstate.state_entry_time != in kvm_xen_vcpu_set_attr()
977 (data->u.runstate.time_running + in kvm_xen_vcpu_set_attr()
978 data->u.runstate.time_runnable + in kvm_xen_vcpu_set_attr()
979 data->u.runstate.time_blocked + in kvm_xen_vcpu_set_attr()
980 data->u.runstate.time_offline)) { in kvm_xen_vcpu_set_attr()
981 r = -EINVAL; in kvm_xen_vcpu_set_attr()
984 if (get_kvmclock_ns(vcpu->kvm) < in kvm_xen_vcpu_set_attr()
985 data->u.runstate.state_entry_time) { in kvm_xen_vcpu_set_attr()
986 r = -EINVAL; in kvm_xen_vcpu_set_attr()
990 vcpu->arch.xen.current_runstate = data->u.runstate.state; in kvm_xen_vcpu_set_attr()
991 vcpu->arch.xen.runstate_entry_time = in kvm_xen_vcpu_set_attr()
992 data->u.runstate.state_entry_time; in kvm_xen_vcpu_set_attr()
993 vcpu->arch.xen.runstate_times[RUNSTATE_running] = in kvm_xen_vcpu_set_attr()
994 data->u.runstate.time_running; in kvm_xen_vcpu_set_attr()
995 vcpu->arch.xen.runstate_times[RUNSTATE_runnable] = in kvm_xen_vcpu_set_attr()
996 data->u.runstate.time_runnable; in kvm_xen_vcpu_set_attr()
997 vcpu->arch.xen.runstate_times[RUNSTATE_blocked] = in kvm_xen_vcpu_set_attr()
998 data->u.runstate.time_blocked; in kvm_xen_vcpu_set_attr()
999 vcpu->arch.xen.runstate_times[RUNSTATE_offline] = in kvm_xen_vcpu_set_attr()
1000 data->u.runstate.time_offline; in kvm_xen_vcpu_set_attr()
1001 vcpu->arch.xen.last_steal = current->sched_info.run_delay; in kvm_xen_vcpu_set_attr()
1007 r = -EOPNOTSUPP; in kvm_xen_vcpu_set_attr()
1010 if (data->u.runstate.state > RUNSTATE_offline && in kvm_xen_vcpu_set_attr()
1011 data->u.runstate.state != (u64)-1) { in kvm_xen_vcpu_set_attr()
1012 r = -EINVAL; in kvm_xen_vcpu_set_attr()
1016 if (data->u.runstate.state_entry_time != in kvm_xen_vcpu_set_attr()
1017 (data->u.runstate.time_running + in kvm_xen_vcpu_set_attr()
1018 data->u.runstate.time_runnable + in kvm_xen_vcpu_set_attr()
1019 data->u.runstate.time_blocked + in kvm_xen_vcpu_set_attr()
1020 data->u.runstate.time_offline)) { in kvm_xen_vcpu_set_attr()
1021 r = -EINVAL; in kvm_xen_vcpu_set_attr()
1025 if (get_kvmclock_ns(vcpu->kvm) < in kvm_xen_vcpu_set_attr()
1026 (vcpu->arch.xen.runstate_entry_time + in kvm_xen_vcpu_set_attr()
1027 data->u.runstate.state_entry_time)) { in kvm_xen_vcpu_set_attr()
1028 r = -EINVAL; in kvm_xen_vcpu_set_attr()
1032 vcpu->arch.xen.runstate_entry_time += in kvm_xen_vcpu_set_attr()
1033 data->u.runstate.state_entry_time; in kvm_xen_vcpu_set_attr()
1034 vcpu->arch.xen.runstate_times[RUNSTATE_running] += in kvm_xen_vcpu_set_attr()
1035 data->u.runstate.time_running; in kvm_xen_vcpu_set_attr()
1036 vcpu->arch.xen.runstate_times[RUNSTATE_runnable] += in kvm_xen_vcpu_set_attr()
1037 data->u.runstate.time_runnable; in kvm_xen_vcpu_set_attr()
1038 vcpu->arch.xen.runstate_times[RUNSTATE_blocked] += in kvm_xen_vcpu_set_attr()
1039 data->u.runstate.time_blocked; in kvm_xen_vcpu_set_attr()
1040 vcpu->arch.xen.runstate_times[RUNSTATE_offline] += in kvm_xen_vcpu_set_attr()
1041 data->u.runstate.time_offline; in kvm_xen_vcpu_set_attr()
1043 if (data->u.runstate.state <= RUNSTATE_offline) in kvm_xen_vcpu_set_attr()
1044 kvm_xen_update_runstate(vcpu, data->u.runstate.state); in kvm_xen_vcpu_set_attr()
1045 else if (vcpu->arch.xen.runstate_cache.active) in kvm_xen_vcpu_set_attr()
1051 if (data->u.vcpu_id >= KVM_MAX_VCPUS) in kvm_xen_vcpu_set_attr()
1052 r = -EINVAL; in kvm_xen_vcpu_set_attr()
1054 vcpu->arch.xen.vcpu_id = data->u.vcpu_id; in kvm_xen_vcpu_set_attr()
1060 if (data->u.timer.port && in kvm_xen_vcpu_set_attr()
1061 data->u.timer.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL) { in kvm_xen_vcpu_set_attr()
1062 r = -EINVAL; in kvm_xen_vcpu_set_attr()
1068 vcpu->arch.xen.timer_virq = data->u.timer.port; in kvm_xen_vcpu_set_attr()
1071 if (data->u.timer.port && data->u.timer.expires_ns) in kvm_xen_vcpu_set_attr()
1072 kvm_xen_start_timer(vcpu, data->u.timer.expires_ns, false); in kvm_xen_vcpu_set_attr()
1078 if (data->u.vector && data->u.vector < 0x10) in kvm_xen_vcpu_set_attr()
1079 r = -EINVAL; in kvm_xen_vcpu_set_attr()
1081 vcpu->arch.xen.upcall_vector = data->u.vector; in kvm_xen_vcpu_set_attr()
1090 srcu_read_unlock(&vcpu->kvm->srcu, idx); in kvm_xen_vcpu_set_attr()
1091 mutex_unlock(&vcpu->kvm->arch.xen.xen_lock); in kvm_xen_vcpu_set_attr()
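
Both KVM_XEN_VCPU_ATTR_TYPE_RUNSTATE_DATA and _ADJUST enforce the same invariants before touching the counters: state_entry_time must equal the sum of the four per-state times, and the entry time may not lie in the kvmclock future. As a standalone check (sketch):

#include <stdbool.h>
#include <stdint.h>

struct runstate_data {
	uint64_t state_entry_time;
	uint64_t time_running, time_runnable, time_blocked, time_offline;
};

static bool runstate_data_valid(const struct runstate_data *d, uint64_t now_ns)
{
	uint64_t sum = d->time_running + d->time_runnable +
		       d->time_blocked + d->time_offline;

	/* Every nanosecond up to the last transition must be accounted. */
	if (d->state_entry_time != sum)
		return false;

	/* The last transition cannot be later than the guest clock "now". */
	return d->state_entry_time <= now_ns;
}
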
1097 int r = -ENOENT; in kvm_xen_vcpu_get_attr()
1099 mutex_lock(&vcpu->kvm->arch.xen.xen_lock); in kvm_xen_vcpu_get_attr()
1101 switch (data->type) { in kvm_xen_vcpu_get_attr()
1103 if (kvm_gpc_is_gpa_active(&vcpu->arch.xen.vcpu_info_cache)) in kvm_xen_vcpu_get_attr()
1104 data->u.gpa = vcpu->arch.xen.vcpu_info_cache.gpa; in kvm_xen_vcpu_get_attr()
1106 data->u.gpa = KVM_XEN_INVALID_GPA; in kvm_xen_vcpu_get_attr()
1111 if (kvm_gpc_is_hva_active(&vcpu->arch.xen.vcpu_info_cache)) in kvm_xen_vcpu_get_attr()
1112 data->u.hva = vcpu->arch.xen.vcpu_info_cache.uhva; in kvm_xen_vcpu_get_attr()
1114 data->u.hva = 0; in kvm_xen_vcpu_get_attr()
1119 if (vcpu->arch.xen.vcpu_time_info_cache.active) in kvm_xen_vcpu_get_attr()
1120 data->u.gpa = vcpu->arch.xen.vcpu_time_info_cache.gpa; in kvm_xen_vcpu_get_attr()
1122 data->u.gpa = KVM_XEN_INVALID_GPA; in kvm_xen_vcpu_get_attr()
1128 r = -EOPNOTSUPP; in kvm_xen_vcpu_get_attr()
1131 if (vcpu->arch.xen.runstate_cache.active) { in kvm_xen_vcpu_get_attr()
1132 data->u.gpa = vcpu->arch.xen.runstate_cache.gpa; in kvm_xen_vcpu_get_attr()
1139 r = -EOPNOTSUPP; in kvm_xen_vcpu_get_attr()
1142 data->u.runstate.state = vcpu->arch.xen.current_runstate; in kvm_xen_vcpu_get_attr()
1148 r = -EOPNOTSUPP; in kvm_xen_vcpu_get_attr()
1151 data->u.runstate.state = vcpu->arch.xen.current_runstate; in kvm_xen_vcpu_get_attr()
1152 data->u.runstate.state_entry_time = in kvm_xen_vcpu_get_attr()
1153 vcpu->arch.xen.runstate_entry_time; in kvm_xen_vcpu_get_attr()
1154 data->u.runstate.time_running = in kvm_xen_vcpu_get_attr()
1155 vcpu->arch.xen.runstate_times[RUNSTATE_running]; in kvm_xen_vcpu_get_attr()
1156 data->u.runstate.time_runnable = in kvm_xen_vcpu_get_attr()
1157 vcpu->arch.xen.runstate_times[RUNSTATE_runnable]; in kvm_xen_vcpu_get_attr()
1158 data->u.runstate.time_blocked = in kvm_xen_vcpu_get_attr()
1159 vcpu->arch.xen.runstate_times[RUNSTATE_blocked]; in kvm_xen_vcpu_get_attr()
1160 data->u.runstate.time_offline = in kvm_xen_vcpu_get_attr()
1161 vcpu->arch.xen.runstate_times[RUNSTATE_offline]; in kvm_xen_vcpu_get_attr()
1166 r = -EINVAL; in kvm_xen_vcpu_get_attr()
1170 data->u.vcpu_id = vcpu->arch.xen.vcpu_id; in kvm_xen_vcpu_get_attr()
1181 * zero, that means the timer isn't active (or even in the in kvm_xen_vcpu_get_attr()
1184 if (vcpu->arch.xen.timer_expires) { in kvm_xen_vcpu_get_attr()
1185 hrtimer_cancel(&vcpu->arch.xen.timer); in kvm_xen_vcpu_get_attr()
1189 data->u.timer.port = vcpu->arch.xen.timer_virq; in kvm_xen_vcpu_get_attr()
1190 data->u.timer.priority = KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL; in kvm_xen_vcpu_get_attr()
1191 data->u.timer.expires_ns = vcpu->arch.xen.timer_expires; in kvm_xen_vcpu_get_attr()
1201 if (vcpu->arch.xen.timer_expires) in kvm_xen_vcpu_get_attr()
1202 hrtimer_start_expires(&vcpu->arch.xen.timer, in kvm_xen_vcpu_get_attr()
1209 data->u.vector = vcpu->arch.xen.upcall_vector; in kvm_xen_vcpu_get_attr()
1217 mutex_unlock(&vcpu->kvm->arch.xen.xen_lock); in kvm_xen_vcpu_get_attr()
1223 struct kvm *kvm = vcpu->kvm; in kvm_xen_write_hypercall_page()
1229 mutex_lock(&kvm->arch.xen.xen_lock); in kvm_xen_write_hypercall_page()
1230 if (kvm->arch.xen.long_mode != lm) { in kvm_xen_write_hypercall_page()
1231 kvm->arch.xen.long_mode = lm; in kvm_xen_write_hypercall_page()
1234 * Re-initialize shared_info to put the wallclock in the in kvm_xen_write_hypercall_page()
1237 if (kvm->arch.xen.shinfo_cache.active && in kvm_xen_write_hypercall_page()
1241 mutex_unlock(&kvm->arch.xen.xen_lock); in kvm_xen_write_hypercall_page()
1269 memset(instructions + 9, 0xcc, sizeof(instructions) - 9); in kvm_xen_write_hypercall_page()
1280 * Note, truncation is a non-issue as 'lm' is guaranteed to be in kvm_xen_write_hypercall_page()
1281 * false for a 32-bit kernel, i.e. when hva_t is only 4 bytes. in kvm_xen_write_hypercall_page()
1283 hva_t blob_addr = lm ? kvm->arch.xen_hvm_config.blob_addr_64 in kvm_xen_write_hypercall_page()
1284 : kvm->arch.xen_hvm_config.blob_addr_32; in kvm_xen_write_hypercall_page()
1285 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64 in kvm_xen_write_hypercall_page()
1286 : kvm->arch.xen_hvm_config.blob_size_32; in kvm_xen_write_hypercall_page()
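
When no userspace blob is configured, KVM fills the hypercall page itself: each 32-byte slot n becomes "mov $n, %eax; vmcall; ret", padded with int3 (0xcc), which is what the memset above completes. A host-side sketch that builds such a page into a buffer (illustrative; the real code patches vmcall vs. vmmcall per CPU vendor, and this assumes x86 little-endian byte order):

#include <stdint.h>
#include <string.h>

#define HC_SLOT	32
#define HC_PAGE	4096

static void build_hypercall_page(uint8_t page[HC_PAGE])
{
	for (uint32_t n = 0; n < HC_PAGE / HC_SLOT; n++) {
		uint8_t *insn = page + n * HC_SLOT;

		insn[0] = 0xb8;			/* mov $imm32, %eax */
		memcpy(insn + 1, &n, 4);	/* hypercall number (LE) */
		insn[5] = 0x0f;			/* vmcall (Intel encoding) */
		insn[6] = 0x01;
		insn[7] = 0xc1;
		insn[8] = 0xc3;			/* ret */
		memset(insn + 9, 0xcc, HC_SLOT - 9);	/* int3 padding */
	}
}
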
1315 if (xhc->flags & ~permitted_flags) in kvm_xen_hvm_config()
1316 return -EINVAL; in kvm_xen_hvm_config()
1322 if ((xhc->flags & KVM_XEN_HVM_CONFIG_INTERCEPT_HCALL) && in kvm_xen_hvm_config()
1323 (xhc->blob_addr_32 || xhc->blob_addr_64 || in kvm_xen_hvm_config()
1324 xhc->blob_size_32 || xhc->blob_size_64)) in kvm_xen_hvm_config()
1325 return -EINVAL; in kvm_xen_hvm_config()
1327 mutex_lock(&kvm->arch.xen.xen_lock); in kvm_xen_hvm_config()
1329 if (xhc->msr && !kvm->arch.xen_hvm_config.msr) in kvm_xen_hvm_config()
1331 else if (!xhc->msr && kvm->arch.xen_hvm_config.msr) in kvm_xen_hvm_config()
1334 old_flags = kvm->arch.xen_hvm_config.flags; in kvm_xen_hvm_config()
1335 memcpy(&kvm->arch.xen_hvm_config, xhc, sizeof(*xhc)); in kvm_xen_hvm_config()
1337 mutex_unlock(&kvm->arch.xen.xen_lock); in kvm_xen_hvm_config()
1339 if ((old_flags ^ xhc->flags) & KVM_XEN_HVM_CONFIG_PVCLOCK_TSC_UNSTABLE) in kvm_xen_hvm_config()
1353 struct kvm_run *run = vcpu->run; in kvm_xen_hypercall_complete_userspace()
1355 if (unlikely(!kvm_is_linear_rip(vcpu, vcpu->arch.xen.hypercall_rip))) in kvm_xen_hypercall_complete_userspace()
1358 return kvm_xen_hypercall_set_result(vcpu, run->xen.u.hcall.result); in kvm_xen_hypercall_complete_userspace()
1363 if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) in max_evtchn_port()
1372 struct kvm *kvm = vcpu->kvm; in wait_pending_event()
1373 struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache; in wait_pending_event()
1379 idx = srcu_read_lock(&kvm->srcu); in wait_pending_event()
1380 read_lock_irqsave(&gpc->lock, flags); in wait_pending_event()
1385 if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) { in wait_pending_event()
1386 struct shared_info *shinfo = gpc->khva; in wait_pending_event()
1387 pending_bits = (unsigned long *)&shinfo->evtchn_pending; in wait_pending_event()
1389 struct compat_shared_info *shinfo = gpc->khva; in wait_pending_event()
1390 pending_bits = (unsigned long *)&shinfo->evtchn_pending; in wait_pending_event()
1401 read_unlock_irqrestore(&gpc->lock, flags); in wait_pending_event()
1402 srcu_read_unlock(&kvm->srcu, idx); in wait_pending_event()
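
Under the shinfo lock, wait_pending_event() just scans the shared 2-level pending bitmap for any of the polled ports; the core test reduces to this (sketch):

#include <limits.h>
#include <stdbool.h>

#define BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)

static bool any_port_pending(unsigned int nr, const unsigned int *ports,
			     const unsigned long *pending_bits)
{
	for (unsigned int i = 0; i < nr; i++)
		if (pending_bits[ports[i] / BITS_PER_LONG] &
		    (1UL << (ports[i] % BITS_PER_LONG)))
			return true;
	return false;
}
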
1416 !(vcpu->kvm->arch.xen_hvm_config.flags & KVM_XEN_HVM_CONFIG_EVTCHN_SEND)) in kvm_xen_schedop_poll()
1426 *r = -EFAULT; in kvm_xen_schedop_poll()
1431 * This is a 32-bit pointer to an array of evtchn_port_t which in kvm_xen_schedop_poll()
1441 *r = -EFAULT; in kvm_xen_schedop_poll()
1449 *r = -EINVAL; in kvm_xen_schedop_poll()
1456 *r = -ENOMEM; in kvm_xen_schedop_poll()
1464 *r = -EFAULT; in kvm_xen_schedop_poll()
1469 if (ports[i] >= max_evtchn_port(vcpu->kvm)) { in kvm_xen_schedop_poll()
1470 *r = -EINVAL; in kvm_xen_schedop_poll()
1476 vcpu->arch.xen.poll_evtchn = port; in kvm_xen_schedop_poll()
1478 vcpu->arch.xen.poll_evtchn = -1; in kvm_xen_schedop_poll()
1480 set_bit(vcpu->vcpu_idx, vcpu->kvm->arch.xen.poll_mask); in kvm_xen_schedop_poll()
1483 vcpu->arch.mp_state = KVM_MP_STATE_HALTED; in kvm_xen_schedop_poll()
1486 mod_timer(&vcpu->arch.xen.poll_timer, in kvm_xen_schedop_poll()
1492 del_timer(&vcpu->arch.xen.poll_timer); in kvm_xen_schedop_poll()
1494 vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE; in kvm_xen_schedop_poll()
1497 vcpu->arch.xen.poll_evtchn = 0; in kvm_xen_schedop_poll()
1501 clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.xen.poll_mask); in kvm_xen_schedop_poll()
1551 if (vcpu->arch.xen.vcpu_id != vcpu_id) { in kvm_xen_hcall_vcpu_op()
1552 *r = -EINVAL; in kvm_xen_hcall_vcpu_op()
1557 * The only difference for 32-bit compat is the 4 bytes of in kvm_xen_hcall_vcpu_op()
1560 * the padding and return -EFAULT if we can't. Otherwise we in kvm_xen_hcall_vcpu_op()
1561 * might as well just have copied the 12-byte 32-bit struct. in kvm_xen_hcall_vcpu_op()
1574 *r = -EFAULT; in kvm_xen_hcall_vcpu_op()
1583 if (vcpu->arch.xen.vcpu_id != vcpu_id) { in kvm_xen_hcall_vcpu_op()
1584 *r = -EINVAL; in kvm_xen_hcall_vcpu_op()
1613 u64 input, params[6], r = -ENOSYS; in kvm_xen_hypercall()
1619 /* Hyper-V hypercalls get bit 31 set in EAX */ in kvm_xen_hypercall()
1656 if (params[0] == XENVER_version && vcpu->kvm->arch.xen.xen_version) { in kvm_xen_hypercall()
1657 r = vcpu->kvm->arch.xen.xen_version; in kvm_xen_hypercall()
1675 /* In 32-bit mode, the 64-bit timeout is in two 32-bit params. */ in kvm_xen_hypercall()
1689 vcpu->run->exit_reason = KVM_EXIT_XEN; in kvm_xen_hypercall()
1690 vcpu->run->xen.type = KVM_EXIT_XEN_HCALL; in kvm_xen_hypercall()
1691 vcpu->run->xen.u.hcall.longmode = longmode; in kvm_xen_hypercall()
1692 vcpu->run->xen.u.hcall.cpl = cpl; in kvm_xen_hypercall()
1693 vcpu->run->xen.u.hcall.input = input; in kvm_xen_hypercall()
1694 vcpu->run->xen.u.hcall.params[0] = params[0]; in kvm_xen_hypercall()
1695 vcpu->run->xen.u.hcall.params[1] = params[1]; in kvm_xen_hypercall()
1696 vcpu->run->xen.u.hcall.params[2] = params[2]; in kvm_xen_hypercall()
1697 vcpu->run->xen.u.hcall.params[3] = params[3]; in kvm_xen_hypercall()
1698 vcpu->run->xen.u.hcall.params[4] = params[4]; in kvm_xen_hypercall()
1699 vcpu->run->xen.u.hcall.params[5] = params[5]; in kvm_xen_hypercall()
1700 vcpu->arch.xen.hypercall_rip = kvm_get_linear_rip(vcpu); in kvm_xen_hypercall()
1701 vcpu->arch.complete_userspace_io = in kvm_xen_hypercall()
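
A hypercall KVM does not handle in-kernel is bounced to the VMM via KVM_EXIT_XEN; userspace performs the operation, stores the guest-visible return value in run->xen.u.hcall.result, and re-enters the guest, at which point kvm_xen_hypercall_complete_userspace() copies the result into the guest's RAX. A minimal VMM-side loop (sketch; handle_xen_hcall is a hypothetical userspace handler, and vcpu_fd/run are assumed already set up via the usual KVM ioctls):

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Hypothetical: emulate the hypercall and return the guest's result. */
extern __u64 handle_xen_hcall(__u64 input, const __u64 params[6]);

static int vcpu_loop(int vcpu_fd, struct kvm_run *run)
{
	for (;;) {
		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
			return -1;

		if (run->exit_reason == KVM_EXIT_XEN &&
		    run->xen.type == KVM_EXIT_XEN_HCALL) {
			run->xen.u.hcall.result =
				handle_xen_hcall(run->xen.u.hcall.input,
						 run->xen.u.hcall.params);
			continue;	/* next KVM_RUN completes the hcall */
		}
		/* ...handle other exit reasons... */
		return 0;
	}
}
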
1709 int poll_evtchn = vcpu->arch.xen.poll_evtchn; in kvm_xen_check_poller()
1711 if ((poll_evtchn == port || poll_evtchn == -1) && in kvm_xen_check_poller()
1712 test_and_clear_bit(vcpu->vcpu_idx, vcpu->kvm->arch.xen.poll_mask)) { in kvm_xen_check_poller()
1726  * only check on its return value is a comparison with -EWOULDBLOCK.
1730 struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache; in kvm_xen_set_evtchn_fast()
1738 vcpu_idx = READ_ONCE(xe->vcpu_idx); in kvm_xen_set_evtchn_fast()
1742 vcpu = kvm_get_vcpu_by_id(kvm, xe->vcpu_id); in kvm_xen_set_evtchn_fast()
1744 return -EINVAL; in kvm_xen_set_evtchn_fast()
1745 WRITE_ONCE(xe->vcpu_idx, vcpu->vcpu_idx); in kvm_xen_set_evtchn_fast()
1748 if (xe->port >= max_evtchn_port(kvm)) in kvm_xen_set_evtchn_fast()
1749 return -EINVAL; in kvm_xen_set_evtchn_fast()
1751 rc = -EWOULDBLOCK; in kvm_xen_set_evtchn_fast()
1753 idx = srcu_read_lock(&kvm->srcu); in kvm_xen_set_evtchn_fast()
1755 read_lock_irqsave(&gpc->lock, flags); in kvm_xen_set_evtchn_fast()
1759 if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) { in kvm_xen_set_evtchn_fast()
1760 struct shared_info *shinfo = gpc->khva; in kvm_xen_set_evtchn_fast()
1761 pending_bits = (unsigned long *)&shinfo->evtchn_pending; in kvm_xen_set_evtchn_fast()
1762 mask_bits = (unsigned long *)&shinfo->evtchn_mask; in kvm_xen_set_evtchn_fast()
1763 port_word_bit = xe->port / 64; in kvm_xen_set_evtchn_fast()
1765 struct compat_shared_info *shinfo = gpc->khva; in kvm_xen_set_evtchn_fast()
1766 pending_bits = (unsigned long *)&shinfo->evtchn_pending; in kvm_xen_set_evtchn_fast()
1767 mask_bits = (unsigned long *)&shinfo->evtchn_mask; in kvm_xen_set_evtchn_fast()
1768 port_word_bit = xe->port / 32; in kvm_xen_set_evtchn_fast()
1773 * we try to set the corresponding bit in the in-kernel shadow of in kvm_xen_set_evtchn_fast()
1778 if (test_and_set_bit(xe->port, pending_bits)) { in kvm_xen_set_evtchn_fast()
1780 } else if (test_bit(xe->port, mask_bits)) { in kvm_xen_set_evtchn_fast()
1781 rc = -ENOTCONN; /* Masked */ in kvm_xen_set_evtchn_fast()
1782 kvm_xen_check_poller(vcpu, xe->port); in kvm_xen_set_evtchn_fast()
1786 read_unlock_irqrestore(&gpc->lock, flags); in kvm_xen_set_evtchn_fast()
1787 gpc = &vcpu->arch.xen.vcpu_info_cache; in kvm_xen_set_evtchn_fast()
1789 read_lock_irqsave(&gpc->lock, flags); in kvm_xen_set_evtchn_fast()
1792 * Could not access the vcpu_info. Set the bit in-kernel in kvm_xen_set_evtchn_fast()
1795 if (!test_and_set_bit(port_word_bit, &vcpu->arch.xen.evtchn_pending_sel)) in kvm_xen_set_evtchn_fast()
1800 if (IS_ENABLED(CONFIG_64BIT) && kvm->arch.xen.long_mode) { in kvm_xen_set_evtchn_fast()
1801 struct vcpu_info *vcpu_info = gpc->khva; in kvm_xen_set_evtchn_fast()
1802 if (!test_and_set_bit(port_word_bit, &vcpu_info->evtchn_pending_sel)) { in kvm_xen_set_evtchn_fast()
1803 WRITE_ONCE(vcpu_info->evtchn_upcall_pending, 1); in kvm_xen_set_evtchn_fast()
1807 struct compat_vcpu_info *vcpu_info = gpc->khva; in kvm_xen_set_evtchn_fast()
1809 (unsigned long *)&vcpu_info->evtchn_pending_sel)) { in kvm_xen_set_evtchn_fast()
1810 WRITE_ONCE(vcpu_info->evtchn_upcall_pending, 1); in kvm_xen_set_evtchn_fast()
1815 /* For the per-vCPU lapic vector, deliver it as MSI. */ in kvm_xen_set_evtchn_fast()
1816 if (kick_vcpu && vcpu->arch.xen.upcall_vector) { in kvm_xen_set_evtchn_fast()
1823 read_unlock_irqrestore(&gpc->lock, flags); in kvm_xen_set_evtchn_fast()
1824 srcu_read_unlock(&kvm->srcu, idx); in kvm_xen_set_evtchn_fast()
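
What the fast path implements is plain 2-level event channel delivery: atomically set the port's bit in the shared evtchn_pending bitmap; if the port is masked, report it as not delivered; otherwise set the containing word's bit in the vCPU's evtchn_pending_sel and raise evtchn_upcall_pending. A self-contained model using the same return convention (sketch; assumes at most BITS_PER_LONG * BITS_PER_LONG ports so one selector word suffices):

#include <limits.h>
#include <stdint.h>

#define BITS_PER_LONG	(sizeof(unsigned long) * CHAR_BIT)

static int test_and_set(unsigned long *word, unsigned int bit)
{
	unsigned long m = 1UL << bit;

	return (__atomic_fetch_or(word, m, __ATOMIC_ACQ_REL) & m) != 0;
}

/* >0 delivered, 0 coalesced (already pending), <0 masked. */
static int deliver_evtchn(unsigned int port,
			  unsigned long *pending, unsigned long *mask,
			  unsigned long *pending_sel, uint8_t *upcall_pending)
{
	if (test_and_set(&pending[port / BITS_PER_LONG], port % BITS_PER_LONG))
		return 0;

	if (mask[port / BITS_PER_LONG] & (1UL << (port % BITS_PER_LONG)))
		return -1;	/* masked: stays pending, not injected */

	if (!test_and_set(pending_sel, port / BITS_PER_LONG))
		*upcall_pending = 1;	/* tell the guest to rescan */
	return 1;
}
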
1840 if (rc != -EWOULDBLOCK) in kvm_xen_set_evtchn()
1843 if (current->mm != kvm->mm) { in kvm_xen_set_evtchn()
1848 if (WARN_ON_ONCE(current->mm)) in kvm_xen_set_evtchn()
1849 return -EINVAL; in kvm_xen_set_evtchn()
1851 kthread_use_mm(kvm->mm); in kvm_xen_set_evtchn()
1870 struct gfn_to_pfn_cache *gpc = &kvm->arch.xen.shinfo_cache; in kvm_xen_set_evtchn()
1874 if (rc != -EWOULDBLOCK) in kvm_xen_set_evtchn()
1877 idx = srcu_read_lock(&kvm->srcu); in kvm_xen_set_evtchn()
1879 srcu_read_unlock(&kvm->srcu, idx); in kvm_xen_set_evtchn()
1883 kthread_unuse_mm(kvm->mm); in kvm_xen_set_evtchn()
1893 return -EINVAL; in evtchn_set_fn()
1895 return kvm_xen_set_evtchn(&e->xen_evtchn, kvm); in evtchn_set_fn()
1909 if (ue->u.xen_evtchn.port >= max_evtchn_port(kvm)) in kvm_xen_setup_evtchn()
1910 return -EINVAL; in kvm_xen_setup_evtchn()
1912 /* We only support 2 level event channels for now */ in kvm_xen_setup_evtchn()
1913 if (ue->u.xen_evtchn.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL) in kvm_xen_setup_evtchn()
1914 return -EINVAL; in kvm_xen_setup_evtchn()
1924 vcpu = kvm_get_vcpu_by_id(kvm, ue->u.xen_evtchn.vcpu); in kvm_xen_setup_evtchn()
1926 e->xen_evtchn.vcpu_idx = vcpu->vcpu_idx; in kvm_xen_setup_evtchn()
1928 e->xen_evtchn.vcpu_idx = -1; in kvm_xen_setup_evtchn()
1930 e->xen_evtchn.port = ue->u.xen_evtchn.port; in kvm_xen_setup_evtchn()
1931 e->xen_evtchn.vcpu_id = ue->u.xen_evtchn.vcpu; in kvm_xen_setup_evtchn()
1932 e->xen_evtchn.priority = ue->u.xen_evtchn.priority; in kvm_xen_setup_evtchn()
1933 e->set = evtchn_set_fn; in kvm_xen_setup_evtchn()
1946 if (!uxe->port || uxe->port >= max_evtchn_port(kvm)) in kvm_xen_hvm_evtchn_send()
1947 return -EINVAL; in kvm_xen_hvm_evtchn_send()
1949 /* We only support 2 level event channels for now */ in kvm_xen_hvm_evtchn_send()
1950 if (uxe->priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL) in kvm_xen_hvm_evtchn_send()
1951 return -EINVAL; in kvm_xen_hvm_evtchn_send()
1953 e.port = uxe->port; in kvm_xen_hvm_evtchn_send()
1954 e.vcpu_id = uxe->vcpu; in kvm_xen_hvm_evtchn_send()
1955 e.vcpu_idx = -1; in kvm_xen_hvm_evtchn_send()
1956 e.priority = uxe->priority; in kvm_xen_hvm_evtchn_send()
1962 * We don't care if it was masked (-ENOTCONN) either. in kvm_xen_hvm_evtchn_send()
1964 if (ret > 0 || ret == -ENOTCONN) in kvm_xen_hvm_evtchn_send()
1991 u32 port = data->u.evtchn.send_port; in kvm_xen_eventfd_update()
1996 mutex_lock(&kvm->arch.xen.xen_lock); in kvm_xen_eventfd_update()
1997 evtchnfd = idr_find(&kvm->arch.xen.evtchn_ports, port); in kvm_xen_eventfd_update()
1999 ret = -ENOENT; in kvm_xen_eventfd_update()
2004 ret = -EINVAL; in kvm_xen_eventfd_update()
2005 if (evtchnfd->type != data->u.evtchn.type) in kvm_xen_eventfd_update()
2012 if (!evtchnfd->deliver.port.port || in kvm_xen_eventfd_update()
2013 evtchnfd->deliver.port.port != data->u.evtchn.deliver.port.port) in kvm_xen_eventfd_update()
2016 /* We only support 2 level event channels for now */ in kvm_xen_eventfd_update()
2017 if (data->u.evtchn.deliver.port.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL) in kvm_xen_eventfd_update()
2020 evtchnfd->deliver.port.priority = data->u.evtchn.deliver.port.priority; in kvm_xen_eventfd_update()
2021 if (evtchnfd->deliver.port.vcpu_id != data->u.evtchn.deliver.port.vcpu) { in kvm_xen_eventfd_update()
2022 evtchnfd->deliver.port.vcpu_id = data->u.evtchn.deliver.port.vcpu; in kvm_xen_eventfd_update()
2023 evtchnfd->deliver.port.vcpu_idx = -1; in kvm_xen_eventfd_update()
2027 mutex_unlock(&kvm->arch.xen.xen_lock); in kvm_xen_eventfd_update()
2038 u32 port = data->u.evtchn.send_port; in kvm_xen_eventfd_assign()
2041 int ret = -EINVAL; in kvm_xen_eventfd_assign()
2045 return -ENOMEM; in kvm_xen_eventfd_assign()
2047 switch(data->u.evtchn.type) { in kvm_xen_eventfd_assign()
2050 if (data->u.evtchn.deliver.port.port != data->u.evtchn.send_port) in kvm_xen_eventfd_assign()
2051 goto out_noeventfd; /* -EINVAL */ in kvm_xen_eventfd_assign()
2055 if (data->u.evtchn.deliver.port.port) { in kvm_xen_eventfd_assign()
2056 if (data->u.evtchn.deliver.port.port >= max_evtchn_port(kvm)) in kvm_xen_eventfd_assign()
2057 goto out_noeventfd; /* -EINVAL */ in kvm_xen_eventfd_assign()
2059 eventfd = eventfd_ctx_fdget(data->u.evtchn.deliver.eventfd.fd); in kvm_xen_eventfd_assign()
2072 goto out; /* -EINVAL */ in kvm_xen_eventfd_assign()
2075 evtchnfd->send_port = data->u.evtchn.send_port; in kvm_xen_eventfd_assign()
2076 evtchnfd->type = data->u.evtchn.type; in kvm_xen_eventfd_assign()
2078 evtchnfd->deliver.eventfd.ctx = eventfd; in kvm_xen_eventfd_assign()
2080 /* We only support 2 level event channels for now */ in kvm_xen_eventfd_assign()
2081 if (data->u.evtchn.deliver.port.priority != KVM_IRQ_ROUTING_XEN_EVTCHN_PRIO_2LEVEL) in kvm_xen_eventfd_assign()
2082 		goto out; /* -EINVAL */ in kvm_xen_eventfd_assign()
2084 evtchnfd->deliver.port.port = data->u.evtchn.deliver.port.port; in kvm_xen_eventfd_assign()
2085 evtchnfd->deliver.port.vcpu_id = data->u.evtchn.deliver.port.vcpu; in kvm_xen_eventfd_assign()
2086 evtchnfd->deliver.port.vcpu_idx = -1; in kvm_xen_eventfd_assign()
2087 evtchnfd->deliver.port.priority = data->u.evtchn.deliver.port.priority; in kvm_xen_eventfd_assign()
2090 mutex_lock(&kvm->arch.xen.xen_lock); in kvm_xen_eventfd_assign()
2091 ret = idr_alloc(&kvm->arch.xen.evtchn_ports, evtchnfd, port, port + 1, in kvm_xen_eventfd_assign()
2093 mutex_unlock(&kvm->arch.xen.xen_lock); in kvm_xen_eventfd_assign()
2097 if (ret == -ENOSPC) in kvm_xen_eventfd_assign()
2098 ret = -EEXIST; in kvm_xen_eventfd_assign()
2111 mutex_lock(&kvm->arch.xen.xen_lock); in kvm_xen_eventfd_deassign()
2112 evtchnfd = idr_remove(&kvm->arch.xen.evtchn_ports, port); in kvm_xen_eventfd_deassign()
2113 mutex_unlock(&kvm->arch.xen.xen_lock); in kvm_xen_eventfd_deassign()
2116 return -ENOENT; in kvm_xen_eventfd_deassign()
2118 synchronize_srcu(&kvm->srcu); in kvm_xen_eventfd_deassign()
2119 if (!evtchnfd->deliver.port.port) in kvm_xen_eventfd_deassign()
2120 eventfd_ctx_put(evtchnfd->deliver.eventfd.ctx); in kvm_xen_eventfd_deassign()
2131 mutex_lock(&kvm->arch.xen.xen_lock); in kvm_xen_eventfd_reset()
2138 idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i) in kvm_xen_eventfd_reset()
2143 mutex_unlock(&kvm->arch.xen.xen_lock); in kvm_xen_eventfd_reset()
2144 return -ENOMEM; in kvm_xen_eventfd_reset()
2148 idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i) { in kvm_xen_eventfd_reset()
2150 idr_remove(&kvm->arch.xen.evtchn_ports, evtchnfd->send_port); in kvm_xen_eventfd_reset()
2152 mutex_unlock(&kvm->arch.xen.xen_lock); in kvm_xen_eventfd_reset()
2154 synchronize_srcu(&kvm->srcu); in kvm_xen_eventfd_reset()
2156 while (n--) { in kvm_xen_eventfd_reset()
2158 if (!evtchnfd->deliver.port.port) in kvm_xen_eventfd_reset()
2159 eventfd_ctx_put(evtchnfd->deliver.eventfd.ctx); in kvm_xen_eventfd_reset()
2169 u32 port = data->u.evtchn.send_port; in kvm_xen_setattr_evtchn()
2171 if (data->u.evtchn.flags == KVM_XEN_EVTCHN_RESET) in kvm_xen_setattr_evtchn()
2175 return -EINVAL; in kvm_xen_setattr_evtchn()
2177 if (data->u.evtchn.flags == KVM_XEN_EVTCHN_DEASSIGN) in kvm_xen_setattr_evtchn()
2179 if (data->u.evtchn.flags == KVM_XEN_EVTCHN_UPDATE) in kvm_xen_setattr_evtchn()
2181 if (data->u.evtchn.flags) in kvm_xen_setattr_evtchn()
2182 return -EINVAL; in kvm_xen_setattr_evtchn()
2193 /* Sanity check: this structure is the same for 32-bit and 64-bit */ in kvm_xen_hcall_evtchn_send()
2196 *r = -EFAULT; in kvm_xen_hcall_evtchn_send()
2201 * evtchnfd is protected by kvm->srcu; the idr lookup instead in kvm_xen_hcall_evtchn_send()
2205 evtchnfd = idr_find(&vcpu->kvm->arch.xen.evtchn_ports, send.port); in kvm_xen_hcall_evtchn_send()
2210 if (evtchnfd->deliver.port.port) { in kvm_xen_hcall_evtchn_send()
2211 int ret = kvm_xen_set_evtchn(&evtchnfd->deliver.port, vcpu->kvm); in kvm_xen_hcall_evtchn_send()
2212 if (ret < 0 && ret != -ENOTCONN) in kvm_xen_hcall_evtchn_send()
2215 eventfd_signal(evtchnfd->deliver.eventfd.ctx); in kvm_xen_hcall_evtchn_send()
2224 vcpu->arch.xen.vcpu_id = vcpu->vcpu_idx; in kvm_xen_init_vcpu()
2225 vcpu->arch.xen.poll_evtchn = 0; in kvm_xen_init_vcpu()
2227 timer_setup(&vcpu->arch.xen.poll_timer, cancel_evtchn_poll, 0); in kvm_xen_init_vcpu()
2228 hrtimer_init(&vcpu->arch.xen.timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_HARD); in kvm_xen_init_vcpu()
2229 vcpu->arch.xen.timer.function = xen_timer_callback; in kvm_xen_init_vcpu()
2231 kvm_gpc_init(&vcpu->arch.xen.runstate_cache, vcpu->kvm); in kvm_xen_init_vcpu()
2232 kvm_gpc_init(&vcpu->arch.xen.runstate2_cache, vcpu->kvm); in kvm_xen_init_vcpu()
2233 kvm_gpc_init(&vcpu->arch.xen.vcpu_info_cache, vcpu->kvm); in kvm_xen_init_vcpu()
2234 kvm_gpc_init(&vcpu->arch.xen.vcpu_time_info_cache, vcpu->kvm); in kvm_xen_init_vcpu()
2242 kvm_gpc_deactivate(&vcpu->arch.xen.runstate_cache); in kvm_xen_destroy_vcpu()
2243 kvm_gpc_deactivate(&vcpu->arch.xen.runstate2_cache); in kvm_xen_destroy_vcpu()
2244 kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_info_cache); in kvm_xen_destroy_vcpu()
2245 kvm_gpc_deactivate(&vcpu->arch.xen.vcpu_time_info_cache); in kvm_xen_destroy_vcpu()
2247 del_timer_sync(&vcpu->arch.xen.poll_timer); in kvm_xen_destroy_vcpu()
2255 if (!vcpu->arch.xen.cpuid.base) in kvm_xen_update_tsc_info()
2258 function = vcpu->arch.xen.cpuid.base | XEN_CPUID_LEAF(3); in kvm_xen_update_tsc_info()
2259 if (function > vcpu->arch.xen.cpuid.limit) in kvm_xen_update_tsc_info()
2264 entry->ecx = vcpu->arch.hv_clock.tsc_to_system_mul; in kvm_xen_update_tsc_info()
2265 entry->edx = vcpu->arch.hv_clock.tsc_shift; in kvm_xen_update_tsc_info()
2270 entry->eax = vcpu->arch.hw_tsc_khz; in kvm_xen_update_tsc_info()
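
The tsc_to_system_mul/tsc_shift pair exposed through the Xen CPUID time leaf is standard pvclock scaling: a TSC delta is shifted by tsc_shift (negative means right shift) and then multiplied by the 32.32 fixed-point multiplier. Guest-side conversion, as a sketch (uses GCC/Clang 128-bit arithmetic):

#include <stdint.h>

/* nanoseconds = ((tsc << shift) * mul) >> 32, negative shift = right shift */
static uint64_t pvclock_scale(uint64_t tsc_delta, uint32_t mul, int8_t shift)
{
	if (shift >= 0)
		tsc_delta <<= shift;
	else
		tsc_delta >>= -shift;

	return (uint64_t)(((unsigned __int128)tsc_delta * mul) >> 32);
}
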
2275 mutex_init(&kvm->arch.xen.xen_lock); in kvm_xen_init_vm()
2276 idr_init(&kvm->arch.xen.evtchn_ports); in kvm_xen_init_vm()
2277 kvm_gpc_init(&kvm->arch.xen.shinfo_cache, kvm); in kvm_xen_init_vm()
2285 kvm_gpc_deactivate(&kvm->arch.xen.shinfo_cache); in kvm_xen_destroy_vm()
2287 idr_for_each_entry(&kvm->arch.xen.evtchn_ports, evtchnfd, i) { in kvm_xen_destroy_vm()
2288 if (!evtchnfd->deliver.port.port) in kvm_xen_destroy_vm()
2289 eventfd_ctx_put(evtchnfd->deliver.eventfd.ctx); in kvm_xen_destroy_vm()
2292 idr_destroy(&kvm->arch.xen.evtchn_ports); in kvm_xen_destroy_vm()
2294 if (kvm->arch.xen_hvm_config.msr) in kvm_xen_destroy_vm()