// SPDX-License-Identifier: GPL-2.0-only

#include <linux/irq.h>

/*
 * Locking order is always:
 * kvm->lock (mutex)
 *   vcpu->mutex (mutex)
 *     kvm->arch.config_lock (mutex)
 *       its->cmd_lock (mutex)
 *         its->its_lock (mutex)
 *           vgic_cpu->ap_list_lock		must be taken with IRQs disabled
 *             vgic_dist->lpi_xa.xa_lock	must be taken with IRQs disabled
 *               vgic_irq->irq_lock		must be taken with IRQs disabled
 *
 * The config_lock has additional ordering requirements:
 * kvm->slots_lock
 *   kvm->srcu
 *     kvm->arch.config_lock
 *
 * If you are already holding a lock and need to take a higher one, you
 * have to drop the lower ranking lock first and re-acquire it after having
 * taken the upper one.
 *
 * When taking more than one ap_list_lock at the same time, always take the
 * lowest numbered VCPU's ap_list_lock first; so if
 * vcpuX->vcpu_id < vcpuY->vcpu_id:
 *     raw_spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock);
 *     raw_spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock);
 */

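/*
 * Illustrative sketch (not part of the original file): a hypothetical helper
 * that takes two vCPUs' ap_list_locks in the order documented above. The
 * real code open-codes this in vgic_prune_ap_list() further down.
 */
static inline void example_lock_ap_lists(struct kvm_vcpu *x, struct kvm_vcpu *y)
{
        struct kvm_vcpu *first = (x->vcpu_id < y->vcpu_id) ? x : y;
        struct kvm_vcpu *second = (x->vcpu_id < y->vcpu_id) ? y : x;

        /* Lowest vcpu_id first; nested annotation keeps lockdep happy. */
        raw_spin_lock(&first->arch.vgic_cpu.ap_list_lock);
        raw_spin_lock_nested(&second->arch.vgic_cpu.ap_list_lock,
                             SINGLE_DEPTH_NESTING);
}
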
/*
 * Index the VM's xarray of mapped LPIs and return a reference to the IRQ
 * structure. The caller is expected to call vgic_put_irq() once it's
 * finished with the IRQ.
 */
static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        struct vgic_irq *irq = NULL;

        rcu_read_lock();
        irq = xa_load(&dist->lpi_xa, intid);
        if (!vgic_try_get_irq_kref(irq))
                irq = NULL;
        rcu_read_unlock();

        return irq;
}

/*
 * This looks up the virtual interrupt ID to get the corresponding
 * struct vgic_irq. It also increases the refcount, so any caller is expected
 * to call vgic_put_irq() once it's finished with this IRQ.
 */
struct vgic_irq *vgic_get_irq(struct kvm *kvm, u32 intid)
{
        /* SPIs */
        if (intid >= VGIC_NR_PRIVATE_IRQS &&
            intid < (kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS)) {
                intid = array_index_nospec(intid, kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS);
                return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS];
        }

        /* LPIs */
        if (intid >= VGIC_MIN_LPI)
                return vgic_get_lpi(kvm, intid);

        return NULL;
}

struct vgic_irq *vgic_get_vcpu_irq(struct kvm_vcpu *vcpu, u32 intid)
{
        if (WARN_ON(!vcpu))
                return NULL;

        /* SGIs and PPIs */
        if (intid < VGIC_NR_PRIVATE_IRQS) {
                intid = array_index_nospec(intid, VGIC_NR_PRIVATE_IRQS);
                return &vcpu->arch.vgic_cpu.private_irqs[intid];
        }

        return vgic_get_irq(vcpu->kvm, intid);
}

/*
 * kref_put() needs a release callback, but freeing is deferred until we
 * have dropped the xarray entry below, so the callback is empty.
 */
static void vgic_irq_release(struct kref *ref)
{
}

void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        unsigned long flags;

        /* Only LPIs are refcounted and dynamically allocated. */
        if (irq->intid < VGIC_MIN_LPI)
                return;

        if (!kref_put(&irq->refcount, vgic_irq_release))
                return;

        xa_lock_irqsave(&dist->lpi_xa, flags);
        __xa_erase(&dist->lpi_xa, irq->intid);
        xa_unlock_irqrestore(&dist->lpi_xa, flags);

        kfree_rcu(irq, rcu);
}

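/*
 * Illustrative sketch (not in the original file): the get/put discipline
 * callers are expected to follow. Every successful vgic_get_irq() or
 * vgic_get_vcpu_irq() must be balanced by a vgic_put_irq(); this only has
 * an effect for LPIs, whose storage is freed once the last reference drops.
 */
static void example_poke_irq(struct kvm *kvm, u32 intid)
{
        struct vgic_irq *irq = vgic_get_irq(kvm, intid);

        if (!irq)
                return;

        /* ... inspect or modify the IRQ under irq->irq_lock ... */

        vgic_put_irq(kvm, irq);
}
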
void vgic_flush_pending_lpis(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_irq *irq, *tmp;
        unsigned long flags;

        raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);

        list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
                if (irq->intid >= VGIC_MIN_LPI) {
                        raw_spin_lock(&irq->irq_lock);
                        list_del(&irq->ap_list);
                        irq->vcpu = NULL;
                        raw_spin_unlock(&irq->irq_lock);
                        vgic_put_irq(vcpu->kvm, irq);
                }
        }

        raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);
}

void vgic_irq_set_phys_pending(struct vgic_irq *irq, bool pending)
{
        WARN_ON(irq_set_irqchip_state(irq->host_irq,
                                      IRQCHIP_STATE_PENDING,
                                      pending));
}

bool vgic_get_phys_line_level(struct vgic_irq *irq)
{
        bool line_level;

        BUG_ON(!irq->hw);

        if (irq->ops && irq->ops->get_input_level)
                return irq->ops->get_input_level(irq->intid);

        WARN_ON(irq_get_irqchip_state(irq->host_irq,
                                      IRQCHIP_STATE_PENDING,
                                      &line_level));
        return line_level;
}

/* Set/Clear the physical active state */
void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active)
{
        BUG_ON(!irq->hw);
        WARN_ON(irq_set_irqchip_state(irq->host_irq,
                                      IRQCHIP_STATE_ACTIVE,
                                      active));
}

/**
 * vgic_target_oracle - compute the target vcpu for an irq
 * @irq: The irq to route. Must be already locked.
 *
 * Based on the current state of the interrupt (enabled, pending,
 * active, vcpu and target_vcpu), compute the next vcpu this should be
 * given to. Return NULL if this shouldn't be injected at all.
 *
 * Requires the IRQ lock to be held.
 */
static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq)
{
        lockdep_assert_held(&irq->irq_lock);

        /* If the interrupt is active, it must stay on the current vcpu */
        if (irq->active)
                return irq->vcpu ? : irq->target_vcpu;

        /*
         * If the IRQ is not active but enabled and pending, we should direct
         * it to its configured target VCPU.
         * If the distributor is disabled, pending interrupts shouldn't be
         * forwarded.
         */
        if (irq->enabled && irq_is_pending(irq)) {
                if (unlikely(irq->target_vcpu &&
                             !irq->target_vcpu->kvm->arch.vgic.enabled))
                        return NULL;

                return irq->target_vcpu;
        }

        /*
         * If neither active nor pending and enabled, then this IRQ should not
         * be queued to any VCPU.
         */
        return NULL;
}

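/*
 * Illustrative summary (added for clarity, not in the upstream source):
 * vgic_target_oracle() routing decisions at a glance:
 *
 *   irq->active                          -> stay on current vcpu
 *   enabled && pending && dist enabled   -> irq->target_vcpu
 *   enabled && pending && dist disabled  -> NULL (not forwarded)
 *   anything else                        -> NULL (not queued)
 */
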
/*
 * The order of items in the ap_lists defines how we'll pack things in LRs as
 * well, the first items in the list being the first things populated in the
 * LRs.
 *
 * A hard rule is that active interrupts can never be pushed out of the LRs
 * (and therefore take priority) since we cannot reliably trap on deactivation
 * of IRQs and therefore they have to be present in the LRs.
 *
 * Return negative if "a" sorts before "b", 0 to preserve order, and positive
 * to sort "b" in front of "a".
 */
static int vgic_irq_cmp(void *priv, const struct list_head *a,
                        const struct list_head *b)
{
        struct vgic_irq *irqa = container_of(a, struct vgic_irq, ap_list);
        struct vgic_irq *irqb = container_of(b, struct vgic_irq, ap_list);
        bool penda, pendb;
        int ret;

        /*
         * list_sort may call this function with the same element when
         * the list is fairly long.
         */
        if (unlikely(irqa == irqb))
                return 0;

        raw_spin_lock(&irqa->irq_lock);
        raw_spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);

        if (irqa->active || irqb->active) {
                ret = (int)irqb->active - (int)irqa->active;
                goto out;
        }

        penda = irqa->enabled && irq_is_pending(irqa);
        pendb = irqb->enabled && irq_is_pending(irqb);

        if (penda != pendb) {
                ret = (int)pendb - (int)penda;
                goto out;
        }

        /* Both pending and enabled, sort by priority */
        ret = irqa->priority - irqb->priority;
out:
        raw_spin_unlock(&irqb->irq_lock);
        raw_spin_unlock(&irqa->irq_lock);
        return ret;
}

static void vgic_sort_ap_list(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

        lockdep_assert_held(&vgic_cpu->ap_list_lock);

        list_sort(NULL, &vgic_cpu->ap_list_head, vgic_irq_cmp);
}

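/*
 * Illustrative example (not in the upstream source): after sorting, an
 * ap_list might look like
 *
 *   [active SPI] -> [pending, prio 0x20] -> [pending, prio 0xa0] -> [idle]
 *
 * i.e. active IRQs first, then pending+enabled ones by ascending priority
 * value (a lower value means a higher GIC priority).
 */
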
/*
 * Only valid injection if changing level for level-triggered IRQs or for a
 * rising edge, and in-kernel connected IRQ lines can only be controlled by
 * their owner.
 */
static bool vgic_validate_injection(struct vgic_irq *irq, bool level, void *owner)
{
        if (irq->owner != owner)
                return false;

        switch (irq->config) {
        case VGIC_CONFIG_LEVEL:
                return irq->line_level != level;
        case VGIC_CONFIG_EDGE:
                return level;
        }

        return false;
}

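/*
 * Illustrative truth table (added for clarity, not in the upstream source):
 *
 *   config  line_level  level  ->  valid injection?
 *   LEVEL   false       true   ->  yes (level changed)
 *   LEVEL   true        true   ->  no  (no change)
 *   EDGE    -           true   ->  yes (rising edge)
 *   EDGE    -           false  ->  no  (falling edges are ignored)
 */
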
/*
 * Check whether an IRQ needs to (and can) be queued to a VCPU's ap list.
 * Do the queuing if necessary, taking the right locks in the right order.
 * Returns true when the IRQ was queued, false otherwise.
 *
 * Needs to be entered with the IRQ lock already held, but will return
 * with all locks dropped.
 */
bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
                           unsigned long flags) __releases(&irq->irq_lock)
{
        struct kvm_vcpu *vcpu;

        lockdep_assert_held(&irq->irq_lock);

retry:
        vcpu = vgic_target_oracle(irq);
        if (irq->vcpu || !vcpu) {
                /*
                 * If this IRQ is already on a VCPU's ap_list, then it
                 * cannot be moved or modified and there is no more work for
                 * us to do.
                 *
                 * Otherwise, if the irq is not pending and enabled, it does
                 * not need to be inserted into an ap_list and there is also
                 * no more work for us to do.
                 */
                raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

                /*
                 * We have to kick the VCPU here, because we could be
                 * queueing an edge-triggered interrupt for which we
                 * get no EOI maintenance interrupt. In that case,
                 * while the IRQ is already on the VCPU's AP list, the
                 * VCPU could have EOI'ed the original interrupt and
                 * won't see this one until it exits for some other
                 * reason.
                 */
                if (vcpu) {
                        kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
                        kvm_vcpu_kick(vcpu);
                }
                return false;
        }

        /*
         * We must unlock the irq lock to take the ap_list_lock where
         * we are going to add this new pending interrupt.
         */
        raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

        /* someone can do stuff here, which we re-check below */

        raw_spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
        raw_spin_lock(&irq->irq_lock);

        /*
         * Did something change behind our backs? There are two cases:
         * 1) The irq lost its pending state or was disabled behind our
         *    backs and/or it was queued to another VCPU's ap_list.
         * 2) Someone changed the affinity on this irq behind our
         *    backs and we are now holding the wrong ap_list_lock.
         * In both cases, drop the locks and retry.
         */
        if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) {
                raw_spin_unlock(&irq->irq_lock);
                raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock,
                                           flags);
                raw_spin_lock_irqsave(&irq->irq_lock, flags);
                goto retry;
        }

        /*
         * Grab a reference to the irq to reflect the fact that it is
         * now in the ap_list. This is safe as anyone calling us already
         * holds a reference on the irq.
         */
        vgic_get_irq_kref(irq);
        list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
        irq->vcpu = vcpu;

        raw_spin_unlock(&irq->irq_lock);
        raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);

        kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
        kvm_vcpu_kick(vcpu);

        return true;
}

/**
 * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
 * @kvm:    The VM structure pointer
 * @vcpu:   The CPU for PPIs or NULL for global interrupts
 * @intid:  The INTID to inject a new state to.
 * @level:  Edge-triggered:  true:  to trigger the interrupt
 *                           false: to ignore the call
 *          Level-sensitive: true:  raise the input signal
 *                           false: lower the input signal
 * @owner:  The opaque pointer to the owner of the IRQ being raised to verify
 *          that the caller is allowed to inject this IRQ. Userspace
 *          injections will have owner == NULL.
 *
 * The VGIC is not concerned with devices being active-LOW or active-HIGH for
 * level-sensitive interrupts. You can think of the level parameter as 1
 * being HIGH and 0 being LOW and all devices being active-HIGH.
 */
int kvm_vgic_inject_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
                        unsigned int intid, bool level, void *owner)
{
        struct vgic_irq *irq;
        unsigned long flags;
        int ret;

        ret = vgic_lazy_init(kvm);
        if (ret)
                return ret;

        if (!vcpu && intid < VGIC_NR_PRIVATE_IRQS)
                return -EINVAL;

        trace_vgic_update_irq_pending(vcpu ? vcpu->vcpu_idx : 0, intid, level);

        if (intid < VGIC_NR_PRIVATE_IRQS)
                irq = vgic_get_vcpu_irq(vcpu, intid);
        else
                irq = vgic_get_irq(kvm, intid);
        if (!irq)
                return -EINVAL;

        raw_spin_lock_irqsave(&irq->irq_lock, flags);

        if (!vgic_validate_injection(irq, level, owner)) {
                /* Nothing to see here, move along... */
                raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(kvm, irq);
                return 0;
        }

        if (irq->config == VGIC_CONFIG_LEVEL)
                irq->line_level = level;
        else
                irq->pending_latch = true;

        vgic_queue_irq_unlock(kvm, irq, flags);
        vgic_put_irq(kvm, irq);

        return 0;
}

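/*
 * Illustrative sketch (not in the original file): raising and lowering a
 * level-sensitive SPI from an in-kernel device. "example_dev" is a
 * hypothetical owner cookie previously registered via kvm_vgic_set_owner().
 */
static void example_set_spi_level(struct kvm *kvm, unsigned int spi_intid,
                                  bool asserted, void *example_dev)
{
        /* For SPIs the vcpu argument is NULL; routing follows target_vcpu. */
        WARN_ON(kvm_vgic_inject_irq(kvm, NULL, spi_intid, asserted,
                                    example_dev));
}
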
/* @irq->irq_lock must be held */
static int kvm_vgic_map_irq(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
                            unsigned int host_irq,
                            struct irq_ops *ops)
{
        struct irq_desc *desc;
        struct irq_data *data;

        /*
         * Find the physical IRQ number corresponding to @host_irq
         */
        desc = irq_to_desc(host_irq);
        if (!desc) {
                kvm_err("%s: no interrupt descriptor\n", __func__);
                return -EINVAL;
        }
        data = irq_desc_get_irq_data(desc);
        while (data->parent_data)
                data = data->parent_data;

        irq->hw = true;
        irq->host_irq = host_irq;
        irq->hwintid = data->hwirq;
        irq->ops = ops;
        return 0;
}

/* @irq->irq_lock must be held */
static inline void kvm_vgic_unmap_irq(struct vgic_irq *irq)
{
        irq->hw = false;
        irq->hwintid = 0;
        irq->ops = NULL;
}

int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
                          u32 vintid, struct irq_ops *ops)
{
        struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, vintid);
        unsigned long flags;
        int ret;

        BUG_ON(!irq);

        raw_spin_lock_irqsave(&irq->irq_lock, flags);
        ret = kvm_vgic_map_irq(vcpu, irq, host_irq, ops);
        raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
        vgic_put_irq(vcpu->kvm, irq);

        return ret;
}

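/*
 * Illustrative sketch (not in the original file): how an in-kernel device
 * might forward its physical interrupt to the guest, in the style of the
 * arch timer. "host_ppi_irq" and "virt_ppi" are hypothetical values; the
 * real timer also passes its own struct irq_ops rather than NULL.
 */
static int example_forward_timer_irq(struct kvm_vcpu *vcpu,
                                     unsigned int host_ppi_irq, u32 virt_ppi)
{
        /* NULL ops: default irqchip_state-based line sampling is used. */
        return kvm_vgic_map_phys_irq(vcpu, host_ppi_irq, virt_ppi, NULL);
}
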
/**
 * kvm_vgic_reset_mapped_irq - Reset a mapped IRQ
 * @vcpu: The VCPU pointer
 * @vintid: The INTID of the interrupt
 *
 * Reset the active and pending states of a mapped interrupt. Kernel
 * subsystems injecting mapped interrupts should reset their interrupt lines
 * when we are doing a reset of the VM.
 */
void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid)
{
        struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, vintid);
        unsigned long flags;

        if (!irq->hw)
                goto out;

        raw_spin_lock_irqsave(&irq->irq_lock, flags);
        irq->active = false;
        irq->pending_latch = false;
        irq->line_level = false;
        raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
out:
        vgic_put_irq(vcpu->kvm, irq);
}

int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid)
{
        struct vgic_irq *irq;
        unsigned long flags;

        if (!vgic_initialized(vcpu->kvm))
                return -EAGAIN;

        irq = vgic_get_vcpu_irq(vcpu, vintid);
        BUG_ON(!irq);

        raw_spin_lock_irqsave(&irq->irq_lock, flags);
        kvm_vgic_unmap_irq(irq);
        raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
        vgic_put_irq(vcpu->kvm, irq);

        return 0;
}

int kvm_vgic_get_map(struct kvm_vcpu *vcpu, unsigned int vintid)
{
        struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, vintid);
        unsigned long flags;
        int ret = -1;

        raw_spin_lock_irqsave(&irq->irq_lock, flags);
        if (irq->hw)
                ret = irq->hwintid;
        raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

        vgic_put_irq(vcpu->kvm, irq);
        return ret;
}

/**
 * kvm_vgic_set_owner - Set the owner of an interrupt for a VM
 *
 * @vcpu:   Pointer to the VCPU (used for PPIs)
 * @intid:  The virtual INTID identifying the interrupt (PPI or SPI)
 * @owner:  Opaque pointer to the owner
 *
 * Returns 0 if intid is not already used by another in-kernel device and the
 * owner is set, otherwise returns an error code.
 */
int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner)
{
        struct vgic_irq *irq;
        unsigned long flags;
        int ret = 0;

        if (!vgic_initialized(vcpu->kvm))
                return -EAGAIN;

        /* SGIs and LPIs cannot be wired up to any device */
        if (!irq_is_ppi(intid) && !vgic_valid_spi(vcpu->kvm, intid))
                return -EINVAL;

        irq = vgic_get_vcpu_irq(vcpu, intid);
        raw_spin_lock_irqsave(&irq->irq_lock, flags);
        if (irq->owner && irq->owner != owner)
                ret = -EEXIST;
        else
                irq->owner = owner;
        raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

        return ret;
}

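/*
 * Illustrative sketch (not in the original file): an in-kernel device
 * claiming a PPI and then injecting with a matching owner token. The
 * "example_dev" cookie is hypothetical; upstream users (arch timer, PMU)
 * pass a pointer that uniquely identifies them.
 */
static int example_claim_and_raise(struct kvm_vcpu *vcpu, unsigned int ppi,
                                   void *example_dev)
{
        int ret = kvm_vgic_set_owner(vcpu, ppi, example_dev);

        if (ret)
                return ret;

        /* Raise the level-sensitive input; the owner must match. */
        return kvm_vgic_inject_irq(vcpu->kvm, vcpu, ppi, true, example_dev);
}
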
/**
 * vgic_prune_ap_list - Remove non-relevant interrupts from the list
 * @vcpu: The VCPU pointer
 *
 * Go over the list of "interesting" interrupts, and prune those that we
 * won't have to consider in the near future.
 */
static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_irq *irq, *tmp;

retry:
        raw_spin_lock(&vgic_cpu->ap_list_lock);

        list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
                struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;
                bool target_vcpu_needs_kick = false;

                raw_spin_lock(&irq->irq_lock);

                BUG_ON(vcpu != irq->vcpu);

                target_vcpu = vgic_target_oracle(irq);

                if (!target_vcpu) {
                        /* No longer interesting: take it off the list. */
                        list_del(&irq->ap_list);
                        irq->vcpu = NULL;
                        raw_spin_unlock(&irq->irq_lock);

                        /*
                         * This matches the vgic_get_irq_kref() in
                         * vgic_queue_irq_unlock(): as we remove the irq
                         * from the list, we also drop the refcount.
                         */
                        vgic_put_irq(vcpu->kvm, irq);
                        continue;
                }

                if (target_vcpu == vcpu) {
                        /* We're on the right CPU */
                        raw_spin_unlock(&irq->irq_lock);
                        continue;
                }

                /* This interrupt looks like it has to be migrated. */
                raw_spin_unlock(&irq->irq_lock);
                raw_spin_unlock(&vgic_cpu->ap_list_lock);

                /*
                 * Ensure locking order by always locking the smallest
                 * ID first.
                 */
                if (vcpu->vcpu_id < target_vcpu->vcpu_id) {
                        vcpuA = vcpu;
                        vcpuB = target_vcpu;
                } else {
                        vcpuA = target_vcpu;
                        vcpuB = vcpu;
                }

                raw_spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
                raw_spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
                                     SINGLE_DEPTH_NESTING);
                raw_spin_lock(&irq->irq_lock);

                /*
                 * If the affinity was preserved, move the interrupt to its
                 * new home; otherwise something changed while it was
                 * unlocked and we need to replay this. Either way the list
                 * may have changed, so restart from the beginning.
                 */
                if (target_vcpu == vgic_target_oracle(irq)) {
                        struct vgic_cpu *new_cpu = &target_vcpu->arch.vgic_cpu;

                        list_del(&irq->ap_list);
                        irq->vcpu = target_vcpu;
                        list_add_tail(&irq->ap_list, &new_cpu->ap_list_head);
                        target_vcpu_needs_kick = true;
                }

                raw_spin_unlock(&irq->irq_lock);
                raw_spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
                raw_spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);

                if (target_vcpu_needs_kick) {
                        kvm_make_request(KVM_REQ_IRQ_PENDING, target_vcpu);
                        kvm_vcpu_kick(target_vcpu);
                }

                goto retry;
        }

        raw_spin_unlock(&vgic_cpu->ap_list_lock);
}

static inline void vgic_populate_lr(struct kvm_vcpu *vcpu,
                                    struct vgic_irq *irq, int lr)
{
        lockdep_assert_held(&irq->irq_lock);

        if (kvm_vgic_global_state.type == VGIC_V2)
                vgic_v2_populate_lr(vcpu, irq, lr);
        else
                vgic_v3_populate_lr(vcpu, irq, lr);
}

static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
                                 bool *multi_sgi)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_irq *irq;
        int count = 0;

        *multi_sgi = false;

        lockdep_assert_held(&vgic_cpu->ap_list_lock);

        list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
                int w;

                raw_spin_lock(&irq->irq_lock);
                /* GICv2 SGIs can count for more than one... */
                w = vgic_irq_get_lr_count(irq);
                raw_spin_unlock(&irq->irq_lock);

                count += w;
                *multi_sgi |= (w > 1);
        }
        return count;
}

/* Requires the ap_list_lock to be held. */
static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_irq *irq;
        int count;
        bool multi_sgi;
        u8 prio = 0xff;
        int i = 0;

        lockdep_assert_held(&vgic_cpu->ap_list_lock);

        count = compute_ap_list_depth(vcpu, &multi_sgi);
        if (count > kvm_vgic_global_state.nr_lr || multi_sgi)
                vgic_sort_ap_list(vcpu);

        count = 0;

        list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
                raw_spin_lock(&irq->irq_lock);

                /*
                 * If we have multi-SGIs in the pipeline, we need to
                 * guarantee that they are all seen before any IRQ of
                 * lower priority. In that case, we need to filter out
                 * these interrupts by exiting early. This is easy as
                 * the AP list has been sorted already.
                 */
                if (multi_sgi && irq->priority > prio) {
                        raw_spin_unlock(&irq->irq_lock);
                        break;
                }

                if (likely(vgic_target_oracle(irq) == vcpu)) {
                        vgic_populate_lr(vcpu, irq, count++);

                        if (irq->source)
                                prio = irq->priority;
                }

                raw_spin_unlock(&irq->irq_lock);

                if (count == kvm_vgic_global_state.nr_lr) {
                        if (!list_is_last(&irq->ap_list,
                                          &vgic_cpu->ap_list_head))
                                vgic_set_underflow(vcpu);
                        break;
                }
        }

        /* Nuke remaining LRs */
        for (i = count; i < kvm_vgic_global_state.nr_lr; i++)
                vgic_clear_lr(vcpu, i);

        if (kvm_vgic_global_state.type == VGIC_V2)
                vcpu->arch.vgic_cpu.vgic_v2.used_lrs = count;
        else
                vcpu->arch.vgic_cpu.vgic_v3.used_lrs = count;
}

static inline bool can_access_vgic_from_kernel(void)
{
        /*
         * GICv2 can always be accessed from the kernel because it is
         * memory-mapped, and VHE systems can access GICv3 EL2 system
         * registers.
         */
        return !static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif) || has_vhe();
}

static inline void vgic_save_state(struct kvm_vcpu *vcpu)
{
        if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
                vgic_v2_save_state(vcpu);
        else
                __vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3);
}

/* Sync back the hardware VGIC state into our emulation after a guest's run. */
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
        int used_lrs;

        /* An empty ap_list_head implies used_lrs == 0 */
        if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
                return;

        if (can_access_vgic_from_kernel())
                vgic_save_state(vcpu);

        if (kvm_vgic_global_state.type == VGIC_V2)
                used_lrs = vcpu->arch.vgic_cpu.vgic_v2.used_lrs;
        else
                used_lrs = vcpu->arch.vgic_cpu.vgic_v3.used_lrs;

        if (used_lrs)
                vgic_fold_lr_state(vcpu);
        vgic_prune_ap_list(vcpu);
}

static inline void vgic_restore_state(struct kvm_vcpu *vcpu)
{
        if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
                vgic_v2_restore_state(vcpu);
        else
                __vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3);
}

/* Flush our emulation state into the GIC hardware before entering the guest. */
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
        /*
         * If there are no virtual interrupts active or pending for this
         * VCPU, then there is no work to do and we can bail out without
         * taking any lock. There is a potential race with someone injecting
         * interrupts to the VCPU, but it is a benign race as the VCPU will
         * either observe the new interrupt before or after doing this check,
         * and introducing additional synchronization mechanism doesn't change
         * this.
         */
        if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head) &&
            !vgic_supports_direct_msis(vcpu->kvm))
                return;

        if (!list_empty(&vcpu->arch.vgic_cpu.ap_list_head)) {
                raw_spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
                vgic_flush_lr_state(vcpu);
                raw_spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
        }

        if (can_access_vgic_from_kernel())
                vgic_restore_state(vcpu);

        if (vgic_supports_direct_msis(vcpu->kvm))
                vgic_v4_commit(vcpu);
}

void kvm_vgic_load(struct kvm_vcpu *vcpu)
{
        if (unlikely(!irqchip_in_kernel(vcpu->kvm) || !vgic_initialized(vcpu->kvm))) {
                if (has_vhe() && static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
                        __vgic_v3_activate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
                return;
        }

        if (kvm_vgic_global_state.type == VGIC_V2)
                vgic_v2_load(vcpu);
        else
                vgic_v3_load(vcpu);
}

void kvm_vgic_put(struct kvm_vcpu *vcpu)
{
        if (unlikely(!irqchip_in_kernel(vcpu->kvm) || !vgic_initialized(vcpu->kvm))) {
                if (has_vhe() && static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
                        __vgic_v3_deactivate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
                return;
        }

        if (kvm_vgic_global_state.type == VGIC_V2)
                vgic_v2_put(vcpu);
        else
                vgic_v3_put(vcpu);
}

int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
{
        struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
        struct vgic_irq *irq;
        bool pending = false;
        unsigned long flags;
        struct vgic_vmcr vmcr;

        if (!vcpu->kvm->arch.vgic.enabled)
                return false;

        if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last)
                return true;

        vgic_get_vmcr(vcpu, &vmcr);

        raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);

        list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
                raw_spin_lock(&irq->irq_lock);
                pending = irq_is_pending(irq) && irq->enabled &&
                          !irq->active &&
                          irq->priority < vmcr.pmr;
                raw_spin_unlock(&irq->irq_lock);

                if (pending)
                        break;
        }

        raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);

        return pending;
}

bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid)
{
        struct vgic_irq *irq;
        bool map_is_active;
        unsigned long flags;

        if (!vgic_initialized(vcpu->kvm))
                return false;

        irq = vgic_get_vcpu_irq(vcpu, vintid);
        raw_spin_lock_irqsave(&irq->irq_lock, flags);
        map_is_active = irq->hw && irq->active;
        raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
        vgic_put_irq(vcpu->kvm, irq);

        return map_is_active;
}

/*
 * Level-triggered mapped IRQs are special because we only observe rising
 * edges as input to the VGIC.
 *
 * If the guest never acked the interrupt we have to sample the physical
 * line and set the line level, because the device state could have changed
 * or we simply need to process the still pending interrupt later.
 *
 * We could also have entered the guest with the interrupt active+pending.
 * On the next exit, we need to re-evaluate the pending state, as it could
 * otherwise result in a spurious interrupt by injecting a now potentially
 * stale pending state.
 *
 * If this causes us to lower the level, we have to also clear the physical
 * active state, since we will otherwise never be told when the interrupt
 * becomes asserted again.
 *
 * Another case is when the interrupt requires a helping hand on
 * deactivation (no HW deactivation, for example).
 */
void vgic_irq_handle_resampling(struct vgic_irq *irq,
                                bool lr_deactivated, bool lr_pending)
{
        if (vgic_irq_is_mapped_level(irq)) {
                bool resample = false;

                if (unlikely(vgic_irq_needs_resampling(irq))) {
                        resample = !(irq->active || irq->pending_latch);
                } else if (lr_pending || (lr_deactivated && irq->line_level)) {
                        irq->line_level = vgic_get_phys_line_level(irq);
                        resample = !irq->line_level;
                }

                if (resample)
                        vgic_irq_set_phys_active(irq, false);
        }
}

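/*
 * Illustrative sketch (not in the original file): a minimal struct irq_ops
 * providing a get_input_level callback, in the style of the arch timer's
 * arch_timer_irq_ops. "example_device_line_high" is a hypothetical piece of
 * device state; a real device would sample its actual output line.
 */
static bool example_device_line_high;

static bool example_get_input_level(int vintid)
{
        return READ_ONCE(example_device_line_high);
}

static struct irq_ops example_irq_ops = {
        .get_input_level = example_get_input_level,
};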