// SPDX-License-Identifier: GPL-2.0-only

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/list_sort.h>
#include <linux/nospec.h>

#include "vgic.h"

/*
 * Locking order is always:
 * kvm->lock (mutex)
 *   vcpu->mutex (mutex)
 *     kvm->arch.config_lock (mutex)
 *       its->cmd_lock (mutex)
 *         its->its_lock (mutex)
 *           vgic_dist->lpi_xa.xa_lock
 *             vgic_cpu->ap_list_lock		must be taken with IRQs disabled
 *               vgic_irq->irq_lock		must be taken with IRQs disabled
 *
 * The config_lock has additional ordering requirements:
 * kvm->slots_lock
 *   kvm->srcu
 *     kvm->arch.config_lock
 *
 * If you need to take a higher-ranking lock while already holding a lower
 * one, you have to drop the lower ranking lock first and re-acquire it
 * after having taken the upper one.
 *
 * When taking more than one ap_list_lock at the same time, always take the
 * lowest numbered VCPU's ap_list_lock first, so, if
 * vcpuX->vcpu_id < vcpuY->vcpu_id:
 *
 *   raw_spin_lock(vcpuX->arch.vgic_cpu.ap_list_lock);
 *   raw_spin_lock(vcpuY->arch.vgic_cpu.ap_list_lock);
 */

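/*
 * Illustrative sketch (not part of the original file): honouring the order
 * above when a lower-ranking lock is already held means dropping it, taking
 * the higher one, re-taking the lower one, and then re-validating whatever
 * state was observed before the unlock:
 *
 *	raw_spin_unlock(&irq->irq_lock);
 *	raw_spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
 *	raw_spin_lock(&irq->irq_lock);
 *	// anything read under irq_lock before the unlock may now be stale
 *
 * vgic_queue_irq_unlock() below follows exactly this pattern.
 */
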
/*
 * Index the VM's xarray of mapped LPIs and return a reference to the IRQ
 * structure. The caller is expected to call vgic_put_irq() later once it's
 * finished with the IRQ.
 */
static struct vgic_irq *vgic_get_lpi(struct kvm *kvm, u32 intid)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	struct vgic_irq *irq = NULL;

	rcu_read_lock();

	irq = xa_load(&dist->lpi_xa, intid);
	if (!vgic_try_get_irq_ref(irq))
		irq = NULL;

	rcu_read_unlock();

	return irq;
}

/*
 * This looks up the virtual interrupt ID to get the corresponding
 * struct vgic_irq. It also increases the refcount, so any caller is expected
 * to call vgic_put_irq() once it's finished with this IRQ.
 */
struct vgic_irq *vgic_get_irq(struct kvm *kvm, u32 intid)
{
	/* SPIs */
	if (intid >= VGIC_NR_PRIVATE_IRQS &&
	    intid < (kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS)) {
		intid = array_index_nospec(intid, kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS);
		return &kvm->arch.vgic.spis[intid - VGIC_NR_PRIVATE_IRQS];
	}

	/* LPIs */
	if (intid >= VGIC_MIN_LPI)
		return vgic_get_lpi(kvm, intid);

	return NULL;
}

struct vgic_irq *vgic_get_vcpu_irq(struct kvm_vcpu *vcpu, u32 intid)
{
	if (WARN_ON(!vcpu))
		return NULL;

	/* SGIs and PPIs */
	if (intid < VGIC_NR_PRIVATE_IRQS) {
		intid = array_index_nospec(intid, VGIC_NR_PRIVATE_IRQS);
		return &vcpu->arch.vgic_cpu.private_irqs[intid];
	}

	return vgic_get_irq(vcpu->kvm, intid);
}

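/*
 * Illustrative sketch (not part of the original file): because an LPI
 * lookup takes a reference, callers pair any lookup with vgic_put_irq():
 *
 *	struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, intid);
 *
 *	if (!irq)
 *		return;
 *	// ... read or modify state, usually under irq->irq_lock ...
 *	vgic_put_irq(vcpu->kvm, irq);
 */
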
static void vgic_release_lpi_locked(struct vgic_dist *dist, struct vgic_irq *irq)
{
	lockdep_assert_held(&dist->lpi_xa.xa_lock);
	__xa_erase(&dist->lpi_xa, irq->intid);
	kfree_rcu(irq, rcu);
}

static __must_check bool __vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
{
	/* SGIs, PPIs and SPIs are statically allocated and never freed */
	if (irq->intid < VGIC_MIN_LPI)
		return false;

	return refcount_dec_and_test(&irq->refcount);
}

static __must_check bool vgic_put_irq_norelease(struct kvm *kvm, struct vgic_irq *irq)
{
	if (!__vgic_put_irq(kvm, irq))
		return false;

	/* Defer the actual release to vgic_release_deleted_lpis() */
	irq->pending_release = true;
	return true;
}

void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq)
{
	struct vgic_dist *dist = &kvm->arch.vgic;

	if (irq->intid >= VGIC_MIN_LPI)
		might_lock(&dist->lpi_xa.xa_lock);

	if (!__vgic_put_irq(kvm, irq))
		return;

	xa_lock(&dist->lpi_xa);
	vgic_release_lpi_locked(dist, irq);
	xa_unlock(&dist->lpi_xa);
}

static void vgic_release_deleted_lpis(struct kvm *kvm)
{
	struct vgic_dist *dist = &kvm->arch.vgic;
	unsigned long intid;
	struct vgic_irq *irq;

	xa_lock(&dist->lpi_xa);

	xa_for_each(&dist->lpi_xa, intid, irq) {
		if (irq->pending_release)
			vgic_release_lpi_locked(dist, irq);
	}

	xa_unlock(&dist->lpi_xa);
}

void vgic_flush_pending_lpis(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq, *tmp;
	bool deleted = false;
	unsigned long flags;

	raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);

	list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
		if (irq->intid >= VGIC_MIN_LPI) {
			raw_spin_lock(&irq->irq_lock);
			list_del(&irq->ap_list);
			irq->vcpu = NULL;
			raw_spin_unlock(&irq->irq_lock);
			deleted |= vgic_put_irq_norelease(vcpu->kvm, irq);
		}
	}

	raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);

	if (deleted)
		vgic_release_deleted_lpis(vcpu->kvm);
}

void vgic_irq_set_phys_pending(struct vgic_irq *irq, bool pending)
{
	WARN_ON(irq_set_irqchip_state(irq->host_irq,
				      IRQCHIP_STATE_PENDING,
				      pending));
}

bool vgic_get_phys_line_level(struct vgic_irq *irq)
{
	bool line_level;

	BUG_ON(!irq->hw);

	if (irq->ops && irq->ops->get_input_level)
		return irq->ops->get_input_level(irq->intid);

	WARN_ON(irq_get_irqchip_state(irq->host_irq,
				      IRQCHIP_STATE_PENDING,
				      &line_level));
	return line_level;
}

/* Set/Clear the physical active state */
void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active)
{
	BUG_ON(!irq->hw);
	WARN_ON(irq_set_irqchip_state(irq->host_irq,
				      IRQCHIP_STATE_ACTIVE,
				      active));
}

/**
 * vgic_target_oracle - compute the target vcpu for an irq
 *
 * @irq:	The irq to route. Must be already locked.
 *
 * Based on the current state of the interrupt (enabled, pending,
 * active, vcpu and target_vcpu), compute the next vcpu this should be
 * given to. Return NULL if this shouldn't be injected at all.
 *
 * Requires the IRQ lock to be held.
 */
static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq)
{
	lockdep_assert_held(&irq->irq_lock);

	/* If the interrupt is active, it must stay on the current vcpu */
	if (irq->active)
		return irq->vcpu ? : irq->target_vcpu;

	/*
	 * If the IRQ is not active but enabled and pending, we should direct
	 * it to its configured target VCPU.
	 * If the distributor is disabled, pending interrupts shouldn't be
	 * forwarded.
	 */
	if (irq->enabled && irq_is_pending(irq)) {
		if (unlikely(irq->target_vcpu &&
			     !irq->target_vcpu->kvm->arch.vgic.enabled))
			return NULL;

		return irq->target_vcpu;
	}

	/*
	 * If neither active nor pending and enabled, then this IRQ should not
	 * be queued to any VCPU.
	 */
	return NULL;
}

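/*
 * Illustrative summary (not part of the original file) of the oracle's
 * decision table:
 *
 *	active	enabled && pending	result
 *	yes	(any)			irq->vcpu, or target_vcpu as fallback
 *	no	yes			irq->target_vcpu (NULL if dist disabled)
 *	no	no			NULL (nothing to queue)
 */
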
/*
 * The order of items in the ap_lists defines how we'll pack things in LRs as
 * well, the first items in the list being the first things populated in the
 * LRs.
 *
 * A hard rule is that active interrupts can never be pushed out of the LRs
 * (and therefore take priority) since we cannot reliably trap on deactivation
 * of IRQs and therefore they have to be present in the LRs.
 *
 * Otherwise things should be sorted by the priority field and the GIC
 * hardware support will take care of preemption of priority groups etc.
 *
 * Return negative if "a" sorts before "b", 0 to preserve order, and positive
 * to sort "b" before "a".
 */
static int vgic_irq_cmp(void *priv, const struct list_head *a,
			const struct list_head *b)
{
	struct vgic_irq *irqa = container_of(a, struct vgic_irq, ap_list);
	struct vgic_irq *irqb = container_of(b, struct vgic_irq, ap_list);
	bool penda, pendb;
	int ret;

	raw_spin_lock(&irqa->irq_lock);
	raw_spin_lock_nested(&irqb->irq_lock, SINGLE_DEPTH_NESTING);

	if (irqa->active || irqb->active) {
		/* Active IRQs sort first */
		ret = (int)irqb->active - (int)irqa->active;
		goto out;
	}

	penda = irqa->enabled && irq_is_pending(irqa);
	pendb = irqb->enabled && irq_is_pending(irqb);

	if (!penda || !pendb) {
		/* Pending-and-enabled IRQs sort before the rest */
		ret = (int)pendb - (int)penda;
		goto out;
	}

	/* Both pending and enabled, sort by priority */
	ret = irqa->priority - irqb->priority;
out:
	raw_spin_unlock(&irqb->irq_lock);
	raw_spin_unlock(&irqa->irq_lock);
	return ret;
}

/* Must be called with the ap_list_lock held */
static void vgic_sort_ap_list(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;

	lockdep_assert_held(&vgic_cpu->ap_list_lock);

	list_sort(NULL, &vgic_cpu->ap_list_head, vgic_irq_cmp);
}

/*
 * Only valid injection if changing level for level-triggered IRQs or for a
 * rising edge, and in-kernel connected IRQ lines can only be controlled by
 * their owner.
 */
static bool vgic_validate_injection(struct vgic_irq *irq, bool level, void *owner)
{
	if (irq->owner != owner)
		return false;

	switch (irq->config) {
	case VGIC_CONFIG_LEVEL:
		return irq->line_level != level;
	case VGIC_CONFIG_EDGE:
		return level;
	}

	return false;
}

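/*
 * Illustrative sketch (not part of the original file): for a
 * level-triggered line, re-asserting the current level is filtered out,
 * so a device model may idempotently re-report its line state:
 *
 *	vgic_validate_injection(irq, irq->line_level, owner);	// false: no-op
 *	vgic_validate_injection(irq, !irq->line_level, owner);	// true: inject
 */
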
/*
 * Check whether an IRQ needs to (and can) be queued to a VCPU's ap list.
 * Do the queuing if necessary, taking the right locks in the right order.
 * Returns true when the IRQ was queued, false otherwise.
 *
 * Needs to be entered with the IRQ lock already held, but will return
 * with all locks dropped.
 */
bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
			   unsigned long flags) __releases(&irq->irq_lock)
{
	struct kvm_vcpu *vcpu;

	lockdep_assert_held(&irq->irq_lock);

retry:
	vcpu = vgic_target_oracle(irq);
	if (irq->vcpu || !vcpu) {
		/*
		 * If this IRQ is already on a VCPU's ap_list, then it
		 * cannot be moved or modified; otherwise, if the irq is not
		 * pending and enabled, it does not need to be queued. Either
		 * way there is no more work for us to do.
		 */
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

		/*
		 * We still have to kick the VCPU here: we could be
		 * queueing an edge-triggered interrupt for which we get no
		 * EOI maintenance interrupt, and, while the IRQ is already
		 * on the VCPU's AP list, the VCPU could have EOI'ed the
		 * original interrupt and won't see this one until it exits
		 * for some other reason.
		 */
		if (vcpu) {
			kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
			kvm_vcpu_kick(vcpu);
		}
		return false;
	}

	/*
	 * We must unlock the irq lock to take the ap_list_lock where
	 * we are going to insert this new pending interrupt.
	 */
	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

	/* someone can do stuff here, which we re-check below */

	raw_spin_lock_irqsave(&vcpu->arch.vgic_cpu.ap_list_lock, flags);
	raw_spin_lock(&irq->irq_lock);

	/*
	 * Did something change behind our backs?
	 *
	 * 1) The irq lost its pending state or was disabled behind our
	 *    backs and/or it was queued to another VCPU's ap_list.
	 * 2) Someone changed the affinity on this irq behind our
	 *    backs and we are now holding the wrong ap_list_lock.
	 *
	 * In both cases, drop the locks and retry.
	 */
	if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) {
		raw_spin_unlock(&irq->irq_lock);
		raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock,
					   flags);

		raw_spin_lock_irqsave(&irq->irq_lock, flags);
		goto retry;
	}

	/*
	 * Grab a reference to the irq to reflect the fact that it is
	 * now in the ap_list. This is safe as the caller must already hold a
	 * reference on the irq.
	 */
	vgic_get_irq_ref(irq);
	list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head);
	irq->vcpu = vcpu;

	raw_spin_unlock(&irq->irq_lock);
	raw_spin_unlock_irqrestore(&vcpu->arch.vgic_cpu.ap_list_lock, flags);

	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
	kvm_vcpu_kick(vcpu);

	return true;
}

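/*
 * Illustrative sketch (not part of the original file) of the caller
 * contract: the irq_lock is taken by the caller and always released by
 * vgic_queue_irq_unlock(), so injection paths look like:
 *
 *	raw_spin_lock_irqsave(&irq->irq_lock, flags);
 *	irq->pending_latch = true;
 *	vgic_queue_irq_unlock(kvm, irq, flags);	// drops irq_lock
 */
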
/**
 * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
 * @kvm:	The VM structure pointer
 * @vcpu:	The CPU for PPIs or NULL for global interrupts
 * @intid:	The INTID to inject a new state to.
 * @level:	Edge-triggered:  true:  to trigger the interrupt
 *			         false: to ignore the call
 *		Level-sensitive  true:  raise the input signal
 *			         false: lower the input signal
 * @owner:	The opaque pointer to the owner of the IRQ being raised to verify
 *		that the caller is allowed to inject this IRQ.  Userspace
 *		injections will have owner == NULL.
 *
 * The VGIC is not concerned with devices being active-LOW or active-HIGH for
 * level-sensitive interrupts.  You can think of the level parameter as 1
 * being HIGH and 0 being LOW and all devices being active-HIGH.
 */
int kvm_vgic_inject_irq(struct kvm *kvm, struct kvm_vcpu *vcpu,
			unsigned int intid, bool level, void *owner)
{
	struct vgic_irq *irq;
	unsigned long flags;

	if (!vcpu && intid < VGIC_NR_PRIVATE_IRQS)
		return -EINVAL;

	trace_vgic_update_irq_pending(vcpu ? vcpu->vcpu_idx : 0, intid, level);

	if (intid < VGIC_NR_PRIVATE_IRQS)
		irq = vgic_get_vcpu_irq(vcpu, intid);
	else
		irq = vgic_get_irq(kvm, intid);
	if (!irq)
		return -EINVAL;

	raw_spin_lock_irqsave(&irq->irq_lock, flags);

	if (!vgic_validate_injection(irq, level, owner)) {
		/* Nothing to see here, move along... */
		raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
		vgic_put_irq(kvm, irq);
		return 0;
	}

	if (irq->config == VGIC_CONFIG_LEVEL)
		irq->line_level = level;
	else
		irq->pending_latch = true;

	vgic_queue_irq_unlock(kvm, irq, flags);
	vgic_put_irq(kvm, irq);

	return 0;
}

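/*
 * Illustrative sketch (not part of the original file): a device model
 * drives a level-sensitive line purely in terms of logical HIGH/LOW,
 * regardless of the emulated device's physical polarity:
 *
 *	kvm_vgic_inject_irq(kvm, NULL, spi_intid, true, dev);	// assert
 *	// ...
 *	kvm_vgic_inject_irq(kvm, NULL, spi_intid, false, dev);	// deassert
 *
 * where spi_intid and dev are placeholders for the caller's SPI number and
 * owner cookie.
 */
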
/* @irq->irq_lock must be held */
static int kvm_vgic_map_irq(struct kvm_vcpu *vcpu, struct vgic_irq *irq,
			    unsigned int host_irq,
			    struct irq_ops *ops)
{
	struct irq_desc *desc;
	struct irq_data *data;

	/*
	 * Find the physical IRQ number corresponding to @host_irq
	 */
	desc = irq_to_desc(host_irq);
	if (!desc) {
		kvm_err("%s: no interrupt descriptor\n", __func__);
		return -EINVAL;
	}
	data = irq_desc_get_irq_data(desc);
	while (data->parent_data)
		data = data->parent_data;

	irq->hw = true;
	irq->host_irq = host_irq;
	irq->hwintid = data->hwirq;
	irq->ops = ops;
	return 0;
}

/* @irq->irq_lock must be held */
static inline void kvm_vgic_unmap_irq(struct vgic_irq *irq)
{
	irq->hw = false;
	irq->hwintid = 0;
	irq->ops = NULL;
}

int kvm_vgic_map_phys_irq(struct kvm_vcpu *vcpu, unsigned int host_irq,
			  u32 vintid, struct irq_ops *ops)
{
	struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, vintid);
	unsigned long flags;
	int ret;

	BUG_ON(!irq);

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	ret = kvm_vgic_map_irq(vcpu, irq, host_irq, ops);
	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
	vgic_put_irq(vcpu->kvm, irq);

	return ret;
}

/**
 * kvm_vgic_reset_mapped_irq - Reset a mapped IRQ
 * @vcpu: The VCPU pointer
 * @vintid: The INTID of the interrupt
 *
 * Reset the active and pending states of a mapped interrupt.  Kernel
 * subsystems injecting mapped interrupts should reset their interrupt lines
 * when we are doing a reset of the VM.
 */
void kvm_vgic_reset_mapped_irq(struct kvm_vcpu *vcpu, u32 vintid)
{
	struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, vintid);
	unsigned long flags;

	if (!irq->hw)
		goto out;

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	irq->active = false;
	irq->pending_latch = false;
	irq->line_level = false;
	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
out:
	vgic_put_irq(vcpu->kvm, irq);
}

int kvm_vgic_unmap_phys_irq(struct kvm_vcpu *vcpu, unsigned int vintid)
{
	struct vgic_irq *irq;
	unsigned long flags;

	if (!vgic_initialized(vcpu->kvm))
		return -EAGAIN;

	irq = vgic_get_vcpu_irq(vcpu, vintid);
	BUG_ON(!irq);

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	kvm_vgic_unmap_irq(irq);
	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
	vgic_put_irq(vcpu->kvm, irq);

	return 0;
}

int kvm_vgic_get_map(struct kvm_vcpu *vcpu, unsigned int vintid)
{
	struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, vintid);
	unsigned long flags;
	int ret = -1;

	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	if (irq->hw)
		ret = irq->hwintid;
	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

	vgic_put_irq(vcpu->kvm, irq);
	return ret;
}

/**
 * kvm_vgic_set_owner - Set the owner of an interrupt for a VM
 *
 * @vcpu:	Pointer to the VCPU (used for PPIs)
 * @intid:	The virtual INTID identifying the interrupt (PPI or SPI)
 * @owner:	Opaque pointer to the owner
 *
 * Returns 0 if intid is not already used by another in-kernel device and the
 * owner is set, otherwise returns an error code.
 */
int kvm_vgic_set_owner(struct kvm_vcpu *vcpu, unsigned int intid, void *owner)
{
	struct vgic_irq *irq;
	unsigned long flags;
	int ret = 0;

	if (!vgic_initialized(vcpu->kvm))
		return -EAGAIN;

	/* SGIs and LPIs cannot be wired up to any device */
	if (!irq_is_ppi(intid) && !vgic_valid_spi(vcpu->kvm, intid))
		return -EINVAL;

	irq = vgic_get_vcpu_irq(vcpu, intid);
	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	if (irq->owner && irq->owner != owner)
		ret = -EEXIST;
	else
		irq->owner = owner;
	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

	return ret;
}

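/*
 * Illustrative sketch (not part of the original file): an in-kernel user
 * such as a timer emulation claims its PPI once at init, then passes the
 * same owner cookie to every later injection so the VGIC can reject other
 * writers:
 *
 *	kvm_vgic_set_owner(vcpu, ppi_intid, my_dev);
 *	kvm_vgic_inject_irq(vcpu->kvm, vcpu, ppi_intid, level, my_dev);
 *
 * where ppi_intid and my_dev are placeholders for the caller's own values.
 */
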
/**
 * vgic_prune_ap_list - Remove non-relevant interrupts from the list
 *
 * @vcpu: The VCPU pointer
 *
 * Go over the list of "interesting" interrupts, and prune those that we
 * won't have to consider in the near future.
 */
static void vgic_prune_ap_list(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq, *tmp;
	bool deleted_lpis = false;

	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());

retry:
	raw_spin_lock(&vgic_cpu->ap_list_lock);

	list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) {
		struct kvm_vcpu *target_vcpu, *vcpuA, *vcpuB;

		raw_spin_lock(&irq->irq_lock);

		BUG_ON(vcpu != irq->vcpu);

		target_vcpu = vgic_target_oracle(irq);

		if (!target_vcpu) {
			/* No longer relevant, move it off the list. */
			list_del(&irq->ap_list);
			irq->vcpu = NULL;
			raw_spin_unlock(&irq->irq_lock);

			/*
			 * This put matches the vgic_get_irq_ref in
			 * vgic_queue_irq_unlock, where we added the LPI to
			 * the ap_list. As we remove the irq from the list,
			 * we also drop the refcount.
			 */
			deleted_lpis |= vgic_put_irq_norelease(vcpu->kvm, irq);
			continue;
		}

		if (target_vcpu == vcpu) {
			/* We're on the right CPU */
			raw_spin_unlock(&irq->irq_lock);
			continue;
		}

		/* This interrupt looks like it has to be migrated. */
		raw_spin_unlock(&irq->irq_lock);
		raw_spin_unlock(&vgic_cpu->ap_list_lock);

		/*
		 * Ensure the locking order by always locking the smallest
		 * vcpu_id first.
		 */
		if (vcpu->vcpu_id < target_vcpu->vcpu_id) {
			vcpuA = vcpu;
			vcpuB = target_vcpu;
		} else {
			vcpuA = target_vcpu;
			vcpuB = vcpu;
		}

		raw_spin_lock(&vcpuA->arch.vgic_cpu.ap_list_lock);
		raw_spin_lock_nested(&vcpuB->arch.vgic_cpu.ap_list_lock,
				     SINGLE_DEPTH_NESTING);
		raw_spin_lock(&irq->irq_lock);

		/*
		 * If the affinity has been preserved, move the
		 * interrupt around. Otherwise, it means things have
		 * changed while the interrupt was unlocked, and we
		 * need to replay this.
		 */
		if (target_vcpu == vgic_target_oracle(irq)) {
			struct vgic_cpu *new_cpu = &target_vcpu->arch.vgic_cpu;

			list_del(&irq->ap_list);
			irq->vcpu = target_vcpu;
			list_add_tail(&irq->ap_list, &new_cpu->ap_list_head);
		}

		raw_spin_unlock(&irq->irq_lock);
		raw_spin_unlock(&vcpuB->arch.vgic_cpu.ap_list_lock);
		raw_spin_unlock(&vcpuA->arch.vgic_cpu.ap_list_lock);
		goto retry;
	}

	raw_spin_unlock(&vgic_cpu->ap_list_lock);

	if (unlikely(deleted_lpis))
		vgic_release_deleted_lpis(vcpu->kvm);
}

static inline void vgic_populate_lr(struct kvm_vcpu *vcpu,
				    struct vgic_irq *irq, int lr)
{
	lockdep_assert_held(&irq->irq_lock);

	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_populate_lr(vcpu, irq, lr);
	else
		vgic_v3_populate_lr(vcpu, irq, lr);
}

static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
				 bool *multi_sgi)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq;
	int count = 0;

	*multi_sgi = false;

	lockdep_assert_held(&vgic_cpu->ap_list_lock);

	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
		int w;

		raw_spin_lock(&irq->irq_lock);
		/* GICv2 SGIs can count for more than one... */
		w = vgic_irq_get_lr_count(irq);
		raw_spin_unlock(&irq->irq_lock);

		count += w;
		*multi_sgi |= (w > 1);
	}
	return count;
}

/* Requires the ap_list_lock to be held. */
static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq;
	int count;
	bool multi_sgi;
	u8 prio = 0xff;

	lockdep_assert_held(&vgic_cpu->ap_list_lock);

	count = compute_ap_list_depth(vcpu, &multi_sgi);
	if (count > kvm_vgic_global_state.nr_lr || multi_sgi)
		vgic_sort_ap_list(vcpu);

	count = 0;

	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
		raw_spin_lock(&irq->irq_lock);

		/*
		 * If we have multi-SGIs in the pipeline, we need to
		 * guarantee that they are all seen before any IRQ of
		 * lower priority. In that case, we need to filter out
		 * these interrupts by exiting early. This is easy as
		 * the AP list has been sorted already.
		 */
		if (multi_sgi && irq->priority > prio) {
			raw_spin_unlock(&irq->irq_lock);
			break;
		}

		if (likely(vgic_target_oracle(irq) == vcpu)) {
			vgic_populate_lr(vcpu, irq, count++);

			if (irq->source)
				prio = irq->priority;
		}

		raw_spin_unlock(&irq->irq_lock);

		if (count == kvm_vgic_global_state.nr_lr) {
			if (!list_is_last(&irq->ap_list,
					  &vgic_cpu->ap_list_head))
				vgic_set_underflow(vcpu);
			break;
		}
	}

	if (kvm_vgic_global_state.type == VGIC_V2)
		vcpu->arch.vgic_cpu.vgic_v2.used_lrs = count;
	else
		vcpu->arch.vgic_cpu.vgic_v3.used_lrs = count;
}

static inline bool can_access_vgic_from_kernel(void)
{
	/*
	 * GICv2 can always be accessed from the kernel because it is
	 * memory-mapped, and VHE systems can access GICv3 EL2 system
	 * registers.
	 */
	return !static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif) || has_vhe();
}

static inline void vgic_save_state(struct kvm_vcpu *vcpu)
{
	if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
		vgic_v2_save_state(vcpu);
	else
		__vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3);
}

/* Sync back the hardware VGIC state into our emulation after a guest's run. */
void kvm_vgic_sync_hwstate(struct kvm_vcpu *vcpu)
{
	int used_lrs;

	/* An empty ap_list_head implies used_lrs == 0 */
	if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head))
		return;

	if (can_access_vgic_from_kernel())
		vgic_save_state(vcpu);

	if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
		used_lrs = vcpu->arch.vgic_cpu.vgic_v2.used_lrs;
	else
		used_lrs = vcpu->arch.vgic_cpu.vgic_v3.used_lrs;

	if (used_lrs)
		vgic_fold_lr_state(vcpu);
	vgic_prune_ap_list(vcpu);
}

static inline void vgic_restore_state(struct kvm_vcpu *vcpu)
{
	if (!static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
		vgic_v2_restore_state(vcpu);
	else
		__vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3);
}

/* Flush our emulation state into the GIC hardware before entering the guest. */
void kvm_vgic_flush_hwstate(struct kvm_vcpu *vcpu)
{
	/*
	 * If in a nested state, we must return early:
	 *
	 * - If we have any pending IRQ for the guest and the guest
	 *   expects IRQs to be handled in its virtual EL2 mode (the
	 *   virtual IMO bit is set) and it is not already running in
	 *   virtual EL2 mode, then we have to emulate an IRQ
	 *   exception to virtual EL2, by placing a request on the
	 *   entry path.
	 *
	 * - Otherwise, do exactly *NOTHING*. The guest state is
	 *   already loaded, and we can carry on with running it.
	 */
	if (vgic_state_is_nested(vcpu)) {
		if (kvm_vgic_vcpu_pending_irq(vcpu))
			kvm_make_request(KVM_REQ_GUEST_HYP_IRQ_PENDING, vcpu);
		return;
	}

	/*
	 * If there are no virtual interrupts active or pending for this
	 * VCPU, then there is no work for us to do here.
	 */
	if (list_empty(&vcpu->arch.vgic_cpu.ap_list_head) &&
	    !vgic_supports_direct_irqs(vcpu->kvm))
		return;

	DEBUG_SPINLOCK_BUG_ON(!irqs_disabled());

	if (!list_empty(&vcpu->arch.vgic_cpu.ap_list_head)) {
		raw_spin_lock(&vcpu->arch.vgic_cpu.ap_list_lock);
		vgic_flush_lr_state(vcpu);
		raw_spin_unlock(&vcpu->arch.vgic_cpu.ap_list_lock);
	}

	if (can_access_vgic_from_kernel())
		vgic_restore_state(vcpu);

	if (vgic_supports_direct_irqs(vcpu->kvm))
		vgic_v4_commit(vcpu);
}

void kvm_vgic_load(struct kvm_vcpu *vcpu)
{
	if (unlikely(!irqchip_in_kernel(vcpu->kvm) || !vgic_initialized(vcpu->kvm))) {
		if (has_vhe() && static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
			__vgic_v3_activate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
		return;
	}

	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_load(vcpu);
	else
		vgic_v3_load(vcpu);
}

void kvm_vgic_put(struct kvm_vcpu *vcpu)
{
	if (unlikely(!irqchip_in_kernel(vcpu->kvm) || !vgic_initialized(vcpu->kvm))) {
		if (has_vhe() && static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
			__vgic_v3_deactivate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
		return;
	}

	if (kvm_vgic_global_state.type == VGIC_V2)
		vgic_v2_put(vcpu);
	else
		vgic_v3_put(vcpu);
}

int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu)
{
	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
	struct vgic_irq *irq;
	bool pending = false;
	unsigned long flags;
	struct vgic_vmcr vmcr;

	if (!vcpu->kvm->arch.vgic.enabled)
		return false;

	if (vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last)
		return true;

	vgic_get_vmcr(vcpu, &vmcr);

	raw_spin_lock_irqsave(&vgic_cpu->ap_list_lock, flags);

	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
		raw_spin_lock(&irq->irq_lock);
		pending = irq_is_pending(irq) && irq->enabled &&
			  !irq->active &&
			  irq->priority < vmcr.pmr;
		raw_spin_unlock(&irq->irq_lock);

		if (pending)
			break;
	}

	raw_spin_unlock_irqrestore(&vgic_cpu->ap_list_lock, flags);

	return pending;
}

bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int vintid)
{
	struct vgic_irq *irq;
	bool map_is_active;
	unsigned long flags;

	if (!vgic_initialized(vcpu->kvm))
		return false;

	irq = vgic_get_vcpu_irq(vcpu, vintid);
	raw_spin_lock_irqsave(&irq->irq_lock, flags);
	map_is_active = irq->hw && irq->active;
	raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
	vgic_put_irq(vcpu->kvm, irq);

	return map_is_active;
}

/*
 * Level-triggered mapped IRQs are special because we only observe rising
 * edges as input to the VGIC.
 *
 * If the guest never acked the interrupt we have to sample the physical
 * line and set the line level, because the device state could have changed
 * or we simply need to process the still pending interrupt later.
 *
 * We could also have entered the guest with the interrupt active+pending.
 * On the next exit, we need to re-evaluate the pending state, as it could
 * otherwise result in a spurious interrupt by injecting a now potentially
 * stale pending state.
 *
 * If this causes us to lower the level, we have to also clear the physical
 * active state, since we will otherwise never be told when the interrupt
 * becomes asserted again.
 *
 * Another case is when the interrupt requires a helping hand on
 * deactivation (no HW deactivation, for example).
 */
void vgic_irq_handle_resampling(struct vgic_irq *irq,
				bool lr_deactivated, bool lr_pending)
{
	if (vgic_irq_is_mapped_level(irq)) {
		bool resample = false;

		if (unlikely(vgic_irq_needs_resampling(irq))) {
			resample = !(irq->active || irq->pending_latch);
		} else if (lr_pending || (lr_deactivated && irq->line_level)) {
			irq->line_level = vgic_get_phys_line_level(irq);
			resample = !irq->line_level;
		}

		if (resample)
			vgic_irq_set_phys_active(irq, false);
	}
}
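
/*
 * Illustrative sketch (not part of the original file): for a mapped level
 * IRQ that the guest EOIs while the device has dropped the line, a typical
 * exit path re-samples and releases the physical active state:
 *
 *	vgic_irq_handle_resampling(irq, true, false);
 *	// -> irq->line_level = vgic_get_phys_line_level(irq) == false
 *	// -> vgic_irq_set_phys_active(irq, false), so the host can observe
 *	//    the next rising edge on the line
 */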