Lines Matching full:irq
7 #include <linux/irq.h>
62 * Index the VM's xarray of mapped LPIs and return a reference to the IRQ
64 * finished with the IRQ.
69 struct vgic_irq *irq = NULL; in vgic_get_lpi() local
73 irq = xa_load(&dist->lpi_xa, intid); in vgic_get_lpi()
74 if (!vgic_try_get_irq_kref(irq)) in vgic_get_lpi()
75 irq = NULL; in vgic_get_lpi()
79 return irq; in vgic_get_lpi()
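
The vgic_get_lpi() fragments above show the lookup pattern: index the distributor's LPI xarray under RCU and hand the entry back only if a reference count can still be taken, so an entry caught mid-teardown is reported as not found. Below is a minimal user-space sketch of that "look up, then try to take a reference" idea; the names (irq_obj, lpi_table, try_get, get_lpi) are illustrative stand-ins, the plain array replaces the xarray, and the RCU read-side locking of the real code is omitted because the sketch is single-threaded.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-in for struct vgic_irq: just an intid and a refcount. */
struct irq_obj {
    unsigned int intid;
    int refcount;              /* 0 means the object is already being torn down */
};

#define NR_SLOTS 16
static struct irq_obj *lpi_table[NR_SLOTS];    /* stand-in for dist->lpi_xa */

/* Analogue of vgic_try_get_irq_kref(): refuse a reference once it hit zero. */
static bool try_get(struct irq_obj *irq)
{
    if (!irq || irq->refcount == 0)
        return false;
    irq->refcount++;
    return true;
}

/* Analogue of the vgic_get_lpi() fragments: look up, then try to get a ref. */
static struct irq_obj *get_lpi(unsigned int intid)
{
    struct irq_obj *irq = (intid < NR_SLOTS) ? lpi_table[intid] : NULL;

    if (!try_get(irq))
        irq = NULL;            /* unmapped or mid-teardown: treat as missing */
    return irq;
}

int main(void)
{
    struct irq_obj lpi = { .intid = 5, .refcount = 1 };

    lpi_table[5] = &lpi;
    printf("intid 5: %s\n", get_lpi(5) ? "found" : "missing");
    printf("intid 6: %s\n", get_lpi(6) ? "found" : "missing");
    return 0;
}
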
85 * to call vgic_put_irq() once it's finished with this IRQ.
118 void vgic_put_irq(struct kvm *kvm, struct vgic_irq *irq) in vgic_put_irq() argument
123 if (irq->intid < VGIC_MIN_LPI) in vgic_put_irq()
126 if (!kref_put(&irq->refcount, vgic_irq_release)) in vgic_put_irq()
130 __xa_erase(&dist->lpi_xa, irq->intid); in vgic_put_irq()
133 kfree_rcu(irq, rcu); in vgic_put_irq()
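
The vgic_put_irq() fragments show the matching release side: interrupts below VGIC_MIN_LPI are never freed, while dropping the last reference on an LPI erases it from the xarray and frees it via kfree_rcu() so readers racing with the lookup stay safe. A small sketch of that "last put tears down the mapping" behaviour, assuming the same illustrative irq_obj shape as above, with plain free() standing in for kfree_rcu() and an array standing in for the xarray:

#include <stdio.h>
#include <stdlib.h>

struct irq_obj {
    unsigned int intid;
    int refcount;
};

#define NR_SLOTS 16
static struct irq_obj *lpi_table[NR_SLOTS];    /* stand-in for dist->lpi_xa */

/* Analogue of vgic_put_irq(): the final reference unmaps and frees the LPI. */
static void put_lpi(struct irq_obj *irq)
{
    if (--irq->refcount > 0)
        return;

    lpi_table[irq->intid] = NULL;   /* analogous to __xa_erase()  */
    free(irq);                      /* analogous to kfree_rcu()   */
}

int main(void)
{
    struct irq_obj *lpi = malloc(sizeof(*lpi));

    lpi->intid = 5;
    lpi->refcount = 2;              /* e.g. the mapping plus one transient user */
    lpi_table[5] = lpi;

    put_lpi(lpi);
    printf("after first put:  %s\n", lpi_table[5] ? "mapped" : "gone");
    put_lpi(lpi);
    printf("after second put: %s\n", lpi_table[5] ? "mapped" : "gone");
    return 0;
}
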
139 struct vgic_irq *irq, *tmp; in vgic_flush_pending_lpis() local
144 list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) { in vgic_flush_pending_lpis()
145 if (irq->intid >= VGIC_MIN_LPI) { in vgic_flush_pending_lpis()
146 raw_spin_lock(&irq->irq_lock); in vgic_flush_pending_lpis()
147 list_del(&irq->ap_list); in vgic_flush_pending_lpis()
148 irq->vcpu = NULL; in vgic_flush_pending_lpis()
149 raw_spin_unlock(&irq->irq_lock); in vgic_flush_pending_lpis()
150 vgic_put_irq(vcpu->kvm, irq); in vgic_flush_pending_lpis()
157 void vgic_irq_set_phys_pending(struct vgic_irq *irq, bool pending) in vgic_irq_set_phys_pending() argument
159 WARN_ON(irq_set_irqchip_state(irq->host_irq, in vgic_irq_set_phys_pending()
164 bool vgic_get_phys_line_level(struct vgic_irq *irq) in vgic_get_phys_line_level() argument
168 BUG_ON(!irq->hw); in vgic_get_phys_line_level()
170 if (irq->ops && irq->ops->get_input_level) in vgic_get_phys_line_level()
171 return irq->ops->get_input_level(irq->intid); in vgic_get_phys_line_level()
173 WARN_ON(irq_get_irqchip_state(irq->host_irq, in vgic_get_phys_line_level()
180 void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active) in vgic_irq_set_phys_active() argument
183 BUG_ON(!irq->hw); in vgic_irq_set_phys_active()
184 WARN_ON(irq_set_irqchip_state(irq->host_irq, in vgic_irq_set_phys_active()
190 * vgic_target_oracle - compute the target vcpu for an irq
192 * @irq: The irq to route. Must be already locked.
198 * Requires the IRQ lock to be held.
200 static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq) in vgic_target_oracle() argument
202 lockdep_assert_held(&irq->irq_lock); in vgic_target_oracle()
205 if (irq->active) in vgic_target_oracle()
206 return irq->vcpu ? : irq->target_vcpu; in vgic_target_oracle()
209 * If the IRQ is not active but enabled and pending, we should direct in vgic_target_oracle()
214 if (irq->enabled && irq_is_pending(irq)) { in vgic_target_oracle()
215 if (unlikely(irq->target_vcpu && in vgic_target_oracle()
216 !irq->target_vcpu->kvm->arch.vgic.enabled)) in vgic_target_oracle()
219 return irq->target_vcpu; in vgic_target_oracle()
222 /* If neither active nor pending and enabled, then this IRQ should not in vgic_target_oracle()
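
The vgic_target_oracle() fragments spell out the routing rule: an active interrupt stays on the vCPU it is already queued on (falling back to its configured target), an enabled and pending interrupt goes to its target vCPU unless that VM's distributor is disabled, and anything else is routed nowhere. A self-contained restatement of that decision as a pure function follows; struct virq here is a simplified, illustrative view of the state the oracle reads, not the kernel's struct vgic_irq.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct vcpu { const char *name; };

/* Simplified view of the state the oracle looks at. */
struct virq {
    bool active, enabled, pending;
    bool dist_enabled;              /* distributor state of the target's VM */
    struct vcpu *vcpu;              /* vCPU the IRQ is currently queued on  */
    struct vcpu *target_vcpu;       /* configured affinity                  */
};

/* Restatement of the decision shown in the vgic_target_oracle() fragments. */
static struct vcpu *target_oracle(const struct virq *irq)
{
    if (irq->active)                         /* stay where it already is */
        return irq->vcpu ? irq->vcpu : irq->target_vcpu;

    if (irq->enabled && irq->pending) {      /* route to the configured target */
        if (irq->target_vcpu && !irq->dist_enabled)
            return NULL;                     /* distributor off: hold it back  */
        return irq->target_vcpu;
    }

    return NULL;                             /* neither active nor pending+enabled */
}

int main(void)
{
    struct vcpu v0 = { "vcpu0" }, v1 = { "vcpu1" };
    struct virq irq = { .enabled = true, .pending = true,
                        .dist_enabled = true, .target_vcpu = &v1 };

    printf("pending+enabled -> %s\n", target_oracle(&irq)->name);

    irq.active = true;
    irq.vcpu = &v0;
    printf("active on vcpu0 -> %s\n", target_oracle(&irq)->name);
    return 0;
}
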
294 * rising edge, and in-kernel connected IRQ lines can only be controlled by
297 static bool vgic_validate_injection(struct vgic_irq *irq, bool level, void *owner) in vgic_validate_injection() argument
299 if (irq->owner != owner) in vgic_validate_injection()
302 switch (irq->config) { in vgic_validate_injection()
304 return irq->line_level != level; in vgic_validate_injection()
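
The vgic_validate_injection() fragments gate new injections: the caller's owner token must match the one registered on the IRQ, a level-sensitive interrupt only produces work when the line actually changes state, and, per the surrounding comment, an edge interrupt is only injected on a rising edge. A hedged sketch of that filter, with a local enum standing in for the kernel's VGIC_CONFIG_* values and struct virq again being an illustrative reduction:

#include <stdbool.h>
#include <stdio.h>

enum irq_config { CONFIG_EDGE, CONFIG_LEVEL };   /* stand-in for VGIC_CONFIG_* */

struct virq {
    enum irq_config config;
    bool line_level;      /* last observed level of a level-sensitive line */
    void *owner;          /* opaque token of whoever may inject this IRQ   */
};

/* Does this (level, owner) pair describe an injection worth acting on? */
static bool validate_injection(const struct virq *irq, bool level, void *owner)
{
    if (irq->owner != owner)
        return false;                     /* not yours to raise */

    switch (irq->config) {
    case CONFIG_LEVEL:
        return irq->line_level != level;  /* only level *changes* matter */
    case CONFIG_EDGE:
        return level;                     /* only rising edges inject    */
    }
    return false;
}

int main(void)
{
    int token;
    struct virq irq = { .config = CONFIG_LEVEL, .line_level = true,
                        .owner = &token };

    printf("same level again: %d\n", validate_injection(&irq, true, &token));
    printf("line dropped:     %d\n", validate_injection(&irq, false, &token));
    printf("wrong owner:      %d\n", validate_injection(&irq, false, NULL));
    return 0;
}
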
313 * Check whether an IRQ needs to (and can) be queued to a VCPU's ap list.
315 * Returns true when the IRQ was queued, false otherwise.
317 * Needs to be entered with the IRQ lock already held, but will return
320 bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq, in vgic_queue_irq_unlock() argument
321 unsigned long flags) __releases(&irq->irq_lock) in vgic_queue_irq_unlock()
325 lockdep_assert_held(&irq->irq_lock); in vgic_queue_irq_unlock()
328 vcpu = vgic_target_oracle(irq); in vgic_queue_irq_unlock()
329 if (irq->vcpu || !vcpu) { in vgic_queue_irq_unlock()
331 * If this IRQ is already on a VCPU's ap_list, then it in vgic_queue_irq_unlock()
335 * Otherwise, if the irq is not pending and enabled, it does in vgic_queue_irq_unlock()
339 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in vgic_queue_irq_unlock()
345 * while the IRQ is already on the VCPU's AP list, the in vgic_queue_irq_unlock()
358 * We must unlock the irq lock to take the ap_list_lock where in vgic_queue_irq_unlock()
361 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in vgic_queue_irq_unlock()
366 raw_spin_lock(&irq->irq_lock); in vgic_queue_irq_unlock()
372 * 1) The irq lost its pending state or was disabled behind our in vgic_queue_irq_unlock()
374 * 2) Someone changed the affinity on this irq behind our in vgic_queue_irq_unlock()
380 if (unlikely(irq->vcpu || vcpu != vgic_target_oracle(irq))) { in vgic_queue_irq_unlock()
381 raw_spin_unlock(&irq->irq_lock); in vgic_queue_irq_unlock()
385 raw_spin_lock_irqsave(&irq->irq_lock, flags); in vgic_queue_irq_unlock()
390 * Grab a reference to the irq to reflect the fact that it is in vgic_queue_irq_unlock()
392 * reference on the irq. in vgic_queue_irq_unlock()
394 vgic_get_irq_kref(irq); in vgic_queue_irq_unlock()
395 list_add_tail(&irq->ap_list, &vcpu->arch.vgic_cpu.ap_list_head); in vgic_queue_irq_unlock()
396 irq->vcpu = vcpu; in vgic_queue_irq_unlock()
398 raw_spin_unlock(&irq->irq_lock); in vgic_queue_irq_unlock()
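
The vgic_queue_irq_unlock() fragments document the lock-ordering problem: the target vCPU's ap_list lock must be taken before the per-IRQ lock, but the caller arrives holding only the IRQ lock. The function therefore drops the IRQ lock, takes both locks in the correct order, and re-runs the target oracle; if the IRQ got queued, retargeted, disabled or cleared in the unlocked window, it backs out and retries. The sketch below mirrors that "drop, relock in order, revalidate" shape with pthread mutexes; all structs and helpers are illustrative, and the trivial oracle() exists only so the revalidation step has something to call.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct vcpu {
    pthread_mutex_t ap_list_lock;
    int queued;                     /* stand-in for the ap_list itself */
};

struct virq {
    pthread_mutex_t irq_lock;
    struct vcpu *vcpu;              /* vCPU whose ap_list we are on, if any */
    struct vcpu *target;
    bool pending;
};

/* Illustrative oracle: where should this IRQ go right now? */
static struct vcpu *oracle(struct virq *irq)
{
    return irq->pending ? irq->target : NULL;
}

/* Entered with irq->irq_lock held; returns with it released, like the kernel helper. */
static bool queue_irq_unlock(struct virq *irq)
{
    struct vcpu *vcpu;

retry:
    vcpu = oracle(irq);

    if (irq->vcpu || !vcpu) {               /* already queued, or nowhere to go */
        pthread_mutex_unlock(&irq->irq_lock);
        return false;
    }

    /* Lock order is ap_list_lock then irq_lock: drop, then re-take in order. */
    pthread_mutex_unlock(&irq->irq_lock);
    pthread_mutex_lock(&vcpu->ap_list_lock);
    pthread_mutex_lock(&irq->irq_lock);

    /* The world may have changed while no lock was held: revalidate. */
    if (irq->vcpu || vcpu != oracle(irq)) {
        pthread_mutex_unlock(&irq->irq_lock);
        pthread_mutex_unlock(&vcpu->ap_list_lock);
        pthread_mutex_lock(&irq->irq_lock);
        goto retry;
    }

    vcpu->queued++;                          /* list_add_tail() in the real code */
    irq->vcpu = vcpu;
    pthread_mutex_unlock(&irq->irq_lock);
    pthread_mutex_unlock(&vcpu->ap_list_lock);
    return true;
}

int main(void)
{
    struct vcpu v = { PTHREAD_MUTEX_INITIALIZER, 0 };
    struct virq irq = { PTHREAD_MUTEX_INITIALIZER, NULL, &v, true };

    pthread_mutex_lock(&irq.irq_lock);
    printf("queued: %d\n", queue_irq_unlock(&irq));
    printf("ap_list depth: %d\n", v.queued);
    return 0;
}
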
408 * kvm_vgic_inject_irq - Inject an IRQ from a device to the vgic
416 * @owner: The opaque pointer to the owner of the IRQ being raised to verify
417 * that the caller is allowed to inject this IRQ. Userspace
427 struct vgic_irq *irq; in kvm_vgic_inject_irq() local
440 irq = vgic_get_irq(kvm, vcpu, intid); in kvm_vgic_inject_irq()
441 if (!irq) in kvm_vgic_inject_irq()
444 raw_spin_lock_irqsave(&irq->irq_lock, flags); in kvm_vgic_inject_irq()
446 if (!vgic_validate_injection(irq, level, owner)) { in kvm_vgic_inject_irq()
448 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in kvm_vgic_inject_irq()
449 vgic_put_irq(kvm, irq); in kvm_vgic_inject_irq()
453 if (irq->config == VGIC_CONFIG_LEVEL) in kvm_vgic_inject_irq()
454 irq->line_level = level; in kvm_vgic_inject_irq()
456 irq->pending_latch = true; in kvm_vgic_inject_irq()
458 vgic_queue_irq_unlock(kvm, irq, flags); in kvm_vgic_inject_irq()
459 vgic_put_irq(kvm, irq); in kvm_vgic_inject_irq()
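
Taken together, the kvm_vgic_inject_irq() fragments show the overall injection flow: look the IRQ up, lock it, let vgic_validate_injection() decide whether anything needs doing, record the new state (line_level for level-sensitive interrupts, pending_latch otherwise), and hand off to vgic_queue_irq_unlock(), dropping the IRQ reference on every path. The sketch below models only the validate-and-record part; queueing, locking and reference counting are reduced to a comment, and every name in it is an illustrative stand-in.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

enum irq_config { CONFIG_EDGE, CONFIG_LEVEL };

struct virq {
    enum irq_config config;
    bool line_level;      /* state of a level-sensitive input line  */
    bool pending_latch;   /* latched pending state for edge inputs  */
    void *owner;
};

static bool validate(const struct virq *irq, bool level, void *owner)
{
    if (irq->owner != owner)
        return false;
    return irq->config == CONFIG_LEVEL ? irq->line_level != level : level;
}

/*
 * Rough shape of the flow in the kvm_vgic_inject_irq() fragments:
 * validate, record the new state, then queue (queueing is elided here).
 */
static int inject(struct virq *irq, bool level, void *owner)
{
    if (!validate(irq, level, owner))
        return 0;                      /* nothing to do, not an error */

    if (irq->config == CONFIG_LEVEL)
        irq->line_level = level;
    else
        irq->pending_latch = true;

    /* the real code now calls vgic_queue_irq_unlock() and vgic_put_irq() */
    return 1;
}

int main(void)
{
    int token;
    struct virq spi = { .config = CONFIG_LEVEL, .owner = &token };

    printf("raise:   %d\n", inject(&spi, true, &token));
    printf("again:   %d\n", inject(&spi, true, &token));   /* no change */
    printf("release: %d\n", inject(&spi, false, &token));
    return 0;
}
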
464 /* @irq->irq_lock must be held */
465 static int kvm_vgic_map_irq(struct kvm_vcpu *vcpu, struct vgic_irq *irq, in kvm_vgic_map_irq() argument
473 * Find the physical IRQ number corresponding to @host_irq in kvm_vgic_map_irq()
484 irq->hw = true; in kvm_vgic_map_irq()
485 irq->host_irq = host_irq; in kvm_vgic_map_irq()
486 irq->hwintid = data->hwirq; in kvm_vgic_map_irq()
487 irq->ops = ops; in kvm_vgic_map_irq()
491 /* @irq->irq_lock must be held */
492 static inline void kvm_vgic_unmap_irq(struct vgic_irq *irq) in kvm_vgic_unmap_irq() argument
494 irq->hw = false; in kvm_vgic_unmap_irq()
495 irq->hwintid = 0; in kvm_vgic_unmap_irq()
496 irq->ops = NULL; in kvm_vgic_unmap_irq()
502 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid); in kvm_vgic_map_phys_irq() local
506 BUG_ON(!irq); in kvm_vgic_map_phys_irq()
508 raw_spin_lock_irqsave(&irq->irq_lock, flags); in kvm_vgic_map_phys_irq()
509 ret = kvm_vgic_map_irq(vcpu, irq, host_irq, ops); in kvm_vgic_map_phys_irq()
510 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in kvm_vgic_map_phys_irq()
511 vgic_put_irq(vcpu->kvm, irq); in kvm_vgic_map_phys_irq()
517 * kvm_vgic_reset_mapped_irq - Reset a mapped IRQ
527 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid); in kvm_vgic_reset_mapped_irq() local
530 if (!irq->hw) in kvm_vgic_reset_mapped_irq()
533 raw_spin_lock_irqsave(&irq->irq_lock, flags); in kvm_vgic_reset_mapped_irq()
534 irq->active = false; in kvm_vgic_reset_mapped_irq()
535 irq->pending_latch = false; in kvm_vgic_reset_mapped_irq()
536 irq->line_level = false; in kvm_vgic_reset_mapped_irq()
537 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in kvm_vgic_reset_mapped_irq()
539 vgic_put_irq(vcpu->kvm, irq); in kvm_vgic_reset_mapped_irq()
544 struct vgic_irq *irq; in kvm_vgic_unmap_phys_irq() local
550 irq = vgic_get_irq(vcpu->kvm, vcpu, vintid); in kvm_vgic_unmap_phys_irq()
551 BUG_ON(!irq); in kvm_vgic_unmap_phys_irq()
553 raw_spin_lock_irqsave(&irq->irq_lock, flags); in kvm_vgic_unmap_phys_irq()
554 kvm_vgic_unmap_irq(irq); in kvm_vgic_unmap_phys_irq()
555 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in kvm_vgic_unmap_phys_irq()
556 vgic_put_irq(vcpu->kvm, irq); in kvm_vgic_unmap_phys_irq()
563 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, vintid); in kvm_vgic_get_map() local
567 raw_spin_lock_irqsave(&irq->irq_lock, flags); in kvm_vgic_get_map()
568 if (irq->hw) in kvm_vgic_get_map()
569 ret = irq->hwintid; in kvm_vgic_get_map()
570 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in kvm_vgic_get_map()
572 vgic_put_irq(vcpu->kvm, irq); in kvm_vgic_get_map()
588 struct vgic_irq *irq; in kvm_vgic_set_owner() local
599 irq = vgic_get_irq(vcpu->kvm, vcpu, intid); in kvm_vgic_set_owner()
600 raw_spin_lock_irqsave(&irq->irq_lock, flags); in kvm_vgic_set_owner()
601 if (irq->owner && irq->owner != owner) in kvm_vgic_set_owner()
604 irq->owner = owner; in kvm_vgic_set_owner()
605 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in kvm_vgic_set_owner()
621 struct vgic_irq *irq, *tmp; in vgic_prune_ap_list() local
628 list_for_each_entry_safe(irq, tmp, &vgic_cpu->ap_list_head, ap_list) { in vgic_prune_ap_list()
632 raw_spin_lock(&irq->irq_lock); in vgic_prune_ap_list()
634 BUG_ON(vcpu != irq->vcpu); in vgic_prune_ap_list()
636 target_vcpu = vgic_target_oracle(irq); in vgic_prune_ap_list()
643 list_del(&irq->ap_list); in vgic_prune_ap_list()
644 irq->vcpu = NULL; in vgic_prune_ap_list()
645 raw_spin_unlock(&irq->irq_lock); in vgic_prune_ap_list()
651 * we remove the irq from the list, we drop in vgic_prune_ap_list()
654 vgic_put_irq(vcpu->kvm, irq); in vgic_prune_ap_list()
660 raw_spin_unlock(&irq->irq_lock); in vgic_prune_ap_list()
666 raw_spin_unlock(&irq->irq_lock); in vgic_prune_ap_list()
684 raw_spin_lock(&irq->irq_lock); in vgic_prune_ap_list()
695 if (target_vcpu == vgic_target_oracle(irq)) { in vgic_prune_ap_list()
698 list_del(&irq->ap_list); in vgic_prune_ap_list()
699 irq->vcpu = target_vcpu; in vgic_prune_ap_list()
700 list_add_tail(&irq->ap_list, &new_cpu->ap_list_head); in vgic_prune_ap_list()
704 raw_spin_unlock(&irq->irq_lock); in vgic_prune_ap_list()
729 struct vgic_irq *irq, int lr) in vgic_populate_lr() argument
731 lockdep_assert_held(&irq->irq_lock); in vgic_populate_lr()
734 vgic_v2_populate_lr(vcpu, irq, lr); in vgic_populate_lr()
736 vgic_v3_populate_lr(vcpu, irq, lr); in vgic_populate_lr()
760 struct vgic_irq *irq; in compute_ap_list_depth() local
767 list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { in compute_ap_list_depth()
770 raw_spin_lock(&irq->irq_lock); in compute_ap_list_depth()
772 w = vgic_irq_get_lr_count(irq); in compute_ap_list_depth()
773 raw_spin_unlock(&irq->irq_lock); in compute_ap_list_depth()
785 struct vgic_irq *irq; in vgic_flush_lr_state() local
799 list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { in vgic_flush_lr_state()
800 raw_spin_lock(&irq->irq_lock); in vgic_flush_lr_state()
804 * guarantee that they are all seen before any IRQ of in vgic_flush_lr_state()
809 if (multi_sgi && irq->priority > prio) { in vgic_flush_lr_state()
810 _raw_spin_unlock(&irq->irq_lock); in vgic_flush_lr_state()
814 if (likely(vgic_target_oracle(irq) == vcpu)) { in vgic_flush_lr_state()
815 vgic_populate_lr(vcpu, irq, count++); in vgic_flush_lr_state()
817 if (irq->source) in vgic_flush_lr_state()
818 prio = irq->priority; in vgic_flush_lr_state()
821 raw_spin_unlock(&irq->irq_lock); in vgic_flush_lr_state()
824 if (!list_is_last(&irq->ap_list, in vgic_flush_lr_state()
954 struct vgic_irq *irq; in kvm_vgic_vcpu_pending_irq() local
969 list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) { in kvm_vgic_vcpu_pending_irq()
970 raw_spin_lock(&irq->irq_lock); in kvm_vgic_vcpu_pending_irq()
971 pending = irq_is_pending(irq) && irq->enabled && in kvm_vgic_vcpu_pending_irq()
972 !irq->active && in kvm_vgic_vcpu_pending_irq()
973 irq->priority < vmcr.pmr; in kvm_vgic_vcpu_pending_irq()
974 raw_spin_unlock(&irq->irq_lock); in kvm_vgic_vcpu_pending_irq()
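
The kvm_vgic_vcpu_pending_irq() fragments give the wakeup condition: an interrupt on the ap_list makes the vCPU runnable only if it is pending, enabled, not already active, and its priority beats the priority mask (on the GIC a lower number is more urgent, hence the priority < PMR comparison). A self-contained sketch of that predicate over a plain array, with struct virq again an illustrative reduction:

#include <stdbool.h>
#include <stdio.h>

struct virq {
    bool pending, enabled, active;
    unsigned char priority;       /* GIC convention: lower value = more urgent */
};

/* Does any interrupt in the list make the vCPU runnable, given the PMR? */
static bool vcpu_has_pending(const struct virq *list, int n, unsigned char pmr)
{
    for (int i = 0; i < n; i++) {
        const struct virq *irq = &list[i];

        if (irq->pending && irq->enabled && !irq->active &&
            irq->priority < pmr)
            return true;
    }
    return false;
}

int main(void)
{
    struct virq ap_list[] = {
        { .pending = true, .enabled = true, .active = true,  .priority = 0x20 },
        { .pending = true, .enabled = true, .active = false, .priority = 0xa0 },
    };

    printf("pmr 0x80: %d\n", vcpu_has_pending(ap_list, 2, 0x80)); /* masked   */
    printf("pmr 0xf0: %d\n", vcpu_has_pending(ap_list, 2, 0xf0)); /* delivers */
    return 0;
}
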
1004 struct vgic_irq *irq; in kvm_vgic_map_is_active() local
1011 irq = vgic_get_irq(vcpu->kvm, vcpu, vintid); in kvm_vgic_map_is_active()
1012 raw_spin_lock_irqsave(&irq->irq_lock, flags); in kvm_vgic_map_is_active()
1013 map_is_active = irq->hw && irq->active; in kvm_vgic_map_is_active()
1014 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in kvm_vgic_map_is_active()
1015 vgic_put_irq(vcpu->kvm, irq); in kvm_vgic_map_is_active()
1040 void vgic_irq_handle_resampling(struct vgic_irq *irq, in vgic_irq_handle_resampling() argument
1043 if (vgic_irq_is_mapped_level(irq)) { in vgic_irq_handle_resampling()
1046 if (unlikely(vgic_irq_needs_resampling(irq))) { in vgic_irq_handle_resampling()
1047 resample = !(irq->active || irq->pending_latch); in vgic_irq_handle_resampling()
1048 } else if (lr_pending || (lr_deactivated && irq->line_level)) { in vgic_irq_handle_resampling()
1049 irq->line_level = vgic_get_phys_line_level(irq); in vgic_irq_handle_resampling()
1050 resample = !irq->line_level; in vgic_irq_handle_resampling()
1054 vgic_irq_set_phys_active(irq, false); in vgic_irq_handle_resampling()
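
The final fragments, from vgic_irq_handle_resampling(), deal with forwarded (hw-mapped) level interrupts after the guest has run: if the device asked for explicit resampling, the interrupt is resampled once it is neither active nor pending; otherwise the physical line level is re-read when the LR was left pending, or was deactivated while the virtual line was high, and a dropped line leads to deactivating the physical interrupt so it can fire again. A sketch of that decision follows, with bools standing in for the LR state and a stub replacing the irqchip call; as everywhere above, the types and helpers are illustrative.

#include <stdbool.h>
#include <stdio.h>

struct virq {
    bool mapped_level;       /* hw-mapped, level-sensitive              */
    bool needs_resampling;   /* device wants an explicit resample step  */
    bool active, pending_latch, line_level;
};

/* Stand-in for vgic_get_phys_line_level(): pretend the line has dropped. */
static bool read_phys_line(void) { return false; }

/* Stand-in for vgic_irq_set_phys_active(irq, false). */
static void deactivate_phys(void) { printf("physical IRQ deactivated\n"); }

static void handle_resampling(struct virq *irq, bool lr_pending, bool lr_deactivated)
{
    bool resample = false;

    if (!irq->mapped_level)
        return;

    if (irq->needs_resampling) {
        resample = !(irq->active || irq->pending_latch);
    } else if (lr_pending || (lr_deactivated && irq->line_level)) {
        irq->line_level = read_phys_line();
        resample = !irq->line_level;
    }

    if (resample)
        deactivate_phys();
}

int main(void)
{
    struct virq irq = { .mapped_level = true, .line_level = true };

    handle_resampling(&irq, false, true);   /* guest EOId it, line dropped */
    return 0;
}
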