Lines matching full:irq in vgic-mmio.c (KVM/arm64 VGIC MMIO emulation). Each entry shows the source line number, the matching line, and the enclosing function.
9 #include <linux/irq.h>
53 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); in vgic_mmio_read_group() local
55 if (irq->group) in vgic_mmio_read_group()
58 vgic_put_irq(vcpu->kvm, irq); in vgic_mmio_read_group()
64 static void vgic_update_vsgi(struct vgic_irq *irq) in vgic_update_vsgi() argument
66 WARN_ON(its_prop_update_vsgi(irq->host_irq, irq->priority, irq->group)); in vgic_update_vsgi()
77 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); in vgic_mmio_write_group() local
79 raw_spin_lock_irqsave(&irq->irq_lock, flags); in vgic_mmio_write_group()
80 irq->group = !!(val & BIT(i)); in vgic_mmio_write_group()
81 if (irq->hw && vgic_irq_is_sgi(irq->intid)) { in vgic_mmio_write_group()
82 vgic_update_vsgi(irq); in vgic_mmio_write_group()
83 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in vgic_mmio_write_group()
85 vgic_queue_irq_unlock(vcpu->kvm, irq, flags); in vgic_mmio_write_group()
88 vgic_put_irq(vcpu->kvm, irq); in vgic_mmio_write_group()
105 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); in vgic_mmio_read_enable() local
107 if (irq->enabled) in vgic_mmio_read_enable()
110 vgic_put_irq(vcpu->kvm, irq); in vgic_mmio_read_enable()
125 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); in vgic_mmio_write_senable() local
127 raw_spin_lock_irqsave(&irq->irq_lock, flags); in vgic_mmio_write_senable()
128 if (irq->hw && vgic_irq_is_sgi(irq->intid)) { in vgic_mmio_write_senable()
129 if (!irq->enabled) { in vgic_mmio_write_senable()
132 irq->enabled = true; in vgic_mmio_write_senable()
133 data = &irq_to_desc(irq->host_irq)->irq_data; in vgic_mmio_write_senable()
135 enable_irq(irq->host_irq); in vgic_mmio_write_senable()
138 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in vgic_mmio_write_senable()
139 vgic_put_irq(vcpu->kvm, irq); in vgic_mmio_write_senable()
142 } else if (vgic_irq_is_mapped_level(irq)) { in vgic_mmio_write_senable()
143 bool was_high = irq->line_level; in vgic_mmio_write_senable()
150 irq->line_level = vgic_get_phys_line_level(irq); in vgic_mmio_write_senable()
155 if (!irq->active && was_high && !irq->line_level) in vgic_mmio_write_senable()
156 vgic_irq_set_phys_active(irq, false); in vgic_mmio_write_senable()
158 irq->enabled = true; in vgic_mmio_write_senable()
159 vgic_queue_irq_unlock(vcpu->kvm, irq, flags); in vgic_mmio_write_senable()
161 vgic_put_irq(vcpu->kvm, irq); in vgic_mmio_write_senable()
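The matches in vgic_mmio_write_senable() cover two special cases on the enable path: a HW-backed SGI has its host interrupt enabled via enable_irq(), and a mapped level interrupt has its physical line resampled, with the physical IRQ deactivated if the line dropped while the interrupt was disabled, so the GIC will signal it again. A sketch of the loop body reconstructed from the matches; the for_each_set_bit() iteration and the irqd_irq_disabled() loop around enable_irq() are assumptions about the surrounding structure that the matches do not show:

    for_each_set_bit(i, &val, len * 8) {
        struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

        raw_spin_lock_irqsave(&irq->irq_lock, flags);
        if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
            if (!irq->enabled) {
                struct irq_data *data;

                irq->enabled = true;
                data = &irq_to_desc(irq->host_irq)->irq_data;
                while (irqd_irq_disabled(data))
                    enable_irq(irq->host_irq);
            }

            raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
            vgic_put_irq(vcpu->kvm, irq);
            continue;
        } else if (vgic_irq_is_mapped_level(irq)) {
            bool was_high = irq->line_level;

            /* The device may have changed state while the IRQ was disabled */
            irq->line_level = vgic_get_phys_line_level(irq);

            /* Line dropped: deactivate the physical IRQ so it can fire again */
            if (!irq->active && was_high && !irq->line_level)
                vgic_irq_set_phys_active(irq, false);
        }

        irq->enabled = true;
        vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
        vgic_put_irq(vcpu->kvm, irq);
    }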
174 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); in vgic_mmio_write_cenable() local
176 raw_spin_lock_irqsave(&irq->irq_lock, flags); in vgic_mmio_write_cenable()
177 if (irq->hw && vgic_irq_is_sgi(irq->intid) && irq->enabled) in vgic_mmio_write_cenable()
178 disable_irq_nosync(irq->host_irq); in vgic_mmio_write_cenable()
180 irq->enabled = false; in vgic_mmio_write_cenable()
182 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in vgic_mmio_write_cenable()
183 vgic_put_irq(vcpu->kvm, irq); in vgic_mmio_write_cenable()
196 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); in vgic_uaccess_write_senable() local
198 raw_spin_lock_irqsave(&irq->irq_lock, flags); in vgic_uaccess_write_senable()
199 irq->enabled = true; in vgic_uaccess_write_senable()
200 vgic_queue_irq_unlock(vcpu->kvm, irq, flags); in vgic_uaccess_write_senable()
202 vgic_put_irq(vcpu->kvm, irq); in vgic_uaccess_write_senable()
217 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); in vgic_uaccess_write_cenable() local
219 raw_spin_lock_irqsave(&irq->irq_lock, flags); in vgic_uaccess_write_cenable()
220 irq->enabled = false; in vgic_uaccess_write_cenable()
221 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in vgic_uaccess_write_cenable()
223 vgic_put_irq(vcpu->kvm, irq); in vgic_uaccess_write_cenable()
239 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); in __read_pending() local
252 raw_spin_lock_irqsave(&irq->irq_lock, flags); in __read_pending()
253 if (irq->hw && vgic_irq_is_sgi(irq->intid)) { in __read_pending()
257 err = irq_get_irqchip_state(irq->host_irq, in __read_pending()
260 WARN_RATELIMIT(err, "IRQ %d", irq->host_irq); in __read_pending()
261 } else if (!is_user && vgic_irq_is_mapped_level(irq)) { in __read_pending()
262 val = vgic_get_phys_line_level(irq); in __read_pending()
267 val = irq->pending_latch; in __read_pending()
272 val = irq_is_pending(irq); in __read_pending()
278 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in __read_pending()
280 vgic_put_irq(vcpu->kvm, irq); in __read_pending()
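The branches matched in __read_pending() select where the pending state actually lives: a HW-backed SGI is queried from the host irqchip, a guest read of a mapped level interrupt reflects the physical line, and otherwise the software state is used, with pending_latch preferred for userspace (is_user) accesses. A sketch of the per-interrupt decision, with the surrounding loop and result accumulation omitted; restricting the pending_latch case to GICv3 is an assumption about context not visible in the matches:

    bool pending;

    raw_spin_lock_irqsave(&irq->irq_lock, flags);
    if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
        int err;

        /* HW SGI (vSGI): the host GIC owns the pending state, ask it */
        pending = false;
        err = irq_get_irqchip_state(irq->host_irq,
                                    IRQCHIP_STATE_PENDING,
                                    &pending);
        WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);
    } else if (!is_user && vgic_irq_is_mapped_level(irq)) {
        /* Guest read of a mapped level IRQ: report the physical line */
        pending = vgic_get_phys_line_level(irq);
    } else if (is_user &&
               vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
        /* Userspace save/restore only cares about the software latch */
        pending = irq->pending_latch;
    } else {
        pending = irq_is_pending(irq);
    }
    raw_spin_unlock_irqrestore(&irq->irq_lock, flags);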
298 static bool is_vgic_v2_sgi(struct kvm_vcpu *vcpu, struct vgic_irq *irq) in is_vgic_v2_sgi() argument
300 return (vgic_irq_is_sgi(irq->intid) && in is_vgic_v2_sgi()
312 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); in __set_pending() local
315 if (is_vgic_v2_sgi(vcpu, irq) && !is_user) { in __set_pending()
316 vgic_put_irq(vcpu->kvm, irq); in __set_pending()
320 raw_spin_lock_irqsave(&irq->irq_lock, flags); in __set_pending()
327 if (is_vgic_v2_sgi(vcpu, irq)) in __set_pending()
328 irq->source |= BIT(vcpu->vcpu_id); in __set_pending()
330 if (irq->hw && vgic_irq_is_sgi(irq->intid)) { in __set_pending()
333 err = irq_set_irqchip_state(irq->host_irq, in __set_pending()
336 WARN_RATELIMIT(err, "IRQ %d", irq->host_irq); in __set_pending()
338 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in __set_pending()
339 vgic_put_irq(vcpu->kvm, irq); in __set_pending()
344 irq->pending_latch = true; in __set_pending()
345 if (irq->hw && !is_user) in __set_pending()
346 vgic_irq_set_phys_active(irq, true); in __set_pending()
348 vgic_queue_irq_unlock(vcpu->kvm, irq, flags); in __set_pending()
349 vgic_put_irq(vcpu->kvm, irq); in __set_pending()
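In __set_pending(), guest writes to the GICv2 SGI pending bits are ignored, while userspace writes record the writer as the SGI source. HW-backed SGIs have their pending state forwarded to the host irqchip; everything else sets pending_latch, optionally marks the physical IRQ active, and is requeued. A sketch of the loop body reconstructed from the matches; the comments and the exact continue points are assumptions about the surrounding structure:

    for_each_set_bit(i, &val, len * 8) {
        struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

        /* GICv2 SGI pending bits are not writable from the guest */
        if (is_vgic_v2_sgi(vcpu, irq) && !is_user) {
            vgic_put_irq(vcpu->kvm, irq);
            continue;
        }

        raw_spin_lock_irqsave(&irq->irq_lock, flags);

        /* Userspace restore of a v2 SGI: record the writer as the source */
        if (is_vgic_v2_sgi(vcpu, irq))
            irq->source |= BIT(vcpu->vcpu_id);

        if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
            /* HW SGI (vSGI): make the host irqchip pend it */
            int err = irq_set_irqchip_state(irq->host_irq,
                                            IRQCHIP_STATE_PENDING,
                                            true);
            WARN_RATELIMIT(err, "IRQ %d", irq->host_irq);

            raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
            vgic_put_irq(vcpu->kvm, irq);
            continue;
        }

        irq->pending_latch = true;
        if (irq->hw && !is_user)
            vgic_irq_set_phys_active(irq, true);

        vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
        vgic_put_irq(vcpu->kvm, irq);
    }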
368 /* Must be called with irq->irq_lock held */
369 static void vgic_hw_irq_cpending(struct kvm_vcpu *vcpu, struct vgic_irq *irq) in vgic_hw_irq_cpending() argument
371 irq->pending_latch = false; in vgic_hw_irq_cpending()
384 vgic_irq_set_phys_pending(irq, false); in vgic_hw_irq_cpending()
385 if (!irq->active) in vgic_hw_irq_cpending()
386 vgic_irq_set_phys_active(irq, false); in vgic_hw_irq_cpending()
398 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); in __clear_pending() local
401 if (is_vgic_v2_sgi(vcpu, irq) && !is_user) { in __clear_pending()
402 vgic_put_irq(vcpu->kvm, irq); in __clear_pending()
406 raw_spin_lock_irqsave(&irq->irq_lock, flags); in __clear_pending()
413 if (is_vgic_v2_sgi(vcpu, irq)) in __clear_pending()
414 irq->source = 0; in __clear_pending()
416 if (irq->hw && vgic_irq_is_sgi(irq->intid)) { in __clear_pending()
419 err = irq_set_irqchip_state(irq->host_irq, in __clear_pending()
422 WARN_RATELIMIT(err, "IRQ %d", irq->host_irq); in __clear_pending()
424 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in __clear_pending()
425 vgic_put_irq(vcpu->kvm, irq); in __clear_pending()
430 if (irq->hw && !is_user) in __clear_pending()
431 vgic_hw_irq_cpending(vcpu, irq); in __clear_pending()
433 irq->pending_latch = false; in __clear_pending()
435 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in __clear_pending()
436 vgic_put_irq(vcpu->kvm, irq); in __clear_pending()
456 * If we are fiddling with an IRQ's active state, we have to make sure the IRQ
463 * migrated while we don't hold the IRQ locks and we don't want to be chasing
497 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); in __vgic_mmio_read_active() local
503 if (irq->active) in __vgic_mmio_read_active()
506 vgic_put_irq(vcpu->kvm, irq); in __vgic_mmio_read_active()
535 /* Must be called with irq->irq_lock held */
536 static void vgic_hw_irq_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq, in vgic_hw_irq_change_active() argument
542 irq->active = active; in vgic_hw_irq_change_active()
543 vgic_irq_set_phys_active(irq, active); in vgic_hw_irq_change_active()
546 static void vgic_mmio_change_active(struct kvm_vcpu *vcpu, struct vgic_irq *irq, in vgic_mmio_change_active() argument
552 raw_spin_lock_irqsave(&irq->irq_lock, flags); in vgic_mmio_change_active()
554 if (irq->hw && !vgic_irq_is_sgi(irq->intid)) { in vgic_mmio_change_active()
555 vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu); in vgic_mmio_change_active()
556 } else if (irq->hw && vgic_irq_is_sgi(irq->intid)) { in vgic_mmio_change_active()
562 irq->active = false; in vgic_mmio_change_active()
567 irq->active = active; in vgic_mmio_change_active()
583 active && vgic_irq_is_sgi(irq->intid)) in vgic_mmio_change_active()
584 irq->active_source = active_source; in vgic_mmio_change_active()
587 if (irq->active) in vgic_mmio_change_active()
588 vgic_queue_irq_unlock(vcpu->kvm, irq, flags); in vgic_mmio_change_active()
590 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in vgic_mmio_change_active()
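vgic_mmio_change_active() dispatches on the interrupt type: a HW-mapped (non-SGI) interrupt has both the virtual and the physical active state changed, a HW-backed SGI has no active state to restore (GICv4.1 vSGIs), and a purely virtual interrupt just updates irq->active, additionally recording the requesting VCPU as active_source for GICv2 SGIs. A sketch of the control flow; the kvm_get_running_vcpu() and vgic_model lookups are assumptions about surrounding context that the matches only hint at:

    struct kvm_vcpu *requester_vcpu = kvm_get_running_vcpu();
    unsigned long flags;

    raw_spin_lock_irqsave(&irq->irq_lock, flags);

    if (irq->hw && !vgic_irq_is_sgi(irq->intid)) {
        /* Mapped HW interrupt: mirror the change to the physical GIC */
        vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu);
    } else if (irq->hw && vgic_irq_is_sgi(irq->intid)) {
        /* GICv4.1 vSGIs have no architected active state we could restore */
        irq->active = false;
    } else {
        irq->active = active;

        /* GICv2 SGIs: remember which VCPU activated the SGI, for EOI */
        if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2 &&
            active && vgic_irq_is_sgi(irq->intid))
            irq->active_source = requester_vcpu ? requester_vcpu->vcpu_id : 0;
    }

    if (irq->active)
        vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
    else
        raw_spin_unlock_irqrestore(&irq->irq_lock, flags);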
601 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); in __vgic_mmio_write_cactive() local
602 vgic_mmio_change_active(vcpu, irq, false); in __vgic_mmio_write_cactive()
603 vgic_put_irq(vcpu->kvm, irq); in __vgic_mmio_write_cactive()
638 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); in __vgic_mmio_write_sactive() local
639 vgic_mmio_change_active(vcpu, irq, true); in __vgic_mmio_write_sactive()
640 vgic_put_irq(vcpu->kvm, irq); in __vgic_mmio_write_sactive()
675 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); in vgic_mmio_read_priority() local
677 val |= (u64)irq->priority << (i * 8); in vgic_mmio_read_priority()
679 vgic_put_irq(vcpu->kvm, irq); in vgic_mmio_read_priority()
701 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); in vgic_mmio_write_priority() local
703 raw_spin_lock_irqsave(&irq->irq_lock, flags); in vgic_mmio_write_priority()
705 irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS); in vgic_mmio_write_priority()
706 if (irq->hw && vgic_irq_is_sgi(irq->intid)) in vgic_mmio_write_priority()
707 vgic_update_vsgi(irq); in vgic_mmio_write_priority()
708 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in vgic_mmio_write_priority()
710 vgic_put_irq(vcpu->kvm, irq); in vgic_mmio_write_priority()
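The priority write matched above narrows each 8-bit field to the VGIC_PRI_BITS the emulation actually implements: GENMASK(7, 8 - VGIC_PRI_BITS) keeps only the top bits, so with 5 priority bits the mask is 0xf8 and a guest write of 0x07 reads back as 0x00. A sketch of the per-byte loop, with HW SGIs additionally pushing the new priority to the host; the one-byte-per-interrupt iteration is assumed from the register layout rather than taken from the matches:

    for (i = 0; i < len; i++) {
        struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);

        raw_spin_lock_irqsave(&irq->irq_lock, flags);
        /* Keep only the top VGIC_PRI_BITS bits of the written priority */
        irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS);
        if (irq->hw && vgic_irq_is_sgi(irq->intid))
            vgic_update_vsgi(irq);
        raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

        vgic_put_irq(vcpu->kvm, irq);
    }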
722 struct vgic_irq *irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); in vgic_mmio_read_config() local
724 if (irq->config == VGIC_CONFIG_EDGE) in vgic_mmio_read_config()
727 vgic_put_irq(vcpu->kvm, irq); in vgic_mmio_read_config()
742 struct vgic_irq *irq; in vgic_mmio_write_config() local
753 irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); in vgic_mmio_write_config()
754 raw_spin_lock_irqsave(&irq->irq_lock, flags); in vgic_mmio_write_config()
757 irq->config = VGIC_CONFIG_EDGE; in vgic_mmio_write_config()
759 irq->config = VGIC_CONFIG_LEVEL; in vgic_mmio_write_config()
761 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in vgic_mmio_write_config()
762 vgic_put_irq(vcpu->kvm, irq); in vgic_mmio_write_config()
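vgic_mmio_write_config() interprets two configuration bits per interrupt and uses only the upper bit of each pair to choose edge or level triggering. A sketch of the matched loop body; the test_bit(i * 2 + 1, ...) indexing and the skipping of private interrupts (whose configuration is treated as read-only) are assumptions about the surrounding code that the matches do not show:

    for (i = 0; i < len * 4; i++) {
        struct vgic_irq *irq;

        /* SGI/PPI configuration is treated as read-only */
        if (intid + i < VGIC_NR_PRIVATE_IRQS)
            continue;

        irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i);
        raw_spin_lock_irqsave(&irq->irq_lock, flags);

        if (test_bit(i * 2 + 1, &val))
            irq->config = VGIC_CONFIG_EDGE;
        else
            irq->config = VGIC_CONFIG_LEVEL;

        raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
        vgic_put_irq(vcpu->kvm, irq);
    }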
773 struct vgic_irq *irq; in vgic_read_irq_line_level_info() local
778 irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); in vgic_read_irq_line_level_info()
779 if (irq->config == VGIC_CONFIG_LEVEL && irq->line_level) in vgic_read_irq_line_level_info()
782 vgic_put_irq(vcpu->kvm, irq); in vgic_read_irq_line_level_info()
796 struct vgic_irq *irq; in vgic_write_irq_line_level_info() local
802 irq = vgic_get_irq(vcpu->kvm, vcpu, intid + i); in vgic_write_irq_line_level_info()
805 * Line level is set irrespective of irq type in vgic_write_irq_line_level_info()
807 * restore irq config before line level. in vgic_write_irq_line_level_info()
810 raw_spin_lock_irqsave(&irq->irq_lock, flags); in vgic_write_irq_line_level_info()
811 irq->line_level = new_level; in vgic_write_irq_line_level_info()
813 vgic_queue_irq_unlock(vcpu->kvm, irq, flags); in vgic_write_irq_line_level_info()
815 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in vgic_write_irq_line_level_info()
817 vgic_put_irq(vcpu->kvm, irq); in vgic_write_irq_line_level_info()
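As the in-line comment fragments above note, userspace restore writes the line level regardless of the configured trigger type, so the result does not depend on whether the configuration was restored first. A sketch of the matched core: the new level is latched, and a rising level requeues the interrupt; the bit extraction from val and the plain unlock on the falling path are assumptions about the surrounding loop:

    bool new_level = !!(val & BIT(i));

    raw_spin_lock_irqsave(&irq->irq_lock, flags);
    irq->line_level = new_level;
    if (new_level)
        /* Level went high: see whether the IRQ must be queued; drops the lock */
        vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
    else
        raw_spin_unlock_irqrestore(&irq->irq_lock, flags);

    vgic_put_irq(vcpu->kvm, irq);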
942 /* Do we access a non-allocated IRQ? */ in check_region()