Lines Matching +full:data +full:- +full:active
(Search hits in KVM's vgic-mmio.c; each entry below gives the file line number, the matching source line, and the enclosing function.)
1 // SPDX-License-Identifier: GPL-2.0-only
17 #include "vgic-mmio.h"
28 return -1UL; in vgic_mmio_read_rao()
55 if (irq->group) in vgic_mmio_read_group()
58 vgic_put_irq(vcpu->kvm, irq); in vgic_mmio_read_group()
66 WARN_ON(its_prop_update_vsgi(irq->host_irq, irq->priority, irq->group)); in vgic_update_vsgi()
79 raw_spin_lock_irqsave(&irq->irq_lock, flags); in vgic_mmio_write_group()
80 irq->group = !!(val & BIT(i)); in vgic_mmio_write_group()
81 if (irq->hw && vgic_irq_is_sgi(irq->intid)) { in vgic_mmio_write_group()
83 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in vgic_mmio_write_group()
85 vgic_queue_irq_unlock(vcpu->kvm, irq, flags); in vgic_mmio_write_group()
88 vgic_put_irq(vcpu->kvm, irq); in vgic_mmio_write_group()
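
The IGROUPR fragments above set one group bit per interrupt from a single 32-bit register write. A minimal userspace sketch of that bit-per-INTID decode, mirroring the loop shape of vgic_mmio_write_group() (the struct and helper here are illustrative, not kernel types):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fake_irq {
	bool group;	/* true: Group 1, false: Group 0 */
};

/* Bit i of the written word selects the group of INTID base + i. */
static void write_group(struct fake_irq *irqs, unsigned int base, uint32_t val)
{
	for (unsigned int i = 0; i < 32; i++)
		irqs[base + i].group = !!(val & (1U << i));
}

int main(void)
{
	struct fake_irq irqs[64] = { 0 };

	write_group(irqs, 32, 0x5);	/* INTIDs 32 and 34 move to Group 1 */
	printf("intid 32: %d, intid 33: %d, intid 34: %d\n",
	       irqs[32].group, irqs[33].group, irqs[34].group);
	return 0;
}
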
107 if (irq->enabled) in vgic_mmio_read_enable()
110 vgic_put_irq(vcpu->kvm, irq); in vgic_mmio_read_enable()
127 raw_spin_lock_irqsave(&irq->irq_lock, flags); in vgic_mmio_write_senable()
128 if (irq->hw && vgic_irq_is_sgi(irq->intid)) { in vgic_mmio_write_senable()
129 if (!irq->enabled) { in vgic_mmio_write_senable()
130 struct irq_data *data; in vgic_mmio_write_senable() local
132 irq->enabled = true; in vgic_mmio_write_senable()
133 data = &irq_to_desc(irq->host_irq)->irq_data; in vgic_mmio_write_senable()
134 while (irqd_irq_disabled(data)) in vgic_mmio_write_senable()
135 enable_irq(irq->host_irq); in vgic_mmio_write_senable()
138 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in vgic_mmio_write_senable()
139 vgic_put_irq(vcpu->kvm, irq); in vgic_mmio_write_senable()
143 bool was_high = irq->line_level; in vgic_mmio_write_senable()
150 irq->line_level = vgic_get_phys_line_level(irq); in vgic_mmio_write_senable()
155 if (!irq->active && was_high && !irq->line_level) in vgic_mmio_write_senable()
158 irq->enabled = true; in vgic_mmio_write_senable()
159 vgic_queue_irq_unlock(vcpu->kvm, irq, flags); in vgic_mmio_write_senable()
161 vgic_put_irq(vcpu->kvm, irq); in vgic_mmio_write_senable()
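
When a level-triggered interrupt is re-enabled, the write_senable() path above resamples the physical line so a level that dropped while the interrupt was disabled is not delivered as a stale "high". A rough userspace-only sketch of that check (read_phys_line(), deactivate_phys() and the struct are stand-ins, not kernel APIs):

#include <stdbool.h>

struct fake_irq {
	bool enabled;
	bool active;
	bool line_level;	/* level cached while the interrupt was disabled */
};

static bool read_phys_line(void) { return false; }		/* stand-in sample */
static void deactivate_phys(struct fake_irq *irq) { (void)irq; }	/* stub */

static void enable_level_irq(struct fake_irq *irq)
{
	bool was_high = irq->line_level;

	/* Re-read the real line: it may have dropped while disabled. */
	irq->line_level = read_phys_line();

	/* Stale high and nothing in flight: drop it instead of injecting it. */
	if (!irq->active && was_high && !irq->line_level)
		deactivate_phys(irq);

	irq->enabled = true;
}

int main(void)
{
	struct fake_irq irq = { .line_level = true };

	enable_level_irq(&irq);
	return irq.line_level;	/* 0: the stale level was discarded */
}
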
176 raw_spin_lock_irqsave(&irq->irq_lock, flags); in vgic_mmio_write_cenable()
177 if (irq->hw && vgic_irq_is_sgi(irq->intid) && irq->enabled) in vgic_mmio_write_cenable()
178 disable_irq_nosync(irq->host_irq); in vgic_mmio_write_cenable()
180 irq->enabled = false; in vgic_mmio_write_cenable()
182 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in vgic_mmio_write_cenable()
183 vgic_put_irq(vcpu->kvm, irq); in vgic_mmio_write_cenable()
198 raw_spin_lock_irqsave(&irq->irq_lock, flags); in vgic_uaccess_write_senable()
199 irq->enabled = true; in vgic_uaccess_write_senable()
200 vgic_queue_irq_unlock(vcpu->kvm, irq, flags); in vgic_uaccess_write_senable()
202 vgic_put_irq(vcpu->kvm, irq); in vgic_uaccess_write_senable()
219 raw_spin_lock_irqsave(&irq->irq_lock, flags); in vgic_uaccess_write_cenable()
220 irq->enabled = false; in vgic_uaccess_write_cenable()
221 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in vgic_uaccess_write_cenable()
223 vgic_put_irq(vcpu->kvm, irq); in vgic_uaccess_write_cenable()
249 * Refer to Documentation/virt/kvm/devices/arm-vgic-v3.rst in __read_pending()
252 raw_spin_lock_irqsave(&irq->irq_lock, flags); in __read_pending()
253 if (irq->hw && vgic_irq_is_sgi(irq->intid)) { in __read_pending()
257 err = irq_get_irqchip_state(irq->host_irq, in __read_pending()
260 WARN_RATELIMIT(err, "IRQ %d", irq->host_irq); in __read_pending()
264 switch (vcpu->kvm->arch.vgic.vgic_model) { in __read_pending()
267 val = irq->pending_latch; in __read_pending()
278 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in __read_pending()
280 vgic_put_irq(vcpu->kvm, irq); in __read_pending()
300 return (vgic_irq_is_sgi(irq->intid) && in is_vgic_v2_sgi()
301 vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V2); in is_vgic_v2_sgi()
316 vgic_put_irq(vcpu->kvm, irq); in __set_pending()
320 raw_spin_lock_irqsave(&irq->irq_lock, flags); in __set_pending()
328 irq->source |= BIT(vcpu->vcpu_id); in __set_pending()
330 if (irq->hw && vgic_irq_is_sgi(irq->intid)) { in __set_pending()
333 err = irq_set_irqchip_state(irq->host_irq, in __set_pending()
336 WARN_RATELIMIT(err, "IRQ %d", irq->host_irq); in __set_pending()
338 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in __set_pending()
339 vgic_put_irq(vcpu->kvm, irq); in __set_pending()
344 irq->pending_latch = true; in __set_pending()
345 if (irq->hw && !is_user) in __set_pending()
348 vgic_queue_irq_unlock(vcpu->kvm, irq, flags); in __set_pending()
349 vgic_put_irq(vcpu->kvm, irq); in __set_pending()
368 /* Must be called with irq->irq_lock held */
371 irq->pending_latch = false; in vgic_hw_irq_cpending()
376 * CPENDR for HW interrupts, so we clear the active state on in vgic_hw_irq_cpending()
377 * the physical side if the virtual interrupt is not active. in vgic_hw_irq_cpending()
385 if (!irq->active) in vgic_hw_irq_cpending()
402 vgic_put_irq(vcpu->kvm, irq); in __clear_pending()
406 raw_spin_lock_irqsave(&irq->irq_lock, flags); in __clear_pending()
414 irq->source = 0; in __clear_pending()
416 if (irq->hw && vgic_irq_is_sgi(irq->intid)) { in __clear_pending()
419 err = irq_set_irqchip_state(irq->host_irq, in __clear_pending()
422 WARN_RATELIMIT(err, "IRQ %d", irq->host_irq); in __clear_pending()
424 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in __clear_pending()
425 vgic_put_irq(vcpu->kvm, irq); in __clear_pending()
430 if (irq->hw && !is_user) in __clear_pending()
433 irq->pending_latch = false; in __clear_pending()
435 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in __clear_pending()
436 vgic_put_irq(vcpu->kvm, irq); in __clear_pending()
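
For GICv2-style SGIs, the __set_pending()/__clear_pending() fragments track which CPU requested the interrupt in a per-source bitmap (irq->source |= BIT(vcpu_id), cleared wholesale on ICPENDR). A simplified model of that bookkeeping, assuming an 8-bit source mask as in GICv2 and treating the SGI as pending while any source bit is set:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fake_sgi {
	uint8_t source;		/* one bit per requesting CPU (up to 8 in GICv2) */
};

static void set_pending(struct fake_sgi *sgi, unsigned int cpu)
{
	sgi->source |= 1U << cpu;	/* remember which CPU raised it */
}

static void clear_pending(struct fake_sgi *sgi)
{
	sgi->source = 0;		/* ICPENDR drops every source at once */
}

static bool is_pending(const struct fake_sgi *sgi)
{
	return sgi->source != 0;
}

int main(void)
{
	struct fake_sgi sgi = { 0 };

	set_pending(&sgi, 0);
	set_pending(&sgi, 3);
	printf("mask 0x%02x pending %d\n", sgi.source, is_pending(&sgi));	/* 0x09 1 */
	clear_pending(&sgi);
	printf("mask 0x%02x pending %d\n", sgi.source, is_pending(&sgi));	/* 0x00 0 */
	return 0;
}
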
456 * If we are fiddling with an IRQ's active state, we have to make sure the IRQ
458 * active state can be overwritten when the VCPU's state is synced coming back
462 * non-owning CPU, we have to stop all the VCPUs because interrupts can be
469 * active state, which guarantees that the VCPU is not running.
473 if ((vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 && in vgic_access_active_prepare()
476 kvm_arm_halt_guest(vcpu->kvm); in vgic_access_active_prepare()
482 if ((vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3 && in vgic_access_active_finish()
485 kvm_arm_resume_guest(vcpu->kvm); in vgic_access_active_finish()
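
The comment and the prepare/finish helpers above explain why ISACTIVER/ICACTIVER traps stop the whole guest before touching the active state: otherwise a running VCPU could overwrite it when its state is synced back. A toy illustration of that bracketing pattern (halt_all_vcpus()/resume_all_vcpus() are stand-ins for kvm_arm_halt_guest()/kvm_arm_resume_guest(); nothing here is kernel code):

#include <stdbool.h>

struct fake_vm { bool vcpus_halted; };
struct fake_irq { bool active; };

static void halt_all_vcpus(struct fake_vm *vm)   { vm->vcpus_halted = true; }
static void resume_all_vcpus(struct fake_vm *vm) { vm->vcpus_halted = false; }

/* Change one interrupt's active state while no VCPU can observe or race it. */
static void set_active(struct fake_vm *vm, struct fake_irq *irq, bool active)
{
	halt_all_vcpus(vm);	/* prepare: guarantees the target VCPU is not running */
	irq->active = active;	/* cannot be overwritten by a later state sync */
	resume_all_vcpus(vm);	/* finish: let the guest continue */
}

int main(void)
{
	struct fake_vm vm = { 0 };
	struct fake_irq irq = { 0 };

	set_active(&vm, &irq, true);
	return !(irq.active && !vm.vcpus_halted);	/* 0 on success */
}
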
503 if (irq->active) in __vgic_mmio_read_active()
506 vgic_put_irq(vcpu->kvm, irq); in __vgic_mmio_read_active()
518 mutex_lock(&vcpu->kvm->arch.config_lock); in vgic_mmio_read_active()
524 mutex_unlock(&vcpu->kvm->arch.config_lock); in vgic_mmio_read_active()
535 /* Must be called with irq->irq_lock held */
537 bool active, bool is_uaccess) in vgic_hw_irq_change_active() argument
542 irq->active = active; in vgic_hw_irq_change_active()
543 vgic_irq_set_phys_active(irq, active); in vgic_hw_irq_change_active()
547 bool active) in vgic_mmio_change_active() argument
552 raw_spin_lock_irqsave(&irq->irq_lock, flags); in vgic_mmio_change_active()
554 if (irq->hw && !vgic_irq_is_sgi(irq->intid)) { in vgic_mmio_change_active()
555 vgic_hw_irq_change_active(vcpu, irq, active, !requester_vcpu); in vgic_mmio_change_active()
556 } else if (irq->hw && vgic_irq_is_sgi(irq->intid)) { in vgic_mmio_change_active()
558 * GICv4.1 VSGI feature doesn't track an active state, in vgic_mmio_change_active()
562 irq->active = false; in vgic_mmio_change_active()
564 u32 model = vcpu->kvm->arch.vgic.vgic_model; in vgic_mmio_change_active()
567 irq->active = active; in vgic_mmio_change_active()
572 * the active state is stored somewhere, but at the same time in vgic_mmio_change_active()
580 active_source = (requester_vcpu) ? requester_vcpu->vcpu_id : 0; in vgic_mmio_change_active()
583 active && vgic_irq_is_sgi(irq->intid)) in vgic_mmio_change_active()
584 irq->active_source = active_source; in vgic_mmio_change_active()
587 if (irq->active) in vgic_mmio_change_active()
588 vgic_queue_irq_unlock(vcpu->kvm, irq, flags); in vgic_mmio_change_active()
590 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in vgic_mmio_change_active()
603 vgic_put_irq(vcpu->kvm, irq); in __vgic_mmio_write_cactive()
613 mutex_lock(&vcpu->kvm->arch.config_lock); in vgic_mmio_write_cactive()
619 mutex_unlock(&vcpu->kvm->arch.config_lock); in vgic_mmio_write_cactive()
640 vgic_put_irq(vcpu->kvm, irq); in __vgic_mmio_write_sactive()
650 mutex_lock(&vcpu->kvm->arch.config_lock); in vgic_mmio_write_sactive()
656 mutex_unlock(&vcpu->kvm->arch.config_lock); in vgic_mmio_write_sactive()
677 val |= (u64)irq->priority << (i * 8); in vgic_mmio_read_priority()
679 vgic_put_irq(vcpu->kvm, irq); in vgic_mmio_read_priority()
688 * need to make this VCPU exit and re-evaluate the priorities, potentially
703 raw_spin_lock_irqsave(&irq->irq_lock, flags); in vgic_mmio_write_priority()
705 irq->priority = (val >> (i * 8)) & GENMASK(7, 8 - VGIC_PRI_BITS); in vgic_mmio_write_priority()
706 if (irq->hw && vgic_irq_is_sgi(irq->intid)) in vgic_mmio_write_priority()
708 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in vgic_mmio_write_priority()
710 vgic_put_irq(vcpu->kvm, irq); in vgic_mmio_write_priority()
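
The IPRIORITYR write above keeps only the top VGIC_PRI_BITS of each 8-bit priority byte; with the VGIC's 5 priority bits, GENMASK(7, 8 - VGIC_PRI_BITS) is 0xf8, so the low three bits always read back as zero. A worked example of that extraction with made-up register contents:

#include <stdint.h>
#include <stdio.h>

#define PRI_BITS 5					/* VGIC_PRI_BITS */
#define PRI_MASK ((0xffu >> (8 - PRI_BITS)) << (8 - PRI_BITS))	/* 0xf8 */

int main(void)
{
	uint32_t val = 0xa7834125;	/* one 8-bit priority field per byte */

	for (int i = 0; i < 4; i++) {
		uint8_t prio = (val >> (i * 8)) & PRI_MASK;

		/* Prints 0x20, 0x40, 0x80, 0xa0: the low bits are truncated away. */
		printf("byte %d: written 0x%02x, stored 0x%02x\n",
		       i, (unsigned)((val >> (i * 8)) & 0xff), (unsigned)prio);
	}
	return 0;
}
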
724 if (irq->config == VGIC_CONFIG_EDGE) in vgic_mmio_read_config()
727 vgic_put_irq(vcpu->kvm, irq); in vgic_mmio_read_config()
748 * make them read-only here. in vgic_mmio_write_config()
753 irq = vgic_get_irq(vcpu->kvm, intid + i); in vgic_mmio_write_config()
754 raw_spin_lock_irqsave(&irq->irq_lock, flags); in vgic_mmio_write_config()
757 irq->config = VGIC_CONFIG_EDGE; in vgic_mmio_write_config()
759 irq->config = VGIC_CONFIG_LEVEL; in vgic_mmio_write_config()
761 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in vgic_mmio_write_config()
762 vgic_put_irq(vcpu->kvm, irq); in vgic_mmio_write_config()
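
GICD_ICFGR gives each interrupt two configuration bits, and the write handler above honours only the upper bit of each pair (1 = edge-triggered, 0 = level-sensitive) while keeping SGIs and PPIs read-only. A compact sketch of that 2-bits-per-INTID decode (the enum and helper names are illustrative):

#include <stdint.h>
#include <stdio.h>

enum trigger { TRIGGER_LEVEL, TRIGGER_EDGE };

/* Only bit (2*i + 1) of the register matters for interrupt i within it. */
static enum trigger decode_icfgr(uint32_t val, unsigned int i)
{
	return (val & (1U << (i * 2 + 1))) ? TRIGGER_EDGE : TRIGGER_LEVEL;
}

int main(void)
{
	uint32_t val = 0x00000008;	/* field 1 has its upper bit set */

	printf("irq 0: %s\n", decode_icfgr(val, 0) == TRIGGER_EDGE ? "edge" : "level");
	printf("irq 1: %s\n", decode_icfgr(val, 1) == TRIGGER_EDGE ? "edge" : "level");
	return 0;
}
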
770 int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS; in vgic_read_irq_line_level_info()
779 if (irq->config == VGIC_CONFIG_LEVEL && irq->line_level) in vgic_read_irq_line_level_info()
782 vgic_put_irq(vcpu->kvm, irq); in vgic_read_irq_line_level_info()
792 int nr_irqs = vcpu->kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS; in vgic_write_irq_line_level_info()
810 raw_spin_lock_irqsave(&irq->irq_lock, flags); in vgic_write_irq_line_level_info()
811 irq->line_level = new_level; in vgic_write_irq_line_level_info()
813 vgic_queue_irq_unlock(vcpu->kvm, irq, flags); in vgic_write_irq_line_level_info()
815 raw_spin_unlock_irqrestore(&irq->irq_lock, flags); in vgic_write_irq_line_level_info()
817 vgic_put_irq(vcpu->kvm, irq); in vgic_write_irq_line_level_info()
826 if (offset < region->reg_offset) in match_region()
827 return -1; in match_region()
829 if (offset >= region->reg_offset + region->len) in match_region()
865 * We convert this value to the CPUs native format to deal with it as a data
870 unsigned long data = kvm_mmio_read_buf(val, len); in vgic_data_mmio_bus_to_host() local
874 return data; in vgic_data_mmio_bus_to_host()
876 return le16_to_cpu(data); in vgic_data_mmio_bus_to_host()
878 return le32_to_cpu(data); in vgic_data_mmio_bus_to_host()
880 return le64_to_cpu(data); in vgic_data_mmio_bus_to_host()
890 * We convert the data value from the CPUs native format to LE so that the
894 unsigned long data) in vgic_data_host_to_mmio_bus() argument
900 data = cpu_to_le16(data); in vgic_data_host_to_mmio_bus()
903 data = cpu_to_le32(data); in vgic_data_host_to_mmio_bus()
906 data = cpu_to_le64(data); in vgic_data_host_to_mmio_bus()
909 kvm_mmio_write_buf(buf, len, data); in vgic_data_host_to_mmio_bus()
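
The two conversion helpers above exist because the GIC is always little endian while the host CPU may not be: a buffer read off the MMIO bus is interpreted as LE and converted to a host-order value, and the reverse happens on writes. A self-contained illustration of that idea (these helpers are only a sketch, not the kernel's kvm_mmio_read_buf()/kvm_mmio_write_buf()):

#include <stdint.h>
#include <stdio.h>

/* Interpret a 1/2/4/8-byte little-endian buffer as a host-order value. */
static uint64_t bus_to_host(const uint8_t *buf, unsigned int len)
{
	uint64_t data = 0;

	for (unsigned int i = 0; i < len; i++)
		data |= (uint64_t)buf[i] << (8 * i);
	return data;
}

/* Store a host-order value into a 1/2/4/8-byte little-endian buffer. */
static void host_to_bus(uint8_t *buf, unsigned int len, uint64_t data)
{
	for (unsigned int i = 0; i < len; i++)
		buf[i] = data >> (8 * i);
}

int main(void)
{
	uint8_t buf[4];

	host_to_bus(buf, sizeof(buf), 0x12345678);
	printf("bytes: %02x %02x %02x %02x, back: %#llx\n",
	       (unsigned)buf[0], (unsigned)buf[1], (unsigned)buf[2], (unsigned)buf[3],
	       (unsigned long long)bus_to_host(buf, sizeof(buf)));
	return 0;
}
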
922 int flags, nr_irqs = kvm->arch.vgic.nr_spis + VGIC_NR_PRIVATE_IRQS; in check_region()
938 if ((region->access_flags & flags) && IS_ALIGNED(addr, len)) { in check_region()
939 if (!region->bits_per_irq) in check_region()
942 /* Do we access a non-allocated IRQ? */ in check_region()
943 return VGIC_ADDR_TO_INTID(addr, region->bits_per_irq) < nr_irqs; in check_region()
955 region = vgic_find_mmio_region(iodev->regions, iodev->nr_regions, in vgic_get_mmio_region()
956 addr - iodev->base_addr); in vgic_get_mmio_region()
957 if (!region || !check_region(vcpu->kvm, region, addr, len)) in vgic_get_mmio_region()
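
match_region()/check_region()/vgic_get_mmio_region() above locate the register window an offset falls into and then reject accesses with the wrong width or alignment, or, for per-IRQ registers, accesses that reach past the allocated interrupts. A rough userspace sketch of the lookup and the per-IRQ bound (a linear search instead of the comparator-driven lookup above, and the INTID mapping is simplified; all names are illustrative):

#include <stdbool.h>
#include <stddef.h>

struct mmio_region {
	unsigned int reg_offset;
	unsigned int len;
	unsigned int bits_per_irq;	/* 0 for registers that are not per-IRQ */
};

static const struct mmio_region *
find_region(const struct mmio_region *regions, size_t nr, unsigned int offset)
{
	for (size_t i = 0; i < nr; i++) {
		if (offset >= regions[i].reg_offset &&
		    offset < regions[i].reg_offset + regions[i].len)
			return &regions[i];
	}
	return NULL;	/* falls outside every register window */
}

/* For per-IRQ registers: does the accessed byte map to an allocated INTID? */
static bool intid_in_range(const struct mmio_region *r, unsigned int offset,
			   unsigned int nr_irqs)
{
	if (!r->bits_per_irq)
		return true;
	return (offset - r->reg_offset) * 8 / r->bits_per_irq < nr_irqs;
}

int main(void)
{
	static const struct mmio_region regs[] = {
		{ .reg_offset = 0x000, .len = 0x80, .bits_per_irq = 0 },
		{ .reg_offset = 0x080, .len = 0x80, .bits_per_irq = 1 },
	};
	const struct mmio_region *r = find_region(regs, 2, 0x84);

	/* Offset 0x84 lands in the 1-bit-per-IRQ window: byte 4 covers INTIDs 32-39. */
	return !(r && intid_in_range(r, 0x84, 64));	/* 0 on success */
}
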
975 r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu; in vgic_uaccess_read()
976 if (region->uaccess_read) in vgic_uaccess_read()
977 *val = region->uaccess_read(r_vcpu, addr, sizeof(u32)); in vgic_uaccess_read()
979 *val = region->read(r_vcpu, addr, sizeof(u32)); in vgic_uaccess_read()
994 r_vcpu = iodev->redist_vcpu ? iodev->redist_vcpu : vcpu; in vgic_uaccess_write()
995 if (region->uaccess_write) in vgic_uaccess_write()
996 return region->uaccess_write(r_vcpu, addr, sizeof(u32), *val); in vgic_uaccess_write()
998 region->write(r_vcpu, addr, sizeof(u32), *val); in vgic_uaccess_write()
1019 unsigned long data = 0; in dispatch_mmio_read() local
1027 switch (iodev->iodev_type) { in dispatch_mmio_read()
1029 data = region->read(vcpu, addr, len); in dispatch_mmio_read()
1032 data = region->read(vcpu, addr, len); in dispatch_mmio_read()
1035 data = region->read(iodev->redist_vcpu, addr, len); in dispatch_mmio_read()
1038 data = region->its_read(vcpu->kvm, iodev->its, addr, len); in dispatch_mmio_read()
1042 vgic_data_host_to_mmio_bus(val, len, data); in dispatch_mmio_read()
1051 unsigned long data = vgic_data_mmio_bus_to_host(val, len); in dispatch_mmio_write() local
1057 switch (iodev->iodev_type) { in dispatch_mmio_write()
1059 region->write(vcpu, addr, len, data); in dispatch_mmio_write()
1062 region->write(vcpu, addr, len, data); in dispatch_mmio_write()
1065 region->write(iodev->redist_vcpu, addr, len, data); in dispatch_mmio_write()
1068 region->its_write(vcpu->kvm, iodev->its, addr, len, data); in dispatch_mmio_write()
1083 struct vgic_io_device *io_device = &kvm->arch.vgic.dist_iodev; in vgic_register_dist_iodev()
1097 io_device->base_addr = dist_base_address; in vgic_register_dist_iodev()
1098 io_device->iodev_type = IODEV_DIST; in vgic_register_dist_iodev()
1099 io_device->redist_vcpu = NULL; in vgic_register_dist_iodev()
1102 len, &io_device->dev); in vgic_register_dist_iodev()