1 // SPDX-License-Identifier: GPL-2.0-only
3 #include <linux/irqchip/arm-gic-v3.h>
25 struct vgic_v3_cpu_if *cpuif = &vcpu->arch.vgic_cpu.vgic_v3;
27 cpuif->vgic_hcr |= ICH_HCR_EL2_UIE;
38 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
39 struct vgic_v3_cpu_if *cpuif = &vgic_cpu->vgic_v3;
40 u32 model = vcpu->kvm->arch.vgic.vgic_model;
45 cpuif->vgic_hcr &= ~ICH_HCR_EL2_UIE;
47 for (lr = 0; lr < cpuif->used_lrs; lr++) {
48 u64 val = cpuif->vgic_lr[lr];
64 /* Notify fds when the guest EOI'ed a level-triggered IRQ */
65 if (lr_signals_eoi_mi(val) && vgic_valid_spi(vcpu->kvm, intid))
66 kvm_notify_acked_irq(vcpu->kvm, 0,
67 intid - VGIC_NR_PRIVATE_IRQS);
73 raw_spin_lock(&irq->irq_lock);
76 deactivated = irq->active && !(val & ICH_LR_ACTIVE_BIT);
77 irq->active = !!(val & ICH_LR_ACTIVE_BIT);
79 if (irq->active && is_v2_sgi)
80 irq->active_source = cpuid;
83 if (irq->config == VGIC_CONFIG_EDGE &&
85 irq->pending_latch = true;
88 irq->source |= (1 << cpuid);
94 if (irq->config == VGIC_CONFIG_LEVEL && !(val & ICH_LR_STATE))
95 irq->pending_latch = false;
100 raw_spin_unlock(&irq->irq_lock);
101 vgic_put_irq(vcpu->kvm, irq);
104 cpuif->used_lrs = 0;
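/*
 * Illustrative, self-contained sketch (not taken from the file): how the
 * fold loop above decodes the state field of a saved ICH_LR_EL2 value.
 * The bit positions follow the GICv3 architecture (vINTID in bits [31:0],
 * state in bits [63:62]); in the kernel they come from
 * <linux/irqchip/arm-gic-v3.h> as ICH_LR_VIRTUAL_ID_MASK,
 * ICH_LR_PENDING_BIT, ICH_LR_ACTIVE_BIT and ICH_LR_STATE.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define LR_VIRTUAL_ID_MASK	0xffffffffULL
#define LR_PENDING_BIT		(1ULL << 62)
#define LR_ACTIVE_BIT		(1ULL << 63)
#define LR_STATE		(3ULL << 62)

static void decode_lr(uint64_t lr)
{
	uint32_t intid = lr & LR_VIRTUAL_ID_MASK;

	/* !(lr & LR_STATE) is the "invalid" case used to retire level IRQs */
	printf("intid %u: pending=%d active=%d invalid=%d\n", intid,
	       !!(lr & LR_PENDING_BIT), !!(lr & LR_ACTIVE_BIT),
	       !(lr & LR_STATE));
}

int main(void)
{
	decode_lr((1ULL << 62) | 27);	/* a pending virtual PPI 27 */
	return 0;
}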
110 u32 model = vcpu->kvm->arch.vgic.vgic_model;
111 u64 val = irq->intid;
114 is_v2_sgi = (vgic_irq_is_sgi(irq->intid) &&
117 if (irq->active) {
120 val |= irq->active_source << GICH_LR_PHYSID_CPUID_SHIFT;
127 if (irq->hw && !vgic_irq_needs_resampling(irq)) {
129 val |= ((u64)irq->hwintid) << ICH_LR_PHYS_ID_SHIFT;
135 if (irq->active)
138 if (irq->config == VGIC_CONFIG_LEVEL) {
145 if (irq->active)
153 if (irq->config == VGIC_CONFIG_EDGE)
154 irq->pending_latch = false;
156 if (vgic_irq_is_sgi(irq->intid) &&
158 u32 src = ffs(irq->source);
161 irq->intid))
164 val |= (src - 1) << GICH_LR_PHYSID_CPUID_SHIFT;
165 irq->source &= ~(1 << (src - 1));
166 if (irq->source) {
167 irq->pending_latch = true;
174 * Level-triggered mapped IRQs are special because we only observe
180 irq->line_level = false;
182 if (irq->group)
185 val |= (u64)irq->priority << ICH_LR_PRIORITY_SHIFT;
187 vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = val;
192 vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = 0;
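/*
 * Hedged standalone illustration of the GICv2 SGI handling visible above
 * (file lines 156-167): a list register can carry only one source CPU, so
 * the lowest-numbered pending source is picked via ffs(), cleared from the
 * source bitmap, and the SGI is left pending if other sources remain. The
 * helper below is a sketch, not a function from the file.
 */
#include <stdbool.h>
#include <stdint.h>

/* Returns the source CPU to encode in the LR, or -1 if none is pending. */
int consume_sgi_source(uint8_t *source, bool *still_pending)
{
	int src = __builtin_ffs(*source);	/* 1-based index, 0 if empty */

	if (!src)
		return -1;

	*source &= ~(1U << (src - 1));
	*still_pending = (*source != 0);	/* more CPUs queued this SGI */
	return src - 1;
}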
197 struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
198 u32 model = vcpu->kvm->arch.vgic.vgic_model;
202 vmcr = (vmcrp->ackctl << ICH_VMCR_ACK_CTL_SHIFT) &
204 vmcr |= (vmcrp->fiqen << ICH_VMCR_FIQ_EN_SHIFT) &
214 vmcr |= (vmcrp->cbpr << ICH_VMCR_CBPR_SHIFT) & ICH_VMCR_CBPR_MASK;
215 vmcr |= (vmcrp->eoim << ICH_VMCR_EOIM_SHIFT) & ICH_VMCR_EOIM_MASK;
216 vmcr |= (vmcrp->abpr << ICH_VMCR_BPR1_SHIFT) & ICH_VMCR_BPR1_MASK;
217 vmcr |= (vmcrp->bpr << ICH_VMCR_BPR0_SHIFT) & ICH_VMCR_BPR0_MASK;
218 vmcr |= (vmcrp->pmr << ICH_VMCR_PMR_SHIFT) & ICH_VMCR_PMR_MASK;
219 vmcr |= (vmcrp->grpen0 << ICH_VMCR_ENG0_SHIFT) & ICH_VMCR_ENG0_MASK;
220 vmcr |= (vmcrp->grpen1 << ICH_VMCR_ENG1_SHIFT) & ICH_VMCR_ENG1_MASK;
222 cpu_if->vgic_vmcr = vmcr;
227 struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
228 u32 model = vcpu->kvm->arch.vgic.vgic_model;
231 vmcr = cpu_if->vgic_vmcr;
234 vmcrp->ackctl = (vmcr & ICH_VMCR_ACK_CTL_MASK) >>
236 vmcrp->fiqen = (vmcr & ICH_VMCR_FIQ_EN_MASK) >>
243 vmcrp->fiqen = 1;
244 vmcrp->ackctl = 0;
247 vmcrp->cbpr = (vmcr & ICH_VMCR_CBPR_MASK) >> ICH_VMCR_CBPR_SHIFT;
248 vmcrp->eoim = (vmcr & ICH_VMCR_EOIM_MASK) >> ICH_VMCR_EOIM_SHIFT;
249 vmcrp->abpr = (vmcr & ICH_VMCR_BPR1_MASK) >> ICH_VMCR_BPR1_SHIFT;
250 vmcrp->bpr = (vmcr & ICH_VMCR_BPR0_MASK) >> ICH_VMCR_BPR0_SHIFT;
251 vmcrp->pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
252 vmcrp->grpen0 = (vmcr & ICH_VMCR_ENG0_MASK) >> ICH_VMCR_ENG0_SHIFT;
253 vmcrp->grpen1 = (vmcr & ICH_VMCR_ENG1_MASK) >> ICH_VMCR_ENG1_SHIFT;
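/*
 * Minimal sketch of the shift-and-mask round trip that vgic_v3_set_vmcr()
 * and vgic_v3_get_vmcr() above perform for every VMCR field. The shift and
 * mask below are illustrative stand-ins for one ICH_VMCR_*_SHIFT/_MASK pair
 * from <linux/irqchip/arm-gic-v3.h>; no particular bit position is assumed.
 */
#include <stdint.h>

#define FIELD_SHIFT	24U
#define FIELD_MASK	(0xffU << FIELD_SHIFT)

uint32_t pack_field(uint32_t reg, uint32_t val)
{
	return reg | ((val << FIELD_SHIFT) & FIELD_MASK);
}

uint32_t unpack_field(uint32_t reg)
{
	return (reg & FIELD_MASK) >> FIELD_SHIFT;
}

/* unpack_field(pack_field(0, 0x80)) returns 0x80 again. */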
263 struct vgic_v3_cpu_if *vgic_v3 = &vcpu->arch.vgic_cpu.vgic_v3;
270 vgic_v3->vgic_vmcr = 0;
273 	 * If we are emulating a GICv3, we do it in a non-GICv2-compatible
275 * Also, we don't support any form of IRQ/FIQ bypass.
278 if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3) {
279 vgic_v3->vgic_sre = (ICC_SRE_EL1_DIB |
282 vcpu->arch.vgic_cpu.pendbaser = INITIAL_PENDBASER_VALUE;
284 vgic_v3->vgic_sre = 0;
287 vcpu->arch.vgic_cpu.num_id_bits = FIELD_GET(ICH_VTR_EL2_IDbits,
289 vcpu->arch.vgic_cpu.num_pri_bits = FIELD_GET(ICH_VTR_EL2_PRIbits,
293 vgic_v3->vgic_hcr = ICH_HCR_EL2_En;
298 struct vgic_v3_cpu_if *vgic_v3 = &vcpu->arch.vgic_cpu.vgic_v3;
301 if (!kvm_has_gicv3(vcpu->kvm)) {
302 vgic_v3->vgic_hcr |= (ICH_HCR_EL2_TALL0 | ICH_HCR_EL2_TALL1 |
308 vgic_v3->vgic_hcr |= ICH_HCR_EL2_TALL0;
310 vgic_v3->vgic_hcr |= ICH_HCR_EL2_TALL1;
312 vgic_v3->vgic_hcr |= ICH_HCR_EL2_TC;
314 vgic_v3->vgic_hcr |= ICH_HCR_EL2_TDIR;
328 vcpu = irq->target_vcpu;
332 pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);
334 byte_offset = irq->intid / BITS_PER_BYTE;
335 bit_nr = irq->intid % BITS_PER_BYTE;
344 raw_spin_lock_irqsave(&irq->irq_lock, flags);
345 if (irq->target_vcpu != vcpu) {
346 raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
349 irq->pending_latch = status;
350 vgic_queue_irq_unlock(vcpu->kvm, irq, flags);
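/*
 * Worked example of the pending-table addressing used above: each INTID
 * owns exactly one bit, so byte_offset = intid / BITS_PER_BYTE and
 * bit_nr = intid % BITS_PER_BYTE. The first LPI, INTID 8192, therefore
 * lives at byte 1024, bit 0 of the table; INTID 8195 shares that byte at
 * bit 3. Standalone sketch, not a kernel helper.
 */
#include <stdint.h>

void lpi_pending_bit(uint32_t intid, uint64_t pendbase,
		     uint64_t *gpa, unsigned int *bit)
{
	*gpa = pendbase + intid / 8;	/* guest physical address of the byte */
	*bit = intid % 8;		/* bit to test or set within that byte */
}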
368 struct vgic_dist *dist = &kvm->arch.vgic;
371 for (i = 0; i < dist->its_vm.nr_vpes; i++)
372 free_irq(dist->its_vm.vpes[i]->irq, kvm_get_vcpu(kvm, i));
377 struct vgic_dist *dist = &kvm->arch.vgic;
380 for (i = 0; i < dist->its_vm.nr_vpes; i++)
382 dist->its_vm.vpes[i]->irq));
386 * vgic_v3_save_pending_tables - Save the pending tables into guest RAM
391 struct vgic_dist *dist = &kvm->arch.vgic;
400 return -ENXIO;
412 xa_for_each(&dist->lpi_xa, index, irq) {
419 vcpu = irq->target_vcpu;
423 pendbase = GICR_PENDBASER_ADDRESS(vcpu->arch.vgic_cpu.pendbaser);
425 byte_offset = irq->intid / BITS_PER_BYTE;
426 bit_nr = irq->intid % BITS_PER_BYTE;
438 is_pending = irq->pending_latch;
440 if (irq->hw && vlpi_avail)
464 * vgic_v3_rdist_overlap - check if a region overlaps with any
475 struct vgic_dist *d = &kvm->arch.vgic;
478 list_for_each_entry(rdreg, &d->rd_regions, list) {
479 if ((base + size > rdreg->base) &&
480 (base < rdreg->base + vgic_v3_rd_region_size(kvm, rdreg)))
492 struct vgic_dist *d = &kvm->arch.vgic;
495 if (!IS_VGIC_ADDR_UNDEF(d->vgic_dist_base) &&
496 d->vgic_dist_base + KVM_VGIC_V3_DIST_SIZE < d->vgic_dist_base)
499 list_for_each_entry(rdreg, &d->rd_regions, list) {
503 rdreg->base, SZ_64K, sz))
507 if (IS_VGIC_ADDR_UNDEF(d->vgic_dist_base))
510 return !vgic_v3_rdist_overlap(kvm, d->vgic_dist_base,
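/*
 * The test in vgic_v3_rdist_overlap() above (file lines 479-480) is the
 * standard half-open interval check: [a, a + a_size) and [b, b + b_size)
 * intersect iff a < b + b_size and b < a + a_size. Restated standalone:
 */
#include <stdbool.h>
#include <stdint.h>

bool ranges_overlap(uint64_t a, uint64_t a_size, uint64_t b, uint64_t b_size)
{
	return a < b + b_size && b < a + a_size;
}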
515 * vgic_v3_rdist_free_slot - Look up registered rdist regions and identify one
540 struct list_head *rd_regions = &kvm->arch.vgic.rd_regions;
544 if (rdreg->index == index)
553 struct vgic_dist *dist = &kvm->arch.vgic;
558 struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
560 if (IS_VGIC_ADDR_UNDEF(vgic_cpu->rd_iodev.base_addr)) {
562 return -ENXIO;
566 if (IS_VGIC_ADDR_UNDEF(dist->vgic_dist_base)) {
568 return -ENXIO;
573 return -EINVAL;
581 return -EBUSY;
596 early_param("kvm-arm.vgic_v3_group0_trap", early_group0_trap_cfg);
602 early_param("kvm-arm.vgic_v3_group1_trap", early_group1_trap_cfg);
608 early_param("kvm-arm.vgic_v3_common_trap", early_common_trap_cfg);
614 early_param("kvm-arm.vgic_v4_enable", early_gicv4_enable);
639 * vgic_v3_probe - probe for a VGICv3 compatible interrupt controller
663 if (info->has_v4) {
665 kvm_vgic_global_state.has_gicv4_1 = info->has_v4_1 && gicv4_enable;
673 if (!info->vcpu.start) {
677 } else if (!PAGE_ALIGNED(info->vcpu.start)) {
679 (unsigned long long)info->vcpu.start);
681 kvm_vgic_global_state.vcpu_base = info->vcpu.start;
685 kvm_err("Cannot register GICv2 KVM device.\n");
688 kvm_info("vgic-v2@%llx\n", info->vcpu.start);
692 kvm_err("Cannot register GICv3 KVM device.\n");
735 struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
754 struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;