1 // SPDX-License-Identifier: GPL-2.0-only
8 * Gregory CLEMENT <gregory.clement@free-electrons.com>
9 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
23 #include <linux/irqchip/irq-msi-lib.h>
46 * +---------------+ +---------------+
48 * | per-CPU | | per-CPU |
52 * +---------------+ +---------------+
57 * +-------------------+
62 * +-------------------+
70 * registers, which are relative to "mpic->base".
72 * The "per-CPU mask/unmask" is modified using the MPIC_INT_SET_MASK
74 * "mpic->per_cpu". This base address points to a special address,
77 * The per-CPU mask/unmask can also be adjusted using the global
78 * per-interrupt MPIC_INT_SOURCE_CTL register, which we use to
82 * different levels: at the global level and at the per-CPU level.
86 * - For global interrupts:
88 * At ->map() time, a global interrupt is unmasked at the per-CPU
90 * the current CPU, running the ->map() code. This allows the
91 * interrupt to be unmasked at this level in non-SMP
92 * configurations. In SMP configurations, the ->set_affinity()
94 * readjusts the per-CPU mask/unmask for the interrupt.
96 * The ->mask() and ->unmask() operations only mask/unmask the
99 * So, a global interrupt is enabled at the per-CPU level as soon
103 * - For per-CPU interrupts:
105 * At ->map() time, a per-CPU interrupt is unmasked at the global
108 * The ->mask() and ->unmask() operations mask/unmask the interrupt
109 * at the per-CPU level.
111 * So, a per-CPU interrupt is enabled at the global level as soon
113 * at the per-CPU level.
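To make the two-level model above concrete, here is a minimal sketch (not part of the driver; the helper name is made up, and only the register offsets defined just below are taken from the file) of what enabling one global interrupt on the boot CPU amounts to:

#include <linux/io.h>
#include <linux/irq.h>

/* Sketch only: enable a global interrupt at both levels described above */
static void example_enable_global(void __iomem *base, void __iomem *per_cpu,
				  irq_hw_number_t hwirq)
{
	/* per-CPU level: clear this CPU's mask bit for the interrupt */
	writel(hwirq, per_cpu + MPIC_INT_CLEAR_MASK);
	/* global level: set the enable bit in the global interrupt manager */
	writel(hwirq, base + MPIC_INT_SET_ENABLE);
}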
116 /* Registers relative to mpic->base */
126 /* Registers relative to mpic->per_cpu */
146 /* MSI interrupt definitions for non-IPI platforms */
154 * struct mpic - MPIC private data structure
156 * @per_cpu: per-CPU registers base address
157 * @parent_irq: parent IRQ if MPIC is not top-level interrupt controller
196 * interrupt controller (e.g. GIC) that takes care of inter-processor in mpic_is_ipi_available()
199 return mpic->parent_irq <= 0; in mpic_is_ipi_available()
218 writel(hwirq, mpic->base + MPIC_INT_CLEAR_ENABLE); in mpic_irq_mask()
220 writel(hwirq, mpic->per_cpu + MPIC_INT_SET_MASK); in mpic_irq_mask()
229 writel(hwirq, mpic->base + MPIC_INT_SET_ENABLE); in mpic_irq_unmask()
231 writel(hwirq, mpic->per_cpu + MPIC_INT_CLEAR_MASK); in mpic_irq_unmask()
241 msg->address_lo = lower_32_bits(mpic->msi_doorbell_addr); in mpic_compose_msi_msg()
242 msg->address_hi = upper_32_bits(mpic->msi_doorbell_addr); in mpic_compose_msi_msg()
243 msg->data = BIT(cpu + 8) | (d->hwirq + mpic->msi_doorbell_start); in mpic_compose_msi_msg()
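As a hedged reading of the data word built above (the decode helper is an illustration, not driver code): the low byte carries the doorbell number (hwirq + msi_doorbell_start) and a single bit at position cpu + 8 selects the target CPU, matching the (map << 8) | hwirq encoding used for IPIs further down.

#include <linux/bitops.h>
#include <linux/types.h>

/* Illustration only: split a doorbell data word back into its parts */
static void example_decode_doorbell(u32 data, unsigned int *cpu, u32 *doorbell)
{
	*doorbell = data & 0xff;	/* hwirq + msi_doorbell_start */
	*cpu = ffs(data >> 8) - 1;	/* single bit set by BIT(cpu + 8) */
}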
256 return -EINVAL; in mpic_msi_set_affinity()
272 struct mpic *mpic = domain->host_data; in mpic_msi_alloc()
275 mutex_lock(&mpic->msi_lock); in mpic_msi_alloc()
276 hwirq = bitmap_find_free_region(mpic->msi_used, mpic->msi_doorbell_size, in mpic_msi_alloc()
278 mutex_unlock(&mpic->msi_lock); in mpic_msi_alloc()
281 return -ENOSPC; in mpic_msi_alloc()
286 domain->host_data, handle_simple_irq, in mpic_msi_alloc()
296 struct mpic *mpic = domain->host_data; in mpic_msi_free()
298 mutex_lock(&mpic->msi_lock); in mpic_msi_free()
299 bitmap_release_region(mpic->msi_used, d->hwirq, order_base_2(nr_irqs)); in mpic_msi_free()
300 mutex_unlock(&mpic->msi_lock); in mpic_msi_free()
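The hwirq accounting above is the stock bitmap-region pattern. A self-contained sketch of the same alloc/free pairing, with all names and the pool size invented for illustration:

#include <linux/bitmap.h>
#include <linux/log2.h>
#include <linux/mutex.h>

#define EXAMPLE_NR_DOORBELLS	32	/* assumed pool size */
static DECLARE_BITMAP(example_used, EXAMPLE_NR_DOORBELLS);
static DEFINE_MUTEX(example_lock);

/* Reserve a power-of-two block of doorbells; negative error when exhausted */
static int example_alloc(unsigned int nr_irqs)
{
	int hwirq;

	mutex_lock(&example_lock);
	hwirq = bitmap_find_free_region(example_used, EXAMPLE_NR_DOORBELLS,
					order_base_2(nr_irqs));
	mutex_unlock(&example_lock);

	return hwirq;
}

/* Give the block starting at hwirq back to the pool */
static void example_free(unsigned int hwirq, unsigned int nr_irqs)
{
	mutex_lock(&example_lock);
	bitmap_release_region(example_used, hwirq, order_base_2(nr_irqs));
	mutex_unlock(&example_lock);
}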
314 reg = readl(mpic->per_cpu + MPIC_IN_DRBEL_MASK); in mpic_msi_reenable_percpu()
315 reg |= mpic->msi_doorbell_mask; in mpic_msi_reenable_percpu()
316 writel(reg, mpic->per_cpu + MPIC_IN_DRBEL_MASK); in mpic_msi_reenable_percpu()
319 writel(1, mpic->per_cpu + MPIC_INT_CLEAR_MASK); in mpic_msi_reenable_percpu()
333 .prefix = "MPIC-",
340 mpic->msi_doorbell_addr = main_int_phys_base + MPIC_SW_TRIG_INT; in mpic_msi_init()
342 mutex_init(&mpic->msi_lock); in mpic_msi_init()
345 mpic->msi_doorbell_start = PCI_MSI_DOORBELL_START; in mpic_msi_init()
346 mpic->msi_doorbell_size = PCI_MSI_DOORBELL_NR; in mpic_msi_init()
347 mpic->msi_doorbell_mask = PCI_MSI_DOORBELL_MASK; in mpic_msi_init()
349 mpic->msi_doorbell_start = PCI_MSI_FULL_DOORBELL_START; in mpic_msi_init()
350 mpic->msi_doorbell_size = PCI_MSI_FULL_DOORBELL_NR; in mpic_msi_init()
351 mpic->msi_doorbell_mask = PCI_MSI_FULL_DOORBELL_MASK; in mpic_msi_init()
358 .size = mpic->msi_doorbell_size, in mpic_msi_init()
361 mpic->msi_inner_domain = msi_create_parent_irq_domain(&info, &mpic_msi_parent_ops); in mpic_msi_init()
362 if (!mpic->msi_inner_domain) in mpic_msi_init()
363 return -ENOMEM; in mpic_msi_init()
367 /* Unmask low 16 MSI irqs on non-IPI platforms */ in mpic_msi_init()
369 writel(0, mpic->per_cpu + MPIC_INT_CLEAR_MASK); in mpic_msi_init()
391 if (!of_machine_is_compatible("marvell,armada-370-xp")) in mpic_perf_init()
397 writel(MPIC_INT_CAUSE_PERF(cpuid), mpic->per_cpu + MPIC_INT_FABRIC_MASK); in mpic_perf_init()
406 reg = readl(mpic->per_cpu + MPIC_IN_DRBEL_MASK); in mpic_ipi_mask()
407 reg &= ~BIT(d->hwirq); in mpic_ipi_mask()
408 writel(reg, mpic->per_cpu + MPIC_IN_DRBEL_MASK); in mpic_ipi_mask()
416 reg = readl(mpic->per_cpu + MPIC_IN_DRBEL_MASK); in mpic_ipi_unmask()
417 reg |= BIT(d->hwirq); in mpic_ipi_unmask()
418 writel(reg, mpic->per_cpu + MPIC_IN_DRBEL_MASK); in mpic_ipi_unmask()
438 writel((map << 8) | d->hwirq, mpic->base + MPIC_SW_TRIG_INT); in mpic_ipi_send_mask()
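The map shifted into bits 8+ above is presumably a mask of physical CPU IDs. A hedged sketch of how such a word could be built from a logical cpumask; only the (map << 8) | hwirq encoding is taken from the line above, the helper itself is an assumption:

#include <linux/cpumask.h>
#include <linux/bits.h>
#include <asm/smp_plat.h>	/* cpu_logical_map() on ARM */

/* Sketch: translate a logical cpumask into the (map << 8) | hwirq word */
static u32 example_sw_trig_word(const struct cpumask *mask,
				irq_hw_number_t hwirq)
{
	unsigned int cpu;
	u32 map = 0;

	for_each_cpu(cpu, mask)
		map |= BIT(cpu_logical_map(cpu));

	return (map << 8) | hwirq;
}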
445 writel(~BIT(d->hwirq), mpic->per_cpu + MPIC_IN_DRBEL_CAUSE); in mpic_ipi_ack()
461 irq_domain_set_info(d, virq + i, i, &mpic_ipi_irqchip, d->host_data, in mpic_ipi_alloc()
482 unsigned int virq = irq_find_mapping(mpic->ipi_domain, i); in mpic_ipi_resume()
488 d = irq_domain_get_irq_data(mpic->ipi_domain, virq); in mpic_ipi_resume()
497 mpic->ipi_domain = irq_domain_create_linear(of_fwnode_handle(node), IPI_DOORBELL_NR, in mpic_ipi_init()
499 if (WARN_ON(!mpic->ipi_domain)) in mpic_ipi_init()
500 return -ENOMEM; in mpic_ipi_init()
502 irq_domain_update_bus_token(mpic->ipi_domain, DOMAIN_BUS_IPI); in mpic_ipi_init()
503 base_ipi = irq_domain_alloc_irqs(mpic->ipi_domain, IPI_DOORBELL_NR, NUMA_NO_NODE, NULL); in mpic_ipi_init()
505 return -ENOMEM; in mpic_ipi_init()
521 atomic_io_modify(mpic->base + MPIC_INT_SOURCE_CTL(hwirq), in mpic_set_affinity()
531 for (irq_hw_number_t i = 0; i < mpic->domain->hwirq_max; i++) in mpic_smp_cpu_init()
532 writel(i, mpic->per_cpu + MPIC_INT_SET_MASK); in mpic_smp_cpu_init()
538 writel(0, mpic->per_cpu + MPIC_IN_DRBEL_MASK); in mpic_smp_cpu_init()
541 writel(0, mpic->per_cpu + MPIC_IN_DRBEL_CAUSE); in mpic_smp_cpu_init()
544 writel(0, mpic->per_cpu + MPIC_INT_CLEAR_MASK); in mpic_smp_cpu_init()
549 /* Re-enable per-CPU interrupts that were enabled before suspend */ in mpic_reenable_percpu()
551 unsigned int virq = irq_find_mapping(mpic->domain, i); in mpic_reenable_percpu()
569 struct mpic *mpic = irq_get_default_domain()->host_data; in mpic_starting_cpu()
584 enable_percpu_irq(mpic->parent_irq, IRQ_TYPE_NONE); in mpic_cascaded_starting_cpu()
606 struct mpic *mpic = domain->host_data; in mpic_irq_map()
610 return -EINVAL; in mpic_irq_map()
616 writel(hwirq, mpic->per_cpu + MPIC_INT_CLEAR_MASK); in mpic_irq_map()
618 writel(hwirq, mpic->base + MPIC_INT_SET_ENABLE); in mpic_irq_map()
643 cause = readl_relaxed(mpic->per_cpu + MPIC_IN_DRBEL_CAUSE); in mpic_handle_msi_irq()
644 cause &= mpic->msi_doorbell_mask; in mpic_handle_msi_irq()
645 writel(~cause, mpic->per_cpu + MPIC_IN_DRBEL_CAUSE); in mpic_handle_msi_irq()
648 generic_handle_domain_irq(mpic->msi_inner_domain, i - mpic->msi_doorbell_start); in mpic_handle_msi_irq()
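The loop feeding the call above is not part of this listing; one plausible shape (an assumption, not the driver's exact code) simply walks the set bits of the acknowledged cause word:

#include <linux/bitops.h>
#include <linux/irqdesc.h>

/* Sketch: dispatch one handler call per pending doorbell bit in "cause" */
static void example_dispatch_doorbells(unsigned long cause,
				       struct irq_domain *domain,
				       unsigned int doorbell_start)
{
	unsigned int i;

	for_each_set_bit(i, &cause, BITS_PER_LONG)
		generic_handle_domain_irq(domain, i - doorbell_start);
}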
660 cause = readl_relaxed(mpic->per_cpu + MPIC_IN_DRBEL_CAUSE); in mpic_handle_ipi_irq()
664 generic_handle_domain_irq(mpic->ipi_domain, i); in mpic_handle_ipi_irq()
680 cause = readl_relaxed(mpic->per_cpu + MPIC_PPI_CAUSE); in mpic_handle_cascade_irq()
684 irqsrc = readl_relaxed(mpic->base + MPIC_INT_SOURCE_CTL(i)); in mpic_handle_cascade_irq()
687 * Test IRQ (0-1) and FIQ (8-9) mask bits. in mpic_handle_cascade_irq()
697 generic_handle_domain_irq(mpic->domain, i); in mpic_handle_cascade_irq()
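The routing test implied by the comment above can be pictured as follows; the bit positions follow the comment, and any real macro the driver uses for this check is not reproduced here:

#include <linux/bits.h>
#include <linux/types.h>

/* Sketch: does source-control value "irqsrc" route to CPU "cpuid"? */
static bool example_routed_to_cpu(u32 irqsrc, unsigned int cpuid)
{
	/* IRQ mask bits live at 0-1, FIQ mask bits at 8-9, per the comment */
	return irqsrc & (BIT(cpuid) | BIT(cpuid + 8));
}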
705 struct mpic *mpic = irq_get_default_domain()->host_data; in mpic_handle_irq()
710 irqstat = readl_relaxed(mpic->per_cpu + MPIC_CPU_INTACK); in mpic_handle_irq()
717 generic_handle_domain_irq(mpic->domain, i); in mpic_handle_irq()
733 mpic->doorbell_mask = readl(mpic->per_cpu + MPIC_IN_DRBEL_MASK); in mpic_suspend()
743 /* Re-enable interrupts */ in mpic_resume()
744 for (irq_hw_number_t i = 0; i < mpic->domain->hwirq_max; i++) { in mpic_resume()
745 unsigned int virq = irq_find_mapping(mpic->domain, i); in mpic_resume()
754 /* Non per-CPU interrupts */ in mpic_resume()
755 writel(i, mpic->per_cpu + MPIC_INT_CLEAR_MASK); in mpic_resume()
759 /* Per-CPU interrupts */ in mpic_resume()
760 writel(i, mpic->base + MPIC_INT_SET_ENABLE); in mpic_resume()
763 * Re-enable on the current CPU, mpic_reenable_percpu() in mpic_resume()
772 writel(mpic->doorbell_mask, mpic->per_cpu + MPIC_IN_DRBEL_MASK); in mpic_resume()
775 src0 = mpic->doorbell_mask & IPI_DOORBELL_MASK; in mpic_resume()
776 src1 = mpic->doorbell_mask & PCI_MSI_DOORBELL_MASK; in mpic_resume()
778 src0 = mpic->doorbell_mask & PCI_MSI_FULL_DOORBELL_SRC0_MASK; in mpic_resume()
779 src1 = mpic->doorbell_mask & PCI_MSI_FULL_DOORBELL_SRC1_MASK; in mpic_resume()
783 writel(0, mpic->per_cpu + MPIC_INT_CLEAR_MASK); in mpic_resume()
785 writel(1, mpic->per_cpu + MPIC_INT_CLEAR_MASK); in mpic_resume()
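Presumably (the summary wiring is not visible in this listing) the doorbell causes are funnelled into chained inputs 0 and 1 of the main controller, which would explain why re-arming src0/src1 above is a write of hwirq 0 or 1 to MPIC_INT_CLEAR_MASK, mirroring mpic_irq_unmask() earlier.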
796 static int __init mpic_map_region(struct device_node *np, int index, in mpic_map_region() argument
802 err = of_address_to_resource(np, index, &res); in mpic_map_region()
806 if (WARN_ON(!request_mem_region(res.start, resource_size(&res), np->full_name))) { in mpic_map_region()
807 err = -EBUSY; in mpic_map_region()
813 err = -ENOMEM; in mpic_map_region()
823 pr_err("%pOF: Unable to map resource %d: %pE\n", np, index, ERR_PTR(err)); in mpic_map_region()
836 return -ENOMEM; in mpic_of_init()
840 err = mpic_map_region(node, 0, &mpic->base, &phys_base); in mpic_of_init()
844 err = mpic_map_region(node, 1, &mpic->per_cpu, NULL); in mpic_of_init()
848 nr_irqs = FIELD_GET(MPIC_INT_CONTROL_NUMINT_MASK, readl(mpic->base + MPIC_INT_CONTROL)); in mpic_of_init()
851 writel(i, mpic->base + MPIC_INT_CLEAR_ENABLE); in mpic_of_init()
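FIELD_GET() above extracts the supported-interrupt count from the control register. A minimal, standalone illustration of that helper with a made-up field layout (the driver's real MPIC_INT_CONTROL_NUMINT_MASK is defined earlier in the file and is not reproduced here):

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/types.h>

/* Hypothetical count field occupying bits [12:2] of a control register */
#define EXAMPLE_NUMINT_MASK	GENMASK(12, 2)

static u32 example_numint(u32 control)
{
	/* FIELD_GET() masks the value and shifts it down by the field offset */
	return FIELD_GET(EXAMPLE_NUMINT_MASK, control);
}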
854 * Initialize mpic->parent_irq before calling any other functions, since in mpic_of_init()
855 * it is used to distinguish between IPI and non-IPI platforms. in mpic_of_init()
857 mpic->parent_irq = irq_of_parse_and_map(node, 0); in mpic_of_init()
860 * On non-IPI platforms the driver currently supports only the per-CPU in mpic_of_init()
866 mpic->domain = irq_domain_create_linear(of_fwnode_handle(node), nr_irqs, &mpic_irq_ops, mpic); in mpic_of_init()
867 if (!mpic->domain) { in mpic_of_init()
869 return -ENOMEM; in mpic_of_init()
872 irq_domain_update_bus_token(mpic->domain, DOMAIN_BUS_WIRED); in mpic_of_init()
885 irq_set_default_domain(mpic->domain); in mpic_of_init()
904 irq_set_chained_handler_and_data(mpic->parent_irq, in mpic_of_init()