Lines Matching +full:msi +full:- +full:map +full:- +full:mask

1 // SPDX-License-Identifier: GPL-2.0-only
8 * Gregory CLEMENT <gregory.clement@free-electrons.com>
9 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
31 #include <linux/msi.h>
45 * +---------------+ +---------------+
47 * | per-CPU | | per-CPU |
48 * | mask/unmask | | mask/unmask |
51 * +---------------+ +---------------+
56 * +-------------------+
59 * | mask/unmask |
61 * +-------------------+
67 * The "global interrupt mask/unmask" is modified using the
69 * registers, which are relative to "mpic->base".
71 * The "per-CPU mask/unmask" is modified using the MPIC_INT_SET_MASK
73 * "mpic->per_cpu". This base address points to a special address,
76 * The per-CPU mask/unmask can also be adjusted using the global
77 * per-interrupt MPIC_INT_SOURCE_CTL register, which we use to
80 * Due to this model, all interrupts need to be masked/unmasked at two
81 * different levels: at the global level and at the per-CPU level.
85 * - For global interrupts:
87 * At ->map() time, a global interrupt is unmasked at the per-CPU
88 * mask/unmask level. It is therefore unmasked at this level for
89 * the current CPU, running the ->map() code. This leaves
90 * the interrupt unmasked at this level in non-SMP
91 * configurations. In SMP configurations, the ->set_affinity()
93 * readjusts the per-CPU mask/unmask for the interrupt.
95 * The ->mask() and ->unmask() operations only mask/unmask the
98 * So, a global interrupt is enabled at the per-CPU level as soon
102 * - For per-CPU interrupts
104 * At ->map() time, a per-CPU interrupt is unmasked at the global
105 * mask/unmask level.
107 * The ->mask() and ->unmask() operations mask/unmask the interrupt
108 * at the per-CPU level.
110 * So, a per-CPU interrupt is enabled at the global level as soon
112 * at the per-CPU level.
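
Condensed into code, the model this comment describes looks like the sketch below (not a verbatim excerpt; it mirrors the two writes visible in mpic_irq_map() further down):

    /* Sketch: bring up a global interrupt under the two-level model. */
    static void example_map_global_irq(struct mpic *mpic, irq_hw_number_t hwirq)
    {
    	/* Per-CPU level: unmasked once, on the CPU running ->map() */
    	writel(hwirq, mpic->per_cpu + MPIC_INT_CLEAR_MASK);
    	/* Global level: toggled by ->mask()/->unmask() afterwards */
    	writel(hwirq, mpic->base + MPIC_INT_SET_ENABLE);
    }
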
115 /* Registers relative to mpic->base */
125 /* Registers relative to mpic->per_cpu */
138 /* IPI and MSI interrupt definitions for IPI platforms */
145 /* MSI interrupt definitions for non-IPI platforms */
153 * struct mpic - MPIC private data structure
155 * @per_cpu: per-CPU registers base address
156 * @parent_irq: parent IRQ if the MPIC is not the top-level interrupt controller
159 * @msi_domain: MSI domain
160 * @msi_inner_domain: MSI inner domain
161 * @msi_used: bitmap of used MSI numbers
163 * @msi_doorbell_addr: physical address of MSI doorbell register
164 * @msi_doorbell_mask: mask of available doorbell bits for MSIs (either PCI_MSI_DOORBELL_MASK or
168 * @doorbell_mask: doorbell mask of MSIs and IPIs, stored on suspend, restored on resume
197 * interrupt controller (e.g. GIC) that takes care of inter-processor in mpic_is_ipi_available()
200 return mpic->parent_irq <= 0; in mpic_is_ipi_available()
210 * For shared global interrupts, mask/unmask the global enable bit
211 * For CPU interrupts, mask/unmask the calling CPU's bit
219 writel(hwirq, mpic->base + MPIC_INT_CLEAR_ENABLE); in mpic_irq_mask()
221 writel(hwirq, mpic->per_cpu + MPIC_INT_SET_MASK); in mpic_irq_mask()
230 writel(hwirq, mpic->base + MPIC_INT_SET_ENABLE); in mpic_irq_unmask()
232 writel(hwirq, mpic->per_cpu + MPIC_INT_CLEAR_MASK); in mpic_irq_unmask()
238 .name = "MPIC MSI",
254 msg->address_lo = lower_32_bits(mpic->msi_doorbell_addr); in mpic_compose_msi_msg()
255 msg->address_hi = upper_32_bits(mpic->msi_doorbell_addr); in mpic_compose_msi_msg()
256 msg->data = BIT(cpu + 8) | (d->hwirq + mpic->msi_doorbell_start); in mpic_compose_msi_msg()
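
A worked example of the encoding above, assuming msi_doorbell_start == 16 (the PCI_MSI_DOORBELL_START case selected further down):

    /*
     * cpu = 1, hwirq = 3 (illustrative values):
     *   msg->data = BIT(1 + 8) | (3 + 16)
     *             = 0x200 | 0x13 = 0x213
     * Written to the doorbell register, the low bits select interrupt 19
     * and bit 9 targets CPU 1, the same layout mpic_ipi_send_mask()
     * produces with (map << 8) | hwirq.
     */
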
259 static int mpic_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force) in mpic_msi_set_affinity() argument
264 cpu = cpumask_any_and(mask, cpu_online_mask); in mpic_msi_set_affinity()
266 cpu = cpumask_first(mask); in mpic_msi_set_affinity()
269 return -EINVAL; in mpic_msi_set_affinity()
277 .name = "MPIC MSI",
285 struct mpic *mpic = domain->host_data; in mpic_msi_alloc()
288 mutex_lock(&mpic->msi_lock); in mpic_msi_alloc()
289 hwirq = bitmap_find_free_region(mpic->msi_used, mpic->msi_doorbell_size, in mpic_msi_alloc()
291 mutex_unlock(&mpic->msi_lock); in mpic_msi_alloc()
294 return -ENOSPC; in mpic_msi_alloc()
299 domain->host_data, handle_simple_irq, in mpic_msi_alloc()
309 struct mpic *mpic = domain->host_data; in mpic_msi_free()
311 mutex_lock(&mpic->msi_lock); in mpic_msi_free()
312 bitmap_release_region(mpic->msi_used, d->hwirq, order_base_2(nr_irqs)); in mpic_msi_free()
313 mutex_unlock(&mpic->msi_lock); in mpic_msi_free()
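
The alloc/free pair above relies on the kernel's bitmap-region helpers; a standalone sketch of the same pattern (hypothetical names, outside any driver):

    #include <linux/bitmap.h>
    #include <linux/log2.h>

    static DECLARE_BITMAP(example_used, 32);

    /* Reserve a naturally aligned power-of-two block, as multi-MSI requires. */
    static int example_alloc(unsigned int nr_irqs)
    {
    	return bitmap_find_free_region(example_used, 32, order_base_2(nr_irqs));
    }

    static void example_free(unsigned int first, unsigned int nr_irqs)
    {
    	bitmap_release_region(example_used, first, order_base_2(nr_irqs));
    }
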
325 /* Enable the MSI doorbell mask and the combined CPU-local interrupt */ in mpic_msi_reenable_percpu()
326 reg = readl(mpic->per_cpu + MPIC_IN_DRBEL_MASK); in mpic_msi_reenable_percpu()
327 reg |= mpic->msi_doorbell_mask; in mpic_msi_reenable_percpu()
328 writel(reg, mpic->per_cpu + MPIC_IN_DRBEL_MASK); in mpic_msi_reenable_percpu()
331 writel(1, mpic->per_cpu + MPIC_INT_CLEAR_MASK); in mpic_msi_reenable_percpu()
337 mpic->msi_doorbell_addr = main_int_phys_base + MPIC_SW_TRIG_INT; in mpic_msi_init()
339 mutex_init(&mpic->msi_lock); in mpic_msi_init()
342 mpic->msi_doorbell_start = PCI_MSI_DOORBELL_START; in mpic_msi_init()
343 mpic->msi_doorbell_size = PCI_MSI_DOORBELL_NR; in mpic_msi_init()
344 mpic->msi_doorbell_mask = PCI_MSI_DOORBELL_MASK; in mpic_msi_init()
346 mpic->msi_doorbell_start = PCI_MSI_FULL_DOORBELL_START; in mpic_msi_init()
347 mpic->msi_doorbell_size = PCI_MSI_FULL_DOORBELL_NR; in mpic_msi_init()
348 mpic->msi_doorbell_mask = PCI_MSI_FULL_DOORBELL_MASK; in mpic_msi_init()
351 mpic->msi_inner_domain = irq_domain_add_linear(NULL, mpic->msi_doorbell_size, in mpic_msi_init()
353 if (!mpic->msi_inner_domain) in mpic_msi_init()
354 return -ENOMEM; in mpic_msi_init()
356 mpic->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node), &mpic_msi_domain_info, in mpic_msi_init()
357 mpic->msi_inner_domain); in mpic_msi_init()
358 if (!mpic->msi_domain) { in mpic_msi_init()
359 irq_domain_remove(mpic->msi_inner_domain); in mpic_msi_init()
360 return -ENOMEM; in mpic_msi_init()
365 /* Unmask low 16 MSI irqs on non-IPI platforms */ in mpic_msi_init()
367 writel(0, mpic->per_cpu + MPIC_INT_CLEAR_MASK); in mpic_msi_init()
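
The two allocations above form the usual two-level MSI stack; reconstructed from the calls shown (a sketch, not part of the file):

    PCI endpoint
        |  MSI write to mpic->msi_doorbell_addr
        v
    mpic->msi_domain         (pci_msi_create_irq_domain)
        |
        v
    mpic->msi_inner_domain   (linear, msi_doorbell_size entries)
        |
        v
    mpic_handle_msi_irq()    demultiplexes MPIC_IN_DRBEL_CAUSE
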
389 if (!of_machine_is_compatible("marvell,armada-370-xp")) in mpic_perf_init()
395 writel(MPIC_INT_CAUSE_PERF(cpuid), mpic->per_cpu + MPIC_INT_FABRIC_MASK); in mpic_perf_init()
404 reg = readl(mpic->per_cpu + MPIC_IN_DRBEL_MASK); in mpic_ipi_mask()
405 reg &= ~BIT(d->hwirq); in mpic_ipi_mask()
406 writel(reg, mpic->per_cpu + MPIC_IN_DRBEL_MASK); in mpic_ipi_mask()
414 reg = readl(mpic->per_cpu + MPIC_IN_DRBEL_MASK); in mpic_ipi_unmask()
415 reg |= BIT(d->hwirq); in mpic_ipi_unmask()
416 writel(reg, mpic->per_cpu + MPIC_IN_DRBEL_MASK); in mpic_ipi_unmask()
419 static void mpic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask) in mpic_ipi_send_mask() argument
423 u32 map = 0; in mpic_ipi_send_mask() local
425 /* Convert our logical CPU mask into a physical one. */ in mpic_ipi_send_mask()
426 for_each_cpu(cpu, mask) in mpic_ipi_send_mask()
427 map |= BIT(cpu_logical_map(cpu)); in mpic_ipi_send_mask()
436 writel((map << 8) | d->hwirq, mpic->base + MPIC_SW_TRIG_INT); in mpic_ipi_send_mask()
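
A worked example of the trigger value, assuming an identity cpu_logical_map():

    /*
     * mask = {CPU0, CPU2}, d->hwirq = 2:
     *   map = BIT(0) | BIT(2) = 0x5
     *   writel((0x5 << 8) | 2, ...) writes 0x502,
     *   raising doorbell 2 on CPUs 0 and 2.
     */
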
443 writel(~BIT(d->hwirq), mpic->per_cpu + MPIC_IN_DRBEL_CAUSE); in mpic_ipi_ack()
459 irq_domain_set_info(d, virq + i, i, &mpic_ipi_irqchip, d->host_data, in mpic_ipi_alloc()
480 unsigned int virq = irq_find_mapping(mpic->ipi_domain, i); in mpic_ipi_resume()
486 d = irq_domain_get_irq_data(mpic->ipi_domain, virq); in mpic_ipi_resume()
495 mpic->ipi_domain = irq_domain_create_linear(of_node_to_fwnode(node), IPI_DOORBELL_NR, in mpic_ipi_init()
497 if (WARN_ON(!mpic->ipi_domain)) in mpic_ipi_init()
498 return -ENOMEM; in mpic_ipi_init()
500 irq_domain_update_bus_token(mpic->ipi_domain, DOMAIN_BUS_IPI); in mpic_ipi_init()
501 base_ipi = irq_domain_alloc_irqs(mpic->ipi_domain, IPI_DOORBELL_NR, NUMA_NO_NODE, NULL); in mpic_ipi_init()
503 return -ENOMEM; in mpic_ipi_init()
516 /* Select a single core from the affinity mask which is online */ in mpic_set_affinity()
519 atomic_io_modify(mpic->base + MPIC_INT_SOURCE_CTL(hwirq), in mpic_set_affinity()
529 for (irq_hw_number_t i = 0; i < mpic->domain->hwirq_max; i++) in mpic_smp_cpu_init()
530 writel(i, mpic->per_cpu + MPIC_INT_SET_MASK); in mpic_smp_cpu_init()
536 writel(0, mpic->per_cpu + MPIC_IN_DRBEL_MASK); in mpic_smp_cpu_init()
539 writel(0, mpic->per_cpu + MPIC_IN_DRBEL_CAUSE); in mpic_smp_cpu_init()
542 writel(0, mpic->per_cpu + MPIC_INT_CLEAR_MASK); in mpic_smp_cpu_init()
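
Note the two register idioms in play here: MPIC_INT_SET_MASK/MPIC_INT_CLEAR_MASK take an interrupt *number*, while MPIC_IN_DRBEL_MASK is a *bitmask* (compare the read-modify-write in mpic_ipi_mask() above). An annotated sketch of the bring-up order, same calls as the matching lines:

    for (irq_hw_number_t i = 0; i < mpic->domain->hwirq_max; i++)
    	writel(i, mpic->per_cpu + MPIC_INT_SET_MASK);	/* number-based: mask source i */

    writel(0, mpic->per_cpu + MPIC_IN_DRBEL_MASK);	/* bitmask-based: disable all doorbells */
    writel(0, mpic->per_cpu + MPIC_IN_DRBEL_CAUSE);	/* clear stale doorbell causes */
    writel(0, mpic->per_cpu + MPIC_INT_CLEAR_MASK);	/* number-based: unmask source 0 (doorbell summary) */
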
547 /* Re-enable per-CPU interrupts that were enabled before suspend */ in mpic_reenable_percpu()
549 unsigned int virq = irq_linear_revmap(mpic->domain, i); in mpic_reenable_percpu()
567 struct mpic *mpic = irq_get_default_host()->host_data; in mpic_starting_cpu()
582 enable_percpu_irq(mpic->parent_irq, IRQ_TYPE_NONE); in mpic_cascaded_starting_cpu()
604 struct mpic *mpic = domain->host_data; in mpic_irq_map()
608 return -EINVAL; in mpic_irq_map()
614 writel(hwirq, mpic->per_cpu + MPIC_INT_CLEAR_MASK); in mpic_irq_map()
616 writel(hwirq, mpic->base + MPIC_INT_SET_ENABLE); in mpic_irq_map()
631 .map = mpic_irq_map,
641 cause = readl_relaxed(mpic->per_cpu + MPIC_IN_DRBEL_CAUSE); in mpic_handle_msi_irq()
642 cause &= mpic->msi_doorbell_mask; in mpic_handle_msi_irq()
643 writel(~cause, mpic->per_cpu + MPIC_IN_DRBEL_CAUSE); in mpic_handle_msi_irq()
646 generic_handle_domain_irq(mpic->msi_inner_domain, i - mpic->msi_doorbell_start); in mpic_handle_msi_irq()
658 cause = readl_relaxed(mpic->per_cpu + MPIC_IN_DRBEL_CAUSE); in mpic_handle_ipi_irq()
662 generic_handle_domain_irq(mpic->ipi_domain, i); in mpic_handle_ipi_irq()
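
Both handlers above share the same read/ack/walk shape, but the matched lines elide the bit iteration. A minimal sketch of the MSI variant, assuming the usual for_each_set_bit() walk (not a verbatim excerpt):

    unsigned long cause;
    unsigned int i;

    cause = readl_relaxed(mpic->per_cpu + MPIC_IN_DRBEL_CAUSE);
    cause &= mpic->msi_doorbell_mask;
    writel(~cause, mpic->per_cpu + MPIC_IN_DRBEL_CAUSE);	/* ack only the handled bits */

    for_each_set_bit(i, &cause, BITS_PER_LONG)
    	generic_handle_domain_irq(mpic->msi_inner_domain,
    				  i - mpic->msi_doorbell_start);
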
678 cause = readl_relaxed(mpic->per_cpu + MPIC_PPI_CAUSE); in mpic_handle_cascade_irq()
682 irqsrc = readl_relaxed(mpic->base + MPIC_INT_SOURCE_CTL(i)); in mpic_handle_cascade_irq()
685 * Test IRQ (0-1) and FIQ (8-9) mask bits. in mpic_handle_cascade_irq()
695 generic_handle_domain_irq(mpic->domain, i); in mpic_handle_cascade_irq()
703 struct mpic *mpic = irq_get_default_host()->host_data; in mpic_handle_irq()
708 irqstat = readl_relaxed(mpic->per_cpu + MPIC_CPU_INTACK); in mpic_handle_irq()
715 generic_handle_domain_irq(mpic->domain, i); in mpic_handle_irq()
717 /* MSI handling */ in mpic_handle_irq()
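
The top-level handler's loop structure is likewise elided by the match; a sketch of the acknowledge loop, assuming the conventional spurious IID of 1023 and a hypothetical MPIC_CPU_INTACK_IID_MASK field mask:

    do {
    	u32 irqstat = readl_relaxed(mpic->per_cpu + MPIC_CPU_INTACK);
    	unsigned long i = FIELD_GET(MPIC_CPU_INTACK_IID_MASK, irqstat);

    	if (i > 1022)	/* 1023: spurious, nothing left pending */
    		break;

    	if (i > 1)
    		generic_handle_domain_irq(mpic->domain, i);
    	/* i == 1: MSI doorbell summary; i == 0: IPI doorbell summary */
    } while (1);
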
731 mpic->doorbell_mask = readl(mpic->per_cpu + MPIC_IN_DRBEL_MASK); in mpic_suspend()
741 /* Re-enable interrupts */ in mpic_resume()
742 for (irq_hw_number_t i = 0; i < mpic->domain->hwirq_max; i++) { in mpic_resume()
743 unsigned int virq = irq_linear_revmap(mpic->domain, i); in mpic_resume()
752 /* Non per-CPU interrupts */ in mpic_resume()
753 writel(i, mpic->per_cpu + MPIC_INT_CLEAR_MASK); in mpic_resume()
757 /* Per-CPU interrupts */ in mpic_resume()
758 writel(i, mpic->base + MPIC_INT_SET_ENABLE); in mpic_resume()
761 * Re-enable on the current CPU, mpic_reenable_percpu() in mpic_resume()
770 writel(mpic->doorbell_mask, mpic->per_cpu + MPIC_IN_DRBEL_MASK); in mpic_resume()
773 src0 = mpic->doorbell_mask & IPI_DOORBELL_MASK; in mpic_resume()
774 src1 = mpic->doorbell_mask & PCI_MSI_DOORBELL_MASK; in mpic_resume()
776 src0 = mpic->doorbell_mask & PCI_MSI_FULL_DOORBELL_SRC0_MASK; in mpic_resume()
777 src1 = mpic->doorbell_mask & PCI_MSI_FULL_DOORBELL_SRC1_MASK; in mpic_resume()
781 writel(0, mpic->per_cpu + MPIC_INT_CLEAR_MASK); in mpic_resume()
783 writel(1, mpic->per_cpu + MPIC_INT_CLEAR_MASK); in mpic_resume()
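
A suspend/resume pair like this is normally registered through syscore operations rather than a struct device; a minimal sketch of that wiring (standard syscore_ops API; the registration point is an assumption):

    #include <linux/syscore_ops.h>

    static struct syscore_ops mpic_syscore_ops = {
    	.suspend	= mpic_suspend,
    	.resume		= mpic_resume,
    };

    /* from the init path, once the state above is set up */
    register_syscore_ops(&mpic_syscore_ops);
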
804 if (WARN_ON(!request_mem_region(res.start, resource_size(&res), np->full_name))) { in mpic_map_region()
805 err = -EBUSY; in mpic_map_region()
811 err = -ENOMEM; in mpic_map_region()
821 pr_err("%pOF: Unable to map resource %d: %pE\n", np, index, ERR_PTR(err)); in mpic_map_region()
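
mpic_map_region() follows the standard reserve-then-map sequence; a condensed sketch of the pattern with its unwind (hypothetical helper name, standard OF/MMIO APIs):

    static int example_map_region(struct device_node *np, int index,
    			      void __iomem **base)
    {
    	struct resource res;
    	int err;

    	err = of_address_to_resource(np, index, &res);
    	if (err)
    		goto fail;

    	if (!request_mem_region(res.start, resource_size(&res), np->full_name)) {
    		err = -EBUSY;
    		goto fail;
    	}

    	*base = ioremap(res.start, resource_size(&res));
    	if (!*base) {
    		release_mem_region(res.start, resource_size(&res));
    		err = -ENOMEM;
    		goto fail;
    	}

    	return 0;
    fail:
    	pr_err("%pOF: Unable to map resource %d: %pE\n", np, index, ERR_PTR(err));
    	return err;
    }
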
834 return -ENOMEM; in mpic_of_init()
838 err = mpic_map_region(node, 0, &mpic->base, &phys_base); in mpic_of_init()
842 err = mpic_map_region(node, 1, &mpic->per_cpu, NULL); in mpic_of_init()
846 nr_irqs = FIELD_GET(MPIC_INT_CONTROL_NUMINT_MASK, readl(mpic->base + MPIC_INT_CONTROL)); in mpic_of_init()
849 writel(i, mpic->base + MPIC_INT_CLEAR_ENABLE); in mpic_of_init()
852 * Initialize mpic->parent_irq before calling any other functions, since in mpic_of_init()
853 * it is used to distinguish between IPI and non-IPI platforms. in mpic_of_init()
855 mpic->parent_irq = irq_of_parse_and_map(node, 0); in mpic_of_init()
858 * On non-IPI platforms the driver currently supports only the per-CPU in mpic_of_init()
864 mpic->domain = irq_domain_add_linear(node, nr_irqs, &mpic_irq_ops, mpic); in mpic_of_init()
865 if (!mpic->domain) { in mpic_of_init()
867 return -ENOMEM; in mpic_of_init()
870 irq_domain_update_bus_token(mpic->domain, DOMAIN_BUS_WIRED); in mpic_of_init()
878 pr_err("%pOF: Unable to initialize MSI domain\n", node); in mpic_of_init()
883 irq_set_default_host(mpic->domain); in mpic_of_init()
902 irq_set_chained_handler_and_data(mpic->parent_irq, in mpic_of_init()