Lines matching identifier "mpic" (full-word matches)

70  * registers, which are relative to "mpic->base".
74 * "mpic->per_cpu". This base address points to a special address,
116 /* Registers relative to mpic->base */
126 /* Registers relative to mpic->per_cpu */
154 * struct mpic - MPIC private data structure
155 * @base: MPIC registers base address
157 * @parent_irq: parent IRQ if MPIC is not top-level interrupt controller
158 * @domain: MPIC main interrupt domain
170 struct mpic { struct
189 static struct mpic *mpic_data __ro_after_init; argument
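
For orientation, the fields referenced throughout these hits suggest roughly the following private data layout. This is a reconstruction from the matches above and below, not the driver's actual declaration; field types, ordering, and the bitmap size are assumptions.

struct mpic {
        void __iomem *base;                     /* main register window */
        void __iomem *per_cpu;                  /* per-CPU register window */
        int parent_irq;                         /* > 0 when the MPIC is cascaded */
        struct irq_domain *domain;              /* main wired-interrupt domain */
        struct irq_domain *ipi_domain;          /* IPI doorbell domain */
        struct irq_domain *msi_inner_domain;    /* MSI doorbell domain */
        struct mutex msi_lock;                  /* protects msi_used */
        DECLARE_BITMAP(msi_used, PCI_MSI_FULL_DOORBELL_NR);
        phys_addr_t msi_doorbell_addr;
        unsigned int msi_doorbell_start;
        unsigned int msi_doorbell_size;
        u32 msi_doorbell_mask;
        u32 doorbell_mask;                      /* saved across suspend/resume */
};

The single instance is stored in mpic_data (line 189) so that suspend/resume and the cascaded CPU-hotplug callback can reach it without an irq_data handle.
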
191 static inline bool mpic_is_ipi_available(struct mpic *mpic) in mpic_is_ipi_available() argument
199 return mpic->parent_irq <= 0; in mpic_is_ipi_available()
214 struct mpic *mpic = irq_data_get_irq_chip_data(d); in mpic_irq_mask() local
218 writel(hwirq, mpic->base + MPIC_INT_CLEAR_ENABLE); in mpic_irq_mask()
220 writel(hwirq, mpic->per_cpu + MPIC_INT_SET_MASK); in mpic_irq_mask()
225 struct mpic *mpic = irq_data_get_irq_chip_data(d); in mpic_irq_unmask() local
229 writel(hwirq, mpic->base + MPIC_INT_SET_ENABLE); in mpic_irq_unmask()
231 writel(hwirq, mpic->per_cpu + MPIC_INT_CLEAR_MASK); in mpic_irq_unmask()
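
The mask/unmask hits show two register windows in play: global sources are gated through the enable registers in the main window, while per-CPU sources are masked in the per-CPU window. A minimal sketch of the mask path, assuming a hypothetical is_percpu_source() predicate for the distinction the driver makes:

static void mpic_irq_mask_sketch(struct irq_data *d)
{
        struct mpic *mpic = irq_data_get_irq_chip_data(d);
        irq_hw_number_t hwirq = irqd_to_hwirq(d);

        if (!is_percpu_source(hwirq))           /* hypothetical predicate */
                writel(hwirq, mpic->base + MPIC_INT_CLEAR_ENABLE);
        else
                writel(hwirq, mpic->per_cpu + MPIC_INT_SET_MASK);
}

Unmask is symmetric: MPIC_INT_SET_ENABLE in the main window or MPIC_INT_CLEAR_MASK in the per-CPU window (lines 229 and 231).
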
239 struct mpic *mpic = irq_data_get_irq_chip_data(d); in mpic_compose_msi_msg() local
241 msg->address_lo = lower_32_bits(mpic->msi_doorbell_addr); in mpic_compose_msi_msg()
242 msg->address_hi = upper_32_bits(mpic->msi_doorbell_addr); in mpic_compose_msi_msg()
243 msg->data = BIT(cpu + 8) | (d->hwirq + mpic->msi_doorbell_start); in mpic_compose_msi_msg()
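
Lines 241-243 give the MSI message almost in full: the doorbell register's physical address is the target, and the payload encodes both the destination CPU (bits 8 and up) and the doorbell number. A sketch, assuming the destination CPU is taken from the effective affinity mask:

static void mpic_compose_msi_msg_sketch(struct irq_data *d, struct msi_msg *msg)
{
        struct mpic *mpic = irq_data_get_irq_chip_data(d);
        unsigned int cpu = cpumask_first(irq_data_get_effective_affinity_mask(d));

        msg->address_lo = lower_32_bits(mpic->msi_doorbell_addr);
        msg->address_hi = upper_32_bits(mpic->msi_doorbell_addr);
        msg->data = BIT(cpu + 8) | (d->hwirq + mpic->msi_doorbell_start);
}

msi_doorbell_addr itself is set up in mpic_msi_init() from the controller's physical base plus MPIC_SW_TRIG_INT (line 340), so an MSI write is effectively a software-triggered doorbell interrupt.
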
264 .name = "MPIC MSI",
272 struct mpic *mpic = domain->host_data; in mpic_msi_alloc() local
275 mutex_lock(&mpic->msi_lock); in mpic_msi_alloc()
276 hwirq = bitmap_find_free_region(mpic->msi_used, mpic->msi_doorbell_size, in mpic_msi_alloc()
278 mutex_unlock(&mpic->msi_lock); in mpic_msi_alloc()
296 struct mpic *mpic = domain->host_data; in mpic_msi_free() local
298 mutex_lock(&mpic->msi_lock); in mpic_msi_free()
299 bitmap_release_region(mpic->msi_used, d->hwirq, order_base_2(nr_irqs)); in mpic_msi_free()
300 mutex_unlock(&mpic->msi_lock); in mpic_msi_free()
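
The alloc/free pair manages doorbell numbers with a bitmap under msi_lock; allocations are power-of-two regions so multi-MSI blocks stay naturally aligned. A condensed sketch of the two operations (helper names here are hypothetical):

static int msi_doorbell_alloc_sketch(struct mpic *mpic, unsigned int nr_irqs)
{
        int hwirq;

        mutex_lock(&mpic->msi_lock);
        hwirq = bitmap_find_free_region(mpic->msi_used, mpic->msi_doorbell_size,
                                        order_base_2(nr_irqs));
        mutex_unlock(&mpic->msi_lock);

        return hwirq;                           /* negative if the region is full */
}

static void msi_doorbell_free_sketch(struct mpic *mpic, irq_hw_number_t hwirq,
                                     unsigned int nr_irqs)
{
        mutex_lock(&mpic->msi_lock);
        bitmap_release_region(mpic->msi_used, hwirq, order_base_2(nr_irqs));
        mutex_unlock(&mpic->msi_lock);
}
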
309 static void mpic_msi_reenable_percpu(struct mpic *mpic) in mpic_msi_reenable_percpu() argument
314 reg = readl(mpic->per_cpu + MPIC_IN_DRBEL_MASK); in mpic_msi_reenable_percpu()
315 reg |= mpic->msi_doorbell_mask; in mpic_msi_reenable_percpu()
316 writel(reg, mpic->per_cpu + MPIC_IN_DRBEL_MASK); in mpic_msi_reenable_percpu()
319 writel(1, mpic->per_cpu + MPIC_INT_CLEAR_MASK); in mpic_msi_reenable_percpu()
333 .prefix = "MPIC-",
337 static int __init mpic_msi_init(struct mpic *mpic, struct device_node *node, in mpic_msi_init() argument
340 mpic->msi_doorbell_addr = main_int_phys_base + MPIC_SW_TRIG_INT; in mpic_msi_init()
342 mutex_init(&mpic->msi_lock); in mpic_msi_init()
344 if (mpic_is_ipi_available(mpic)) { in mpic_msi_init()
345 mpic->msi_doorbell_start = PCI_MSI_DOORBELL_START; in mpic_msi_init()
346 mpic->msi_doorbell_size = PCI_MSI_DOORBELL_NR; in mpic_msi_init()
347 mpic->msi_doorbell_mask = PCI_MSI_DOORBELL_MASK; in mpic_msi_init()
349 mpic->msi_doorbell_start = PCI_MSI_FULL_DOORBELL_START; in mpic_msi_init()
350 mpic->msi_doorbell_size = PCI_MSI_FULL_DOORBELL_NR; in mpic_msi_init()
351 mpic->msi_doorbell_mask = PCI_MSI_FULL_DOORBELL_MASK; in mpic_msi_init()
357 .host_data = mpic, in mpic_msi_init()
358 .size = mpic->msi_doorbell_size, in mpic_msi_init()
361 mpic->msi_inner_domain = msi_create_parent_irq_domain(&info, &mpic_msi_parent_ops); in mpic_msi_init()
362 if (!mpic->msi_inner_domain) in mpic_msi_init()
365 mpic_msi_reenable_percpu(mpic); in mpic_msi_init()
368 if (!mpic_is_ipi_available(mpic)) in mpic_msi_init()
369 writel(0, mpic->per_cpu + MPIC_INT_CLEAR_MASK); in mpic_msi_init()
374 static __maybe_unused void mpic_msi_reenable_percpu(struct mpic *mpic) {} in mpic_msi_reenable_percpu() argument
376 static inline int mpic_msi_init(struct mpic *mpic, struct device_node *node, in mpic_msi_init() argument
383 static void mpic_perf_init(struct mpic *mpic) in mpic_perf_init() argument
397 writel(MPIC_INT_CAUSE_PERF(cpuid), mpic->per_cpu + MPIC_INT_FABRIC_MASK); in mpic_perf_init()
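
mpic_perf_init() has a single hit here (line 397): it unmasks the performance-counter overflow cause bit for the current CPU in the fabric mask register. A hedged sketch; the use of the physical CPU id is an assumption about context not shown in the hits:

static void mpic_perf_init_sketch(struct mpic *mpic)
{
        u32 cpuid = cpu_logical_map(smp_processor_id());

        /* Enable the per-CPU performance counter overflow interrupt */
        writel(MPIC_INT_CAUSE_PERF(cpuid), mpic->per_cpu + MPIC_INT_FABRIC_MASK);
}
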
403 struct mpic *mpic = irq_data_get_irq_chip_data(d); in mpic_ipi_mask() local
406 reg = readl(mpic->per_cpu + MPIC_IN_DRBEL_MASK); in mpic_ipi_mask()
408 writel(reg, mpic->per_cpu + MPIC_IN_DRBEL_MASK); in mpic_ipi_mask()
413 struct mpic *mpic = irq_data_get_irq_chip_data(d); in mpic_ipi_unmask() local
416 reg = readl(mpic->per_cpu + MPIC_IN_DRBEL_MASK); in mpic_ipi_unmask()
418 writel(reg, mpic->per_cpu + MPIC_IN_DRBEL_MASK); in mpic_ipi_unmask()
423 struct mpic *mpic = irq_data_get_irq_chip_data(d); in mpic_ipi_send_mask() local
438 writel((map << 8) | d->hwirq, mpic->base + MPIC_SW_TRIG_INT); in mpic_ipi_send_mask()
443 struct mpic *mpic = irq_data_get_irq_chip_data(d); in mpic_ipi_ack() local
445 writel(~BIT(d->hwirq), mpic->per_cpu + MPIC_IN_DRBEL_CAUSE); in mpic_ipi_ack()
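
IPIs ride on the same in-doorbell machinery: mask/unmask toggle bits in MPIC_IN_DRBEL_MASK, ack clears the cause bit (line 445 writes the complement so only that bit is cleared), and sending an IPI writes a target-CPU bitmap plus the doorbell number into the software-trigger register (line 438). A sketch of the send path; the logical-to-physical CPU translation and the barrier are assumptions:

static void mpic_ipi_send_mask_sketch(struct irq_data *d, const struct cpumask *mask)
{
        struct mpic *mpic = irq_data_get_irq_chip_data(d);
        unsigned int cpu;
        u32 map = 0;

        /* Convert the logical CPU mask into a physical-CPU bitmap */
        for_each_cpu(cpu, mask)
                map |= BIT(cpu_logical_map(cpu));

        /* Make prior stores visible to the targets before raising the doorbell */
        dsb();

        writel((map << 8) | d->hwirq, mpic->base + MPIC_SW_TRIG_INT);
}
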
479 static void mpic_ipi_resume(struct mpic *mpic) in mpic_ipi_resume() argument
482 unsigned int virq = irq_find_mapping(mpic->ipi_domain, i); in mpic_ipi_resume()
488 d = irq_domain_get_irq_data(mpic->ipi_domain, virq); in mpic_ipi_resume()
493 static int __init mpic_ipi_init(struct mpic *mpic, struct device_node *node) in mpic_ipi_init() argument
497 mpic->ipi_domain = irq_domain_create_linear(of_fwnode_handle(node), IPI_DOORBELL_NR, in mpic_ipi_init()
498 &mpic_ipi_domain_ops, mpic); in mpic_ipi_init()
499 if (WARN_ON(!mpic->ipi_domain)) in mpic_ipi_init()
502 irq_domain_update_bus_token(mpic->ipi_domain, DOMAIN_BUS_IPI); in mpic_ipi_init()
503 base_ipi = irq_domain_alloc_irqs(mpic->ipi_domain, IPI_DOORBELL_NR, NUMA_NO_NODE, NULL); in mpic_ipi_init()
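
mpic_ipi_init() creates a dedicated linear domain for the IPI_DOORBELL_NR doorbells, tags it with DOMAIN_BUS_IPI, and allocates the virtual IRQs up front (lines 497-503). What the hits do not show is the hand-off to the architecture SMP code; on 32-bit ARM that is normally done with set_smp_ipi_range(). A sketch of that tail under this assumption:

static int __init mpic_ipi_init_tail_sketch(struct mpic *mpic)
{
        int base_ipi;

        base_ipi = irq_domain_alloc_irqs(mpic->ipi_domain, IPI_DOORBELL_NR,
                                         NUMA_NO_NODE, NULL);
        if (WARN_ON(base_ipi <= 0))
                return -ENOMEM;

        /* Hand the allocated virqs to the ARM SMP core for its inter-processor calls */
        set_smp_ipi_range(base_ipi, IPI_DOORBELL_NR);

        return 0;
}
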
514 struct mpic *mpic = irq_data_get_irq_chip_data(d); in mpic_set_affinity() local
521 atomic_io_modify(mpic->base + MPIC_INT_SOURCE_CTL(hwirq), in mpic_set_affinity()
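
For global sources, affinity is changed by rewriting the per-source routing bits in MPIC_INT_SOURCE_CTL with atomic_io_modify() (line 521), a locked read-modify-write on the MMIO word. A sketch of the whole callback; the CPU-routing field macro and the single-target selection are assumptions:

static int mpic_set_affinity_sketch(struct irq_data *d,
                                    const struct cpumask *mask_val, bool force)
{
        struct mpic *mpic = irq_data_get_irq_chip_data(d);
        irq_hw_number_t hwirq = irqd_to_hwirq(d);
        unsigned int cpu;

        /* Pick one online CPU out of the requested mask */
        cpu = cpumask_any_and(mask_val, cpu_online_mask);
        if (cpu >= nr_cpu_ids)
                return -EINVAL;

        /* Route the source to that CPU only (hypothetical field mask) */
        atomic_io_modify(mpic->base + MPIC_INT_SOURCE_CTL(hwirq),
                         MPIC_INT_SOURCE_CPU_MASK, BIT(cpu_logical_map(cpu)));

        irq_data_update_effective_affinity(d, cpumask_of(cpu));

        return IRQ_SET_MASK_OK;
}
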
529 static void mpic_smp_cpu_init(struct mpic *mpic) in mpic_smp_cpu_init() argument
531 for (irq_hw_number_t i = 0; i < mpic->domain->hwirq_max; i++) in mpic_smp_cpu_init()
532 writel(i, mpic->per_cpu + MPIC_INT_SET_MASK); in mpic_smp_cpu_init()
534 if (!mpic_is_ipi_available(mpic)) in mpic_smp_cpu_init()
538 writel(0, mpic->per_cpu + MPIC_IN_DRBEL_MASK); in mpic_smp_cpu_init()
541 writel(0, mpic->per_cpu + MPIC_IN_DRBEL_CAUSE); in mpic_smp_cpu_init()
544 writel(0, mpic->per_cpu + MPIC_INT_CLEAR_MASK); in mpic_smp_cpu_init()
547 static void mpic_reenable_percpu(struct mpic *mpic) in mpic_reenable_percpu() argument
551 unsigned int virq = irq_find_mapping(mpic->domain, i); in mpic_reenable_percpu()
561 if (mpic_is_ipi_available(mpic)) in mpic_reenable_percpu()
562 mpic_ipi_resume(mpic); in mpic_reenable_percpu()
564 mpic_msi_reenable_percpu(mpic); in mpic_reenable_percpu()
569 struct mpic *mpic = irq_get_default_domain()->host_data; in mpic_starting_cpu() local
571 mpic_perf_init(mpic); in mpic_starting_cpu()
572 mpic_smp_cpu_init(mpic); in mpic_starting_cpu()
573 mpic_reenable_percpu(mpic); in mpic_starting_cpu()
580 struct mpic *mpic = mpic_data; in mpic_cascaded_starting_cpu() local
582 mpic_perf_init(mpic); in mpic_cascaded_starting_cpu()
583 mpic_reenable_percpu(mpic); in mpic_cascaded_starting_cpu()
584 enable_percpu_irq(mpic->parent_irq, IRQ_TYPE_NONE); in mpic_cascaded_starting_cpu()
589 static void mpic_smp_cpu_init(struct mpic *mpic) {} in mpic_smp_cpu_init() argument
590 static void mpic_ipi_resume(struct mpic *mpic) {} in mpic_ipi_resume() argument
594 .name = "MPIC",
606 struct mpic *mpic = domain->host_data; in mpic_irq_map() local
612 irq_set_chip_data(virq, mpic); in mpic_irq_map()
616 writel(hwirq, mpic->per_cpu + MPIC_INT_CLEAR_MASK); in mpic_irq_map()
618 writel(hwirq, mpic->base + MPIC_INT_SET_ENABLE); in mpic_irq_map()
638 static void mpic_handle_msi_irq(struct mpic *mpic) in mpic_handle_msi_irq() argument
643 cause = readl_relaxed(mpic->per_cpu + MPIC_IN_DRBEL_CAUSE); in mpic_handle_msi_irq()
644 cause &= mpic->msi_doorbell_mask; in mpic_handle_msi_irq()
645 writel(~cause, mpic->per_cpu + MPIC_IN_DRBEL_CAUSE); in mpic_handle_msi_irq()
648 generic_handle_domain_irq(mpic->msi_inner_domain, i - mpic->msi_doorbell_start); in mpic_handle_msi_irq()
651 static void mpic_handle_msi_irq(struct mpic *mpic) {} in mpic_handle_msi_irq() argument
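
The MSI doorbell handler reads the in-doorbell cause register, keeps only the MSI window via msi_doorbell_mask, acknowledges those bits (cause bits are apparently cleared by writing 0, hence the ~cause write), and demultiplexes each set bit into the inner MSI domain. Filling in the loop the hits imply:

static void mpic_handle_msi_irq_sketch(struct mpic *mpic)
{
        unsigned long cause;
        unsigned int i;

        cause = readl_relaxed(mpic->per_cpu + MPIC_IN_DRBEL_CAUSE);
        cause &= mpic->msi_doorbell_mask;
        writel(~cause, mpic->per_cpu + MPIC_IN_DRBEL_CAUSE);

        for_each_set_bit(i, &cause, BITS_PER_LONG)
                generic_handle_domain_irq(mpic->msi_inner_domain,
                                          i - mpic->msi_doorbell_start);
}

mpic_handle_ipi_irq() (lines 655-664) follows the same cause/demux pattern for the IPI portion of the doorbell range.
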
655 static void mpic_handle_ipi_irq(struct mpic *mpic) in mpic_handle_ipi_irq() argument
660 cause = readl_relaxed(mpic->per_cpu + MPIC_IN_DRBEL_CAUSE); in mpic_handle_ipi_irq()
664 generic_handle_domain_irq(mpic->ipi_domain, i); in mpic_handle_ipi_irq()
667 static inline void mpic_handle_ipi_irq(struct mpic *mpic) {} in mpic_handle_ipi_irq() argument
672 struct mpic *mpic = irq_desc_get_handler_data(desc); in mpic_handle_cascade_irq() local
680 cause = readl_relaxed(mpic->per_cpu + MPIC_PPI_CAUSE); in mpic_handle_cascade_irq()
684 irqsrc = readl_relaxed(mpic->base + MPIC_INT_SOURCE_CTL(i)); in mpic_handle_cascade_irq()
693 mpic_handle_msi_irq(mpic); in mpic_handle_cascade_irq()
697 generic_handle_domain_irq(mpic->domain, i); in mpic_handle_cascade_irq()
705 struct mpic *mpic = irq_get_default_domain()->host_data; in mpic_handle_irq() local
710 irqstat = readl_relaxed(mpic->per_cpu + MPIC_CPU_INTACK); in mpic_handle_irq()
717 generic_handle_domain_irq(mpic->domain, i); in mpic_handle_irq()
721 mpic_handle_msi_irq(mpic); in mpic_handle_irq()
725 mpic_handle_ipi_irq(mpic); in mpic_handle_irq()
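
When the MPIC is the top-level controller, mpic_handle_irq() spins on the CPU interrupt-acknowledge register and dispatches by interrupt ID: IDs above 1 are wired interrupts, ID 1 summarizes the MSI doorbells and ID 0 the IPIs (lines 717-725). A sketch of that loop; the field extraction, the spurious-ID cut-off, and the exact ID assignments are assumptions drawn from the dispatch calls above:

static void __exception_irq_entry mpic_handle_irq_sketch(struct pt_regs *regs)
{
        struct mpic *mpic = irq_get_default_domain()->host_data;
        irq_hw_number_t i;
        u32 irqstat;

        do {
                irqstat = readl_relaxed(mpic->per_cpu + MPIC_CPU_INTACK);
                i = irqstat & GENMASK(9, 0);    /* assumed 10-bit interrupt ID */

                if (i > 1022)                   /* assumed spurious-ID marker */
                        break;

                if (i > 1)
                        generic_handle_domain_irq(mpic->domain, i);
                else if (i == 1)
                        mpic_handle_msi_irq(mpic);      /* MSI doorbell summary */
                else
                        mpic_handle_ipi_irq(mpic);      /* IPI doorbell summary */
        } while (1);
}
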
731 struct mpic *mpic = mpic_data; in mpic_suspend() local
733 mpic->doorbell_mask = readl(mpic->per_cpu + MPIC_IN_DRBEL_MASK); in mpic_suspend()
740 struct mpic *mpic = mpic_data; in mpic_resume() local
744 for (irq_hw_number_t i = 0; i < mpic->domain->hwirq_max; i++) { in mpic_resume()
745 unsigned int virq = irq_find_mapping(mpic->domain, i); in mpic_resume()
755 writel(i, mpic->per_cpu + MPIC_INT_CLEAR_MASK); in mpic_resume()
760 writel(i, mpic->base + MPIC_INT_SET_ENABLE); in mpic_resume()
772 writel(mpic->doorbell_mask, mpic->per_cpu + MPIC_IN_DRBEL_MASK); in mpic_resume()
774 if (mpic_is_ipi_available(mpic)) { in mpic_resume()
775 src0 = mpic->doorbell_mask & IPI_DOORBELL_MASK; in mpic_resume()
776 src1 = mpic->doorbell_mask & PCI_MSI_DOORBELL_MASK; in mpic_resume()
778 src0 = mpic->doorbell_mask & PCI_MSI_FULL_DOORBELL_SRC0_MASK; in mpic_resume()
779 src1 = mpic->doorbell_mask & PCI_MSI_FULL_DOORBELL_SRC1_MASK; in mpic_resume()
783 writel(0, mpic->per_cpu + MPIC_INT_CLEAR_MASK); in mpic_resume()
785 writel(1, mpic->per_cpu + MPIC_INT_CLEAR_MASK); in mpic_resume()
787 if (mpic_is_ipi_available(mpic)) in mpic_resume()
788 mpic_ipi_resume(mpic); in mpic_resume()
831 struct mpic *mpic; in mpic_of_init() local
834 mpic = kzalloc(sizeof(*mpic), GFP_KERNEL); in mpic_of_init()
835 if (WARN_ON(!mpic)) in mpic_of_init()
838 mpic_data = mpic; in mpic_of_init()
840 err = mpic_map_region(node, 0, &mpic->base, &phys_base); in mpic_of_init()
844 err = mpic_map_region(node, 1, &mpic->per_cpu, NULL); in mpic_of_init()
848 nr_irqs = FIELD_GET(MPIC_INT_CONTROL_NUMINT_MASK, readl(mpic->base + MPIC_INT_CONTROL)); in mpic_of_init()
851 writel(i, mpic->base + MPIC_INT_CLEAR_ENABLE); in mpic_of_init()
854 * Initialize mpic->parent_irq before calling any other functions, since in mpic_of_init()
857 mpic->parent_irq = irq_of_parse_and_map(node, 0); in mpic_of_init()
863 if (!mpic_is_ipi_available(mpic)) in mpic_of_init()
866 mpic->domain = irq_domain_create_linear(of_fwnode_handle(node), nr_irqs, &mpic_irq_ops, mpic); in mpic_of_init()
867 if (!mpic->domain) { in mpic_of_init()
872 irq_domain_update_bus_token(mpic->domain, DOMAIN_BUS_WIRED); in mpic_of_init()
875 mpic_perf_init(mpic); in mpic_of_init()
876 mpic_smp_cpu_init(mpic); in mpic_of_init()
878 err = mpic_msi_init(mpic, node, phys_base); in mpic_of_init()
884 if (mpic_is_ipi_available(mpic)) { in mpic_of_init()
885 irq_set_default_domain(mpic->domain); in mpic_of_init()
888 err = mpic_ipi_init(mpic, node); in mpic_of_init()
904 irq_set_chained_handler_and_data(mpic->parent_irq, in mpic_of_init()
905 mpic_handle_cascade_irq, mpic); in mpic_of_init()
913 IRQCHIP_DECLARE(marvell_mpic, "marvell,mpic", mpic_of_init);
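
The probe path in mpic_of_init() ties the pieces together: map both register windows, read the number of sources from MPIC_INT_CONTROL, disable everything, then either take over interrupt entry (IPIs available, i.e. no parent_irq) or chain off the parent interrupt (lines 904-905). A compressed sketch of that final branch, assuming the ARM set_handle_irq() hook the top-level case would normally use:

        if (mpic_is_ipi_available(mpic)) {
                /* Top-level controller: own the exception entry and the IPIs */
                irq_set_default_domain(mpic->domain);
                set_handle_irq(mpic_handle_irq);

                err = mpic_ipi_init(mpic, node);
                if (err)
                        return err;
        } else {
                /* Cascaded behind another irqchip */
                irq_set_chained_handler_and_data(mpic->parent_irq,
                                                 mpic_handle_cascade_irq, mpic);
        }

The per-CPU setup done here (mpic_perf_init(), mpic_smp_cpu_init()) is repeated for secondary CPUs through the hotplug callbacks mpic_starting_cpu() and mpic_cascaded_starting_cpu() seen around lines 569-584.
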