Lines Matching full:mpic (Marvell MPIC irqchip driver, declared as "marvell,mpic" at the end of the listing)
69 * registers, which are relative to "mpic->base".
73 * "mpic->per_cpu". This base address points to a special address,
115 /* Registers relative to mpic->base */
125 /* Registers relative to mpic->per_cpu */
153 * struct mpic - MPIC private data structure
154 * @base: MPIC registers base address
156 * @parent_irq: parent IRQ if MPIC is not top-level interrupt controller
157 * @domain: MPIC main interrupt domain
170 struct mpic { struct
190 static struct mpic *mpic_data __ro_after_init;
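
Taken together, the fragments above describe two register banks: a global one reached through mpic->base and a per-CPU banked window reached through mpic->per_cpu, both wrapped in a single private structure. A minimal sketch of that structure follows; only base, per_cpu, parent_irq and domain appear in the fragments, the remaining fields are assumptions inferred from accesses listed further down.

struct mpic {
        void __iomem *base;             /* global "main" registers (MPIC_INT_*) */
        void __iomem *per_cpu;          /* per-CPU banked window (masks, doorbells) */
        int parent_irq;                 /* parent IRQ if not the top-level controller */
        struct irq_domain *domain;      /* main wired-interrupt domain */
        /* The fields below are assumptions, inferred from later fragments. */
        struct irq_domain *ipi_domain;
        struct irq_domain *msi_domain;
        struct irq_domain *msi_inner_domain;
        struct mutex msi_lock;
        phys_addr_t msi_doorbell_addr;
        unsigned int msi_doorbell_start;
        unsigned int msi_doorbell_size;
        u32 msi_doorbell_mask;
        DECLARE_BITMAP(msi_used, PCI_MSI_FULL_DOORBELL_NR);
        u32 doorbell_mask;
};
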
192 static inline bool mpic_is_ipi_available(struct mpic *mpic) in mpic_is_ipi_available() argument
200 return mpic->parent_irq <= 0; in mpic_is_ipi_available()
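
The two fragments above already contain the whole helper: a parent IRQ means the MPIC sits behind another interrupt controller (for example a GIC) that provides the IPIs, so the MPIC only drives its own doorbell-based IPIs when it has no parent. Assembled as a sketch:

static inline bool mpic_is_ipi_available(struct mpic *mpic)
{
        /*
         * No parent IRQ: the MPIC is the top-level interrupt controller
         * and must provide inter-processor interrupts itself.
         */
        return mpic->parent_irq <= 0;
}
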
215 struct mpic *mpic = irq_data_get_irq_chip_data(d); in mpic_irq_mask() local
219 writel(hwirq, mpic->base + MPIC_INT_CLEAR_ENABLE); in mpic_irq_mask()
221 writel(hwirq, mpic->per_cpu + MPIC_INT_SET_MASK); in mpic_irq_mask()
226 struct mpic *mpic = irq_data_get_irq_chip_data(d); in mpic_irq_unmask() local
230 writel(hwirq, mpic->base + MPIC_INT_SET_ENABLE); in mpic_irq_unmask()
232 writel(hwirq, mpic->per_cpu + MPIC_INT_CLEAR_MASK); in mpic_irq_unmask()
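
The mask/unmask fragments show two paths: shared interrupts are gated through the global enable registers behind mpic->base, while per-CPU interrupts are gated through the per-CPU mask registers behind mpic->per_cpu. A sketch of the pair, assuming a predicate such as mpic_is_percpu_irq() (not part of the listing) that identifies the low, per-CPU hardware IRQ numbers:

static void mpic_irq_mask(struct irq_data *d)
{
        struct mpic *mpic = irq_data_get_irq_chip_data(d);
        irq_hw_number_t hwirq = irqd_to_hwirq(d);

        if (!mpic_is_percpu_irq(hwirq))         /* assumed predicate */
                writel(hwirq, mpic->base + MPIC_INT_CLEAR_ENABLE);
        else
                writel(hwirq, mpic->per_cpu + MPIC_INT_SET_MASK);
}

static void mpic_irq_unmask(struct irq_data *d)
{
        struct mpic *mpic = irq_data_get_irq_chip_data(d);
        irq_hw_number_t hwirq = irqd_to_hwirq(d);

        if (!mpic_is_percpu_irq(hwirq))
                writel(hwirq, mpic->base + MPIC_INT_SET_ENABLE);
        else
                writel(hwirq, mpic->per_cpu + MPIC_INT_CLEAR_MASK);
}
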
238 .name = "MPIC MSI",
252 struct mpic *mpic = irq_data_get_irq_chip_data(d); in mpic_compose_msi_msg() local
254 msg->address_lo = lower_32_bits(mpic->msi_doorbell_addr); in mpic_compose_msi_msg()
255 msg->address_hi = upper_32_bits(mpic->msi_doorbell_addr); in mpic_compose_msi_msg()
256 msg->data = BIT(cpu + 8) | (d->hwirq + mpic->msi_doorbell_start); in mpic_compose_msi_msg()
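
The MSI message is just a write to the software-triggered-interrupt doorbell: the address is the doorbell register's physical address, and the data encodes the target CPU above bit 8 and the doorbell number (hwirq offset by msi_doorbell_start) in the low bits. A sketch; deriving the CPU from the effective affinity is an assumption not shown in the fragments:

static void mpic_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
        struct mpic *mpic = irq_data_get_irq_chip_data(d);
        /* Assumption: target CPU taken from the effective affinity mask. */
        unsigned int cpu = cpumask_first(irq_data_get_effective_affinity_mask(d));

        msg->address_lo = lower_32_bits(mpic->msi_doorbell_addr);
        msg->address_hi = upper_32_bits(mpic->msi_doorbell_addr);
        /* CPU select above bit 8, doorbell number in the low bits. */
        msg->data = BIT(cpu + 8) | (d->hwirq + mpic->msi_doorbell_start);
}
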
277 .name = "MPIC MSI",
285 struct mpic *mpic = domain->host_data; in mpic_msi_alloc() local
288 mutex_lock(&mpic->msi_lock); in mpic_msi_alloc()
289 hwirq = bitmap_find_free_region(mpic->msi_used, mpic->msi_doorbell_size, in mpic_msi_alloc()
291 mutex_unlock(&mpic->msi_lock); in mpic_msi_alloc()
309 struct mpic *mpic = domain->host_data; in mpic_msi_free() local
311 mutex_lock(&mpic->msi_lock); in mpic_msi_free()
312 bitmap_release_region(mpic->msi_used, d->hwirq, order_base_2(nr_irqs)); in mpic_msi_free()
313 mutex_unlock(&mpic->msi_lock); in mpic_msi_free()
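
Doorbell numbers are handed out as naturally aligned power-of-two regions of the msi_used bitmap, serialized by msi_lock; freeing releases the same region. A sketch of the pair, in which the per-virq irq_domain_set_info() binding and the chip name are assumptions not shown in the fragments:

static int mpic_msi_alloc(struct irq_domain *domain, unsigned int virq,
                          unsigned int nr_irqs, void *args)
{
        struct mpic *mpic = domain->host_data;
        int hwirq;

        mutex_lock(&mpic->msi_lock);
        hwirq = bitmap_find_free_region(mpic->msi_used, mpic->msi_doorbell_size,
                                        order_base_2(nr_irqs));
        mutex_unlock(&mpic->msi_lock);

        if (hwirq < 0)
                return -ENOSPC;

        for (unsigned int i = 0; i < nr_irqs; i++)
                irq_domain_set_info(domain, virq + i, hwirq + i,
                                    &mpic_msi_bottom_irq_chip,  /* assumed name */
                                    domain->host_data, handle_simple_irq,
                                    NULL, NULL);

        return 0;
}

static void mpic_msi_free(struct irq_domain *domain, unsigned int virq,
                          unsigned int nr_irqs)
{
        struct irq_data *d = irq_domain_get_irq_data(domain, virq);
        struct mpic *mpic = domain->host_data;

        mutex_lock(&mpic->msi_lock);
        bitmap_release_region(mpic->msi_used, d->hwirq, order_base_2(nr_irqs));
        mutex_unlock(&mpic->msi_lock);
}
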
321 static void mpic_msi_reenable_percpu(struct mpic *mpic) in mpic_msi_reenable_percpu() argument
326 reg = readl(mpic->per_cpu + MPIC_IN_DRBEL_MASK); in mpic_msi_reenable_percpu()
327 reg |= mpic->msi_doorbell_mask; in mpic_msi_reenable_percpu()
328 writel(reg, mpic->per_cpu + MPIC_IN_DRBEL_MASK); in mpic_msi_reenable_percpu()
331 writel(1, mpic->per_cpu + MPIC_INT_CLEAR_MASK); in mpic_msi_reenable_percpu()
334 static int __init mpic_msi_init(struct mpic *mpic, struct device_node *node, in mpic_msi_init() argument
337 mpic->msi_doorbell_addr = main_int_phys_base + MPIC_SW_TRIG_INT; in mpic_msi_init()
339 mutex_init(&mpic->msi_lock); in mpic_msi_init()
341 if (mpic_is_ipi_available(mpic)) { in mpic_msi_init()
342 mpic->msi_doorbell_start = PCI_MSI_DOORBELL_START; in mpic_msi_init()
343 mpic->msi_doorbell_size = PCI_MSI_DOORBELL_NR; in mpic_msi_init()
344 mpic->msi_doorbell_mask = PCI_MSI_DOORBELL_MASK; in mpic_msi_init()
346 mpic->msi_doorbell_start = PCI_MSI_FULL_DOORBELL_START; in mpic_msi_init()
347 mpic->msi_doorbell_size = PCI_MSI_FULL_DOORBELL_NR; in mpic_msi_init()
348 mpic->msi_doorbell_mask = PCI_MSI_FULL_DOORBELL_MASK; in mpic_msi_init()
351 mpic->msi_inner_domain = irq_domain_add_linear(NULL, mpic->msi_doorbell_size, in mpic_msi_init()
352 &mpic_msi_domain_ops, mpic); in mpic_msi_init()
353 if (!mpic->msi_inner_domain) in mpic_msi_init()
356 mpic->msi_domain = pci_msi_create_irq_domain(of_node_to_fwnode(node), &mpic_msi_domain_info, in mpic_msi_init()
357 mpic->msi_inner_domain); in mpic_msi_init()
358 if (!mpic->msi_domain) { in mpic_msi_init()
359 irq_domain_remove(mpic->msi_inner_domain); in mpic_msi_init()
363 mpic_msi_reenable_percpu(mpic); in mpic_msi_init()
366 if (!mpic_is_ipi_available(mpic)) in mpic_msi_init()
367 writel(0, mpic->per_cpu + MPIC_INT_CLEAR_MASK); in mpic_msi_init()
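
Between the two groups of doorbell assignments sits a plain either/or: when the MPIC also provides IPIs, the low doorbells are reserved for them and MSIs get the smaller PCI_MSI_DOORBELL_* window; when it is cascaded and IPIs come from the parent, MSIs can use the full doorbell range. Reassembled as a sketch, with only the else branching filled in:

if (mpic_is_ipi_available(mpic)) {
        mpic->msi_doorbell_start = PCI_MSI_DOORBELL_START;
        mpic->msi_doorbell_size = PCI_MSI_DOORBELL_NR;
        mpic->msi_doorbell_mask = PCI_MSI_DOORBELL_MASK;
} else {
        mpic->msi_doorbell_start = PCI_MSI_FULL_DOORBELL_START;
        mpic->msi_doorbell_size = PCI_MSI_FULL_DOORBELL_NR;
        mpic->msi_doorbell_mask = PCI_MSI_FULL_DOORBELL_MASK;
}
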
372 static __maybe_unused void mpic_msi_reenable_percpu(struct mpic *mpic) {} in mpic_msi_reenable_percpu() argument
374 static inline int mpic_msi_init(struct mpic *mpic, struct device_node *node, in mpic_msi_init() argument
381 static void mpic_perf_init(struct mpic *mpic) in mpic_perf_init() argument
395 writel(MPIC_INT_CAUSE_PERF(cpuid), mpic->per_cpu + MPIC_INT_FABRIC_MASK); in mpic_perf_init()
401 struct mpic *mpic = irq_data_get_irq_chip_data(d); in mpic_ipi_mask() local
404 reg = readl(mpic->per_cpu + MPIC_IN_DRBEL_MASK); in mpic_ipi_mask()
406 writel(reg, mpic->per_cpu + MPIC_IN_DRBEL_MASK); in mpic_ipi_mask()
411 struct mpic *mpic = irq_data_get_irq_chip_data(d); in mpic_ipi_unmask() local
414 reg = readl(mpic->per_cpu + MPIC_IN_DRBEL_MASK); in mpic_ipi_unmask()
416 writel(reg, mpic->per_cpu + MPIC_IN_DRBEL_MASK); in mpic_ipi_unmask()
421 struct mpic *mpic = irq_data_get_irq_chip_data(d); in mpic_ipi_send_mask() local
436 writel((map << 8) | d->hwirq, mpic->base + MPIC_SW_TRIG_INT); in mpic_ipi_send_mask()
441 struct mpic *mpic = irq_data_get_irq_chip_data(d); in mpic_ipi_ack() local
443 writel(~BIT(d->hwirq), mpic->per_cpu + MPIC_IN_DRBEL_CAUSE); in mpic_ipi_ack()
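
Sending an IPI is a single write to the software-trigger register: the doorbell number (the IPI's hwirq) goes in the low bits and the target CPUs, as a physical-CPU bitmap, go above bit 8. A sketch; building the map with cpu_logical_map() and the dsb() barrier are assumptions not visible in the fragments:

static void mpic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
{
        struct mpic *mpic = irq_data_get_irq_chip_data(d);
        unsigned int cpu;
        u32 map = 0;

        /* Assumption: translate logical CPUs into a physical-CPU bitmap. */
        for_each_cpu(cpu, mask)
                map |= BIT(cpu_logical_map(cpu));

        /* Make prior memory stores visible before kicking the other CPUs. */
        dsb();

        writel((map << 8) | d->hwirq, mpic->base + MPIC_SW_TRIG_INT);
}
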
477 static void mpic_ipi_resume(struct mpic *mpic) in mpic_ipi_resume() argument
480 unsigned int virq = irq_find_mapping(mpic->ipi_domain, i); in mpic_ipi_resume()
486 d = irq_domain_get_irq_data(mpic->ipi_domain, virq); in mpic_ipi_resume()
491 static int __init mpic_ipi_init(struct mpic *mpic, struct device_node *node) in mpic_ipi_init() argument
495 mpic->ipi_domain = irq_domain_create_linear(of_node_to_fwnode(node), IPI_DOORBELL_NR, in mpic_ipi_init()
496 &mpic_ipi_domain_ops, mpic); in mpic_ipi_init()
497 if (WARN_ON(!mpic->ipi_domain)) in mpic_ipi_init()
500 irq_domain_update_bus_token(mpic->ipi_domain, DOMAIN_BUS_IPI); in mpic_ipi_init()
501 base_ipi = irq_domain_alloc_irqs(mpic->ipi_domain, IPI_DOORBELL_NR, NUMA_NO_NODE, NULL); in mpic_ipi_init()
512 struct mpic *mpic = irq_data_get_irq_chip_data(d); in mpic_set_affinity() local
519 atomic_io_modify(mpic->base + MPIC_INT_SOURCE_CTL(hwirq), in mpic_set_affinity()
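
Affinity changes come down to rewriting the CPU-target bits of the interrupt's MPIC_INT_SOURCE_CTL register with ARM's atomic_io_modify() read-modify-write helper. A sketch; the MPIC_INT_SOURCE_CPU_MASK name and the single-target selection are assumptions layered on the call shown above:

static int mpic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
                             bool force)
{
        struct mpic *mpic = irq_data_get_irq_chip_data(d);
        irq_hw_number_t hwirq = irqd_to_hwirq(d);
        unsigned int cpu;

        /* Route the interrupt to a single online CPU from the mask. */
        cpu = cpumask_any_and(mask_val, cpu_online_mask);

        atomic_io_modify(mpic->base + MPIC_INT_SOURCE_CTL(hwirq),
                         MPIC_INT_SOURCE_CPU_MASK,      /* assumed mask name */
                         BIT(cpu_logical_map(cpu)));

        irq_data_update_effective_affinity(d, cpumask_of(cpu));

        return IRQ_SET_MASK_OK;
}
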
527 static void mpic_smp_cpu_init(struct mpic *mpic) in mpic_smp_cpu_init() argument
529 for (irq_hw_number_t i = 0; i < mpic->domain->hwirq_max; i++) in mpic_smp_cpu_init()
530 writel(i, mpic->per_cpu + MPIC_INT_SET_MASK); in mpic_smp_cpu_init()
532 if (!mpic_is_ipi_available(mpic)) in mpic_smp_cpu_init()
536 writel(0, mpic->per_cpu + MPIC_IN_DRBEL_MASK); in mpic_smp_cpu_init()
539 writel(0, mpic->per_cpu + MPIC_IN_DRBEL_CAUSE); in mpic_smp_cpu_init()
542 writel(0, mpic->per_cpu + MPIC_INT_CLEAR_MASK); in mpic_smp_cpu_init()
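
Per-CPU bring-up first masks every interrupt on the newly started CPU, and only when the MPIC itself provides IPIs does it also reset the doorbells: disable them all, clear anything pending, then unmask the doorbell summary interrupt (hwirq 0). The early return for the cascaded case is an assumption; the fragments only show the condition. A sketch:

static void mpic_smp_cpu_init(struct mpic *mpic)
{
        /* Mask every interrupt on this CPU. */
        for (irq_hw_number_t i = 0; i < mpic->domain->hwirq_max; i++)
                writel(i, mpic->per_cpu + MPIC_INT_SET_MASK);

        if (!mpic_is_ipi_available(mpic))
                return;                 /* IPIs handled by the parent controller */

        writel(0, mpic->per_cpu + MPIC_IN_DRBEL_MASK);  /* disable all doorbells */
        writel(0, mpic->per_cpu + MPIC_IN_DRBEL_CAUSE); /* clear pending doorbells */
        writel(0, mpic->per_cpu + MPIC_INT_CLEAR_MASK); /* unmask summary hwirq 0 */
}
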
545 static void mpic_reenable_percpu(struct mpic *mpic) in mpic_reenable_percpu() argument
549 unsigned int virq = irq_linear_revmap(mpic->domain, i); in mpic_reenable_percpu()
559 if (mpic_is_ipi_available(mpic)) in mpic_reenable_percpu()
560 mpic_ipi_resume(mpic); in mpic_reenable_percpu()
562 mpic_msi_reenable_percpu(mpic); in mpic_reenable_percpu()
567 struct mpic *mpic = irq_get_default_host()->host_data; in mpic_starting_cpu() local
569 mpic_perf_init(mpic); in mpic_starting_cpu()
570 mpic_smp_cpu_init(mpic); in mpic_starting_cpu()
571 mpic_reenable_percpu(mpic); in mpic_starting_cpu()
578 struct mpic *mpic = mpic_data; in mpic_cascaded_starting_cpu() local
580 mpic_perf_init(mpic); in mpic_cascaded_starting_cpu()
581 mpic_reenable_percpu(mpic); in mpic_cascaded_starting_cpu()
582 enable_percpu_irq(mpic->parent_irq, IRQ_TYPE_NONE); in mpic_cascaded_starting_cpu()
587 static void mpic_smp_cpu_init(struct mpic *mpic) {} in mpic_smp_cpu_init() argument
588 static void mpic_ipi_resume(struct mpic *mpic) {} in mpic_ipi_resume() argument
592 .name = "MPIC",
604 struct mpic *mpic = domain->host_data; in mpic_irq_map() local
610 irq_set_chip_data(virq, mpic); in mpic_irq_map()
614 writel(hwirq, mpic->per_cpu + MPIC_INT_CLEAR_MASK); in mpic_irq_map()
616 writel(hwirq, mpic->base + MPIC_INT_SET_ENABLE); in mpic_irq_map()
636 static void mpic_handle_msi_irq(struct mpic *mpic) in mpic_handle_msi_irq() argument
641 cause = readl_relaxed(mpic->per_cpu + MPIC_IN_DRBEL_CAUSE); in mpic_handle_msi_irq()
642 cause &= mpic->msi_doorbell_mask; in mpic_handle_msi_irq()
643 writel(~cause, mpic->per_cpu + MPIC_IN_DRBEL_CAUSE); in mpic_handle_msi_irq()
646 generic_handle_domain_irq(mpic->msi_inner_domain, i - mpic->msi_doorbell_start); in mpic_handle_msi_irq()
649 static void mpic_handle_msi_irq(struct mpic *mpic) {} in mpic_handle_msi_irq() argument
653 static void mpic_handle_ipi_irq(struct mpic *mpic) in mpic_handle_ipi_irq() argument
658 cause = readl_relaxed(mpic->per_cpu + MPIC_IN_DRBEL_CAUSE); in mpic_handle_ipi_irq()
662 generic_handle_domain_irq(mpic->ipi_domain, i); in mpic_handle_ipi_irq()
665 static inline void mpic_handle_ipi_irq(struct mpic *mpic) {} in mpic_handle_ipi_irq() argument
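
MSI demultiplexing reads the per-CPU doorbell cause register, keeps only the bits inside the MSI doorbell window, acknowledges exactly those bits (bits written back as 0 are cleared, hence the ~cause), and forwards each set bit into the inner MSI domain; IPI demultiplexing mirrors this over the IPI doorbells and mpic->ipi_domain. A sketch, where the loop form is an assumption and the reads, mask and handler call come from the fragments above:

static void mpic_handle_msi_irq(struct mpic *mpic)
{
        unsigned long cause;
        unsigned int i;

        cause = readl_relaxed(mpic->per_cpu + MPIC_IN_DRBEL_CAUSE);
        cause &= mpic->msi_doorbell_mask;
        /* Bits written as 0 are cleared: ack only what we are handling. */
        writel(~cause, mpic->per_cpu + MPIC_IN_DRBEL_CAUSE);

        for_each_set_bit(i, &cause, BITS_PER_LONG)
                generic_handle_domain_irq(mpic->msi_inner_domain,
                                          i - mpic->msi_doorbell_start);
}
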
670 struct mpic *mpic = irq_desc_get_handler_data(desc); in mpic_handle_cascade_irq() local
678 cause = readl_relaxed(mpic->per_cpu + MPIC_PPI_CAUSE); in mpic_handle_cascade_irq()
682 irqsrc = readl_relaxed(mpic->base + MPIC_INT_SOURCE_CTL(i)); in mpic_handle_cascade_irq()
691 mpic_handle_msi_irq(mpic); in mpic_handle_cascade_irq()
695 generic_handle_domain_irq(mpic->domain, i); in mpic_handle_cascade_irq()
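
When the MPIC is cascaded, the chained handler reads the per-CPU cause register and, for every pending source, checks its MPIC_INT_SOURCE_CTL routing before handling it: the doorbell summary sources go to the MSI demultiplexer, everything else to the wired domain. A sketch; the chained_irq bracketing, the routing-check macro and the hwirq 0/1 test are assumptions layered on the reads and calls shown above:

static void mpic_handle_cascade_irq(struct irq_desc *desc)
{
        struct mpic *mpic = irq_desc_get_handler_data(desc);
        struct irq_chip *chip = irq_desc_get_chip(desc);
        unsigned long cause;
        u32 irqsrc, cpuid;
        irq_hw_number_t i;

        chained_irq_enter(chip, desc);

        cause = readl_relaxed(mpic->per_cpu + MPIC_PPI_CAUSE);
        cpuid = cpu_logical_map(smp_processor_id());

        for_each_set_bit(i, &cause, BITS_PER_LONG) {
                irqsrc = readl_relaxed(mpic->base + MPIC_INT_SOURCE_CTL(i));

                /* Skip sources not routed to this CPU (assumed check). */
                if (!(irqsrc & MPIC_INT_IRQ_FIQ_MASK(cpuid)))
                        continue;

                if (i == 0 || i == 1) {
                        mpic_handle_msi_irq(mpic);      /* doorbell summary */
                        continue;
                }

                generic_handle_domain_irq(mpic->domain, i);
        }

        chained_irq_exit(chip, desc);
}
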
703 struct mpic *mpic = irq_get_default_host()->host_data; in mpic_handle_irq() local
708 irqstat = readl_relaxed(mpic->per_cpu + MPIC_CPU_INTACK); in mpic_handle_irq()
715 generic_handle_domain_irq(mpic->domain, i); in mpic_handle_irq()
719 mpic_handle_msi_irq(mpic); in mpic_handle_irq()
723 mpic_handle_ipi_irq(mpic); in mpic_handle_irq()
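
As the top-level entry point, mpic_handle_irq() loops on the interrupt-acknowledge register: IDs above 1022 mean nothing is pending, IDs above 1 are ordinary wired interrupts, ID 1 is the MSI doorbell summary and ID 0 the IPI doorbell summary. A sketch; the 10-bit ID extraction and the spurious threshold are assumptions around the reads and calls shown above:

static void __exception_irq_entry mpic_handle_irq(struct pt_regs *regs)
{
        struct mpic *mpic = irq_get_default_host()->host_data;
        irq_hw_number_t i;
        u32 irqstat;

        do {
                irqstat = readl_relaxed(mpic->per_cpu + MPIC_CPU_INTACK);
                i = irqstat & GENMASK(9, 0);    /* 10-bit interrupt ID (assumed) */

                if (i > 1022)                   /* spurious: nothing pending */
                        break;

                if (i > 1)
                        generic_handle_domain_irq(mpic->domain, i);
                else if (i == 1)
                        mpic_handle_msi_irq(mpic);
                else
                        mpic_handle_ipi_irq(mpic);
        } while (1);
}
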
729 struct mpic *mpic = mpic_data; in mpic_suspend() local
731 mpic->doorbell_mask = readl(mpic->per_cpu + MPIC_IN_DRBEL_MASK); in mpic_suspend()
738 struct mpic *mpic = mpic_data; in mpic_resume() local
742 for (irq_hw_number_t i = 0; i < mpic->domain->hwirq_max; i++) { in mpic_resume()
743 unsigned int virq = irq_linear_revmap(mpic->domain, i); in mpic_resume()
753 writel(i, mpic->per_cpu + MPIC_INT_CLEAR_MASK); in mpic_resume()
758 writel(i, mpic->base + MPIC_INT_SET_ENABLE); in mpic_resume()
770 writel(mpic->doorbell_mask, mpic->per_cpu + MPIC_IN_DRBEL_MASK); in mpic_resume()
772 if (mpic_is_ipi_available(mpic)) { in mpic_resume()
773 src0 = mpic->doorbell_mask & IPI_DOORBELL_MASK; in mpic_resume()
774 src1 = mpic->doorbell_mask & PCI_MSI_DOORBELL_MASK; in mpic_resume()
776 src0 = mpic->doorbell_mask & PCI_MSI_FULL_DOORBELL_SRC0_MASK; in mpic_resume()
777 src1 = mpic->doorbell_mask & PCI_MSI_FULL_DOORBELL_SRC1_MASK; in mpic_resume()
781 writel(0, mpic->per_cpu + MPIC_INT_CLEAR_MASK); in mpic_resume()
783 writel(1, mpic->per_cpu + MPIC_INT_CLEAR_MASK); in mpic_resume()
785 if (mpic_is_ipi_available(mpic)) in mpic_resume()
786 mpic_ipi_resume(mpic); in mpic_resume()
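
On resume, after the saved doorbell mask is written back, the two doorbell summary interrupts are unmasked only if any doorbell in their half is enabled: src0 feeds summary hwirq 0 (the IPIs, or the low half of the full MSI range), src1 feeds summary hwirq 1 (the MSI doorbells, or the high half). The if-guards below are assumptions; the writes themselves appear in the fragments above.

if (src0)
        writel(0, mpic->per_cpu + MPIC_INT_CLEAR_MASK); /* unmask summary hwirq 0 */
if (src1)
        writel(1, mpic->per_cpu + MPIC_INT_CLEAR_MASK); /* unmask summary hwirq 1 */
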
829 struct mpic *mpic; in mpic_of_init() local
832 mpic = kzalloc(sizeof(*mpic), GFP_KERNEL); in mpic_of_init()
833 if (WARN_ON(!mpic)) in mpic_of_init()
836 mpic_data = mpic; in mpic_of_init()
838 err = mpic_map_region(node, 0, &mpic->base, &phys_base); in mpic_of_init()
842 err = mpic_map_region(node, 1, &mpic->per_cpu, NULL); in mpic_of_init()
846 nr_irqs = FIELD_GET(MPIC_INT_CONTROL_NUMINT_MASK, readl(mpic->base + MPIC_INT_CONTROL)); in mpic_of_init()
849 writel(i, mpic->base + MPIC_INT_CLEAR_ENABLE); in mpic_of_init()
852 * Initialize mpic->parent_irq before calling any other functions, since in mpic_of_init()
855 mpic->parent_irq = irq_of_parse_and_map(node, 0); in mpic_of_init()
861 if (!mpic_is_ipi_available(mpic)) in mpic_of_init()
864 mpic->domain = irq_domain_add_linear(node, nr_irqs, &mpic_irq_ops, mpic); in mpic_of_init()
865 if (!mpic->domain) { in mpic_of_init()
870 irq_domain_update_bus_token(mpic->domain, DOMAIN_BUS_WIRED); in mpic_of_init()
873 mpic_perf_init(mpic); in mpic_of_init()
874 mpic_smp_cpu_init(mpic); in mpic_of_init()
876 err = mpic_msi_init(mpic, node, phys_base); in mpic_of_init()
882 if (mpic_is_ipi_available(mpic)) { in mpic_of_init()
883 irq_set_default_host(mpic->domain); in mpic_of_init()
886 err = mpic_ipi_init(mpic, node); in mpic_of_init()
902 irq_set_chained_handler_and_data(mpic->parent_irq, in mpic_of_init()
903 mpic_handle_cascade_irq, mpic); in mpic_of_init()
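
The tail of mpic_of_init() picks one of two integration modes: as the top-level controller the MPIC becomes the default domain, installs mpic_handle_irq() as the CPU exception handler and sets up its doorbell IPIs; when cascaded it instead hangs mpic_handle_cascade_irq() off the parent IRQ. A condensed sketch with error handling and the CPU-hotplug callbacks omitted; set_handle_irq() is an assumption, the rest follows the fragments above:

if (mpic_is_ipi_available(mpic)) {
        /* Top-level: own the CPU IRQ entry and provide doorbell IPIs. */
        irq_set_default_host(mpic->domain);
        set_handle_irq(mpic_handle_irq);        /* assumed */
        err = mpic_ipi_init(mpic, node);
} else {
        /* Cascaded behind the parent controller (e.g. a GIC). */
        irq_set_chained_handler_and_data(mpic->parent_irq,
                                         mpic_handle_cascade_irq, mpic);
}
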
911 IRQCHIP_DECLARE(marvell_mpic, "marvell,mpic", mpic_of_init);