// SPDX-License-Identifier: GPL-2.0-only
/*
 * Marvell Armada 370 and Armada XP SoC IRQ handling
 *
 * Copyright (C) 2012 Marvell
 *
 * Lior Amsalem <alior@marvell.com>
 * Gregory CLEMENT <gregory.clement@free-electrons.com>
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 * Ben Dooks <ben.dooks@codethink.co.uk>
 */

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/interrupt.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqchip/irq-msi-lib.h>
#include <linux/cpu.h>
#include <linux/io.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_pci.h>
#include <linux/irqdomain.h>
#include <linux/slab.h>
#include <linux/syscore_ops.h>
#include <linux/msi.h>
#include <linux/types.h>
#include <asm/mach/arch.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
#include <asm/mach/irq.h>

/*
 * Overall diagram of the Armada XP interrupt controller:
 *
 *            To CPU 0              To CPU 1
 *
 *               /\                    /\
 *               ||                    ||
 *       +---------------+     +---------------+
 *       |               |     |               |
 *       |    per-CPU    |     |    per-CPU    |
 *       |  mask/unmask  |     |  mask/unmask  |
 *       |     CPU0      |     |     CPU1      |
 *       |               |     |               |
 *       +---------------+     +---------------+
 *               /\                    /\
 *               ||                    ||
 *               \\____________________//
 *                          ||
 *               +-------------------+
 *               |                   |
 *               |  Global interrupt |
 *               |    mask/unmask    |
 *               |                   |
 *               +-------------------+
 *                          /\
 *                          ||
 *                    interrupt from
 *                        device
 *
 * The "global interrupt mask/unmask" is modified using the
 * MPIC_INT_SET_ENABLE and MPIC_INT_CLEAR_ENABLE registers, which are
 * relative to "mpic->base".
 *
 * The "per-CPU mask/unmask" is modified using the MPIC_INT_SET_MASK
 * and MPIC_INT_CLEAR_MASK registers, which are relative to
 * "mpic->per_cpu". This base address points to a special address,
 * which automatically accesses the registers of the current CPU.
 *
 * The per-CPU mask/unmask can also be adjusted using the global
 * per-interrupt MPIC_INT_SOURCE_CTL register, which we use to
 * configure interrupt affinity.
 *
 * Due to this model, all interrupts need to be masked/unmasked at two
 * different levels: at the global level and at the per-CPU level.
 *
 * This driver takes the following approach to deal with this:
 *
 *  - For global interrupts:
 *
 *    At ->map() time, a global interrupt is unmasked at the per-CPU
 *    mask/unmask level. It is therefore unmasked at this level for
 *    the current CPU, running the ->map() code. This allows the
 *    interrupt to be unmasked at this level in non-SMP
 *    configurations. In SMP configurations, the ->set_affinity()
 *    callback is called, which readjusts the per-CPU mask/unmask for
 *    the interrupt using the MPIC_INT_SOURCE_CTL() register.
 *
 *    The ->mask() and ->unmask() operations only mask/unmask the
 *    interrupt at the "global" level.
 *
 *    So, a global interrupt is enabled at the per-CPU level as soon
 *    as it is mapped. At run time, the masking/unmasking takes place
 *    at the global level.
 *
 *  - For per-CPU interrupts:
 *
 *    At ->map() time, a per-CPU interrupt is unmasked at the global
 *    mask/unmask level.
 *
 *    The ->mask() and ->unmask() operations mask/unmask the interrupt
 *    at the per-CPU level.
 *
 *    So, a per-CPU interrupt is enabled at the global level as soon
 *    as it is mapped. At run time, the masking/unmasking takes place
 *    at the per-CPU level.
 */
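
/*
 * As a concrete illustration of the model above (this is only a summary of
 * what the callbacks below end up doing, not an additional requirement):
 * enabling a global interrupt N targeted at a single CPU boils down to
 *
 *   writel(N, mpic->per_cpu + MPIC_INT_CLEAR_MASK);         (->map(), per-CPU level)
 *   atomic_io_modify(mpic->base + MPIC_INT_SOURCE_CTL(N), ...); (->set_affinity())
 *   writel(N, mpic->base + MPIC_INT_SET_ENABLE);            (->unmask(), global level)
 *
 * while a per-CPU interrupt N is enabled at the global level at ->map() time
 * with
 *
 *   writel(N, mpic->base + MPIC_INT_SET_ENABLE);
 *
 * and is then masked/unmasked at run time through MPIC_INT_SET_MASK /
 * MPIC_INT_CLEAR_MASK on each CPU.
 */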

/* Registers relative to mpic->base */
#define MPIC_INT_CONTROL			0x00
#define MPIC_INT_CONTROL_NUMINT_MASK		GENMASK(12, 2)
#define MPIC_SW_TRIG_INT			0x04
#define MPIC_INT_SET_ENABLE			0x30
#define MPIC_INT_CLEAR_ENABLE			0x34
#define MPIC_INT_SOURCE_CTL(hwirq)		(0x100 + (hwirq) * 4)
#define MPIC_INT_SOURCE_CPU_MASK		GENMASK(3, 0)
#define MPIC_INT_IRQ_FIQ_MASK(cpuid)		((BIT(0) | BIT(8)) << (cpuid))

/* Registers relative to mpic->per_cpu */
#define MPIC_IN_DRBEL_CAUSE			0x08
#define MPIC_IN_DRBEL_MASK			0x0c
#define MPIC_PPI_CAUSE				0x10
#define MPIC_CPU_INTACK				0x44
#define MPIC_CPU_INTACK_IID_MASK		GENMASK(9, 0)
#define MPIC_INT_SET_MASK			0x48
#define MPIC_INT_CLEAR_MASK			0x4C
#define MPIC_INT_FABRIC_MASK			0x54
#define MPIC_INT_CAUSE_PERF(cpu)		BIT(cpu)

#define MPIC_PER_CPU_IRQS_NR			29

/* IPI and MSI interrupt definitions for IPI platforms */
#define IPI_DOORBELL_NR				8
#define IPI_DOORBELL_MASK			GENMASK(7, 0)
#define PCI_MSI_DOORBELL_START			16
#define PCI_MSI_DOORBELL_NR			16
#define PCI_MSI_DOORBELL_MASK			GENMASK(31, 16)

/* MSI interrupt definitions for non-IPI platforms */
#define PCI_MSI_FULL_DOORBELL_START		0
#define PCI_MSI_FULL_DOORBELL_NR		32
#define PCI_MSI_FULL_DOORBELL_MASK		GENMASK(31, 0)
#define PCI_MSI_FULL_DOORBELL_SRC0_MASK		GENMASK(15, 0)
#define PCI_MSI_FULL_DOORBELL_SRC1_MASK		GENMASK(31, 16)

/**
 * struct mpic - MPIC private data structure
 * @base:		MPIC registers base address
 * @per_cpu:		per-CPU registers base address
 * @parent_irq:		parent IRQ if MPIC is not top-level interrupt controller
 * @domain:		MPIC main interrupt domain
 * @ipi_domain:		IPI domain
 * @msi_inner_domain:	MSI inner domain
 * @msi_used:		bitmap of used MSI numbers
 * @msi_lock:		mutex serializing access to @msi_used
 * @msi_doorbell_addr:	physical address of MSI doorbell register
 * @msi_doorbell_mask:	mask of available doorbell bits for MSIs (either PCI_MSI_DOORBELL_MASK or
 *			PCI_MSI_FULL_DOORBELL_MASK)
 * @msi_doorbell_start:	first set bit in @msi_doorbell_mask
 * @msi_doorbell_size:	number of set bits in @msi_doorbell_mask
 * @doorbell_mask:	doorbell mask of MSIs and IPIs, stored on suspend, restored on resume
 */
struct mpic {
	void __iomem *base;
	void __iomem *per_cpu;
	int parent_irq;
	struct irq_domain *domain;
#ifdef CONFIG_SMP
	struct irq_domain *ipi_domain;
#endif
#ifdef CONFIG_PCI_MSI
	struct irq_domain *msi_inner_domain;
	DECLARE_BITMAP(msi_used, PCI_MSI_FULL_DOORBELL_NR);
	struct mutex msi_lock;
	phys_addr_t msi_doorbell_addr;
	u32 msi_doorbell_mask;
	unsigned int msi_doorbell_start, msi_doorbell_size;
#endif
	u32 doorbell_mask;
};

static struct mpic *mpic_data __ro_after_init;

static inline bool mpic_is_ipi_available(struct mpic *mpic)
{
	/*
	 * We distinguish IPI availability in the IC by the IC not having a
	 * parent irq defined. If a parent irq is defined, there is a parent
	 * interrupt controller (e.g. GIC) that takes care of inter-processor
	 * interrupts.
	 */
	return mpic->parent_irq <= 0;
}

static inline bool mpic_is_percpu_irq(irq_hw_number_t hwirq)
{
	return hwirq < MPIC_PER_CPU_IRQS_NR;
}

/*
 * In SMP mode:
 * For shared global interrupts, mask/unmask global enable bit
 * For CPU interrupts, mask/unmask the calling CPU's bit
 */
static void mpic_irq_mask(struct irq_data *d)
{
	struct mpic *mpic = irq_data_get_irq_chip_data(d);
	irq_hw_number_t hwirq = irqd_to_hwirq(d);

	if (!mpic_is_percpu_irq(hwirq))
		writel(hwirq, mpic->base + MPIC_INT_CLEAR_ENABLE);
	else
		writel(hwirq, mpic->per_cpu + MPIC_INT_SET_MASK);
}

static void mpic_irq_unmask(struct irq_data *d)
{
	struct mpic *mpic = irq_data_get_irq_chip_data(d);
	irq_hw_number_t hwirq = irqd_to_hwirq(d);

	if (!mpic_is_percpu_irq(hwirq))
		writel(hwirq, mpic->base + MPIC_INT_SET_ENABLE);
	else
		writel(hwirq, mpic->per_cpu + MPIC_INT_CLEAR_MASK);
}

#ifdef CONFIG_PCI_MSI

static void mpic_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	unsigned int cpu = cpumask_first(irq_data_get_effective_affinity_mask(d));
	struct mpic *mpic = irq_data_get_irq_chip_data(d);

	msg->address_lo = lower_32_bits(mpic->msi_doorbell_addr);
	msg->address_hi = upper_32_bits(mpic->msi_doorbell_addr);
	msg->data = BIT(cpu + 8) | (d->hwirq + mpic->msi_doorbell_start);
}

static int mpic_msi_set_affinity(struct irq_data *d, const struct cpumask *mask, bool force)
{
	unsigned int cpu;

	if (!force)
		cpu = cpumask_any_and(mask, cpu_online_mask);
	else
		cpu = cpumask_first(mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return IRQ_SET_MASK_OK;
}

static struct irq_chip mpic_msi_bottom_irq_chip = {
	.name			= "MPIC MSI",
	.irq_compose_msi_msg	= mpic_compose_msi_msg,
	.irq_set_affinity	= mpic_msi_set_affinity,
};

static int mpic_msi_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs,
			  void *args)
{
	struct mpic *mpic = domain->host_data;
	int hwirq;

	mutex_lock(&mpic->msi_lock);
	hwirq = bitmap_find_free_region(mpic->msi_used, mpic->msi_doorbell_size,
					order_base_2(nr_irqs));
	mutex_unlock(&mpic->msi_lock);

	if (hwirq < 0)
		return -ENOSPC;

	for (unsigned int i = 0; i < nr_irqs; i++) {
		irq_domain_set_info(domain, virq + i, hwirq + i,
				    &mpic_msi_bottom_irq_chip,
				    domain->host_data, handle_simple_irq,
				    NULL, NULL);
	}

	return 0;
}

static void mpic_msi_free(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs)
{
	struct irq_data *d = irq_domain_get_irq_data(domain, virq);
	struct mpic *mpic = domain->host_data;

	mutex_lock(&mpic->msi_lock);
	bitmap_release_region(mpic->msi_used, d->hwirq, order_base_2(nr_irqs));
	mutex_unlock(&mpic->msi_lock);
}

static const struct irq_domain_ops mpic_msi_domain_ops = {
	.select	= msi_lib_irq_domain_select,
	.alloc	= mpic_msi_alloc,
	.free	= mpic_msi_free,
};

static void mpic_msi_reenable_percpu(struct mpic *mpic)
{
	u32 reg;

	/* Enable MSI doorbell mask and combined cpu local interrupt */
	reg = readl(mpic->per_cpu + MPIC_IN_DRBEL_MASK);
	reg |= mpic->msi_doorbell_mask;
	writel(reg, mpic->per_cpu + MPIC_IN_DRBEL_MASK);

	/* Unmask local doorbell interrupt */
	writel(1, mpic->per_cpu + MPIC_INT_CLEAR_MASK);
}

#define MPIC_MSI_FLAGS_REQUIRED		(MSI_FLAG_USE_DEF_DOM_OPS |	\
					 MSI_FLAG_USE_DEF_CHIP_OPS)
#define MPIC_MSI_FLAGS_SUPPORTED	(MSI_FLAG_MULTI_PCI_MSI |	\
					 MSI_FLAG_PCI_MSIX |		\
					 MSI_GENERIC_FLAGS_MASK)

static const struct msi_parent_ops mpic_msi_parent_ops = {
	.required_flags		= MPIC_MSI_FLAGS_REQUIRED,
	.supported_flags	= MPIC_MSI_FLAGS_SUPPORTED,
	.bus_select_token	= DOMAIN_BUS_NEXUS,
	.bus_select_mask	= MATCH_PCI_MSI,
	.prefix			= "MPIC-",
	.init_dev_msi_info	= msi_lib_init_dev_msi_info,
};

static int __init mpic_msi_init(struct mpic *mpic, struct device_node *node,
				phys_addr_t main_int_phys_base)
{
	mpic->msi_doorbell_addr = main_int_phys_base + MPIC_SW_TRIG_INT;

	mutex_init(&mpic->msi_lock);

	if (mpic_is_ipi_available(mpic)) {
		mpic->msi_doorbell_start = PCI_MSI_DOORBELL_START;
		mpic->msi_doorbell_size = PCI_MSI_DOORBELL_NR;
		mpic->msi_doorbell_mask = PCI_MSI_DOORBELL_MASK;
	} else {
		mpic->msi_doorbell_start = PCI_MSI_FULL_DOORBELL_START;
		mpic->msi_doorbell_size = PCI_MSI_FULL_DOORBELL_NR;
		mpic->msi_doorbell_mask = PCI_MSI_FULL_DOORBELL_MASK;
	}

	struct irq_domain_info info = {
		.fwnode		= of_fwnode_handle(node),
		.ops		= &mpic_msi_domain_ops,
		.host_data	= mpic,
		.size		= mpic->msi_doorbell_size,
	};

	mpic->msi_inner_domain = msi_create_parent_irq_domain(&info, &mpic_msi_parent_ops);
	if (!mpic->msi_inner_domain)
		return -ENOMEM;

	mpic_msi_reenable_percpu(mpic);

	/* Unmask low 16 MSI irqs on non-IPI platforms */
	if (!mpic_is_ipi_available(mpic))
		writel(0, mpic->per_cpu + MPIC_INT_CLEAR_MASK);

	return 0;
}
#else
static __maybe_unused void mpic_msi_reenable_percpu(struct mpic *mpic) {}

static inline int mpic_msi_init(struct mpic *mpic, struct device_node *node,
				phys_addr_t main_int_phys_base)
{
	return 0;
}
#endif

static void mpic_perf_init(struct mpic *mpic)
{
	u32 cpuid;

	/*
	 * This Performance Counter Overflow interrupt is specific for
	 * Armada 370 and XP. It is not available on Armada 375, 38x and 39x.
	 */
	if (!of_machine_is_compatible("marvell,armada-370-xp"))
		return;

	cpuid = cpu_logical_map(smp_processor_id());

	/* Enable Performance Counter Overflow interrupts */
	writel(MPIC_INT_CAUSE_PERF(cpuid), mpic->per_cpu + MPIC_INT_FABRIC_MASK);
}

#ifdef CONFIG_SMP
static void mpic_ipi_mask(struct irq_data *d)
{
	struct mpic *mpic = irq_data_get_irq_chip_data(d);
	u32 reg;

	reg = readl(mpic->per_cpu + MPIC_IN_DRBEL_MASK);
	reg &= ~BIT(d->hwirq);
	writel(reg, mpic->per_cpu + MPIC_IN_DRBEL_MASK);
}

static void mpic_ipi_unmask(struct irq_data *d)
{
	struct mpic *mpic = irq_data_get_irq_chip_data(d);
	u32 reg;

	reg = readl(mpic->per_cpu + MPIC_IN_DRBEL_MASK);
	reg |= BIT(d->hwirq);
	writel(reg, mpic->per_cpu + MPIC_IN_DRBEL_MASK);
}

static void mpic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
{
	struct mpic *mpic = irq_data_get_irq_chip_data(d);
	unsigned int cpu;
	u32 map = 0;

	/* Convert our logical CPU mask into a physical one. */
	for_each_cpu(cpu, mask)
		map |= BIT(cpu_logical_map(cpu));

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before issuing the IPI.
	 */
	dsb();

	/* submit softirq */
	writel((map << 8) | d->hwirq, mpic->base + MPIC_SW_TRIG_INT);
}

static void mpic_ipi_ack(struct irq_data *d)
{
	struct mpic *mpic = irq_data_get_irq_chip_data(d);

	writel(~BIT(d->hwirq), mpic->per_cpu + MPIC_IN_DRBEL_CAUSE);
}

static struct irq_chip mpic_ipi_irqchip = {
	.name		= "IPI",
	.irq_ack	= mpic_ipi_ack,
	.irq_mask	= mpic_ipi_mask,
	.irq_unmask	= mpic_ipi_unmask,
	.ipi_send_mask	= mpic_ipi_send_mask,
};

static int mpic_ipi_alloc(struct irq_domain *d, unsigned int virq,
			  unsigned int nr_irqs, void *args)
{
	for (unsigned int i = 0; i < nr_irqs; i++) {
		irq_set_percpu_devid(virq + i);
		irq_domain_set_info(d, virq + i, i, &mpic_ipi_irqchip, d->host_data,
				    handle_percpu_devid_irq, NULL, NULL);
	}

	return 0;
}

static void mpic_ipi_free(struct irq_domain *d, unsigned int virq,
			  unsigned int nr_irqs)
{
	/* Not freeing IPIs */
}

static const struct irq_domain_ops mpic_ipi_domain_ops = {
	.alloc	= mpic_ipi_alloc,
	.free	= mpic_ipi_free,
};

static void mpic_ipi_resume(struct mpic *mpic)
{
	for (irq_hw_number_t i = 0; i < IPI_DOORBELL_NR; i++) {
		unsigned int virq = irq_find_mapping(mpic->ipi_domain, i);
		struct irq_data *d;

		if (!virq || !irq_percpu_is_enabled(virq))
			continue;

		d = irq_domain_get_irq_data(mpic->ipi_domain, virq);
		mpic_ipi_unmask(d);
	}
}

static int __init mpic_ipi_init(struct mpic *mpic, struct device_node *node)
{
	int base_ipi;

	mpic->ipi_domain = irq_domain_create_linear(of_fwnode_handle(node), IPI_DOORBELL_NR,
						    &mpic_ipi_domain_ops, mpic);
	if (WARN_ON(!mpic->ipi_domain))
		return -ENOMEM;

	irq_domain_update_bus_token(mpic->ipi_domain, DOMAIN_BUS_IPI);
	base_ipi = irq_domain_alloc_irqs(mpic->ipi_domain, IPI_DOORBELL_NR, NUMA_NO_NODE, NULL);
	if (WARN_ON(!base_ipi))
		return -ENOMEM;

	set_smp_ipi_range(base_ipi, IPI_DOORBELL_NR);

	return 0;
}

static int mpic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, bool force)
{
	struct mpic *mpic = irq_data_get_irq_chip_data(d);
	irq_hw_number_t hwirq = irqd_to_hwirq(d);
	unsigned int cpu;

	/* Select a single core from the affinity mask which is online */
	cpu = cpumask_any_and(mask_val, cpu_online_mask);

	atomic_io_modify(mpic->base + MPIC_INT_SOURCE_CTL(hwirq),
			 MPIC_INT_SOURCE_CPU_MASK, BIT(cpu_logical_map(cpu)));

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return IRQ_SET_MASK_OK;
}

static void mpic_smp_cpu_init(struct mpic *mpic)
{
	for (irq_hw_number_t i = 0; i < mpic->domain->hwirq_max; i++)
		writel(i, mpic->per_cpu + MPIC_INT_SET_MASK);

	if (!mpic_is_ipi_available(mpic))
		return;

	/* Disable all IPIs */
	writel(0, mpic->per_cpu + MPIC_IN_DRBEL_MASK);

	/* Clear pending IPIs */
	writel(0, mpic->per_cpu + MPIC_IN_DRBEL_CAUSE);

	/* Unmask IPI interrupt */
	writel(0, mpic->per_cpu + MPIC_INT_CLEAR_MASK);
}

static void mpic_reenable_percpu(struct mpic *mpic)
{
	/* Re-enable per-CPU interrupts that were enabled before suspend */
	for (irq_hw_number_t i = 0; i < MPIC_PER_CPU_IRQS_NR; i++) {
		unsigned int virq = irq_find_mapping(mpic->domain, i);
		struct irq_data *d;

		if (!virq || !irq_percpu_is_enabled(virq))
			continue;

		d = irq_get_irq_data(virq);
		mpic_irq_unmask(d);
	}

	if (mpic_is_ipi_available(mpic))
		mpic_ipi_resume(mpic);

	mpic_msi_reenable_percpu(mpic);
}

static int mpic_starting_cpu(unsigned int cpu)
{
	struct mpic *mpic = irq_get_default_domain()->host_data;

	mpic_perf_init(mpic);
	mpic_smp_cpu_init(mpic);
	mpic_reenable_percpu(mpic);

	return 0;
}

static int mpic_cascaded_starting_cpu(unsigned int cpu)
{
	struct mpic *mpic = mpic_data;

	mpic_perf_init(mpic);
	mpic_reenable_percpu(mpic);
	enable_percpu_irq(mpic->parent_irq, IRQ_TYPE_NONE);

	return 0;
}
#else
static void mpic_smp_cpu_init(struct mpic *mpic) {}
static void mpic_ipi_resume(struct mpic *mpic) {}
#endif

static struct irq_chip mpic_irq_chip = {
	.name		= "MPIC",
	.irq_mask	= mpic_irq_mask,
	.irq_mask_ack	= mpic_irq_mask,
	.irq_unmask	= mpic_irq_unmask,
#ifdef CONFIG_SMP
	.irq_set_affinity = mpic_set_affinity,
#endif
	.flags		= IRQCHIP_SKIP_SET_WAKE | IRQCHIP_MASK_ON_SUSPEND,
};

static int mpic_irq_map(struct irq_domain *domain, unsigned int virq, irq_hw_number_t hwirq)
{
	struct mpic *mpic = domain->host_data;

	/* IRQs 0 and 1 cannot be mapped, they are handled internally */
	if (hwirq <= 1)
		return -EINVAL;

	irq_set_chip_data(virq, mpic);

	mpic_irq_mask(irq_get_irq_data(virq));
	if (!mpic_is_percpu_irq(hwirq))
		writel(hwirq, mpic->per_cpu + MPIC_INT_CLEAR_MASK);
	else
		writel(hwirq, mpic->base + MPIC_INT_SET_ENABLE);
	irq_set_status_flags(virq, IRQ_LEVEL);

	if (mpic_is_percpu_irq(hwirq)) {
		irq_set_percpu_devid(virq);
		irq_set_chip_and_handler(virq, &mpic_irq_chip, handle_percpu_devid_irq);
	} else {
		irq_set_chip_and_handler(virq, &mpic_irq_chip, handle_level_irq);
		irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
	}
	irq_set_probe(virq);

	return 0;
}

static const struct irq_domain_ops mpic_irq_ops = {
	.map	= mpic_irq_map,
	.xlate	= irq_domain_xlate_onecell,
};
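
/*
 * Doorbell summary interrupts: per-CPU hwirqs 0 and 1 summarize the 32
 * doorbell causes (causes 0-15 on hwirq 0, causes 16-31 on hwirq 1, see also
 * mpic_resume()), which is why those two hwirqs are never mapped as regular
 * interrupts in mpic_irq_map(). On IPI platforms doorbells 0-7 carry IPIs and
 * doorbells 16-31 carry MSIs; on non-IPI platforms all 32 doorbells carry
 * MSIs. The handlers below read MPIC_IN_DRBEL_CAUSE and demultiplex
 * accordingly.
 */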

#ifdef CONFIG_PCI_MSI
static void mpic_handle_msi_irq(struct mpic *mpic)
{
	unsigned long cause;
	unsigned int i;

	cause = readl_relaxed(mpic->per_cpu + MPIC_IN_DRBEL_CAUSE);
	cause &= mpic->msi_doorbell_mask;
	writel(~cause, mpic->per_cpu + MPIC_IN_DRBEL_CAUSE);

	for_each_set_bit(i, &cause, BITS_PER_LONG)
		generic_handle_domain_irq(mpic->msi_inner_domain, i - mpic->msi_doorbell_start);
}
#else
static void mpic_handle_msi_irq(struct mpic *mpic) {}
#endif

#ifdef CONFIG_SMP
static void mpic_handle_ipi_irq(struct mpic *mpic)
{
	unsigned long cause;
	irq_hw_number_t i;

	cause = readl_relaxed(mpic->per_cpu + MPIC_IN_DRBEL_CAUSE);
	cause &= IPI_DOORBELL_MASK;

	for_each_set_bit(i, &cause, IPI_DOORBELL_NR)
		generic_handle_domain_irq(mpic->ipi_domain, i);
}
#else
static inline void mpic_handle_ipi_irq(struct mpic *mpic) {}
#endif

static void mpic_handle_cascade_irq(struct irq_desc *desc)
{
	struct mpic *mpic = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned long cause;
	u32 irqsrc, cpuid;
	irq_hw_number_t i;

	chained_irq_enter(chip, desc);

	cause = readl_relaxed(mpic->per_cpu + MPIC_PPI_CAUSE);
	cpuid = cpu_logical_map(smp_processor_id());

	for_each_set_bit(i, &cause, MPIC_PER_CPU_IRQS_NR) {
		irqsrc = readl_relaxed(mpic->base + MPIC_INT_SOURCE_CTL(i));

		/*
		 * Check if the interrupt is not masked on current CPU.
		 * Test IRQ (0-1) and FIQ (8-9) mask bits.
		 */
		if (!(irqsrc & MPIC_INT_IRQ_FIQ_MASK(cpuid)))
			continue;

		if (i == 0 || i == 1) {
			mpic_handle_msi_irq(mpic);
			continue;
		}

		generic_handle_domain_irq(mpic->domain, i);
	}

	chained_irq_exit(chip, desc);
}
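
/*
 * Top-level entry point used when the MPIC is the primary interrupt
 * controller (IPI platforms, see mpic_of_init()). Interrupts are acknowledged
 * from MPIC_CPU_INTACK until the returned IID no longer denotes a pending
 * interrupt (values above 1022); IIDs 1 and 0 are the MSI and IPI doorbell
 * summary interrupts respectively.
 */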

static void __exception_irq_entry mpic_handle_irq(struct pt_regs *regs)
{
	struct mpic *mpic = irq_get_default_domain()->host_data;
	irq_hw_number_t i;
	u32 irqstat;

	do {
		irqstat = readl_relaxed(mpic->per_cpu + MPIC_CPU_INTACK);
		i = FIELD_GET(MPIC_CPU_INTACK_IID_MASK, irqstat);

		if (i > 1022)
			break;

		if (i > 1)
			generic_handle_domain_irq(mpic->domain, i);

		/* MSI handling */
		if (i == 1)
			mpic_handle_msi_irq(mpic);

		/* IPI handling */
		if (i == 0)
			mpic_handle_ipi_irq(mpic);
	} while (1);
}

static int mpic_suspend(void)
{
	struct mpic *mpic = mpic_data;

	mpic->doorbell_mask = readl(mpic->per_cpu + MPIC_IN_DRBEL_MASK);

	return 0;
}

static void mpic_resume(void)
{
	struct mpic *mpic = mpic_data;
	bool src0, src1;

	/* Re-enable interrupts */
	for (irq_hw_number_t i = 0; i < mpic->domain->hwirq_max; i++) {
		unsigned int virq = irq_find_mapping(mpic->domain, i);
		struct irq_data *d;

		if (!virq)
			continue;

		d = irq_get_irq_data(virq);

		if (!mpic_is_percpu_irq(i)) {
			/* Non per-CPU interrupts */
			writel(i, mpic->per_cpu + MPIC_INT_CLEAR_MASK);
			if (!irqd_irq_disabled(d))
				mpic_irq_unmask(d);
		} else {
			/* Per-CPU interrupts */
			writel(i, mpic->base + MPIC_INT_SET_ENABLE);

			/*
			 * Re-enable on the current CPU, mpic_reenable_percpu()
			 * will take care of secondary CPUs when they come up.
			 */
			if (irq_percpu_is_enabled(virq))
				mpic_irq_unmask(d);
		}
	}

	/* Reconfigure doorbells for IPIs and MSIs */
	writel(mpic->doorbell_mask, mpic->per_cpu + MPIC_IN_DRBEL_MASK);

	if (mpic_is_ipi_available(mpic)) {
		src0 = mpic->doorbell_mask & IPI_DOORBELL_MASK;
		src1 = mpic->doorbell_mask & PCI_MSI_DOORBELL_MASK;
	} else {
		src0 = mpic->doorbell_mask & PCI_MSI_FULL_DOORBELL_SRC0_MASK;
		src1 = mpic->doorbell_mask & PCI_MSI_FULL_DOORBELL_SRC1_MASK;
	}

	if (src0)
		writel(0, mpic->per_cpu + MPIC_INT_CLEAR_MASK);
	if (src1)
		writel(1, mpic->per_cpu + MPIC_INT_CLEAR_MASK);

	if (mpic_is_ipi_available(mpic))
		mpic_ipi_resume(mpic);
}

static struct syscore_ops mpic_syscore_ops = {
	.suspend	= mpic_suspend,
	.resume		= mpic_resume,
};

static int __init mpic_map_region(struct device_node *np, int index,
				  void __iomem **base, phys_addr_t *phys_base)
{
	struct resource res;
	int err;

	err = of_address_to_resource(np, index, &res);
	if (WARN_ON(err))
		goto fail;

	if (WARN_ON(!request_mem_region(res.start, resource_size(&res), np->full_name))) {
		err = -EBUSY;
		goto fail;
	}

	*base = ioremap(res.start, resource_size(&res));
	if (WARN_ON(!*base)) {
		err = -ENOMEM;
		goto fail;
	}

	if (phys_base)
		*phys_base = res.start;

	return 0;

fail:
	pr_err("%pOF: Unable to map resource %d: %pE\n", np, index, ERR_PTR(err));
	return err;
}

static int __init mpic_of_init(struct device_node *node, struct device_node *parent)
{
	phys_addr_t phys_base;
	unsigned int nr_irqs;
	struct mpic *mpic;
	int err;

	mpic = kzalloc(sizeof(*mpic), GFP_KERNEL);
	if (WARN_ON(!mpic))
		return -ENOMEM;

	mpic_data = mpic;

	err = mpic_map_region(node, 0, &mpic->base, &phys_base);
	if (err)
		return err;

	err = mpic_map_region(node, 1, &mpic->per_cpu, NULL);
	if (err)
		return err;

	nr_irqs = FIELD_GET(MPIC_INT_CONTROL_NUMINT_MASK, readl(mpic->base + MPIC_INT_CONTROL));

	for (irq_hw_number_t i = 0; i < nr_irqs; i++)
		writel(i, mpic->base + MPIC_INT_CLEAR_ENABLE);

	/*
	 * Initialize mpic->parent_irq before calling any other functions, since
	 * it is used to distinguish between IPI and non-IPI platforms.
	 */
	mpic->parent_irq = irq_of_parse_and_map(node, 0);

	/*
	 * On non-IPI platforms the driver currently supports only the per-CPU
	 * interrupts (the first 29 interrupts). See mpic_handle_cascade_irq().
	 */
	if (!mpic_is_ipi_available(mpic))
		nr_irqs = MPIC_PER_CPU_IRQS_NR;

	mpic->domain = irq_domain_create_linear(of_fwnode_handle(node), nr_irqs,
						&mpic_irq_ops, mpic);
	if (!mpic->domain) {
		pr_err("%pOF: Unable to add IRQ domain\n", node);
		return -ENOMEM;
	}

	irq_domain_update_bus_token(mpic->domain, DOMAIN_BUS_WIRED);

	/* Setup for the boot CPU */
	mpic_perf_init(mpic);
	mpic_smp_cpu_init(mpic);

	err = mpic_msi_init(mpic, node, phys_base);
	if (err) {
		pr_err("%pOF: Unable to initialize MSI domain\n", node);
		return err;
	}

	if (mpic_is_ipi_available(mpic)) {
		irq_set_default_domain(mpic->domain);
		set_handle_irq(mpic_handle_irq);
#ifdef CONFIG_SMP
		err = mpic_ipi_init(mpic, node);
		if (err) {
			pr_err("%pOF: Unable to initialize IPI domain\n", node);
			return err;
		}

		cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_ARMADA_XP_STARTING,
					  "irqchip/armada/ipi:starting",
					  mpic_starting_cpu, NULL);
#endif
	} else {
#ifdef CONFIG_SMP
		cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_ARMADA_XP_STARTING,
					  "irqchip/armada/cascade:starting",
					  mpic_cascaded_starting_cpu, NULL);
#endif
		irq_set_chained_handler_and_data(mpic->parent_irq,
						 mpic_handle_cascade_irq, mpic);
	}

	register_syscore_ops(&mpic_syscore_ops);

	return 0;
}

IRQCHIP_DECLARE(marvell_mpic, "marvell,mpic", mpic_of_init);