/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 */

#define pr_fmt(fmt) "irq-mips-gic: " fmt

#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/clocksource.h>
#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/smp.h>

#include <asm/mips-cps.h>
#include <asm/setup.h>
#include <asm/traps.h>

#include <dt-bindings/interrupt-controller/mips-gic.h>

#define GIC_MAX_INTRS		256
#define GIC_MAX_LONGS		BITS_TO_LONGS(GIC_MAX_INTRS)

/* Add 2 to convert GIC CPU pin to core interrupt */
#define GIC_CPU_PIN_OFFSET	2

/* An interrupt mapped to pin X causes the GIC to generate vector (X+1). */
#define GIC_PIN_TO_VEC_OFFSET	1

/* Convert between local/shared IRQ number and GIC HW IRQ number. */
#define GIC_LOCAL_HWIRQ_BASE	0
#define GIC_LOCAL_TO_HWIRQ(x)	(GIC_LOCAL_HWIRQ_BASE + (x))
#define GIC_HWIRQ_TO_LOCAL(x)	((x) - GIC_LOCAL_HWIRQ_BASE)
#define GIC_SHARED_HWIRQ_BASE	GIC_NUM_LOCAL_INTRS
#define GIC_SHARED_TO_HWIRQ(x)	(GIC_SHARED_HWIRQ_BASE + (x))
#define GIC_HWIRQ_TO_SHARED(x)	((x) - GIC_SHARED_HWIRQ_BASE)

void __iomem *mips_gic_base;

static DEFINE_PER_CPU_READ_MOSTLY(unsigned long[GIC_MAX_LONGS], pcpu_masks);

static DEFINE_RAW_SPINLOCK(gic_lock);
static struct irq_domain *gic_irq_domain;
static int gic_shared_intrs;
static unsigned int gic_cpu_pin;
static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;

#ifdef CONFIG_GENERIC_IRQ_IPI
static DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS);
static DECLARE_BITMAP(ipi_available, GIC_MAX_INTRS);
#endif /* CONFIG_GENERIC_IRQ_IPI */

static struct gic_all_vpes_chip_data {
	u32	map;
	bool	mask;
} gic_all_vpes_chip_data[GIC_NUM_LOCAL_INTRS];

static int __gic_with_next_online_cpu(int prev)
{
	unsigned int cpu;

	/* Discover the next online CPU */
	cpu = cpumask_next(prev, cpu_online_mask);

	/* If there isn't one, we're done */
	if (cpu >= nr_cpu_ids)
		return cpu;

	/*
	 * Set GIC_VL_OTHER so that accesses reach the next CPU's GIC local
	 * register block. Since the caller holds gic_lock, nothing can
	 * clobber the value we write.
	 */
	write_gic_vl_other(mips_cm_vp_id(cpu));

	return cpu;
}

static inline void gic_unlock_cluster(void)
{
	if (mips_cps_multicluster_cpus())
		mips_cm_unlock_other();
}

/**
 * for_each_online_cpu_gic() - Iterate over online CPUs, access local registers
 * @cpu: An integer variable to hold the current CPU number
 * @gic_lock: A pointer to the raw spin lock used as a guard
 *
 * Iterate over online CPUs & configure the other/redirect register region to
 * access each CPU's GIC local register block, which can be accessed from the
 * loop body using read_gic_vo_*() or write_gic_vo_*() accessor functions or
 * their derivatives.
 */
#define for_each_online_cpu_gic(cpu, gic_lock)		\
	guard(raw_spinlock_irqsave)(gic_lock);		\
	for ((cpu) = __gic_with_next_online_cpu(-1);	\
	     (cpu) < nr_cpu_ids;			\
	     gic_unlock_cluster(),			\
	     (cpu) = __gic_with_next_online_cpu(cpu))

/**
 * gic_irq_lock_cluster() - Lock redirect block access to IRQ's cluster
 * @d: struct irq_data corresponding to the interrupt we're interested in
 *
 * Locks redirect register block access to the global register block of the GIC
 * within the remote cluster that the IRQ corresponding to @d is affine to,
 * returning true when this redirect block setup & locking has been performed.
 *
 * If @d is affine to the local cluster then no locking is performed and this
 * function will return false, indicating to the caller that it should access
 * the local cluster's registers without the overhead of indirection through
 * the redirect block.
 *
 * In summary, if this function returns true then the caller should access GIC
 * registers using redirect register block accessors & then call
 * mips_cm_unlock_other() when done. If this function returns false then the
 * caller should trivially access GIC registers in the local cluster.
 *
 * Returns true if locking performed, else false.
 */
static bool gic_irq_lock_cluster(struct irq_data *d)
{
	unsigned int cpu, cl;

	cpu = cpumask_first(irq_data_get_effective_affinity_mask(d));
	BUG_ON(cpu >= NR_CPUS);

	cl = cpu_cluster(&cpu_data[cpu]);
	if (cl == cpu_cluster(&current_cpu_data))
		return false;
	if (mips_cps_numcores(cl) == 0)
		return false;
	mips_cm_lock_other(cl, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
	return true;
}

static void gic_clear_pcpu_masks(unsigned int intr)
{
	unsigned int i;

	/* Clear the interrupt's bit in all pcpu_masks */
	for_each_possible_cpu(i)
		clear_bit(intr, per_cpu_ptr(pcpu_masks, i));
}

static bool gic_local_irq_is_routable(int intr)
{
	u32 vpe_ctl;

	/* All local interrupts are routable in EIC mode. */
	if (cpu_has_veic)
		return true;

	vpe_ctl = read_gic_vl_ctl();
	switch (intr) {
	case GIC_LOCAL_INT_TIMER:
		return vpe_ctl & GIC_VX_CTL_TIMER_ROUTABLE;
	case GIC_LOCAL_INT_PERFCTR:
		return vpe_ctl & GIC_VX_CTL_PERFCNT_ROUTABLE;
	case GIC_LOCAL_INT_FDC:
		return vpe_ctl & GIC_VX_CTL_FDC_ROUTABLE;
	case GIC_LOCAL_INT_SWINT0:
	case GIC_LOCAL_INT_SWINT1:
		return vpe_ctl & GIC_VX_CTL_SWINT_ROUTABLE;
	default:
		return true;
	}
}

static void gic_bind_eic_interrupt(int irq, int set)
{
	/* Convert irq vector # to hw int # */
	irq -= GIC_PIN_TO_VEC_OFFSET;

	/* Set irq to use shadow set */
	write_gic_vl_eic_shadow_set(irq, set);
}

static void gic_send_ipi(struct irq_data *d, unsigned int cpu)
{
	irq_hw_number_t hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(d));

	if (gic_irq_lock_cluster(d)) {
		write_gic_redir_wedge(GIC_WEDGE_RW | hwirq);
		mips_cm_unlock_other();
	} else {
		write_gic_wedge(GIC_WEDGE_RW | hwirq);
	}
}

int gic_get_c0_compare_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER))
		return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_TIMER));
}

int gic_get_c0_perfcount_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_PERFCTR)) {
		/* Is the performance counter shared with the timer? */
		if (cp0_perfcount_irq < 0)
			return -1;
		return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
	}
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_PERFCTR));
}

int gic_get_c0_fdc_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_FDC)) {
		/* Is the FDC IRQ even present? */
		if (cp0_fdc_irq < 0)
			return -1;
		return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
	}

	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC));
}

static void gic_handle_shared_int(bool chained)
{
	unsigned int intr;
	unsigned long *pcpu_mask;
	DECLARE_BITMAP(pending, GIC_MAX_INTRS);

	/* Get per-cpu bitmaps */
	pcpu_mask = this_cpu_ptr(pcpu_masks);

	if (mips_cm_is64)
		__ioread64_copy(pending, addr_gic_pend(),
				DIV_ROUND_UP(gic_shared_intrs, 64));
	else
		__ioread32_copy(pending, addr_gic_pend(),
				DIV_ROUND_UP(gic_shared_intrs, 32));

	bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs);

	for_each_set_bit(intr, pending, gic_shared_intrs) {
		if (chained)
			generic_handle_domain_irq(gic_irq_domain,
						  GIC_SHARED_TO_HWIRQ(intr));
		else
			do_domain_IRQ(gic_irq_domain,
				      GIC_SHARED_TO_HWIRQ(intr));
	}
}

static void gic_mask_irq(struct irq_data *d)
{
	unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq);

	if (gic_irq_lock_cluster(d)) {
		write_gic_redir_rmask(intr);
		mips_cm_unlock_other();
	} else {
		write_gic_rmask(intr);
	}

	gic_clear_pcpu_masks(intr);
}

static void gic_unmask_irq(struct irq_data *d)
{
	unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq);
	unsigned int cpu;

	if (gic_irq_lock_cluster(d)) {
		write_gic_redir_smask(intr);
		mips_cm_unlock_other();
	} else {
		write_gic_smask(intr);
	}

	gic_clear_pcpu_masks(intr);
	cpu = cpumask_first(irq_data_get_effective_affinity_mask(d));
	set_bit(intr, per_cpu_ptr(pcpu_masks, cpu));
}

static void gic_ack_irq(struct irq_data *d)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);

	if (gic_irq_lock_cluster(d)) {
		write_gic_redir_wedge(irq);
		mips_cm_unlock_other();
	} else {
		write_gic_wedge(irq);
	}
}

static int gic_set_type(struct irq_data *d, unsigned int type)
{
	unsigned int irq, pol, trig, dual;
	unsigned long flags;

	irq = GIC_HWIRQ_TO_SHARED(d->hwirq);

	raw_spin_lock_irqsave(&gic_lock, flags);
	switch (type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_EDGE_FALLING:
		pol = GIC_POL_FALLING_EDGE;
		trig = GIC_TRIG_EDGE;
		dual = GIC_DUAL_SINGLE;
		break;
	case IRQ_TYPE_EDGE_RISING:
		pol = GIC_POL_RISING_EDGE;
		trig = GIC_TRIG_EDGE;
		dual = GIC_DUAL_SINGLE;
		break;
	case IRQ_TYPE_EDGE_BOTH:
		pol = 0; /* Doesn't matter */
		trig = GIC_TRIG_EDGE;
		dual = GIC_DUAL_DUAL;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		pol = GIC_POL_ACTIVE_LOW;
		trig = GIC_TRIG_LEVEL;
		dual = GIC_DUAL_SINGLE;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
	default:
		pol = GIC_POL_ACTIVE_HIGH;
		trig = GIC_TRIG_LEVEL;
		dual = GIC_DUAL_SINGLE;
		break;
	}

	if (gic_irq_lock_cluster(d)) {
		change_gic_redir_pol(irq, pol);
		change_gic_redir_trig(irq, trig);
		change_gic_redir_dual(irq, dual);
		mips_cm_unlock_other();
	} else {
		change_gic_pol(irq, pol);
		change_gic_trig(irq, trig);
		change_gic_dual(irq, dual);
	}

	if (trig == GIC_TRIG_EDGE)
		irq_set_chip_handler_name_locked(d, &gic_edge_irq_controller,
						 handle_edge_irq, NULL);
	else
		irq_set_chip_handler_name_locked(d, &gic_level_irq_controller,
						 handle_level_irq, NULL);
	raw_spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}

#ifdef CONFIG_SMP
static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
			    bool force)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	unsigned int cpu, cl, old_cpu, old_cl;
	unsigned long flags;

	/*
	 * The GIC specifies that we can only route an interrupt to one VP(E),
	 * ie. CPU in Linux parlance, at a time. Therefore we always route to
	 * the first online CPU in the mask.
	 */
	cpu = cpumask_first_and(cpumask, cpu_online_mask);
	if (cpu >= NR_CPUS)
		return -EINVAL;

	old_cpu = cpumask_first(irq_data_get_effective_affinity_mask(d));
	old_cl = cpu_cluster(&cpu_data[old_cpu]);
	cl = cpu_cluster(&cpu_data[cpu]);

	raw_spin_lock_irqsave(&gic_lock, flags);

	/*
	 * If we're moving affinity between clusters, stop routing the
	 * interrupt to any VP(E) in the old cluster.
	 */
	if (cl != old_cl) {
		if (gic_irq_lock_cluster(d)) {
			write_gic_redir_map_vp(irq, 0);
			mips_cm_unlock_other();
		} else {
			write_gic_map_vp(irq, 0);
		}
	}

	/*
	 * Update effective affinity - after this gic_irq_lock_cluster() will
	 * begin operating on the new cluster.
	 */
	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	/*
	 * If we're moving affinity between clusters, configure the interrupt
	 * trigger type in the new cluster.
	 */
	if (cl != old_cl)
		gic_set_type(d, irqd_get_trigger_type(d));

	/* Route the interrupt to its new VP(E) */
	if (gic_irq_lock_cluster(d)) {
		write_gic_redir_map_pin(irq,
					GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin);
		write_gic_redir_map_vp(irq, BIT(mips_cm_vp_id(cpu)));

		/* Update the pcpu_masks */
		gic_clear_pcpu_masks(irq);
		if (read_gic_redir_mask(irq))
			set_bit(irq, per_cpu_ptr(pcpu_masks, cpu));

		mips_cm_unlock_other();
	} else {
		write_gic_map_pin(irq, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin);
		write_gic_map_vp(irq, BIT(mips_cm_vp_id(cpu)));

		/* Update the pcpu_masks */
		gic_clear_pcpu_masks(irq);
		if (read_gic_mask(irq))
			set_bit(irq, per_cpu_ptr(pcpu_masks, cpu));
	}

	raw_spin_unlock_irqrestore(&gic_lock, flags);

	return IRQ_SET_MASK_OK;
}
#endif

static struct irq_chip gic_level_irq_controller = {
	.name			= "MIPS GIC",
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_set_type		= gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	= gic_set_affinity,
#endif
};

static struct irq_chip gic_edge_irq_controller = {
	.name			= "MIPS GIC",
	.irq_ack		= gic_ack_irq,
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_set_type		= gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	= gic_set_affinity,
#endif
	.ipi_send_single	= gic_send_ipi,
};

static void gic_handle_local_int(bool chained)
{
	unsigned long pending, masked;
	unsigned int intr;

	pending = read_gic_vl_pend();
	masked = read_gic_vl_mask();

	bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS);

	for_each_set_bit(intr, &pending, GIC_NUM_LOCAL_INTRS) {
		if (chained)
			generic_handle_domain_irq(gic_irq_domain,
						  GIC_LOCAL_TO_HWIRQ(intr));
		else
			do_domain_IRQ(gic_irq_domain,
				      GIC_LOCAL_TO_HWIRQ(intr));
	}
}

static void gic_mask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	write_gic_vl_rmask(BIT(intr));
}

static void gic_unmask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	write_gic_vl_smask(BIT(intr));
}

static struct irq_chip gic_local_irq_controller = {
	.name			= "MIPS GIC Local",
	.irq_mask		= gic_mask_local_irq,
	.irq_unmask		= gic_unmask_local_irq,
};

static void gic_mask_local_irq_all_vpes(struct irq_data *d)
{
	struct gic_all_vpes_chip_data *cd;
	int intr, cpu;

	if (!mips_cps_multicluster_cpus())
		return;

	intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	cd = irq_data_get_irq_chip_data(d);
	cd->mask = false;

	for_each_online_cpu_gic(cpu, &gic_lock)
		write_gic_vo_rmask(BIT(intr));
}

static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
{
	struct gic_all_vpes_chip_data *cd;
	int intr, cpu;

	if (!mips_cps_multicluster_cpus())
		return;

	intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	cd = irq_data_get_irq_chip_data(d);
	cd->mask = true;

	for_each_online_cpu_gic(cpu, &gic_lock)
		write_gic_vo_smask(BIT(intr));
}

static void gic_all_vpes_irq_cpu_online(void)
{
	static const unsigned int local_intrs[] = {
		GIC_LOCAL_INT_TIMER,
		GIC_LOCAL_INT_PERFCTR,
		GIC_LOCAL_INT_FDC,
	};
	unsigned long flags;
	int i;

	raw_spin_lock_irqsave(&gic_lock, flags);

	for (i = 0; i < ARRAY_SIZE(local_intrs); i++) {
		unsigned int intr = local_intrs[i];
		struct gic_all_vpes_chip_data *cd;

		if (!gic_local_irq_is_routable(intr))
			continue;
		cd = &gic_all_vpes_chip_data[intr];
		write_gic_vl_map(mips_gic_vx_map_reg(intr), cd->map);
		if (cd->mask)
			write_gic_vl_smask(BIT(intr));
	}

	raw_spin_unlock_irqrestore(&gic_lock, flags);
}

static struct irq_chip gic_all_vpes_local_irq_controller = {
	.name			= "MIPS GIC Local",
	.irq_mask		= gic_mask_local_irq_all_vpes,
	.irq_unmask		= gic_unmask_local_irq_all_vpes,
};

static void __gic_irq_dispatch(void)
{
	gic_handle_local_int(false);
	gic_handle_shared_int(false);
}

static void gic_irq_dispatch(struct irq_desc *desc)
{
	gic_handle_local_int(true);
	gic_handle_shared_int(true);
}

static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
				     irq_hw_number_t hw, unsigned int cpu)
{
	int intr = GIC_HWIRQ_TO_SHARED(hw);
	struct irq_data *data;
	unsigned long flags;

	data = irq_get_irq_data(virq);
	irq_data_update_effective_affinity(data, cpumask_of(cpu));

	raw_spin_lock_irqsave(&gic_lock, flags);

	/* Route the interrupt to its VP(E) */
	if (gic_irq_lock_cluster(data)) {
		write_gic_redir_map_pin(intr,
					GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin);
		write_gic_redir_map_vp(intr, BIT(mips_cm_vp_id(cpu)));
		mips_cm_unlock_other();
	} else {
		write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin);
		write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu)));
	}

	raw_spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}

static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	if (intsize != 3)
		return -EINVAL;

	if (intspec[0] == GIC_SHARED)
		*out_hwirq = GIC_SHARED_TO_HWIRQ(intspec[1]);
	else if (intspec[0] == GIC_LOCAL)
		*out_hwirq = GIC_LOCAL_TO_HWIRQ(intspec[1]);
	else
		return -EINVAL;
	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;

	return 0;
}

static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
			      irq_hw_number_t hwirq)
{
	struct gic_all_vpes_chip_data *cd;
	unsigned int intr;
	int err, cpu;
	u32 map;

	if (hwirq >= GIC_SHARED_HWIRQ_BASE) {
#ifdef CONFIG_GENERIC_IRQ_IPI
		/* verify that shared irqs don't conflict with an IPI irq */
		if (test_bit(GIC_HWIRQ_TO_SHARED(hwirq), ipi_resrv))
			return -EBUSY;
#endif /* CONFIG_GENERIC_IRQ_IPI */

		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_level_irq_controller,
						    NULL);
		if (err)
			return err;

		irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
		return gic_shared_irq_domain_map(d, virq, hwirq, 0);
	}

	intr = GIC_HWIRQ_TO_LOCAL(hwirq);
	map = GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin;

	/*
	 * If adding support for more per-cpu interrupts, keep the
	 * array in gic_all_vpes_irq_cpu_online() in sync.
	 */
	switch (intr) {
	case GIC_LOCAL_INT_TIMER:
	case GIC_LOCAL_INT_PERFCTR:
	case GIC_LOCAL_INT_FDC:
		/*
		 * HACK: These are all really percpu interrupts, but
		 * the rest of the MIPS kernel code does not use the
		 * percpu IRQ API for them.
		 */
		cd = &gic_all_vpes_chip_data[intr];
		cd->map = map;
		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_all_vpes_local_irq_controller,
						    cd);
		if (err)
			return err;

		irq_set_handler(virq, handle_percpu_irq);
		break;

	default:
		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_local_irq_controller,
						    NULL);
		if (err)
			return err;

		irq_set_handler(virq, handle_percpu_devid_irq);
		irq_set_percpu_devid(virq);
		break;
	}

	if (!gic_local_irq_is_routable(intr))
		return -EPERM;

	if (mips_cps_multicluster_cpus()) {
		for_each_online_cpu_gic(cpu, &gic_lock)
			write_gic_vo_map(mips_gic_vx_map_reg(intr), map);
	}

	return 0;
}

static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	struct irq_fwspec *fwspec = arg;
	irq_hw_number_t hwirq;

	if (fwspec->param[0] == GIC_SHARED)
		hwirq = GIC_SHARED_TO_HWIRQ(fwspec->param[1]);
	else
		hwirq = GIC_LOCAL_TO_HWIRQ(fwspec->param[1]);

	return gic_irq_domain_map(d, virq, hwirq);
}

static void gic_irq_domain_free(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs)
{
}

static const struct irq_domain_ops gic_irq_domain_ops = {
	.xlate = gic_irq_domain_xlate,
	.alloc = gic_irq_domain_alloc,
	.free = gic_irq_domain_free,
	.map = gic_irq_domain_map,
};

#ifdef CONFIG_GENERIC_IRQ_IPI

static int gic_ipi_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	/*
	 * There's nothing to translate here. hwirq is dynamically allocated and
	 * the irq type is always edge triggered.
	 */
	*out_hwirq = 0;
	*out_type = IRQ_TYPE_EDGE_RISING;

	return 0;
}

static int gic_ipi_domain_alloc(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	struct cpumask *ipimask = arg;
	irq_hw_number_t hwirq, base_hwirq;
	int cpu, ret, i;

	base_hwirq = find_first_bit(ipi_available, gic_shared_intrs);
	if (base_hwirq == gic_shared_intrs)
		return -ENOMEM;

	/* check that we have enough space */
	for (i = base_hwirq; i < nr_irqs; i++) {
		if (!test_bit(i, ipi_available))
			return -EBUSY;
	}
	bitmap_clear(ipi_available, base_hwirq, nr_irqs);

	/* map the hwirq for each cpu consecutively */
	i = 0;
	for_each_cpu(cpu, ipimask) {
		hwirq = GIC_SHARED_TO_HWIRQ(base_hwirq + i);

		ret = irq_domain_set_hwirq_and_chip(d, virq + i, hwirq,
						    &gic_edge_irq_controller,
						    NULL);
		if (ret)
			goto error;

		ret = irq_domain_set_hwirq_and_chip(d->parent, virq + i, hwirq,
						    &gic_edge_irq_controller,
						    NULL);
		if (ret)
			goto error;

		/* Set affinity to cpu. */
		irq_data_update_effective_affinity(irq_get_irq_data(virq + i),
						   cpumask_of(cpu));
		ret = irq_set_irq_type(virq + i, IRQ_TYPE_EDGE_RISING);
		if (ret)
			goto error;

		ret = gic_shared_irq_domain_map(d, virq + i, hwirq, cpu);
		if (ret)
			goto error;

		i++;
	}

	return 0;
error:
	bitmap_set(ipi_available, base_hwirq, nr_irqs);
	return ret;
}

static void gic_ipi_domain_free(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs)
{
	irq_hw_number_t base_hwirq;
	struct irq_data *data;

	data = irq_get_irq_data(virq);
	if (!data)
		return;

	base_hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(data));
	bitmap_set(ipi_available, base_hwirq, nr_irqs);
}

static int gic_ipi_domain_match(struct irq_domain *d, struct device_node *node,
				enum irq_domain_bus_token bus_token)
{
	bool is_ipi;

	switch (bus_token) {
	case DOMAIN_BUS_IPI:
		is_ipi = d->bus_token == bus_token;
		return (!node || to_of_node(d->fwnode) == node) && is_ipi;
	default:
		return 0;
	}
}

static const struct irq_domain_ops gic_ipi_domain_ops = {
	.xlate = gic_ipi_domain_xlate,
	.alloc = gic_ipi_domain_alloc,
	.free = gic_ipi_domain_free,
	.match = gic_ipi_domain_match,
};

static int gic_register_ipi_domain(struct device_node *node)
{
	struct irq_domain *gic_ipi_domain;
	unsigned int v[2], num_ipis;

	gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain,
						  IRQ_DOMAIN_FLAG_IPI_PER_CPU,
						  GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
						  node, &gic_ipi_domain_ops, NULL);
	if (!gic_ipi_domain) {
		pr_err("Failed to add IPI domain");
		return -ENXIO;
	}

	irq_domain_update_bus_token(gic_ipi_domain, DOMAIN_BUS_IPI);

	if (node &&
	    !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) {
		bitmap_set(ipi_resrv, v[0], v[1]);
	} else {
		/*
		 * Reserve 2 interrupts per possible CPU/VP for use as IPIs,
		 * meeting the requirements of arch/mips SMP.
		 */
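		/*
		 * Worked example with purely illustrative figures: on a
		 * system with 4 possible VP(E)s and 256 shared interrupts,
		 * num_ipis below is 2 * 4 = 8, so shared interrupts
		 * 248..255 (the top of the shared range) end up reserved
		 * for IPI use.
		 */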
		num_ipis = 2 * num_possible_cpus();
		bitmap_set(ipi_resrv, gic_shared_intrs - num_ipis, num_ipis);
	}

	bitmap_copy(ipi_available, ipi_resrv, GIC_MAX_INTRS);

	return 0;
}

#else /* !CONFIG_GENERIC_IRQ_IPI */

static inline int gic_register_ipi_domain(struct device_node *node)
{
	return 0;
}

#endif /* !CONFIG_GENERIC_IRQ_IPI */

static int gic_cpu_startup(unsigned int cpu)
{
	/* Enable or disable EIC */
	change_gic_vl_ctl(GIC_VX_CTL_EIC,
			  cpu_has_veic ? GIC_VX_CTL_EIC : 0);

	/* Clear all local IRQ masks (ie. disable all local interrupts) */
	write_gic_vl_rmask(~0);

	/* Enable desired interrupts */
	gic_all_vpes_irq_cpu_online();

	return 0;
}

static int __init gic_of_init(struct device_node *node,
			      struct device_node *parent)
{
	unsigned int cpu_vec, i, gicconfig, cl, nclusters;
	unsigned long reserved;
	phys_addr_t gic_base;
	struct resource res;
	size_t gic_len;
	int ret;

	/* Find the first available CPU vector. */
	i = 0;
	reserved = (C_SW0 | C_SW1) >> __ffs(C_SW0);
	while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors",
					   i++, &cpu_vec))
		reserved |= BIT(cpu_vec);

	cpu_vec = find_first_zero_bit(&reserved, hweight_long(ST0_IM));
	if (cpu_vec == hweight_long(ST0_IM)) {
		pr_err("No CPU vectors available\n");
		return -ENODEV;
	}

	if (of_address_to_resource(node, 0, &res)) {
		/*
		 * Probe the CM for the GIC base address if not specified
		 * in the device-tree.
		 */
		if (mips_cm_present()) {
			gic_base = read_gcr_gic_base() &
				~CM_GCR_GIC_BASE_GICEN;
			gic_len = 0x20000;
			pr_warn("Using inherited base address %pa\n",
				&gic_base);
		} else {
			pr_err("Failed to get memory range\n");
			return -ENODEV;
		}
	} else {
		gic_base = res.start;
		gic_len = resource_size(&res);
	}

	if (mips_cm_present()) {
		write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN);
		/* Ensure GIC region is enabled before trying to access it */
		__sync();
	}

	mips_gic_base = ioremap(gic_base, gic_len);
	if (!mips_gic_base) {
		pr_err("Failed to ioremap gic_base\n");
		return -ENOMEM;
	}

	gicconfig = read_gic_config();
	gic_shared_intrs = FIELD_GET(GIC_CONFIG_NUMINTERRUPTS, gicconfig);
	gic_shared_intrs = (gic_shared_intrs + 1) * 8;

	if (cpu_has_veic) {
		/* Always use vector 1 in EIC mode */
		gic_cpu_pin = 0;
		set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET,
			       __gic_irq_dispatch);
	} else {
		gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET;
		irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec,
					gic_irq_dispatch);
	}

	gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS +
					       gic_shared_intrs, 0,
					       &gic_irq_domain_ops, NULL);
	if (!gic_irq_domain) {
		pr_err("Failed to add IRQ domain");
		return -ENXIO;
	}

	ret = gic_register_ipi_domain(node);
	if (ret)
		return ret;

	board_bind_eic_interrupt = &gic_bind_eic_interrupt;

	/*
	 * Initialise each cluster's GIC shared registers to sane default
	 * values. Otherwise the IPI setup would be erased if we moved this
	 * code into gic_cpu_startup() for each CPU.
	 */
	nclusters = mips_cps_numclusters();
	for (cl = 0; cl < nclusters; cl++) {
		if (cl == cpu_cluster(&current_cpu_data)) {
			for (i = 0; i < gic_shared_intrs; i++) {
				change_gic_pol(i, GIC_POL_ACTIVE_HIGH);
				change_gic_trig(i, GIC_TRIG_LEVEL);
				write_gic_rmask(i);
			}
		} else if (mips_cps_numcores(cl) != 0) {
			mips_cm_lock_other(cl, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
			for (i = 0; i < gic_shared_intrs; i++) {
				change_gic_redir_pol(i, GIC_POL_ACTIVE_HIGH);
				change_gic_redir_trig(i, GIC_TRIG_LEVEL);
				write_gic_redir_rmask(i);
			}
			mips_cm_unlock_other();
		} else {
			pr_warn("No CPU cores in cluster %d, skipping it\n", cl);
		}
	}

	return cpuhp_setup_state(CPUHP_AP_IRQ_MIPS_GIC_STARTING,
				 "irqchip/mips/gic:starting",
				 gic_cpu_startup, NULL);
}
IRQCHIP_DECLARE(mips_gic, "mti,gic", gic_of_init);
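
/*
 * Illustrative usage note (values below are examples, not taken from this
 * driver): a device tree consumer references a GIC interrupt with the
 * three-cell specifier parsed by gic_irq_domain_xlate() above, using the
 * GIC_SHARED/GIC_LOCAL constants from
 * <dt-bindings/interrupt-controller/mips-gic.h>, e.g.
 *
 *	interrupt-parent = <&gic>;
 *	interrupts = <GIC_SHARED 29 IRQ_TYPE_LEVEL_HIGH>;
 *
 * where the interrupt number (29) and trigger type are hypothetical and
 * depend on the board.
 */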