/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
 */

#define pr_fmt(fmt) "irq-mips-gic: " fmt

#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/clocksource.h>
#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/smp.h>

#include <asm/mips-cps.h>
#include <asm/setup.h>
#include <asm/traps.h>

#include <dt-bindings/interrupt-controller/mips-gic.h>

#define GIC_MAX_INTRS		256
#define GIC_MAX_LONGS		BITS_TO_LONGS(GIC_MAX_INTRS)

/* Add 2 to convert GIC CPU pin to core interrupt */
#define GIC_CPU_PIN_OFFSET	2

/* Mapped interrupt to pin X, then GIC will generate the vector (X+1). */
#define GIC_PIN_TO_VEC_OFFSET	1

/* Convert between local/shared IRQ number and GIC HW IRQ number. */
#define GIC_LOCAL_HWIRQ_BASE	0
#define GIC_LOCAL_TO_HWIRQ(x)	(GIC_LOCAL_HWIRQ_BASE + (x))
#define GIC_HWIRQ_TO_LOCAL(x)	((x) - GIC_LOCAL_HWIRQ_BASE)
#define GIC_SHARED_HWIRQ_BASE	GIC_NUM_LOCAL_INTRS
#define GIC_SHARED_TO_HWIRQ(x)	(GIC_SHARED_HWIRQ_BASE + (x))
#define GIC_HWIRQ_TO_SHARED(x)	((x) - GIC_SHARED_HWIRQ_BASE)

void __iomem *mips_gic_base;

static DEFINE_PER_CPU_READ_MOSTLY(unsigned long[GIC_MAX_LONGS], pcpu_masks);

static DEFINE_RAW_SPINLOCK(gic_lock);
static struct irq_domain *gic_irq_domain;
static int gic_shared_intrs;
static unsigned int gic_cpu_pin;
static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;

#ifdef CONFIG_GENERIC_IRQ_IPI
static DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS);
static DECLARE_BITMAP(ipi_available, GIC_MAX_INTRS);
#endif /* CONFIG_GENERIC_IRQ_IPI */

static struct gic_all_vpes_chip_data {
	u32	map;
	bool	mask;
} gic_all_vpes_chip_data[GIC_NUM_LOCAL_INTRS];

static int __gic_with_next_online_cpu(int prev)
{
	unsigned int cpu;

	/* Discover the next online CPU */
	cpu = cpumask_next(prev, cpu_online_mask);

	/* If there isn't one, we're done */
	if (cpu >= nr_cpu_ids)
		return cpu;

	/*
	 * Move the access lock to the next CPU's GIC local register block.
	 *
	 * Set GIC_VL_OTHER. Since the caller holds gic_lock nothing can
	 * clobber the written value.
	 */
	write_gic_vl_other(mips_cm_vp_id(cpu));

	return cpu;
}

static inline void gic_unlock_cluster(void)
{
	if (mips_cps_multicluster_cpus())
		mips_cm_unlock_other();
}

/**
 * for_each_online_cpu_gic() - Iterate over online CPUs, access local registers
 * @cpu: An integer variable to hold the current CPU number
 * @gic_lock: A pointer to raw spin lock used as a guard
 *
 * Iterate over online CPUs & configure the other/redirect register region to
 * access each CPU's GIC local register block, which can be accessed from the
 * loop body using read_gic_vo_*() or write_gic_vo_*() accessor functions or
 * their derivatives.
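 *
 * For example, gic_mask_local_irq_all_vpes() below uses this pattern to mask
 * a local interrupt on every online CPU (a sketch of the existing usage, not
 * a new API):
 *
 *	int cpu;
 *
 *	for_each_online_cpu_gic(cpu, &gic_lock)
 *		write_gic_vo_rmask(BIT(intr));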
 */
#define for_each_online_cpu_gic(cpu, gic_lock)		\
	guard(raw_spinlock_irqsave)(gic_lock);		\
	for ((cpu) = __gic_with_next_online_cpu(-1);	\
	     (cpu) < nr_cpu_ids;			\
	     gic_unlock_cluster(),			\
	     (cpu) = __gic_with_next_online_cpu(cpu))

/**
 * gic_irq_lock_cluster() - Lock redirect block access to IRQ's cluster
 * @d: struct irq_data corresponding to the interrupt we're interested in
 *
 * Locks redirect register block access to the global register block of the GIC
 * within the remote cluster that the IRQ corresponding to @d is affine to,
 * returning true when this redirect block setup & locking has been performed.
 *
 * If @d is affine to the local cluster then no locking is performed and this
 * function will return false, indicating to the caller that it should access
 * the local cluster's registers without the overhead of indirection through
 * the redirect block.
 *
 * In summary, if this function returns true then the caller should access GIC
 * registers using redirect register block accessors & then call
 * mips_cm_unlock_other() when done. If this function returns false then the
 * caller should trivially access GIC registers in the local cluster.
 *
 * Returns true if locking performed, else false.
 */
static bool gic_irq_lock_cluster(struct irq_data *d)
{
	unsigned int cpu, cl;

	cpu = cpumask_first(irq_data_get_effective_affinity_mask(d));
	BUG_ON(cpu >= NR_CPUS);

	cl = cpu_cluster(&cpu_data[cpu]);
	if (cl == cpu_cluster(&current_cpu_data))
		return false;
	if (mips_cps_numcores(cl) == 0)
		return false;
	mips_cm_lock_other(cl, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL);
	return true;
}

static void gic_clear_pcpu_masks(unsigned int intr)
{
	unsigned int i;

	/* Clear the interrupt's bit in all pcpu_masks */
	for_each_possible_cpu(i)
		clear_bit(intr, per_cpu_ptr(pcpu_masks, i));
}

static bool gic_local_irq_is_routable(int intr)
{
	u32 vpe_ctl;

	/* All local interrupts are routable in EIC mode. */
	if (cpu_has_veic)
		return true;

	vpe_ctl = read_gic_vl_ctl();
	switch (intr) {
	case GIC_LOCAL_INT_TIMER:
		return vpe_ctl & GIC_VX_CTL_TIMER_ROUTABLE;
	case GIC_LOCAL_INT_PERFCTR:
		return vpe_ctl & GIC_VX_CTL_PERFCNT_ROUTABLE;
	case GIC_LOCAL_INT_FDC:
		return vpe_ctl & GIC_VX_CTL_FDC_ROUTABLE;
	case GIC_LOCAL_INT_SWINT0:
	case GIC_LOCAL_INT_SWINT1:
		return vpe_ctl & GIC_VX_CTL_SWINT_ROUTABLE;
	default:
		return true;
	}
}

static void gic_bind_eic_interrupt(int irq, int set)
{
	/* Convert irq vector # to hw int # */
	irq -= GIC_PIN_TO_VEC_OFFSET;

	/* Set irq to use shadow set */
	write_gic_vl_eic_shadow_set(irq, set);
}

static void gic_send_ipi(struct irq_data *d, unsigned int cpu)
{
	irq_hw_number_t hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(d));

	if (gic_irq_lock_cluster(d)) {
		write_gic_redir_wedge(GIC_WEDGE_RW | hwirq);
		mips_cm_unlock_other();
	} else {
		write_gic_wedge(GIC_WEDGE_RW | hwirq);
	}
}

int gic_get_c0_compare_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER))
		return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_TIMER));
}

int gic_get_c0_perfcount_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_PERFCTR)) {
		/* Is the performance counter shared with the timer? */
		if (cp0_perfcount_irq < 0)
			return -1;
		return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
	}
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_PERFCTR));
}

int gic_get_c0_fdc_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_FDC)) {
		/* Is the FDC IRQ even present? */
		if (cp0_fdc_irq < 0)
			return -1;
		return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
	}

	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC));
}

static void gic_handle_shared_int(bool chained)
{
	unsigned int intr;
	unsigned long *pcpu_mask;
	DECLARE_BITMAP(pending, GIC_MAX_INTRS);

	/* Get per-cpu bitmaps */
	pcpu_mask = this_cpu_ptr(pcpu_masks);

	if (mips_cm_is64)
		__ioread64_copy(pending, addr_gic_pend(),
				DIV_ROUND_UP(gic_shared_intrs, 64));
	else
		__ioread32_copy(pending, addr_gic_pend(),
				DIV_ROUND_UP(gic_shared_intrs, 32));

	bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs);

	for_each_set_bit(intr, pending, gic_shared_intrs) {
		if (chained)
			generic_handle_domain_irq(gic_irq_domain,
						  GIC_SHARED_TO_HWIRQ(intr));
		else
			do_domain_IRQ(gic_irq_domain,
				      GIC_SHARED_TO_HWIRQ(intr));
	}
}

static void gic_mask_irq(struct irq_data *d)
{
	unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq);

	if (gic_irq_lock_cluster(d)) {
		write_gic_redir_rmask(intr);
		mips_cm_unlock_other();
	} else {
		write_gic_rmask(intr);
	}

	gic_clear_pcpu_masks(intr);
}

static void gic_unmask_irq(struct irq_data *d)
{
	unsigned int intr = GIC_HWIRQ_TO_SHARED(d->hwirq);
	unsigned int cpu;

	if (gic_irq_lock_cluster(d)) {
		write_gic_redir_smask(intr);
		mips_cm_unlock_other();
	} else {
		write_gic_smask(intr);
	}

	gic_clear_pcpu_masks(intr);
	cpu = cpumask_first(irq_data_get_effective_affinity_mask(d));
	set_bit(intr, per_cpu_ptr(pcpu_masks, cpu));
}

static void gic_ack_irq(struct irq_data *d)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);

	if (gic_irq_lock_cluster(d)) {
		write_gic_redir_wedge(irq);
		mips_cm_unlock_other();
	} else {
		write_gic_wedge(irq);
	}
}

static int gic_set_type(struct irq_data *d, unsigned int type)
{
	unsigned int irq, pol, trig, dual;
	unsigned long flags;

	irq = GIC_HWIRQ_TO_SHARED(d->hwirq);

	raw_spin_lock_irqsave(&gic_lock, flags);
	switch (type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_EDGE_FALLING:
		pol = GIC_POL_FALLING_EDGE;
		trig = GIC_TRIG_EDGE;
		dual = GIC_DUAL_SINGLE;
		break;
	case IRQ_TYPE_EDGE_RISING:
		pol = GIC_POL_RISING_EDGE;
		trig = GIC_TRIG_EDGE;
		dual = GIC_DUAL_SINGLE;
		break;
	case IRQ_TYPE_EDGE_BOTH:
		pol = 0; /* Doesn't matter */
		trig = GIC_TRIG_EDGE;
		dual = GIC_DUAL_DUAL;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		pol = GIC_POL_ACTIVE_LOW;
		trig = GIC_TRIG_LEVEL;
		dual = GIC_DUAL_SINGLE;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
	default:
		pol = GIC_POL_ACTIVE_HIGH;
		trig = GIC_TRIG_LEVEL;
		dual = GIC_DUAL_SINGLE;
		break;
	}

	if (gic_irq_lock_cluster(d)) {
		change_gic_redir_pol(irq, pol);
		change_gic_redir_trig(irq, trig);
		change_gic_redir_dual(irq, dual);
		mips_cm_unlock_other();
	} else {
		change_gic_pol(irq, pol);
		change_gic_trig(irq, trig);
		change_gic_dual(irq, dual);
	}

	if (trig == GIC_TRIG_EDGE)
		irq_set_chip_handler_name_locked(d, &gic_edge_irq_controller,
						 handle_edge_irq, NULL);
	else
		irq_set_chip_handler_name_locked(d, &gic_level_irq_controller,
						 handle_level_irq, NULL);
	raw_spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}

#ifdef CONFIG_SMP
static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
			    bool force)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	unsigned int cpu, cl, old_cpu, old_cl;
	unsigned long flags;

	/*
	 * The GIC specifies that we can only route an interrupt to one VP(E),
	 * ie. CPU in Linux parlance, at a time. Therefore we always route to
	 * the first forced or online CPU in the mask.
	 */
	if (force)
		cpu = cpumask_first(cpumask);
	else
		cpu = cpumask_first_and(cpumask, cpu_online_mask);

	if (cpu >= NR_CPUS)
		return -EINVAL;

	old_cpu = cpumask_first(irq_data_get_effective_affinity_mask(d));
	old_cl = cpu_cluster(&cpu_data[old_cpu]);
	cl = cpu_cluster(&cpu_data[cpu]);

	raw_spin_lock_irqsave(&gic_lock, flags);

	/*
	 * If we're moving affinity between clusters, stop routing the
	 * interrupt to any VP(E) in the old cluster.
	 */
	if (cl != old_cl) {
		if (gic_irq_lock_cluster(d)) {
			write_gic_redir_map_vp(irq, 0);
			mips_cm_unlock_other();
		} else {
			write_gic_map_vp(irq, 0);
		}
	}

	/*
	 * Update effective affinity - after this gic_irq_lock_cluster() will
	 * begin operating on the new cluster.
	 */
	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	/*
	 * If we're moving affinity between clusters, configure the interrupt
	 * trigger type in the new cluster.
	 */
	if (cl != old_cl)
		gic_set_type(d, irqd_get_trigger_type(d));

	/* Route the interrupt to its new VP(E) */
	if (gic_irq_lock_cluster(d)) {
		write_gic_redir_map_pin(irq,
					GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin);
		write_gic_redir_map_vp(irq, BIT(mips_cm_vp_id(cpu)));

		/* Update the pcpu_masks */
		gic_clear_pcpu_masks(irq);
		if (read_gic_redir_mask(irq))
			set_bit(irq, per_cpu_ptr(pcpu_masks, cpu));

		mips_cm_unlock_other();
	} else {
		write_gic_map_pin(irq, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin);
		write_gic_map_vp(irq, BIT(mips_cm_vp_id(cpu)));

		/* Update the pcpu_masks */
		gic_clear_pcpu_masks(irq);
		if (read_gic_mask(irq))
			set_bit(irq, per_cpu_ptr(pcpu_masks, cpu));
	}

	raw_spin_unlock_irqrestore(&gic_lock, flags);

	return IRQ_SET_MASK_OK;
}
#endif

static struct irq_chip gic_level_irq_controller = {
	.name			= "MIPS GIC",
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_set_type		= gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	= gic_set_affinity,
#endif
};

static struct irq_chip gic_edge_irq_controller = {
	.name			= "MIPS GIC",
	.irq_ack		= gic_ack_irq,
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_set_type		= gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	= gic_set_affinity,
#endif
	.ipi_send_single	= gic_send_ipi,
};

static void gic_handle_local_int(bool chained)
{
	unsigned long pending, masked;
	unsigned int intr;

	pending = read_gic_vl_pend();
	masked = read_gic_vl_mask();

	bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS);

	for_each_set_bit(intr, &pending, GIC_NUM_LOCAL_INTRS) {
		if (chained)
			generic_handle_domain_irq(gic_irq_domain,
						  GIC_LOCAL_TO_HWIRQ(intr));
		else
			do_domain_IRQ(gic_irq_domain,
				      GIC_LOCAL_TO_HWIRQ(intr));
	}
}

static void gic_mask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	write_gic_vl_rmask(BIT(intr));
}

static void gic_unmask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	write_gic_vl_smask(BIT(intr));
}

static struct irq_chip gic_local_irq_controller = {
	.name			= "MIPS GIC Local",
	.irq_mask		= gic_mask_local_irq,
	.irq_unmask		= gic_unmask_local_irq,
};

static void gic_mask_local_irq_all_vpes(struct irq_data *d)
{
	struct gic_all_vpes_chip_data *cd;
	int intr, cpu;

	if (!mips_cps_multicluster_cpus())
		return;

	intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	cd = irq_data_get_irq_chip_data(d);
	cd->mask = false;

	for_each_online_cpu_gic(cpu, &gic_lock)
		write_gic_vo_rmask(BIT(intr));
}

static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
{
	struct gic_all_vpes_chip_data *cd;
	int intr, cpu;

	if (!mips_cps_multicluster_cpus())
		return;

	intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	cd = irq_data_get_irq_chip_data(d);
	cd->mask = true;

	for_each_online_cpu_gic(cpu, &gic_lock)
		write_gic_vo_smask(BIT(intr));
}

static void gic_all_vpes_irq_cpu_online(void)
{
	static const unsigned int local_intrs[] = {
		GIC_LOCAL_INT_TIMER,
		GIC_LOCAL_INT_PERFCTR,
		GIC_LOCAL_INT_FDC,
	};
	unsigned long flags;
	int i;

	raw_spin_lock_irqsave(&gic_lock, flags);

	for (i = 0; i < ARRAY_SIZE(local_intrs); i++) {
		unsigned int intr = local_intrs[i];
		struct gic_all_vpes_chip_data *cd;

		if (!gic_local_irq_is_routable(intr))
			continue;
		cd = &gic_all_vpes_chip_data[intr];
		write_gic_vl_map(mips_gic_vx_map_reg(intr), cd->map);
		if (cd->mask)
			write_gic_vl_smask(BIT(intr));
	}

	raw_spin_unlock_irqrestore(&gic_lock, flags);
}

static struct irq_chip gic_all_vpes_local_irq_controller = {
	.name			= "MIPS GIC Local",
	.irq_mask		= gic_mask_local_irq_all_vpes,
	.irq_unmask		= gic_unmask_local_irq_all_vpes,
};

static void __gic_irq_dispatch(void)
{
	gic_handle_local_int(false);
	gic_handle_shared_int(false);
}

static void gic_irq_dispatch(struct irq_desc *desc)
{
	gic_handle_local_int(true);
	gic_handle_shared_int(true);
}

static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
				     irq_hw_number_t hw, unsigned int cpu)
{
	int intr = GIC_HWIRQ_TO_SHARED(hw);
	struct irq_data *data;
	unsigned long flags;

	data = irq_get_irq_data(virq);
	irq_data_update_effective_affinity(data, cpumask_of(cpu));

	raw_spin_lock_irqsave(&gic_lock, flags);

	/* Route the interrupt to its VP(E) */
	if (gic_irq_lock_cluster(data)) {
		write_gic_redir_map_pin(intr,
					GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin);
		write_gic_redir_map_vp(intr, BIT(mips_cm_vp_id(cpu)));
		mips_cm_unlock_other();
	} else {
		write_gic_map_pin(intr, GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin);
		write_gic_map_vp(intr, BIT(mips_cm_vp_id(cpu)));
	}

	raw_spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}

static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	if (intsize != 3)
		return -EINVAL;

	if (intspec[0] == GIC_SHARED)
		*out_hwirq = GIC_SHARED_TO_HWIRQ(intspec[1]);
	else if (intspec[0] == GIC_LOCAL)
		*out_hwirq = GIC_LOCAL_TO_HWIRQ(intspec[1]);
	else
		return -EINVAL;
	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;

	return 0;
}

static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
			      irq_hw_number_t hwirq)
{
	struct gic_all_vpes_chip_data *cd;
	unsigned int intr;
	int err, cpu;
	u32 map;

	if (hwirq >= GIC_SHARED_HWIRQ_BASE) {
#ifdef CONFIG_GENERIC_IRQ_IPI
		/* verify that shared irqs don't conflict with an IPI irq */
		if (test_bit(GIC_HWIRQ_TO_SHARED(hwirq), ipi_resrv))
			return -EBUSY;
#endif /* CONFIG_GENERIC_IRQ_IPI */

		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_level_irq_controller,
						    NULL);
		if (err)
			return err;

		irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
		return gic_shared_irq_domain_map(d, virq, hwirq, 0);
	}

	intr = GIC_HWIRQ_TO_LOCAL(hwirq);
	map = GIC_MAP_PIN_MAP_TO_PIN | gic_cpu_pin;

	/*
	 * If adding support for more per-cpu interrupts, keep the
	 * array in gic_all_vpes_irq_cpu_online() in sync.
	 */
	switch (intr) {
	case GIC_LOCAL_INT_TIMER:
	case GIC_LOCAL_INT_PERFCTR:
	case GIC_LOCAL_INT_FDC:
		/*
		 * HACK: These are all really percpu interrupts, but
		 * the rest of the MIPS kernel code does not use the
		 * percpu IRQ API for them.
		 */
		cd = &gic_all_vpes_chip_data[intr];
		cd->map = map;
		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_all_vpes_local_irq_controller,
						    cd);
		if (err)
			return err;

		irq_set_handler(virq, handle_percpu_irq);
		break;

	default:
		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_local_irq_controller,
						    NULL);
		if (err)
			return err;

		irq_set_handler(virq, handle_percpu_devid_irq);
		irq_set_percpu_devid(virq);
		break;
	}

	if (!gic_local_irq_is_routable(intr))
		return -EPERM;

	if (mips_cps_multicluster_cpus()) {
		for_each_online_cpu_gic(cpu, &gic_lock)
			write_gic_vo_map(mips_gic_vx_map_reg(intr), map);
	}

	return 0;
}

static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	struct irq_fwspec *fwspec = arg;
	irq_hw_number_t hwirq;

	if (fwspec->param[0] == GIC_SHARED)
		hwirq = GIC_SHARED_TO_HWIRQ(fwspec->param[1]);
	else
		hwirq = GIC_LOCAL_TO_HWIRQ(fwspec->param[1]);

	return gic_irq_domain_map(d, virq, hwirq);
}

static void gic_irq_domain_free(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs)
{
}

static const struct irq_domain_ops gic_irq_domain_ops = {
	.xlate = gic_irq_domain_xlate,
	.alloc = gic_irq_domain_alloc,
	.free = gic_irq_domain_free,
	.map = gic_irq_domain_map,
};

#ifdef CONFIG_GENERIC_IRQ_IPI

static int gic_ipi_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	/*
	 * There's nothing to translate here. hwirq is dynamically allocated and
	 * the irq type is always edge triggered.
745 * */ 746 *out_hwirq = 0; 747 *out_type = IRQ_TYPE_EDGE_RISING; 748 749 return 0; 750 } 751 752 static int gic_ipi_domain_alloc(struct irq_domain *d, unsigned int virq, 753 unsigned int nr_irqs, void *arg) 754 { 755 struct cpumask *ipimask = arg; 756 irq_hw_number_t hwirq, base_hwirq; 757 int cpu, ret, i; 758 759 base_hwirq = find_first_bit(ipi_available, gic_shared_intrs); 760 if (base_hwirq == gic_shared_intrs) 761 return -ENOMEM; 762 763 /* check that we have enough space */ 764 for (i = base_hwirq; i < nr_irqs; i++) { 765 if (!test_bit(i, ipi_available)) 766 return -EBUSY; 767 } 768 bitmap_clear(ipi_available, base_hwirq, nr_irqs); 769 770 /* map the hwirq for each cpu consecutively */ 771 i = 0; 772 for_each_cpu(cpu, ipimask) { 773 hwirq = GIC_SHARED_TO_HWIRQ(base_hwirq + i); 774 775 ret = irq_domain_set_hwirq_and_chip(d, virq + i, hwirq, 776 &gic_edge_irq_controller, 777 NULL); 778 if (ret) 779 goto error; 780 781 ret = irq_domain_set_hwirq_and_chip(d->parent, virq + i, hwirq, 782 &gic_edge_irq_controller, 783 NULL); 784 if (ret) 785 goto error; 786 787 /* Set affinity to cpu. */ 788 irq_data_update_effective_affinity(irq_get_irq_data(virq + i), 789 cpumask_of(cpu)); 790 ret = irq_set_irq_type(virq + i, IRQ_TYPE_EDGE_RISING); 791 if (ret) 792 goto error; 793 794 ret = gic_shared_irq_domain_map(d, virq + i, hwirq, cpu); 795 if (ret) 796 goto error; 797 798 i++; 799 } 800 801 return 0; 802 error: 803 bitmap_set(ipi_available, base_hwirq, nr_irqs); 804 return ret; 805 } 806 807 static void gic_ipi_domain_free(struct irq_domain *d, unsigned int virq, 808 unsigned int nr_irqs) 809 { 810 irq_hw_number_t base_hwirq; 811 struct irq_data *data; 812 813 data = irq_get_irq_data(virq); 814 if (!data) 815 return; 816 817 base_hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(data)); 818 bitmap_set(ipi_available, base_hwirq, nr_irqs); 819 } 820 821 static int gic_ipi_domain_match(struct irq_domain *d, struct device_node *node, 822 enum irq_domain_bus_token bus_token) 823 { 824 bool is_ipi; 825 826 switch (bus_token) { 827 case DOMAIN_BUS_IPI: 828 is_ipi = d->bus_token == bus_token; 829 return (!node || to_of_node(d->fwnode) == node) && is_ipi; 830 break; 831 default: 832 return 0; 833 } 834 } 835 836 static const struct irq_domain_ops gic_ipi_domain_ops = { 837 .xlate = gic_ipi_domain_xlate, 838 .alloc = gic_ipi_domain_alloc, 839 .free = gic_ipi_domain_free, 840 .match = gic_ipi_domain_match, 841 }; 842 843 static int gic_register_ipi_domain(struct device_node *node) 844 { 845 struct irq_domain *gic_ipi_domain; 846 unsigned int v[2], num_ipis; 847 848 gic_ipi_domain = irq_domain_create_hierarchy(gic_irq_domain, IRQ_DOMAIN_FLAG_IPI_PER_CPU, 849 GIC_NUM_LOCAL_INTRS + gic_shared_intrs, 850 of_fwnode_handle(node), &gic_ipi_domain_ops, 851 NULL); 852 if (!gic_ipi_domain) { 853 pr_err("Failed to add IPI domain"); 854 return -ENXIO; 855 } 856 857 irq_domain_update_bus_token(gic_ipi_domain, DOMAIN_BUS_IPI); 858 859 if (node && 860 !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) { 861 bitmap_set(ipi_resrv, v[0], v[1]); 862 } else { 863 /* 864 * Reserve 2 interrupts per possible CPU/VP for use as IPIs, 865 * meeting the requirements of arch/mips SMP. 
866 */ 867 num_ipis = 2 * num_possible_cpus(); 868 bitmap_set(ipi_resrv, gic_shared_intrs - num_ipis, num_ipis); 869 } 870 871 bitmap_copy(ipi_available, ipi_resrv, GIC_MAX_INTRS); 872 873 return 0; 874 } 875 876 #else /* !CONFIG_GENERIC_IRQ_IPI */ 877 878 static inline int gic_register_ipi_domain(struct device_node *node) 879 { 880 return 0; 881 } 882 883 #endif /* !CONFIG_GENERIC_IRQ_IPI */ 884 885 static int gic_cpu_startup(unsigned int cpu) 886 { 887 /* Enable or disable EIC */ 888 change_gic_vl_ctl(GIC_VX_CTL_EIC, 889 cpu_has_veic ? GIC_VX_CTL_EIC : 0); 890 891 /* Clear all local IRQ masks (ie. disable all local interrupts) */ 892 write_gic_vl_rmask(~0); 893 894 /* Enable desired interrupts */ 895 gic_all_vpes_irq_cpu_online(); 896 897 return 0; 898 } 899 900 static int __init gic_of_init(struct device_node *node, 901 struct device_node *parent) 902 { 903 unsigned int cpu_vec, i, gicconfig, cl, nclusters; 904 unsigned long reserved; 905 phys_addr_t gic_base; 906 struct resource res; 907 size_t gic_len; 908 int ret; 909 910 /* Find the first available CPU vector. */ 911 i = 0; 912 reserved = (C_SW0 | C_SW1) >> __ffs(C_SW0); 913 while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors", 914 i++, &cpu_vec)) 915 reserved |= BIT(cpu_vec); 916 917 cpu_vec = find_first_zero_bit(&reserved, hweight_long(ST0_IM)); 918 if (cpu_vec == hweight_long(ST0_IM)) { 919 pr_err("No CPU vectors available\n"); 920 return -ENODEV; 921 } 922 923 if (of_address_to_resource(node, 0, &res)) { 924 /* 925 * Probe the CM for the GIC base address if not specified 926 * in the device-tree. 927 */ 928 if (mips_cm_present()) { 929 gic_base = read_gcr_gic_base() & 930 ~CM_GCR_GIC_BASE_GICEN; 931 gic_len = 0x20000; 932 pr_warn("Using inherited base address %pa\n", 933 &gic_base); 934 } else { 935 pr_err("Failed to get memory range\n"); 936 return -ENODEV; 937 } 938 } else { 939 gic_base = res.start; 940 gic_len = resource_size(&res); 941 } 942 943 if (mips_cm_present()) { 944 write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN); 945 /* Ensure GIC region is enabled before trying to access it */ 946 __sync(); 947 } 948 949 mips_gic_base = ioremap(gic_base, gic_len); 950 if (!mips_gic_base) { 951 pr_err("Failed to ioremap gic_base\n"); 952 return -ENOMEM; 953 } 954 955 gicconfig = read_gic_config(); 956 gic_shared_intrs = FIELD_GET(GIC_CONFIG_NUMINTERRUPTS, gicconfig); 957 gic_shared_intrs = (gic_shared_intrs + 1) * 8; 958 959 if (cpu_has_veic) { 960 /* Always use vector 1 in EIC mode */ 961 gic_cpu_pin = 0; 962 set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET, 963 __gic_irq_dispatch); 964 } else { 965 gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET; 966 irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec, 967 gic_irq_dispatch); 968 } 969 970 gic_irq_domain = irq_domain_create_simple(of_fwnode_handle(node), 971 GIC_NUM_LOCAL_INTRS + 972 gic_shared_intrs, 0, 973 &gic_irq_domain_ops, NULL); 974 if (!gic_irq_domain) { 975 pr_err("Failed to add IRQ domain"); 976 return -ENXIO; 977 } 978 979 ret = gic_register_ipi_domain(node); 980 if (ret) 981 return ret; 982 983 board_bind_eic_interrupt = &gic_bind_eic_interrupt; 984 985 /* 986 * Initialise each cluster's GIC shared registers to sane default 987 * values. 988 * Otherwise, the IPI set up will be erased if we move code 989 * to gic_cpu_startup for each cpu. 
990 */ 991 nclusters = mips_cps_numclusters(); 992 for (cl = 0; cl < nclusters; cl++) { 993 if (cl == cpu_cluster(¤t_cpu_data)) { 994 for (i = 0; i < gic_shared_intrs; i++) { 995 change_gic_pol(i, GIC_POL_ACTIVE_HIGH); 996 change_gic_trig(i, GIC_TRIG_LEVEL); 997 write_gic_rmask(i); 998 } 999 } else if (mips_cps_numcores(cl) != 0) { 1000 mips_cm_lock_other(cl, 0, 0, CM_GCR_Cx_OTHER_BLOCK_GLOBAL); 1001 for (i = 0; i < gic_shared_intrs; i++) { 1002 change_gic_redir_pol(i, GIC_POL_ACTIVE_HIGH); 1003 change_gic_redir_trig(i, GIC_TRIG_LEVEL); 1004 write_gic_redir_rmask(i); 1005 } 1006 mips_cm_unlock_other(); 1007 1008 } else { 1009 pr_warn("No CPU cores on the cluster %d skip it\n", cl); 1010 } 1011 } 1012 1013 return cpuhp_setup_state(CPUHP_AP_IRQ_MIPS_GIC_STARTING, 1014 "irqchip/mips/gic:starting", 1015 gic_cpu_startup, NULL); 1016 } 1017 IRQCHIP_DECLARE(mips_gic, "mti,gic", gic_of_init); 1018