/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 */
#include <linux/bitmap.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/mips-gic.h>
#include <linux/of_address.h>
#include <linux/sched.h>
#include <linux/smp.h>

#include <asm/mips-cm.h>
#include <asm/setup.h>
#include <asm/traps.h>

#include <dt-bindings/interrupt-controller/mips-gic.h>

unsigned int gic_present;

struct gic_pcpu_mask {
	DECLARE_BITMAP(pcpu_mask, GIC_MAX_INTRS);
};

struct gic_irq_spec {
	enum {
		GIC_DEVICE,
		GIC_IPI
	} type;

	union {
		struct cpumask *ipimask;
		unsigned int hwirq;
	};
};

static unsigned long __gic_base_addr;

static void __iomem *gic_base;
static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
static DEFINE_SPINLOCK(gic_lock);
static struct irq_domain *gic_irq_domain;
static struct irq_domain *gic_dev_domain;
static struct irq_domain *gic_ipi_domain;
static int gic_shared_intrs;
static int gic_vpes;
static unsigned int gic_cpu_pin;
static unsigned int timer_cpu_pin;
static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;
DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS);

static void __gic_irq_dispatch(void);

static inline u32 gic_read32(unsigned int reg)
{
	return __raw_readl(gic_base + reg);
}

static inline u64 gic_read64(unsigned int reg)
{
	return __raw_readq(gic_base + reg);
}

static inline unsigned long gic_read(unsigned int reg)
{
	if (!mips_cm_is64)
		return gic_read32(reg);
	else
		return gic_read64(reg);
}

static inline void gic_write32(unsigned int reg, u32 val)
{
	return __raw_writel(val, gic_base + reg);
}

static inline void gic_write64(unsigned int reg, u64 val)
{
	return __raw_writeq(val, gic_base + reg);
}

static inline void gic_write(unsigned int reg, unsigned long val)
{
	if (!mips_cm_is64)
		return gic_write32(reg, (u32)val);
	else
		return gic_write64(reg, (u64)val);
}

static inline void gic_update_bits(unsigned int reg, unsigned long mask,
				   unsigned long val)
{
	unsigned long regval;

	regval = gic_read(reg);
	regval &= ~mask;
	regval |= val;
	gic_write(reg, regval);
}

static inline void gic_reset_mask(unsigned int intr)
{
	gic_write(GIC_REG(SHARED, GIC_SH_RMASK) + GIC_INTR_OFS(intr),
		  1ul << GIC_INTR_BIT(intr));
}

static inline void gic_set_mask(unsigned int intr)
{
	gic_write(GIC_REG(SHARED, GIC_SH_SMASK) + GIC_INTR_OFS(intr),
		  1ul << GIC_INTR_BIT(intr));
}

static inline void gic_set_polarity(unsigned int intr, unsigned int pol)
{
	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_POLARITY) +
			GIC_INTR_OFS(intr), 1ul << GIC_INTR_BIT(intr),
			(unsigned long)pol << GIC_INTR_BIT(intr));
}

static inline void gic_set_trigger(unsigned int intr, unsigned int trig)
{
	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_TRIGGER) +
			GIC_INTR_OFS(intr), 1ul << GIC_INTR_BIT(intr),
			(unsigned long)trig << GIC_INTR_BIT(intr));
}
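/*
 * The shared-interrupt accessors above address each control function
 * (mask, polarity, trigger, dual-edge) as an array of 32-bit registers
 * with one bit per shared interrupt. A sketch of the indexing they rely
 * on, assuming the usual 32-bits-per-word encoding behind GIC_INTR_OFS()
 * and GIC_INTR_BIT():
 *
 *	reg  = base + GIC_INTR_OFS(intr);	// word holding intr
 *	mask = 1ul << GIC_INTR_BIT(intr);	// bit within that word
 */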
static inline void gic_set_dual_edge(unsigned int intr, unsigned int dual)
{
	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_DUAL) + GIC_INTR_OFS(intr),
			1ul << GIC_INTR_BIT(intr),
			(unsigned long)dual << GIC_INTR_BIT(intr));
}

static inline void gic_map_to_pin(unsigned int intr, unsigned int pin)
{
	gic_write32(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_PIN_BASE) +
		    GIC_SH_MAP_TO_PIN(intr), GIC_MAP_TO_PIN_MSK | pin);
}

static inline void gic_map_to_vpe(unsigned int intr, unsigned int vpe)
{
	gic_write(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_VPE_BASE) +
		  GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe),
		  GIC_SH_MAP_TO_VPE_REG_BIT(vpe));
}

#ifdef CONFIG_CLKSRC_MIPS_GIC
u64 gic_read_count(void)
{
	unsigned int hi, hi2, lo;

	if (mips_cm_is64)
		return (u64)gic_read(GIC_REG(SHARED, GIC_SH_COUNTER));

	/*
	 * With 32-bit accesses the 64-bit counter is read in two halves;
	 * re-read the high word until it is stable so a low-word wrap
	 * between the two reads cannot produce a torn value.
	 */
	do {
		hi = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
		lo = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_31_00));
		hi2 = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
	} while (hi2 != hi);

	return (((u64) hi) << 32) + lo;
}

unsigned int gic_get_count_width(void)
{
	unsigned int bits, config;

	config = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	bits = 32 + 4 * ((config & GIC_SH_CONFIG_COUNTBITS_MSK) >>
			 GIC_SH_CONFIG_COUNTBITS_SHF);

	return bits;
}

void gic_write_compare(u64 cnt)
{
	if (mips_cm_is64) {
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE), cnt);
	} else {
		gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI),
			    (int)(cnt >> 32));
		gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO),
			    (int)(cnt & 0xffffffff));
	}
}

void gic_write_cpu_compare(u64 cnt, int cpu)
{
	unsigned long flags;

	local_irq_save(flags);

	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), mips_cm_vp_id(cpu));

	if (mips_cm_is64) {
		gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE), cnt);
	} else {
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_HI),
			    (int)(cnt >> 32));
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_LO),
			    (int)(cnt & 0xffffffff));
	}

	local_irq_restore(flags);
}

u64 gic_read_compare(void)
{
	unsigned int hi, lo;

	if (mips_cm_is64)
		return (u64)gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE));

	hi = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI));
	lo = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO));

	return (((u64) hi) << 32) + lo;
}

void gic_start_count(void)
{
	u32 gicconfig;

	/* Start the counter */
	gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	gicconfig &= ~(1 << GIC_SH_CONFIG_COUNTSTOP_SHF);
	gic_write(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
}

void gic_stop_count(void)
{
	u32 gicconfig;

	/* Stop the counter */
	gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	gicconfig |= 1 << GIC_SH_CONFIG_COUNTSTOP_SHF;
	gic_write(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
}

#endif

unsigned gic_read_local_vp_id(void)
{
	unsigned long ident;

	ident = gic_read(GIC_REG(VPE_LOCAL, GIC_VP_IDENT));
	return ident & GIC_VP_IDENT_VCNUM_MSK;
}
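/*
 * Linux hwirq numbers for this driver form a single linear space: the
 * GIC_NUM_LOCAL_INTRS per-VPE local interrupts come first, followed by
 * the shared interrupts, with GIC_LOCAL_TO_HWIRQ()/GIC_SHARED_TO_HWIRQ()
 * and their inverses converting between the two numberings. The helpers
 * below deal with the local block, which is only partially routable
 * unless the CPU is in EIC mode.
 */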
static bool gic_local_irq_is_routable(int intr)
{
	u32 vpe_ctl;

	/* All local interrupts are routable in EIC mode. */
	if (cpu_has_veic)
		return true;

	vpe_ctl = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_CTL));
	switch (intr) {
	case GIC_LOCAL_INT_TIMER:
		return vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK;
	case GIC_LOCAL_INT_PERFCTR:
		return vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK;
	case GIC_LOCAL_INT_FDC:
		return vpe_ctl & GIC_VPE_CTL_FDC_RTBL_MSK;
	case GIC_LOCAL_INT_SWINT0:
	case GIC_LOCAL_INT_SWINT1:
		return vpe_ctl & GIC_VPE_CTL_SWINT_RTBL_MSK;
	default:
		return true;
	}
}

static void gic_bind_eic_interrupt(int irq, int set)
{
	/* Convert irq vector # to hw int # */
	irq -= GIC_PIN_TO_VEC_OFFSET;

	/* Set irq to use shadow set */
	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_EIC_SHADOW_SET_BASE) +
		  GIC_VPE_EIC_SS(irq), set);
}

static void gic_send_ipi(struct irq_data *d, unsigned int cpu)
{
	irq_hw_number_t hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(d));

	gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_SET(hwirq));
}

int gic_get_c0_compare_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER))
		return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_TIMER));
}

int gic_get_c0_perfcount_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_PERFCTR)) {
		/* Is the performance counter shared with the timer? */
		if (cp0_perfcount_irq < 0)
			return -1;
		return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
	}
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_PERFCTR));
}

int gic_get_c0_fdc_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_FDC)) {
		/* Is the FDC IRQ even present? */
		if (cp0_fdc_irq < 0)
			return -1;
		return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
	}

	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC));
}

int gic_get_usm_range(struct resource *gic_usm_res)
{
	if (!gic_present)
		return -1;

	gic_usm_res->start = __gic_base_addr + USM_VISIBLE_SECTION_OFS;
	gic_usm_res->end = gic_usm_res->start + (USM_VISIBLE_SECTION_SIZE - 1);

	return 0;
}
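/*
 * Shared-interrupt dispatch: read the pending and mask register banks
 * word by word (two 32-bit reads per 64-bit bitmap word when a 64-bit
 * kernel runs on a 32-bit CM), AND in this CPU's pcpu_mask so a VPE
 * only handles interrupts routed to it, then hand each remaining set
 * bit to its mapped virq.
 */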
static void gic_handle_shared_int(bool chained)
{
	unsigned int i, intr, virq, gic_reg_step = mips_cm_is64 ? 8 : 4;
	unsigned long *pcpu_mask;
	unsigned long pending_reg, intrmask_reg;
	DECLARE_BITMAP(pending, GIC_MAX_INTRS);
	DECLARE_BITMAP(intrmask, GIC_MAX_INTRS);

	/* Get per-cpu bitmaps */
	pcpu_mask = pcpu_masks[smp_processor_id()].pcpu_mask;

	pending_reg = GIC_REG(SHARED, GIC_SH_PEND);
	intrmask_reg = GIC_REG(SHARED, GIC_SH_MASK);

	for (i = 0; i < BITS_TO_LONGS(gic_shared_intrs); i++) {
		pending[i] = gic_read(pending_reg);
		intrmask[i] = gic_read(intrmask_reg);
		pending_reg += gic_reg_step;
		intrmask_reg += gic_reg_step;

		if (!IS_ENABLED(CONFIG_64BIT) || mips_cm_is64)
			continue;

		/* 64-bit bitmap words need a second 32-bit register read */
		pending[i] |= (u64)gic_read(pending_reg) << 32;
		intrmask[i] |= (u64)gic_read(intrmask_reg) << 32;
		pending_reg += gic_reg_step;
		intrmask_reg += gic_reg_step;
	}

	bitmap_and(pending, pending, intrmask, gic_shared_intrs);
	bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs);

	for_each_set_bit(intr, pending, gic_shared_intrs) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_SHARED_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);
	}
}

static void gic_mask_irq(struct irq_data *d)
{
	gic_reset_mask(GIC_HWIRQ_TO_SHARED(d->hwirq));
}

static void gic_unmask_irq(struct irq_data *d)
{
	gic_set_mask(GIC_HWIRQ_TO_SHARED(d->hwirq));
}

static void gic_ack_irq(struct irq_data *d)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);

	gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_CLR(irq));
}

static int gic_set_type(struct irq_data *d, unsigned int type)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	unsigned long flags;
	bool is_edge;

	spin_lock_irqsave(&gic_lock, flags);
	switch (type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_EDGE_FALLING:
		gic_set_polarity(irq, GIC_POL_NEG);
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_EDGE_RISING:
		gic_set_polarity(irq, GIC_POL_POS);
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_EDGE_BOTH:
		/* polarity is irrelevant in this case */
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_ENABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		gic_set_polarity(irq, GIC_POL_NEG);
		gic_set_trigger(irq, GIC_TRIG_LEVEL);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = false;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
	default:
		gic_set_polarity(irq, GIC_POL_POS);
		gic_set_trigger(irq, GIC_TRIG_LEVEL);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = false;
		break;
	}

	if (is_edge)
		irq_set_chip_handler_name_locked(d, &gic_edge_irq_controller,
						 handle_edge_irq, NULL);
	else
		irq_set_chip_handler_name_locked(d, &gic_level_irq_controller,
						 handle_level_irq, NULL);
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}
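/*
 * gic_set_type() is reached through the generic irq_set_irq_type()
 * machinery rather than being called directly. An illustrative
 * (hypothetical) consumer request such as:
 *
 *	err = request_irq(virq, handler, IRQF_TRIGGER_FALLING, "mydev", dev);
 *
 * would program GIC_POL_NEG + GIC_TRIG_EDGE above and switch the flow
 * handler to handle_edge_irq.
 */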
#ifdef CONFIG_SMP
static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
			    bool force)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	cpumask_t tmp = CPU_MASK_NONE;
	unsigned long flags;
	int i;

	cpumask_and(&tmp, cpumask, cpu_online_mask);
	if (cpumask_empty(&tmp))
		return -EINVAL;

	/* Assumption: cpumask refers to a single CPU */
	spin_lock_irqsave(&gic_lock, flags);

	/* Re-route this IRQ */
	gic_map_to_vpe(irq, mips_cm_vp_id(cpumask_first(&tmp)));

	/* Update the pcpu_masks */
	for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
		clear_bit(irq, pcpu_masks[i].pcpu_mask);
	set_bit(irq, pcpu_masks[cpumask_first(&tmp)].pcpu_mask);

	cpumask_copy(irq_data_get_affinity_mask(d), cpumask);
	spin_unlock_irqrestore(&gic_lock, flags);

	return IRQ_SET_MASK_OK_NOCOPY;
}
#endif

static struct irq_chip gic_level_irq_controller = {
	.name			= "MIPS GIC",
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_set_type		= gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	= gic_set_affinity,
#endif
};

static struct irq_chip gic_edge_irq_controller = {
	.name			= "MIPS GIC",
	.irq_ack		= gic_ack_irq,
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_set_type		= gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	= gic_set_affinity,
#endif
	.ipi_send_single	= gic_send_ipi,
};

static void gic_handle_local_int(bool chained)
{
	unsigned long pending, masked;
	unsigned int intr, virq;

	pending = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_PEND));
	masked = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_MASK));

	bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS);

	for_each_set_bit(intr, &pending, GIC_NUM_LOCAL_INTRS) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_LOCAL_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);
	}
}

static void gic_mask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_RMASK), 1 << intr);
}

static void gic_unmask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), 1 << intr);
}

static struct irq_chip gic_local_irq_controller = {
	.name		= "MIPS GIC Local",
	.irq_mask	= gic_mask_local_irq,
	.irq_unmask	= gic_unmask_local_irq,
};

static void gic_mask_local_irq_all_vpes(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	int i;
	unsigned long flags;

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
			  mips_cm_vp_id(i));
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << intr);
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}

static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	int i;
	unsigned long flags;

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
			  mips_cm_vp_id(i));
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SMASK), 1 << intr);
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}

static struct irq_chip gic_all_vpes_local_irq_controller = {
	.name		= "MIPS GIC Local",
	.irq_mask	= gic_mask_local_irq_all_vpes,
	.irq_unmask	= gic_unmask_local_irq_all_vpes,
};

static void __gic_irq_dispatch(void)
{
	gic_handle_local_int(false);
	gic_handle_shared_int(false);
}

static void gic_irq_dispatch(struct irq_desc *desc)
{
	gic_handle_local_int(true);
	gic_handle_shared_int(true);
}
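/*
 * Two entry points feed the handlers above: __gic_irq_dispatch() is
 * installed as an EIC vector via set_vi_handler() and must raise IRQs
 * itself (chained == false, do_IRQ()), while gic_irq_dispatch() runs
 * as a chained handler off a CPU interrupt pin and uses
 * generic_handle_irq() (chained == true). See __gic_init() below.
 */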
static void __init gic_basic_init(void)
{
	unsigned int i;

	board_bind_eic_interrupt = &gic_bind_eic_interrupt;

	/* Setup defaults */
	for (i = 0; i < gic_shared_intrs; i++) {
		gic_set_polarity(i, GIC_POL_POS);
		gic_set_trigger(i, GIC_TRIG_LEVEL);
		gic_reset_mask(i);
	}

	for (i = 0; i < gic_vpes; i++) {
		unsigned int j;

		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
			  mips_cm_vp_id(i));
		for (j = 0; j < GIC_NUM_LOCAL_INTRS; j++) {
			if (!gic_local_irq_is_routable(j))
				continue;
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << j);
		}
	}
}

static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
				    irq_hw_number_t hw)
{
	int intr = GIC_HWIRQ_TO_LOCAL(hw);
	int ret = 0;
	int i;
	unsigned long flags;

	if (!gic_local_irq_is_routable(intr))
		return -EPERM;

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		u32 val = GIC_MAP_TO_PIN_MSK | gic_cpu_pin;

		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
			  mips_cm_vp_id(i));

		switch (intr) {
		case GIC_LOCAL_INT_WD:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_WD_MAP), val);
			break;
		case GIC_LOCAL_INT_COMPARE:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_TIMER:
			/* CONFIG_MIPS_CMP workaround (see __gic_init) */
			val = GIC_MAP_TO_PIN_MSK | timer_cpu_pin;
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_PERFCTR:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_SWINT0:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SWINT0_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_SWINT1:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SWINT1_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_FDC:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_FDC_MAP), val);
			break;
		default:
			pr_err("Invalid local IRQ %d\n", intr);
			ret = -EINVAL;
			break;
		}
	}
	spin_unlock_irqrestore(&gic_lock, flags);

	return ret;
}

static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
				     irq_hw_number_t hw, unsigned int vpe)
{
	int intr = GIC_HWIRQ_TO_SHARED(hw);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&gic_lock, flags);
	gic_map_to_pin(intr, gic_cpu_pin);
	gic_map_to_vpe(intr, mips_cm_vp_id(vpe));
	for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
		clear_bit(intr, pcpu_masks[i].pcpu_mask);
	set_bit(intr, pcpu_masks[vpe].pcpu_mask);
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}
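/*
 * Domain layout: gic_irq_domain is the root domain owning the hardware
 * vectors; gic_dev_domain (device interrupts described in the DT) and
 * gic_ipi_domain (dynamically allocated IPIs) are stacked on top of it
 * via irq_domain_add_hierarchy() in __gic_init() and allocate their
 * vectors from the root through the gic_irq_spec argument.
 */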
static int gic_setup_dev_chip(struct irq_domain *d, unsigned int virq,
			      unsigned int hwirq)
{
	struct irq_chip *chip;
	int err;

	if (hwirq >= GIC_SHARED_HWIRQ_BASE) {
		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_level_irq_controller,
						    NULL);
	} else {
		switch (GIC_HWIRQ_TO_LOCAL(hwirq)) {
		case GIC_LOCAL_INT_TIMER:
		case GIC_LOCAL_INT_PERFCTR:
		case GIC_LOCAL_INT_FDC:
			/*
			 * HACK: These are all really percpu interrupts, but
			 * the rest of the MIPS kernel code does not use the
			 * percpu IRQ API for them.
			 */
			chip = &gic_all_vpes_local_irq_controller;
			irq_set_handler(virq, handle_percpu_irq);
			break;

		default:
			chip = &gic_local_irq_controller;
			irq_set_handler(virq, handle_percpu_devid_irq);
			irq_set_percpu_devid(virq);
			break;
		}

		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    chip, NULL);
	}

	return err;
}

static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	struct gic_irq_spec *spec = arg;
	irq_hw_number_t hwirq, base_hwirq;
	int cpu, ret, i;

	if (spec->type == GIC_DEVICE) {
		/* verify that shared irqs don't conflict with an IPI irq */
		if ((spec->hwirq >= GIC_SHARED_HWIRQ_BASE) &&
		    test_bit(GIC_HWIRQ_TO_SHARED(spec->hwirq), ipi_resrv))
			return -EBUSY;

		return gic_setup_dev_chip(d, virq, spec->hwirq);
	} else {
		base_hwirq = find_first_bit(ipi_resrv, gic_shared_intrs);
		if (base_hwirq == gic_shared_intrs)
			return -ENOMEM;

		/*
		 * Check that we have enough space; the range checked must
		 * start at base_hwirq, not at bit 0.
		 */
		for (i = base_hwirq; i < base_hwirq + nr_irqs; i++) {
			if (!test_bit(i, ipi_resrv))
				return -EBUSY;
		}
		bitmap_clear(ipi_resrv, base_hwirq, nr_irqs);

		/* map the hwirq for each cpu consecutively */
		i = 0;
		for_each_cpu(cpu, spec->ipimask) {
			hwirq = GIC_SHARED_TO_HWIRQ(base_hwirq + i);

			ret = irq_domain_set_hwirq_and_chip(d, virq + i, hwirq,
							    &gic_level_irq_controller,
							    NULL);
			if (ret)
				goto error;

			irq_set_handler(virq + i, handle_level_irq);

			ret = gic_shared_irq_domain_map(d, virq + i, hwirq, cpu);
			if (ret)
				goto error;

			i++;
		}

		/*
		 * tell the parent about the base hwirq we allocated so it can
		 * set its own domain data
		 */
		spec->hwirq = base_hwirq;
	}

	return 0;
error:
	bitmap_set(ipi_resrv, base_hwirq, nr_irqs);
	return ret;
}

void gic_irq_domain_free(struct irq_domain *d, unsigned int virq,
			 unsigned int nr_irqs)
{
	irq_hw_number_t base_hwirq;
	struct irq_data *data;

	data = irq_get_irq_data(virq);
	if (!data)
		return;

	base_hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(data));
	bitmap_set(ipi_resrv, base_hwirq, nr_irqs);
}

int gic_irq_domain_match(struct irq_domain *d, struct device_node *node,
			 enum irq_domain_bus_token bus_token)
{
	/* this domain shouldn't be accessed directly */
	return 0;
}

static const struct irq_domain_ops gic_irq_domain_ops = {
	.alloc = gic_irq_domain_alloc,
	.free = gic_irq_domain_free,
	.match = gic_irq_domain_match,
};

static int gic_dev_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	if (intsize != 3)
		return -EINVAL;

	if (intspec[0] == GIC_SHARED)
		*out_hwirq = GIC_SHARED_TO_HWIRQ(intspec[1]);
	else if (intspec[0] == GIC_LOCAL)
		*out_hwirq = GIC_LOCAL_TO_HWIRQ(intspec[1]);
	else
		return -EINVAL;
	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;

	return 0;
}
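/*
 * A consumer describes a GIC interrupt with the three-cell specifier
 * parsed by gic_dev_domain_xlate() above. A hypothetical device node
 * (names and numbers are illustrative only):
 *
 *	uart0: serial@10000000 {
 *		interrupt-parent = <&gic>;
 *		interrupts = <GIC_SHARED 24 IRQ_TYPE_LEVEL_HIGH>;
 *	};
 */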
static int gic_dev_domain_alloc(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	struct irq_fwspec *fwspec = arg;
	struct gic_irq_spec spec = {
		.type = GIC_DEVICE,
	};
	int i, ret;

	if (fwspec->param[0] == GIC_SHARED)
		spec.hwirq = GIC_SHARED_TO_HWIRQ(fwspec->param[1]);
	else
		spec.hwirq = GIC_LOCAL_TO_HWIRQ(fwspec->param[1]);

	ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, &spec);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		ret = gic_setup_dev_chip(d, virq + i, spec.hwirq + i);
		if (ret)
			goto error;
	}

	return 0;

error:
	irq_domain_free_irqs_parent(d, virq, nr_irqs);
	return ret;
}

void gic_dev_domain_free(struct irq_domain *d, unsigned int virq,
			 unsigned int nr_irqs)
{
	/* no real allocation is done for dev irqs, so no need to free anything */
	return;
}

static void gic_dev_domain_activate(struct irq_domain *domain,
				    struct irq_data *d)
{
	if (GIC_HWIRQ_TO_LOCAL(d->hwirq) < GIC_NUM_LOCAL_INTRS)
		gic_local_irq_domain_map(domain, d->irq, d->hwirq);
	else
		gic_shared_irq_domain_map(domain, d->irq, d->hwirq, 0);
}

static struct irq_domain_ops gic_dev_domain_ops = {
	.xlate = gic_dev_domain_xlate,
	.alloc = gic_dev_domain_alloc,
	.free = gic_dev_domain_free,
	.activate = gic_dev_domain_activate,
};

static int gic_ipi_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	/*
	 * There's nothing to translate here. hwirq is dynamically allocated
	 * and the irq type is always edge triggered.
	 */
	*out_hwirq = 0;
	*out_type = IRQ_TYPE_EDGE_RISING;

	return 0;
}

static int gic_ipi_domain_alloc(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	struct cpumask *ipimask = arg;
	struct gic_irq_spec spec = {
		.type = GIC_IPI,
		.ipimask = ipimask
	};
	int ret, i;

	ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, &spec);
	if (ret)
		return ret;

	/* the parent should have set spec.hwirq to the base_hwirq it allocated */
	for (i = 0; i < nr_irqs; i++) {
		ret = irq_domain_set_hwirq_and_chip(d, virq + i,
						    GIC_SHARED_TO_HWIRQ(spec.hwirq + i),
						    &gic_edge_irq_controller,
						    NULL);
		if (ret)
			goto error;

		ret = irq_set_irq_type(virq + i, IRQ_TYPE_EDGE_RISING);
		if (ret)
			goto error;
	}

	return 0;
error:
	irq_domain_free_irqs_parent(d, virq, nr_irqs);
	return ret;
}

void gic_ipi_domain_free(struct irq_domain *d, unsigned int virq,
			 unsigned int nr_irqs)
{
	irq_domain_free_irqs_parent(d, virq, nr_irqs);
}

int gic_ipi_domain_match(struct irq_domain *d, struct device_node *node,
			 enum irq_domain_bus_token bus_token)
{
	bool is_ipi;

	switch (bus_token) {
	case DOMAIN_BUS_IPI:
		is_ipi = d->bus_token == bus_token;
		return (!node || to_of_node(d->fwnode) == node) && is_ipi;
	default:
		return 0;
	}
}

static struct irq_domain_ops gic_ipi_domain_ops = {
	.xlate = gic_ipi_domain_xlate,
	.alloc = gic_ipi_domain_alloc,
	.free = gic_ipi_domain_free,
	.match = gic_ipi_domain_match,
};
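/*
 * The IPI domain is not reached through DT mappings but through the
 * generic IPI API. A minimal sketch, assuming the kernel/irq/ipi.c
 * interface of this era:
 *
 *	virq = irq_reserve_ipi(gic_ipi_domain, cpu_possible_mask);
 *	...
 *	ipi_send_single(virq, cpu);	// ends up in gic_send_ipi()
 */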
static void __init gic_map_single_int(struct device_node *node,
				      unsigned int irq)
{
	unsigned int linux_irq;
	struct irq_fwspec local_int_fwspec = {
		.fwnode         = &node->fwnode,
		.param_count    = 3,
		.param          = {
			[0]     = GIC_LOCAL,
			[1]     = irq,
			[2]     = IRQ_TYPE_NONE,
		},
	};

	if (!gic_local_irq_is_routable(irq))
		return;

	linux_irq = irq_create_fwspec_mapping(&local_int_fwspec);
	WARN_ON(!linux_irq);
}

static void __init gic_map_interrupts(struct device_node *node)
{
	gic_map_single_int(node, GIC_LOCAL_INT_WD);
	gic_map_single_int(node, GIC_LOCAL_INT_COMPARE);
	gic_map_single_int(node, GIC_LOCAL_INT_TIMER);
	gic_map_single_int(node, GIC_LOCAL_INT_PERFCTR);
	gic_map_single_int(node, GIC_LOCAL_INT_SWINT0);
	gic_map_single_int(node, GIC_LOCAL_INT_SWINT1);
	gic_map_single_int(node, GIC_LOCAL_INT_FDC);
}

static void __init __gic_init(unsigned long gic_base_addr,
			      unsigned long gic_addrspace_size,
			      unsigned int cpu_vec, unsigned int irqbase,
			      struct device_node *node)
{
	unsigned int gicconfig, cpu;
	unsigned int v[2];

	__gic_base_addr = gic_base_addr;

	gic_base = ioremap_nocache(gic_base_addr, gic_addrspace_size);

	gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	gic_shared_intrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >>
		GIC_SH_CONFIG_NUMINTRS_SHF;
	gic_shared_intrs = ((gic_shared_intrs + 1) * 8);

	gic_vpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >>
		GIC_SH_CONFIG_NUMVPES_SHF;
	gic_vpes = gic_vpes + 1;

	if (cpu_has_veic) {
		/* Set EIC mode for all VPEs */
		for_each_present_cpu(cpu) {
			gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
				  mips_cm_vp_id(cpu));
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_CTL),
				  GIC_VPE_CTL_EIC_MODE_MSK);
		}

		/* Always use vector 1 in EIC mode (pin 0 + GIC_PIN_TO_VEC_OFFSET) */
		gic_cpu_pin = 0;
		timer_cpu_pin = gic_cpu_pin;
		set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET,
			       __gic_irq_dispatch);
	} else {
		gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET;
		irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec,
					gic_irq_dispatch);
		/*
		 * With the CMP implementation of SMP (deprecated), other CPUs
		 * are started by the bootloader and put into a timer based
		 * waiting poll loop. We must not re-route those CPU's local
		 * timer interrupts as the wait instruction will never finish,
		 * so just handle whatever CPU interrupt it is routed to by
		 * default.
		 *
		 * This workaround should be removed when CMP support is
		 * dropped.
		 */
		if (IS_ENABLED(CONFIG_MIPS_CMP) &&
		    gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER)) {
			timer_cpu_pin = gic_read32(GIC_REG(VPE_LOCAL,
							   GIC_VPE_TIMER_MAP)) &
					GIC_MAP_MSK;
			irq_set_chained_handler(MIPS_CPU_IRQ_BASE +
						GIC_CPU_PIN_OFFSET +
						timer_cpu_pin,
						gic_irq_dispatch);
		} else {
			timer_cpu_pin = gic_cpu_pin;
		}
	}

	gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS +
					       gic_shared_intrs, irqbase,
					       &gic_irq_domain_ops, NULL);
	if (!gic_irq_domain)
		panic("Failed to add GIC IRQ domain");
	gic_irq_domain->name = "mips-gic-irq";

	gic_dev_domain = irq_domain_add_hierarchy(gic_irq_domain, 0,
						  GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
						  node, &gic_dev_domain_ops, NULL);
	if (!gic_dev_domain)
		panic("Failed to add GIC DEV domain");
	gic_dev_domain->name = "mips-gic-dev";

	gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain,
						  IRQ_DOMAIN_FLAG_IPI_PER_CPU,
						  GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
						  node, &gic_ipi_domain_ops, NULL);
	if (!gic_ipi_domain)
		panic("Failed to add GIC IPI domain");

	gic_ipi_domain->name = "mips-gic-ipi";
	gic_ipi_domain->bus_token = DOMAIN_BUS_IPI;

	if (node &&
	    !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) {
		bitmap_set(ipi_resrv, v[0], v[1]);
	} else {
		/* Make the last 2 * gic_vpes available for IPIs */
		bitmap_set(ipi_resrv,
			   gic_shared_intrs - 2 * gic_vpes,
			   2 * gic_vpes);
	}
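	/*
	 * Two vectors per VPE matches the pair of IPIs the MIPS SMP code
	 * expects to reserve (reschedule and call-function), assuming no
	 * DT override via "mti,reserved-ipi-vectors".
	 */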

	gic_basic_init();
	gic_map_interrupts(node);
}

void __init gic_init(unsigned long gic_base_addr,
		     unsigned long gic_addrspace_size,
		     unsigned int cpu_vec, unsigned int irqbase)
{
	__gic_init(gic_base_addr, gic_addrspace_size, cpu_vec, irqbase, NULL);
}

static int __init gic_of_init(struct device_node *node,
			      struct device_node *parent)
{
	struct resource res;
	unsigned int cpu_vec, i = 0, reserved = 0;
	phys_addr_t gic_base;
	size_t gic_len;

	/* Find the first available CPU vector. */
	while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors",
					   i++, &cpu_vec))
		reserved |= BIT(cpu_vec);
	for (cpu_vec = 2; cpu_vec < 8; cpu_vec++) {
		if (!(reserved & BIT(cpu_vec)))
			break;
	}
	if (cpu_vec == 8) {
		pr_err("No CPU vectors available for GIC\n");
		return -ENODEV;
	}

	if (of_address_to_resource(node, 0, &res)) {
		/*
		 * Probe the CM for the GIC base address if not specified
		 * in the device-tree.
		 */
		if (mips_cm_present()) {
			gic_base = read_gcr_gic_base() &
				~CM_GCR_GIC_BASE_GICEN_MSK;
			gic_len = 0x20000;
		} else {
			pr_err("Failed to get GIC memory range\n");
			return -ENODEV;
		}
	} else {
		gic_base = res.start;
		gic_len = resource_size(&res);
	}

	if (mips_cm_present())
		write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN_MSK);
	gic_present = true;

	__gic_init(gic_base, gic_len, cpu_vec, 0, node);

	return 0;
}
IRQCHIP_DECLARE(mips_gic, "mti,gic", gic_of_init);
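/*
 * An illustrative device-tree node matched by the declaration above;
 * only the "mti,gic" compatible, the three-cell specifier and the
 * "mti,reserved-*" property names are fixed by this driver, the address
 * and reserved vector below are example values:
 *
 *	gic: interrupt-controller@1bdc0000 {
 *		compatible = "mti,gic";
 *		reg = <0x1bdc0000 0x20000>;
 *		interrupt-controller;
 *		#interrupt-cells = <3>;
 *		mti,reserved-cpu-vectors = <7>;
 *	};
 */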