1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved. 4 * Author: Marc Zyngier <marc.zyngier@arm.com> 5 */ 6 7 #define pr_fmt(fmt) "GICv3: " fmt 8 9 #include <linux/acpi.h> 10 #include <linux/cpu.h> 11 #include <linux/cpu_pm.h> 12 #include <linux/delay.h> 13 #include <linux/interrupt.h> 14 #include <linux/irqdomain.h> 15 #include <linux/kstrtox.h> 16 #include <linux/of.h> 17 #include <linux/of_address.h> 18 #include <linux/of_irq.h> 19 #include <linux/percpu.h> 20 #include <linux/refcount.h> 21 #include <linux/slab.h> 22 23 #include <linux/irqchip.h> 24 #include <linux/irqchip/arm-gic-common.h> 25 #include <linux/irqchip/arm-gic-v3.h> 26 #include <linux/irqchip/irq-partition-percpu.h> 27 28 #include <asm/cputype.h> 29 #include <asm/exception.h> 30 #include <asm/smp_plat.h> 31 #include <asm/virt.h> 32 33 #include "irq-gic-common.h" 34 35 #define GICD_INT_NMI_PRI (GICD_INT_DEF_PRI & ~0x80) 36 37 #define FLAGS_WORKAROUND_GICR_WAKER_MSM8996 (1ULL << 0) 38 #define FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539 (1ULL << 1) 39 40 #define GIC_IRQ_TYPE_PARTITION (GIC_IRQ_TYPE_LPI + 1) 41 42 struct redist_region { 43 void __iomem *redist_base; 44 phys_addr_t phys_base; 45 bool single_redist; 46 }; 47 48 struct gic_chip_data { 49 struct fwnode_handle *fwnode; 50 void __iomem *dist_base; 51 struct redist_region *redist_regions; 52 struct rdists rdists; 53 struct irq_domain *domain; 54 u64 redist_stride; 55 u32 nr_redist_regions; 56 u64 flags; 57 bool has_rss; 58 unsigned int ppi_nr; 59 struct partition_desc **ppi_descs; 60 }; 61 62 static struct gic_chip_data gic_data __read_mostly; 63 static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key); 64 65 #define GIC_ID_NR (1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer)) 66 #define GIC_LINE_NR min(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U) 67 #define GIC_ESPI_NR GICD_TYPER_ESPIS(gic_data.rdists.gicd_typer) 68 69 /* 70 * The behaviours of RPR and PMR registers differ depending on the value of 71 * SCR_EL3.FIQ, and the behaviour of non-secure priority registers of the 72 * distributor and redistributors depends on whether security is enabled in the 73 * GIC. 74 * 75 * When security is enabled, non-secure priority values from the (re)distributor 76 * are presented to the GIC CPUIF as follow: 77 * (GIC_(R)DIST_PRI[irq] >> 1) | 0x80; 78 * 79 * If SCR_EL3.FIQ == 1, the values written to/read from PMR and RPR at non-secure 80 * EL1 are subject to a similar operation thus matching the priorities presented 81 * from the (re)distributor when security is enabled. When SCR_EL3.FIQ == 0, 82 * these values are unchanged by the GIC. 83 * 84 * see GICv3/GICv4 Architecture Specification (IHI0069D): 85 * - section 4.8.1 Non-secure accesses to register fields for Secure interrupt 86 * priorities. 87 * - Figure 4-7 Secure read of the priority field for a Non-secure Group 1 88 * interrupt. 89 */ 90 static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis); 91 92 DEFINE_STATIC_KEY_FALSE(gic_nonsecure_priorities); 93 EXPORT_SYMBOL(gic_nonsecure_priorities); 94 95 /* 96 * When the Non-secure world has access to group 0 interrupts (as a 97 * consequence of SCR_EL3.FIQ == 0), reading the ICC_RPR_EL1 register will 98 * return the Distributor's view of the interrupt priority. 99 * 100 * When GIC security is enabled (GICD_CTLR.DS == 0), the interrupt priority 101 * written by software is moved to the Non-secure range by the Distributor. 
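 *
 * Worked example (illustrative values, using the transform quoted above):
 * a priority of 0xa0 written by software is presented to the CPU interface
 * as (0xa0 >> 1) | 0x80 = 0xd0; this shifted value is what a read of
 * ICC_RPR_EL1 reports and what GICD_INT_RPR_PRI() below reproduces.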
102 * 103 * If both are true (which is when gic_nonsecure_priorities gets enabled), 104 * we need to shift down the priority programmed by software to match it 105 * against the value returned by ICC_RPR_EL1. 106 */ 107 #define GICD_INT_RPR_PRI(priority) \ 108 ({ \ 109 u32 __priority = (priority); \ 110 if (static_branch_unlikely(&gic_nonsecure_priorities)) \ 111 __priority = 0x80 | (__priority >> 1); \ 112 \ 113 __priority; \ 114 }) 115 116 /* ppi_nmi_refs[n] == number of cpus having ppi[n + 16] set as NMI */ 117 static refcount_t *ppi_nmi_refs; 118 119 static struct gic_kvm_info gic_v3_kvm_info __initdata; 120 static DEFINE_PER_CPU(bool, has_rss); 121 122 #define MPIDR_RS(mpidr) (((mpidr) & 0xF0UL) >> 4) 123 #define gic_data_rdist() (this_cpu_ptr(gic_data.rdists.rdist)) 124 #define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base) 125 #define gic_data_rdist_sgi_base() (gic_data_rdist_rd_base() + SZ_64K) 126 127 /* Our default, arbitrary priority value. Linux only uses one anyway. */ 128 #define DEFAULT_PMR_VALUE 0xf0 129 130 enum gic_intid_range { 131 SGI_RANGE, 132 PPI_RANGE, 133 SPI_RANGE, 134 EPPI_RANGE, 135 ESPI_RANGE, 136 LPI_RANGE, 137 __INVALID_RANGE__ 138 }; 139 140 static enum gic_intid_range __get_intid_range(irq_hw_number_t hwirq) 141 { 142 switch (hwirq) { 143 case 0 ... 15: 144 return SGI_RANGE; 145 case 16 ... 31: 146 return PPI_RANGE; 147 case 32 ... 1019: 148 return SPI_RANGE; 149 case EPPI_BASE_INTID ... (EPPI_BASE_INTID + 63): 150 return EPPI_RANGE; 151 case ESPI_BASE_INTID ... (ESPI_BASE_INTID + 1023): 152 return ESPI_RANGE; 153 case 8192 ... GENMASK(23, 0): 154 return LPI_RANGE; 155 default: 156 return __INVALID_RANGE__; 157 } 158 } 159 160 static enum gic_intid_range get_intid_range(struct irq_data *d) 161 { 162 return __get_intid_range(d->hwirq); 163 } 164 165 static inline unsigned int gic_irq(struct irq_data *d) 166 { 167 return d->hwirq; 168 } 169 170 static inline bool gic_irq_in_rdist(struct irq_data *d) 171 { 172 switch (get_intid_range(d)) { 173 case SGI_RANGE: 174 case PPI_RANGE: 175 case EPPI_RANGE: 176 return true; 177 default: 178 return false; 179 } 180 } 181 182 static inline void __iomem *gic_dist_base(struct irq_data *d) 183 { 184 switch (get_intid_range(d)) { 185 case SGI_RANGE: 186 case PPI_RANGE: 187 case EPPI_RANGE: 188 /* SGI+PPI -> SGI_base for this CPU */ 189 return gic_data_rdist_sgi_base(); 190 191 case SPI_RANGE: 192 case ESPI_RANGE: 193 /* SPI -> dist_base */ 194 return gic_data.dist_base; 195 196 default: 197 return NULL; 198 } 199 } 200 201 static void gic_do_wait_for_rwp(void __iomem *base, u32 bit) 202 { 203 u32 count = 1000000; /* 1s! 
*/ 204 205 while (readl_relaxed(base + GICD_CTLR) & bit) { 206 count--; 207 if (!count) { 208 pr_err_ratelimited("RWP timeout, gone fishing\n"); 209 return; 210 } 211 cpu_relax(); 212 udelay(1); 213 } 214 } 215 216 /* Wait for completion of a distributor change */ 217 static void gic_dist_wait_for_rwp(void) 218 { 219 gic_do_wait_for_rwp(gic_data.dist_base, GICD_CTLR_RWP); 220 } 221 222 /* Wait for completion of a redistributor change */ 223 static void gic_redist_wait_for_rwp(void) 224 { 225 gic_do_wait_for_rwp(gic_data_rdist_rd_base(), GICR_CTLR_RWP); 226 } 227 228 #ifdef CONFIG_ARM64 229 230 static u64 __maybe_unused gic_read_iar(void) 231 { 232 if (cpus_have_const_cap(ARM64_WORKAROUND_CAVIUM_23154)) 233 return gic_read_iar_cavium_thunderx(); 234 else 235 return gic_read_iar_common(); 236 } 237 #endif 238 239 static void gic_enable_redist(bool enable) 240 { 241 void __iomem *rbase; 242 u32 count = 1000000; /* 1s! */ 243 u32 val; 244 245 if (gic_data.flags & FLAGS_WORKAROUND_GICR_WAKER_MSM8996) 246 return; 247 248 rbase = gic_data_rdist_rd_base(); 249 250 val = readl_relaxed(rbase + GICR_WAKER); 251 if (enable) 252 /* Wake up this CPU redistributor */ 253 val &= ~GICR_WAKER_ProcessorSleep; 254 else 255 val |= GICR_WAKER_ProcessorSleep; 256 writel_relaxed(val, rbase + GICR_WAKER); 257 258 if (!enable) { /* Check that GICR_WAKER is writeable */ 259 val = readl_relaxed(rbase + GICR_WAKER); 260 if (!(val & GICR_WAKER_ProcessorSleep)) 261 return; /* No PM support in this redistributor */ 262 } 263 264 while (--count) { 265 val = readl_relaxed(rbase + GICR_WAKER); 266 if (enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep)) 267 break; 268 cpu_relax(); 269 udelay(1); 270 } 271 if (!count) 272 pr_err_ratelimited("redistributor failed to %s...\n", 273 enable ? "wakeup" : "sleep"); 274 } 275 276 /* 277 * Routines to disable, enable, EOI and route interrupts 278 */ 279 static u32 convert_offset_index(struct irq_data *d, u32 offset, u32 *index) 280 { 281 switch (get_intid_range(d)) { 282 case SGI_RANGE: 283 case PPI_RANGE: 284 case SPI_RANGE: 285 *index = d->hwirq; 286 return offset; 287 case EPPI_RANGE: 288 /* 289 * Contrary to the ESPI range, the EPPI range is contiguous 290 * to the PPI range in the registers, so let's adjust the 291 * displacement accordingly. Consistency is overrated. 
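		 *
		 * (Illustrative consequence of the adjustment below: EPPI n,
		 *  i.e. hwirq EPPI_BASE_INTID + n, is accessed at index 32 + n,
		 *  in the same banked registers as the PPIs, right after the
		 *  SGI/PPI slots 0-31.)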
292 */ 293 *index = d->hwirq - EPPI_BASE_INTID + 32; 294 return offset; 295 case ESPI_RANGE: 296 *index = d->hwirq - ESPI_BASE_INTID; 297 switch (offset) { 298 case GICD_ISENABLER: 299 return GICD_ISENABLERnE; 300 case GICD_ICENABLER: 301 return GICD_ICENABLERnE; 302 case GICD_ISPENDR: 303 return GICD_ISPENDRnE; 304 case GICD_ICPENDR: 305 return GICD_ICPENDRnE; 306 case GICD_ISACTIVER: 307 return GICD_ISACTIVERnE; 308 case GICD_ICACTIVER: 309 return GICD_ICACTIVERnE; 310 case GICD_IPRIORITYR: 311 return GICD_IPRIORITYRnE; 312 case GICD_ICFGR: 313 return GICD_ICFGRnE; 314 case GICD_IROUTER: 315 return GICD_IROUTERnE; 316 default: 317 break; 318 } 319 break; 320 default: 321 break; 322 } 323 324 WARN_ON(1); 325 *index = d->hwirq; 326 return offset; 327 } 328 329 static int gic_peek_irq(struct irq_data *d, u32 offset) 330 { 331 void __iomem *base; 332 u32 index, mask; 333 334 offset = convert_offset_index(d, offset, &index); 335 mask = 1 << (index % 32); 336 337 if (gic_irq_in_rdist(d)) 338 base = gic_data_rdist_sgi_base(); 339 else 340 base = gic_data.dist_base; 341 342 return !!(readl_relaxed(base + offset + (index / 32) * 4) & mask); 343 } 344 345 static void gic_poke_irq(struct irq_data *d, u32 offset) 346 { 347 void __iomem *base; 348 u32 index, mask; 349 350 offset = convert_offset_index(d, offset, &index); 351 mask = 1 << (index % 32); 352 353 if (gic_irq_in_rdist(d)) 354 base = gic_data_rdist_sgi_base(); 355 else 356 base = gic_data.dist_base; 357 358 writel_relaxed(mask, base + offset + (index / 32) * 4); 359 } 360 361 static void gic_mask_irq(struct irq_data *d) 362 { 363 gic_poke_irq(d, GICD_ICENABLER); 364 if (gic_irq_in_rdist(d)) 365 gic_redist_wait_for_rwp(); 366 else 367 gic_dist_wait_for_rwp(); 368 } 369 370 static void gic_eoimode1_mask_irq(struct irq_data *d) 371 { 372 gic_mask_irq(d); 373 /* 374 * When masking a forwarded interrupt, make sure it is 375 * deactivated as well. 376 * 377 * This ensures that an interrupt that is getting 378 * disabled/masked will not get "stuck", because there is 379 * noone to deactivate it (guest is being terminated). 380 */ 381 if (irqd_is_forwarded_to_vcpu(d)) 382 gic_poke_irq(d, GICD_ICACTIVER); 383 } 384 385 static void gic_unmask_irq(struct irq_data *d) 386 { 387 gic_poke_irq(d, GICD_ISENABLER); 388 } 389 390 static inline bool gic_supports_nmi(void) 391 { 392 return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && 393 static_branch_likely(&supports_pseudo_nmis); 394 } 395 396 static int gic_irq_set_irqchip_state(struct irq_data *d, 397 enum irqchip_irq_state which, bool val) 398 { 399 u32 reg; 400 401 if (d->hwirq >= 8192) /* SGI/PPI/SPI only */ 402 return -EINVAL; 403 404 switch (which) { 405 case IRQCHIP_STATE_PENDING: 406 reg = val ? GICD_ISPENDR : GICD_ICPENDR; 407 break; 408 409 case IRQCHIP_STATE_ACTIVE: 410 reg = val ? 
GICD_ISACTIVER : GICD_ICACTIVER; 411 break; 412 413 case IRQCHIP_STATE_MASKED: 414 if (val) { 415 gic_mask_irq(d); 416 return 0; 417 } 418 reg = GICD_ISENABLER; 419 break; 420 421 default: 422 return -EINVAL; 423 } 424 425 gic_poke_irq(d, reg); 426 return 0; 427 } 428 429 static int gic_irq_get_irqchip_state(struct irq_data *d, 430 enum irqchip_irq_state which, bool *val) 431 { 432 if (d->hwirq >= 8192) /* PPI/SPI only */ 433 return -EINVAL; 434 435 switch (which) { 436 case IRQCHIP_STATE_PENDING: 437 *val = gic_peek_irq(d, GICD_ISPENDR); 438 break; 439 440 case IRQCHIP_STATE_ACTIVE: 441 *val = gic_peek_irq(d, GICD_ISACTIVER); 442 break; 443 444 case IRQCHIP_STATE_MASKED: 445 *val = !gic_peek_irq(d, GICD_ISENABLER); 446 break; 447 448 default: 449 return -EINVAL; 450 } 451 452 return 0; 453 } 454 455 static void gic_irq_set_prio(struct irq_data *d, u8 prio) 456 { 457 void __iomem *base = gic_dist_base(d); 458 u32 offset, index; 459 460 offset = convert_offset_index(d, GICD_IPRIORITYR, &index); 461 462 writeb_relaxed(prio, base + offset + index); 463 } 464 465 static u32 __gic_get_ppi_index(irq_hw_number_t hwirq) 466 { 467 switch (__get_intid_range(hwirq)) { 468 case PPI_RANGE: 469 return hwirq - 16; 470 case EPPI_RANGE: 471 return hwirq - EPPI_BASE_INTID + 16; 472 default: 473 unreachable(); 474 } 475 } 476 477 static u32 gic_get_ppi_index(struct irq_data *d) 478 { 479 return __gic_get_ppi_index(d->hwirq); 480 } 481 482 static int gic_irq_nmi_setup(struct irq_data *d) 483 { 484 struct irq_desc *desc = irq_to_desc(d->irq); 485 486 if (!gic_supports_nmi()) 487 return -EINVAL; 488 489 if (gic_peek_irq(d, GICD_ISENABLER)) { 490 pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq); 491 return -EINVAL; 492 } 493 494 /* 495 * A secondary irq_chip should be in charge of LPI request, 496 * it should not be possible to get there 497 */ 498 if (WARN_ON(gic_irq(d) >= 8192)) 499 return -EINVAL; 500 501 /* desc lock should already be held */ 502 if (gic_irq_in_rdist(d)) { 503 u32 idx = gic_get_ppi_index(d); 504 505 /* Setting up PPI as NMI, only switch handler for first NMI */ 506 if (!refcount_inc_not_zero(&ppi_nmi_refs[idx])) { 507 refcount_set(&ppi_nmi_refs[idx], 1); 508 desc->handle_irq = handle_percpu_devid_fasteoi_nmi; 509 } 510 } else { 511 desc->handle_irq = handle_fasteoi_nmi; 512 } 513 514 gic_irq_set_prio(d, GICD_INT_NMI_PRI); 515 516 return 0; 517 } 518 519 static void gic_irq_nmi_teardown(struct irq_data *d) 520 { 521 struct irq_desc *desc = irq_to_desc(d->irq); 522 523 if (WARN_ON(!gic_supports_nmi())) 524 return; 525 526 if (gic_peek_irq(d, GICD_ISENABLER)) { 527 pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq); 528 return; 529 } 530 531 /* 532 * A secondary irq_chip should be in charge of LPI request, 533 * it should not be possible to get there 534 */ 535 if (WARN_ON(gic_irq(d) >= 8192)) 536 return; 537 538 /* desc lock should already be held */ 539 if (gic_irq_in_rdist(d)) { 540 u32 idx = gic_get_ppi_index(d); 541 542 /* Tearing down NMI, only switch handler for last NMI */ 543 if (refcount_dec_and_test(&ppi_nmi_refs[idx])) 544 desc->handle_irq = handle_percpu_devid_irq; 545 } else { 546 desc->handle_irq = handle_fasteoi_irq; 547 } 548 549 gic_irq_set_prio(d, GICD_INT_DEF_PRI); 550 } 551 552 static void gic_eoi_irq(struct irq_data *d) 553 { 554 write_gicreg(gic_irq(d), ICC_EOIR1_EL1); 555 isb(); 556 } 557 558 static void gic_eoimode1_eoi_irq(struct irq_data *d) 559 { 560 /* 561 * No need to deactivate an LPI, or an interrupt that 562 * is is getting forwarded to a 
vcpu.
	 */
	if (gic_irq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d))
		return;
	gic_write_dir(gic_irq(d));
}

static int gic_set_type(struct irq_data *d, unsigned int type)
{
	enum gic_intid_range range;
	unsigned int irq = gic_irq(d);
	void __iomem *base;
	u32 offset, index;
	int ret;

	range = get_intid_range(d);

	/* Interrupt configuration for SGIs can't be changed */
	if (range == SGI_RANGE)
		return type != IRQ_TYPE_EDGE_RISING ? -EINVAL : 0;

	/* SPIs have restrictions on the supported types */
	if ((range == SPI_RANGE || range == ESPI_RANGE) &&
	    type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;

	if (gic_irq_in_rdist(d))
		base = gic_data_rdist_sgi_base();
	else
		base = gic_data.dist_base;

	offset = convert_offset_index(d, GICD_ICFGR, &index);

	ret = gic_configure_irq(index, type, base + offset, NULL);
	if (ret && (range == PPI_RANGE || range == EPPI_RANGE)) {
		/* Misconfigured PPIs are usually not fatal */
		pr_warn("GIC: PPI INTID%d is secure or misconfigured\n", irq);
		ret = 0;
	}

	return ret;
}

static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
{
	if (get_intid_range(d) == SGI_RANGE)
		return -EINVAL;

	if (vcpu)
		irqd_set_forwarded_to_vcpu(d);
	else
		irqd_clr_forwarded_to_vcpu(d);
	return 0;
}

static u64 gic_mpidr_to_affinity(unsigned long mpidr)
{
	u64 aff;

	aff = ((u64)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8  |
	       MPIDR_AFFINITY_LEVEL(mpidr, 0));

	return aff;
}

static void gic_deactivate_unhandled(u32 irqnr)
{
	if (static_branch_likely(&supports_deactivate_key)) {
		if (irqnr < 8192)
			gic_write_dir(irqnr);
	} else {
		write_gicreg(irqnr, ICC_EOIR1_EL1);
		isb();
	}
}

/*
 * Follow a read of the IAR with any HW maintenance that needs to happen prior
 * to invoking the relevant IRQ handler. We must do two things:
 *
 * (1) Ensure instruction ordering between a read of IAR and subsequent
 *     instructions in the IRQ handler using an ISB.
 *
 *     It is possible for the IAR to report an IRQ which was signalled *after*
 *     the CPU took an IRQ exception as multiple interrupts can race to be
 *     recognized by the GIC, earlier interrupts could be withdrawn, and/or
 *     later interrupts could be prioritized by the GIC.
 *
 *     For devices which are tightly coupled to the CPU, such as PMUs, a
 *     context synchronization event is necessary to ensure that system
 *     register state is not stale, as these may have been indirectly written
 *     *after* exception entry.
 *
 * (2) Execute an interrupt priority drop when EOI mode 1 is in use.
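 *
 *     (Illustrative note: with EOI mode 1, the supports_deactivate_key case,
 *      the ICC_EOIR1_EL1 write in gic_complete_ack() only drops the running
 *      priority; deactivation happens later via ICC_DIR_EL1, either from
 *      gic_eoimode1_eoi_irq() or from the vGIC for interrupts forwarded to
 *      a vcpu.)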
658 */ 659 static inline void gic_complete_ack(u32 irqnr) 660 { 661 if (static_branch_likely(&supports_deactivate_key)) 662 write_gicreg(irqnr, ICC_EOIR1_EL1); 663 664 isb(); 665 } 666 667 static bool gic_rpr_is_nmi_prio(void) 668 { 669 if (!gic_supports_nmi()) 670 return false; 671 672 return unlikely(gic_read_rpr() == GICD_INT_RPR_PRI(GICD_INT_NMI_PRI)); 673 } 674 675 static bool gic_irqnr_is_special(u32 irqnr) 676 { 677 return irqnr >= 1020 && irqnr <= 1023; 678 } 679 680 static void __gic_handle_irq(u32 irqnr, struct pt_regs *regs) 681 { 682 if (gic_irqnr_is_special(irqnr)) 683 return; 684 685 gic_complete_ack(irqnr); 686 687 if (generic_handle_domain_irq(gic_data.domain, irqnr)) { 688 WARN_ONCE(true, "Unexpected interrupt (irqnr %u)\n", irqnr); 689 gic_deactivate_unhandled(irqnr); 690 } 691 } 692 693 static void __gic_handle_nmi(u32 irqnr, struct pt_regs *regs) 694 { 695 if (gic_irqnr_is_special(irqnr)) 696 return; 697 698 gic_complete_ack(irqnr); 699 700 if (generic_handle_domain_nmi(gic_data.domain, irqnr)) { 701 WARN_ONCE(true, "Unexpected pseudo-NMI (irqnr %u)\n", irqnr); 702 gic_deactivate_unhandled(irqnr); 703 } 704 } 705 706 /* 707 * An exception has been taken from a context with IRQs enabled, and this could 708 * be an IRQ or an NMI. 709 * 710 * The entry code called us with DAIF.IF set to keep NMIs masked. We must clear 711 * DAIF.IF (and update ICC_PMR_EL1 to mask regular IRQs) prior to returning, 712 * after handling any NMI but before handling any IRQ. 713 * 714 * The entry code has performed IRQ entry, and if an NMI is detected we must 715 * perform NMI entry/exit around invoking the handler. 716 */ 717 static void __gic_handle_irq_from_irqson(struct pt_regs *regs) 718 { 719 bool is_nmi; 720 u32 irqnr; 721 722 irqnr = gic_read_iar(); 723 724 is_nmi = gic_rpr_is_nmi_prio(); 725 726 if (is_nmi) { 727 nmi_enter(); 728 __gic_handle_nmi(irqnr, regs); 729 nmi_exit(); 730 } 731 732 if (gic_prio_masking_enabled()) { 733 gic_pmr_mask_irqs(); 734 gic_arch_enable_irqs(); 735 } 736 737 if (!is_nmi) 738 __gic_handle_irq(irqnr, regs); 739 } 740 741 /* 742 * An exception has been taken from a context with IRQs disabled, which can only 743 * be an NMI. 744 * 745 * The entry code called us with DAIF.IF set to keep NMIs masked. We must leave 746 * DAIF.IF (and ICC_PMR_EL1) unchanged. 747 * 748 * The entry code has performed NMI entry. 749 */ 750 static void __gic_handle_irq_from_irqsoff(struct pt_regs *regs) 751 { 752 u64 pmr; 753 u32 irqnr; 754 755 /* 756 * We were in a context with IRQs disabled. However, the 757 * entry code has set PMR to a value that allows any 758 * interrupt to be acknowledged, and not just NMIs. This can 759 * lead to surprising effects if the NMI has been retired in 760 * the meantime, and that there is an IRQ pending. The IRQ 761 * would then be taken in NMI context, something that nobody 762 * wants to debug twice. 763 * 764 * Until we sort this, drop PMR again to a level that will 765 * actually only allow NMIs before reading IAR, and then 766 * restore it to what it was. 
	 */
	pmr = gic_read_pmr();
	gic_pmr_mask_irqs();
	isb();
	irqnr = gic_read_iar();
	gic_write_pmr(pmr);

	__gic_handle_nmi(irqnr, regs);
}

static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
	if (unlikely(gic_supports_nmi() && !interrupts_enabled(regs)))
		__gic_handle_irq_from_irqsoff(regs);
	else
		__gic_handle_irq_from_irqson(regs);
}

static u32 gic_get_pribits(void)
{
	u32 pribits;

	pribits = gic_read_ctlr();
	pribits &= ICC_CTLR_EL1_PRI_BITS_MASK;
	pribits >>= ICC_CTLR_EL1_PRI_BITS_SHIFT;
	pribits++;

	return pribits;
}

static bool gic_has_group0(void)
{
	u32 val;
	u32 old_pmr;

	old_pmr = gic_read_pmr();

	/*
	 * Let's find out if Group0 is under control of EL3 or not by
	 * setting the highest possible, non-zero priority in PMR.
	 *
	 * If SCR_EL3.FIQ is set, the priority gets shifted down in
	 * order for the CPU interface to set bit 7, and keep the
	 * actual priority in the non-secure range. In the process, it
	 * loses the least significant bit and the actual priority
	 * becomes 0x80. Reading it back returns 0, indicating that
	 * we don't have access to Group0.
	 */
	gic_write_pmr(BIT(8 - gic_get_pribits()));
	val = gic_read_pmr();

	gic_write_pmr(old_pmr);

	return val != 0;
}

static void __init gic_dist_init(void)
{
	unsigned int i;
	u64 affinity;
	void __iomem *base = gic_data.dist_base;
	u32 val;

	/* Disable the distributor */
	writel_relaxed(0, base + GICD_CTLR);
	gic_dist_wait_for_rwp();

	/*
	 * Configure SPIs as non-secure Group-1. This will only matter
	 * if the GIC only has a single security state. This will not
	 * do the right thing if the kernel is running in secure mode,
	 * but that's not the intended use case anyway.
	 */
	for (i = 32; i < GIC_LINE_NR; i += 32)
		writel_relaxed(~0, base + GICD_IGROUPR + i / 8);

	/* Extended SPI range, not handled by the GICv2/GICv3 common code */
	for (i = 0; i < GIC_ESPI_NR; i += 32) {
		writel_relaxed(~0U, base + GICD_ICENABLERnE + i / 8);
		writel_relaxed(~0U, base + GICD_ICACTIVERnE + i / 8);
	}

	for (i = 0; i < GIC_ESPI_NR; i += 32)
		writel_relaxed(~0U, base + GICD_IGROUPRnE + i / 8);

	for (i = 0; i < GIC_ESPI_NR; i += 16)
		writel_relaxed(0, base + GICD_ICFGRnE + i / 4);

	for (i = 0; i < GIC_ESPI_NR; i += 4)
		writel_relaxed(GICD_INT_DEF_PRI_X4, base + GICD_IPRIORITYRnE + i);

	/* Now do the common stuff */
	gic_dist_config(base, GIC_LINE_NR, NULL);

	val = GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1;
	if (gic_data.rdists.gicd_typer2 & GICD_TYPER2_nASSGIcap) {
		pr_info("Enabling SGIs without active state\n");
		val |= GICD_CTLR_nASSGIreq;
	}

	/* Enable distributor with ARE, Group1, and wait for it to drain */
	writel_relaxed(val, base + GICD_CTLR);
	gic_dist_wait_for_rwp();

	/*
	 * Set all global interrupts to the boot CPU only. ARE must be
	 * enabled.
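	 *
	 * (Illustrative layout, following gic_mpidr_to_affinity() above: the
	 *  GICD_IROUTER value packs Aff3 in bits [39:32] and Aff2/Aff1/Aff0
	 *  in bits [23:16], [15:8] and [7:0], with Interrupt_Routing_Mode
	 *  left at 0 so the SPI is delivered to that single PE.)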
874 */ 875 affinity = gic_mpidr_to_affinity(cpu_logical_map(smp_processor_id())); 876 for (i = 32; i < GIC_LINE_NR; i++) 877 gic_write_irouter(affinity, base + GICD_IROUTER + i * 8); 878 879 for (i = 0; i < GIC_ESPI_NR; i++) 880 gic_write_irouter(affinity, base + GICD_IROUTERnE + i * 8); 881 } 882 883 static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *)) 884 { 885 int ret = -ENODEV; 886 int i; 887 888 for (i = 0; i < gic_data.nr_redist_regions; i++) { 889 void __iomem *ptr = gic_data.redist_regions[i].redist_base; 890 u64 typer; 891 u32 reg; 892 893 reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK; 894 if (reg != GIC_PIDR2_ARCH_GICv3 && 895 reg != GIC_PIDR2_ARCH_GICv4) { /* We're in trouble... */ 896 pr_warn("No redistributor present @%p\n", ptr); 897 break; 898 } 899 900 do { 901 typer = gic_read_typer(ptr + GICR_TYPER); 902 ret = fn(gic_data.redist_regions + i, ptr); 903 if (!ret) 904 return 0; 905 906 if (gic_data.redist_regions[i].single_redist) 907 break; 908 909 if (gic_data.redist_stride) { 910 ptr += gic_data.redist_stride; 911 } else { 912 ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */ 913 if (typer & GICR_TYPER_VLPIS) 914 ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */ 915 } 916 } while (!(typer & GICR_TYPER_LAST)); 917 } 918 919 return ret ? -ENODEV : 0; 920 } 921 922 static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr) 923 { 924 unsigned long mpidr = cpu_logical_map(smp_processor_id()); 925 u64 typer; 926 u32 aff; 927 928 /* 929 * Convert affinity to a 32bit value that can be matched to 930 * GICR_TYPER bits [63:32]. 931 */ 932 aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 | 933 MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 | 934 MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 | 935 MPIDR_AFFINITY_LEVEL(mpidr, 0)); 936 937 typer = gic_read_typer(ptr + GICR_TYPER); 938 if ((typer >> 32) == aff) { 939 u64 offset = ptr - region->redist_base; 940 raw_spin_lock_init(&gic_data_rdist()->rd_lock); 941 gic_data_rdist_rd_base() = ptr; 942 gic_data_rdist()->phys_base = region->phys_base + offset; 943 944 pr_info("CPU%d: found redistributor %lx region %d:%pa\n", 945 smp_processor_id(), mpidr, 946 (int)(region - gic_data.redist_regions), 947 &gic_data_rdist()->phys_base); 948 return 0; 949 } 950 951 /* Try next one */ 952 return 1; 953 } 954 955 static int gic_populate_rdist(void) 956 { 957 if (gic_iterate_rdists(__gic_populate_rdist) == 0) 958 return 0; 959 960 /* We couldn't even deal with ourselves... 
*/ 961 WARN(true, "CPU%d: mpidr %lx has no re-distributor!\n", 962 smp_processor_id(), 963 (unsigned long)cpu_logical_map(smp_processor_id())); 964 return -ENODEV; 965 } 966 967 static int __gic_update_rdist_properties(struct redist_region *region, 968 void __iomem *ptr) 969 { 970 u64 typer = gic_read_typer(ptr + GICR_TYPER); 971 u32 ctlr = readl_relaxed(ptr + GICR_CTLR); 972 973 /* Boot-time cleanup */ 974 if ((typer & GICR_TYPER_VLPIS) && (typer & GICR_TYPER_RVPEID)) { 975 u64 val; 976 977 /* Deactivate any present vPE */ 978 val = gicr_read_vpendbaser(ptr + SZ_128K + GICR_VPENDBASER); 979 if (val & GICR_VPENDBASER_Valid) 980 gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast, 981 ptr + SZ_128K + GICR_VPENDBASER); 982 983 /* Mark the VPE table as invalid */ 984 val = gicr_read_vpropbaser(ptr + SZ_128K + GICR_VPROPBASER); 985 val &= ~GICR_VPROPBASER_4_1_VALID; 986 gicr_write_vpropbaser(val, ptr + SZ_128K + GICR_VPROPBASER); 987 } 988 989 gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS); 990 991 /* 992 * TYPER.RVPEID implies some form of DirectLPI, no matter what the 993 * doc says... :-/ And CTLR.IR implies another subset of DirectLPI 994 * that the ITS driver can make use of for LPIs (and not VLPIs). 995 * 996 * These are 3 different ways to express the same thing, depending 997 * on the revision of the architecture and its relaxations over 998 * time. Just group them under the 'direct_lpi' banner. 999 */ 1000 gic_data.rdists.has_rvpeid &= !!(typer & GICR_TYPER_RVPEID); 1001 gic_data.rdists.has_direct_lpi &= (!!(typer & GICR_TYPER_DirectLPIS) | 1002 !!(ctlr & GICR_CTLR_IR) | 1003 gic_data.rdists.has_rvpeid); 1004 gic_data.rdists.has_vpend_valid_dirty &= !!(typer & GICR_TYPER_DIRTY); 1005 1006 /* Detect non-sensical configurations */ 1007 if (WARN_ON_ONCE(gic_data.rdists.has_rvpeid && !gic_data.rdists.has_vlpis)) { 1008 gic_data.rdists.has_direct_lpi = false; 1009 gic_data.rdists.has_vlpis = false; 1010 gic_data.rdists.has_rvpeid = false; 1011 } 1012 1013 gic_data.ppi_nr = min(GICR_TYPER_NR_PPIS(typer), gic_data.ppi_nr); 1014 1015 return 1; 1016 } 1017 1018 static void gic_update_rdist_properties(void) 1019 { 1020 gic_data.ppi_nr = UINT_MAX; 1021 gic_iterate_rdists(__gic_update_rdist_properties); 1022 if (WARN_ON(gic_data.ppi_nr == UINT_MAX)) 1023 gic_data.ppi_nr = 0; 1024 pr_info("GICv3 features: %d PPIs%s%s\n", 1025 gic_data.ppi_nr, 1026 gic_data.has_rss ? ", RSS" : "", 1027 gic_data.rdists.has_direct_lpi ? ", DirectLPI" : ""); 1028 1029 if (gic_data.rdists.has_vlpis) 1030 pr_info("GICv4 features: %s%s%s\n", 1031 gic_data.rdists.has_direct_lpi ? "DirectLPI " : "", 1032 gic_data.rdists.has_rvpeid ? "RVPEID " : "", 1033 gic_data.rdists.has_vpend_valid_dirty ? "Valid+Dirty " : ""); 1034 } 1035 1036 /* Check whether it's single security state view */ 1037 static inline bool gic_dist_security_disabled(void) 1038 { 1039 return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS; 1040 } 1041 1042 static void gic_cpu_sys_reg_init(void) 1043 { 1044 int i, cpu = smp_processor_id(); 1045 u64 mpidr = cpu_logical_map(cpu); 1046 u64 need_rss = MPIDR_RS(mpidr); 1047 bool group0; 1048 u32 pribits; 1049 1050 /* 1051 * Need to check that the SRE bit has actually been set. If 1052 * not, it means that SRE is disabled at EL2. We're going to 1053 * die painfully, and there is nothing we can do about it. 1054 * 1055 * Kindly inform the luser. 
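	 *
	 * (Background, summarising the arch helper and therefore an
	 *  assumption here: gic_enable_sre() attempts to set ICC_SRE_EL1.SRE
	 *  and reports whether the bit stuck; a zero read-back means the CPU
	 *  interface cannot be driven via system registers at this EL.)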
	 */
	if (!gic_enable_sre())
		pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");

	pribits = gic_get_pribits();

	group0 = gic_has_group0();

	/* Set priority mask register */
	if (!gic_prio_masking_enabled()) {
		write_gicreg(DEFAULT_PMR_VALUE, ICC_PMR_EL1);
	} else if (gic_supports_nmi()) {
		/*
		 * Mismatched configuration with the boot CPU, the system is
		 * likely to die as interrupt masking will not work properly
		 * on all CPUs.
		 *
		 * The boot CPU calls this function before enabling NMI support,
		 * and as a result we'll never see this warning in the boot path
		 * for that CPU.
		 */
		if (static_branch_unlikely(&gic_nonsecure_priorities))
			WARN_ON(!group0 || gic_dist_security_disabled());
		else
			WARN_ON(group0 && !gic_dist_security_disabled());
	}

	/*
	 * Some firmwares hand over to the kernel with the BPR changed from
	 * its reset value (and with a value large enough to prevent
	 * any pre-emptive interrupts from working at all). Writing a zero
	 * to BPR restores its reset value.
	 */
	gic_write_bpr1(0);

	if (static_branch_likely(&supports_deactivate_key)) {
		/* EOI drops priority only (mode 1) */
		gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop);
	} else {
		/* EOI deactivates interrupt too (mode 0) */
		gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);
	}

	/* Always whack Group0 before Group1 */
	if (group0) {
		switch (pribits) {
		case 8:
		case 7:
			write_gicreg(0, ICC_AP0R3_EL1);
			write_gicreg(0, ICC_AP0R2_EL1);
			fallthrough;
		case 6:
			write_gicreg(0, ICC_AP0R1_EL1);
			fallthrough;
		case 5:
		case 4:
			write_gicreg(0, ICC_AP0R0_EL1);
		}

		isb();
	}

	switch (pribits) {
	case 8:
	case 7:
		write_gicreg(0, ICC_AP1R3_EL1);
		write_gicreg(0, ICC_AP1R2_EL1);
		fallthrough;
	case 6:
		write_gicreg(0, ICC_AP1R1_EL1);
		fallthrough;
	case 5:
	case 4:
		write_gicreg(0, ICC_AP1R0_EL1);
	}

	isb();

	/* ... and let's hit the road... */
	gic_write_grpen1(1);

	/* Keep the RSS capability status in per_cpu variable */
	per_cpu(has_rss, cpu) = !!(gic_read_ctlr() & ICC_CTLR_EL1_RSS);

	/* Check that all the CPUs are capable of sending SGIs to other CPUs */
	for_each_online_cpu(i) {
		bool have_rss = per_cpu(has_rss, i) && per_cpu(has_rss, cpu);

		need_rss |= MPIDR_RS(cpu_logical_map(i));
		if (need_rss && (!have_rss))
			pr_crit("CPU%d (%lx) can't SGI CPU%d (%lx), no RSS\n",
				cpu, (unsigned long)mpidr,
				i, (unsigned long)cpu_logical_map(i));
	}

	/*
	 * The GIC spec says that when ICC_CTLR_EL1.RSS==1 and GICD_TYPER.RSS==0,
	 * writing the ICC_ASGI1R_EL1 register with RS != 0 is a CONSTRAINED
	 * UNPREDICTABLE choice of:
	 * - The write is ignored.
	 * - The RS field is treated as 0.
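	 *
	 * (Illustrative example: MPIDR_RS() extracts Aff0 bits [7:4], so a
	 *  CPU with Aff0 = 0x13 has RS = 1 and can only be targeted by SGIs
	 *  if both the distributor and the CPU interfaces implement RSS;
	 *  CPUs with Aff0 < 16 always have RS = 0 and need no RSS.)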
1157 */ 1158 if (need_rss && (!gic_data.has_rss)) 1159 pr_crit_once("RSS is required but GICD doesn't support it\n"); 1160 } 1161 1162 static bool gicv3_nolpi; 1163 1164 static int __init gicv3_nolpi_cfg(char *buf) 1165 { 1166 return kstrtobool(buf, &gicv3_nolpi); 1167 } 1168 early_param("irqchip.gicv3_nolpi", gicv3_nolpi_cfg); 1169 1170 static int gic_dist_supports_lpis(void) 1171 { 1172 return (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && 1173 !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS) && 1174 !gicv3_nolpi); 1175 } 1176 1177 static void gic_cpu_init(void) 1178 { 1179 void __iomem *rbase; 1180 int i; 1181 1182 /* Register ourselves with the rest of the world */ 1183 if (gic_populate_rdist()) 1184 return; 1185 1186 gic_enable_redist(true); 1187 1188 WARN((gic_data.ppi_nr > 16 || GIC_ESPI_NR != 0) && 1189 !(gic_read_ctlr() & ICC_CTLR_EL1_ExtRange), 1190 "Distributor has extended ranges, but CPU%d doesn't\n", 1191 smp_processor_id()); 1192 1193 rbase = gic_data_rdist_sgi_base(); 1194 1195 /* Configure SGIs/PPIs as non-secure Group-1 */ 1196 for (i = 0; i < gic_data.ppi_nr + 16; i += 32) 1197 writel_relaxed(~0, rbase + GICR_IGROUPR0 + i / 8); 1198 1199 gic_cpu_config(rbase, gic_data.ppi_nr + 16, gic_redist_wait_for_rwp); 1200 1201 /* initialise system registers */ 1202 gic_cpu_sys_reg_init(); 1203 } 1204 1205 #ifdef CONFIG_SMP 1206 1207 #define MPIDR_TO_SGI_RS(mpidr) (MPIDR_RS(mpidr) << ICC_SGI1R_RS_SHIFT) 1208 #define MPIDR_TO_SGI_CLUSTER_ID(mpidr) ((mpidr) & ~0xFUL) 1209 1210 static int gic_starting_cpu(unsigned int cpu) 1211 { 1212 gic_cpu_init(); 1213 1214 if (gic_dist_supports_lpis()) 1215 its_cpu_init(); 1216 1217 return 0; 1218 } 1219 1220 static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask, 1221 unsigned long cluster_id) 1222 { 1223 int next_cpu, cpu = *base_cpu; 1224 unsigned long mpidr = cpu_logical_map(cpu); 1225 u16 tlist = 0; 1226 1227 while (cpu < nr_cpu_ids) { 1228 tlist |= 1 << (mpidr & 0xf); 1229 1230 next_cpu = cpumask_next(cpu, mask); 1231 if (next_cpu >= nr_cpu_ids) 1232 goto out; 1233 cpu = next_cpu; 1234 1235 mpidr = cpu_logical_map(cpu); 1236 1237 if (cluster_id != MPIDR_TO_SGI_CLUSTER_ID(mpidr)) { 1238 cpu--; 1239 goto out; 1240 } 1241 } 1242 out: 1243 *base_cpu = cpu; 1244 return tlist; 1245 } 1246 1247 #define MPIDR_TO_SGI_AFFINITY(cluster_id, level) \ 1248 (MPIDR_AFFINITY_LEVEL(cluster_id, level) \ 1249 << ICC_SGI1R_AFFINITY_## level ##_SHIFT) 1250 1251 static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq) 1252 { 1253 u64 val; 1254 1255 val = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3) | 1256 MPIDR_TO_SGI_AFFINITY(cluster_id, 2) | 1257 irq << ICC_SGI1R_SGI_ID_SHIFT | 1258 MPIDR_TO_SGI_AFFINITY(cluster_id, 1) | 1259 MPIDR_TO_SGI_RS(cluster_id) | 1260 tlist << ICC_SGI1R_TARGET_LIST_SHIFT); 1261 1262 pr_devel("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val); 1263 gic_write_sgi1r(val); 1264 } 1265 1266 static void gic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask) 1267 { 1268 int cpu; 1269 1270 if (WARN_ON(d->hwirq >= 16)) 1271 return; 1272 1273 /* 1274 * Ensure that stores to Normal memory are visible to the 1275 * other CPUs before issuing the IPI. 
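	 *
	 * (Illustrative note on the loop below: gic_compute_target_list()
	 *  groups CPUs that share the same cluster, i.e. the same
	 *  Aff3/Aff2/Aff1 and range selector, into one 16-bit target list
	 *  where each bit is a CPU's Aff0[3:0], so a single ICC_SGI1R_EL1
	 *  write can target up to 16 CPUs.)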
1276 */ 1277 dsb(ishst); 1278 1279 for_each_cpu(cpu, mask) { 1280 u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(cpu_logical_map(cpu)); 1281 u16 tlist; 1282 1283 tlist = gic_compute_target_list(&cpu, mask, cluster_id); 1284 gic_send_sgi(cluster_id, tlist, d->hwirq); 1285 } 1286 1287 /* Force the above writes to ICC_SGI1R_EL1 to be executed */ 1288 isb(); 1289 } 1290 1291 static void __init gic_smp_init(void) 1292 { 1293 struct irq_fwspec sgi_fwspec = { 1294 .fwnode = gic_data.fwnode, 1295 .param_count = 1, 1296 }; 1297 int base_sgi; 1298 1299 cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING, 1300 "irqchip/arm/gicv3:starting", 1301 gic_starting_cpu, NULL); 1302 1303 /* Register all 8 non-secure SGIs */ 1304 base_sgi = __irq_domain_alloc_irqs(gic_data.domain, -1, 8, 1305 NUMA_NO_NODE, &sgi_fwspec, 1306 false, NULL); 1307 if (WARN_ON(base_sgi <= 0)) 1308 return; 1309 1310 set_smp_ipi_range(base_sgi, 8); 1311 } 1312 1313 static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, 1314 bool force) 1315 { 1316 unsigned int cpu; 1317 u32 offset, index; 1318 void __iomem *reg; 1319 int enabled; 1320 u64 val; 1321 1322 if (force) 1323 cpu = cpumask_first(mask_val); 1324 else 1325 cpu = cpumask_any_and(mask_val, cpu_online_mask); 1326 1327 if (cpu >= nr_cpu_ids) 1328 return -EINVAL; 1329 1330 if (gic_irq_in_rdist(d)) 1331 return -EINVAL; 1332 1333 /* If interrupt was enabled, disable it first */ 1334 enabled = gic_peek_irq(d, GICD_ISENABLER); 1335 if (enabled) 1336 gic_mask_irq(d); 1337 1338 offset = convert_offset_index(d, GICD_IROUTER, &index); 1339 reg = gic_dist_base(d) + offset + (index * 8); 1340 val = gic_mpidr_to_affinity(cpu_logical_map(cpu)); 1341 1342 gic_write_irouter(val, reg); 1343 1344 /* 1345 * If the interrupt was enabled, enabled it again. Otherwise, 1346 * just wait for the distributor to have digested our changes. 
1347 */ 1348 if (enabled) 1349 gic_unmask_irq(d); 1350 1351 irq_data_update_effective_affinity(d, cpumask_of(cpu)); 1352 1353 return IRQ_SET_MASK_OK_DONE; 1354 } 1355 #else 1356 #define gic_set_affinity NULL 1357 #define gic_ipi_send_mask NULL 1358 #define gic_smp_init() do { } while(0) 1359 #endif 1360 1361 static int gic_retrigger(struct irq_data *data) 1362 { 1363 return !gic_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING, true); 1364 } 1365 1366 #ifdef CONFIG_CPU_PM 1367 static int gic_cpu_pm_notifier(struct notifier_block *self, 1368 unsigned long cmd, void *v) 1369 { 1370 if (cmd == CPU_PM_EXIT) { 1371 if (gic_dist_security_disabled()) 1372 gic_enable_redist(true); 1373 gic_cpu_sys_reg_init(); 1374 } else if (cmd == CPU_PM_ENTER && gic_dist_security_disabled()) { 1375 gic_write_grpen1(0); 1376 gic_enable_redist(false); 1377 } 1378 return NOTIFY_OK; 1379 } 1380 1381 static struct notifier_block gic_cpu_pm_notifier_block = { 1382 .notifier_call = gic_cpu_pm_notifier, 1383 }; 1384 1385 static void gic_cpu_pm_init(void) 1386 { 1387 cpu_pm_register_notifier(&gic_cpu_pm_notifier_block); 1388 } 1389 1390 #else 1391 static inline void gic_cpu_pm_init(void) { } 1392 #endif /* CONFIG_CPU_PM */ 1393 1394 static struct irq_chip gic_chip = { 1395 .name = "GICv3", 1396 .irq_mask = gic_mask_irq, 1397 .irq_unmask = gic_unmask_irq, 1398 .irq_eoi = gic_eoi_irq, 1399 .irq_set_type = gic_set_type, 1400 .irq_set_affinity = gic_set_affinity, 1401 .irq_retrigger = gic_retrigger, 1402 .irq_get_irqchip_state = gic_irq_get_irqchip_state, 1403 .irq_set_irqchip_state = gic_irq_set_irqchip_state, 1404 .irq_nmi_setup = gic_irq_nmi_setup, 1405 .irq_nmi_teardown = gic_irq_nmi_teardown, 1406 .ipi_send_mask = gic_ipi_send_mask, 1407 .flags = IRQCHIP_SET_TYPE_MASKED | 1408 IRQCHIP_SKIP_SET_WAKE | 1409 IRQCHIP_MASK_ON_SUSPEND, 1410 }; 1411 1412 static struct irq_chip gic_eoimode1_chip = { 1413 .name = "GICv3", 1414 .irq_mask = gic_eoimode1_mask_irq, 1415 .irq_unmask = gic_unmask_irq, 1416 .irq_eoi = gic_eoimode1_eoi_irq, 1417 .irq_set_type = gic_set_type, 1418 .irq_set_affinity = gic_set_affinity, 1419 .irq_retrigger = gic_retrigger, 1420 .irq_get_irqchip_state = gic_irq_get_irqchip_state, 1421 .irq_set_irqchip_state = gic_irq_set_irqchip_state, 1422 .irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity, 1423 .irq_nmi_setup = gic_irq_nmi_setup, 1424 .irq_nmi_teardown = gic_irq_nmi_teardown, 1425 .ipi_send_mask = gic_ipi_send_mask, 1426 .flags = IRQCHIP_SET_TYPE_MASKED | 1427 IRQCHIP_SKIP_SET_WAKE | 1428 IRQCHIP_MASK_ON_SUSPEND, 1429 }; 1430 1431 static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, 1432 irq_hw_number_t hw) 1433 { 1434 struct irq_chip *chip = &gic_chip; 1435 struct irq_data *irqd = irq_desc_get_irq_data(irq_to_desc(irq)); 1436 1437 if (static_branch_likely(&supports_deactivate_key)) 1438 chip = &gic_eoimode1_chip; 1439 1440 switch (__get_intid_range(hw)) { 1441 case SGI_RANGE: 1442 case PPI_RANGE: 1443 case EPPI_RANGE: 1444 irq_set_percpu_devid(irq); 1445 irq_domain_set_info(d, irq, hw, chip, d->host_data, 1446 handle_percpu_devid_irq, NULL, NULL); 1447 break; 1448 1449 case SPI_RANGE: 1450 case ESPI_RANGE: 1451 irq_domain_set_info(d, irq, hw, chip, d->host_data, 1452 handle_fasteoi_irq, NULL, NULL); 1453 irq_set_probe(irq); 1454 irqd_set_single_target(irqd); 1455 break; 1456 1457 case LPI_RANGE: 1458 if (!gic_dist_supports_lpis()) 1459 return -EPERM; 1460 irq_domain_set_info(d, irq, hw, chip, d->host_data, 1461 handle_fasteoi_irq, NULL, NULL); 1462 break; 1463 1464 default: 1465 return 
-EPERM; 1466 } 1467 1468 /* Prevents SW retriggers which mess up the ACK/EOI ordering */ 1469 irqd_set_handle_enforce_irqctx(irqd); 1470 return 0; 1471 } 1472 1473 static int gic_irq_domain_translate(struct irq_domain *d, 1474 struct irq_fwspec *fwspec, 1475 unsigned long *hwirq, 1476 unsigned int *type) 1477 { 1478 if (fwspec->param_count == 1 && fwspec->param[0] < 16) { 1479 *hwirq = fwspec->param[0]; 1480 *type = IRQ_TYPE_EDGE_RISING; 1481 return 0; 1482 } 1483 1484 if (is_of_node(fwspec->fwnode)) { 1485 if (fwspec->param_count < 3) 1486 return -EINVAL; 1487 1488 switch (fwspec->param[0]) { 1489 case 0: /* SPI */ 1490 *hwirq = fwspec->param[1] + 32; 1491 break; 1492 case 1: /* PPI */ 1493 *hwirq = fwspec->param[1] + 16; 1494 break; 1495 case 2: /* ESPI */ 1496 *hwirq = fwspec->param[1] + ESPI_BASE_INTID; 1497 break; 1498 case 3: /* EPPI */ 1499 *hwirq = fwspec->param[1] + EPPI_BASE_INTID; 1500 break; 1501 case GIC_IRQ_TYPE_LPI: /* LPI */ 1502 *hwirq = fwspec->param[1]; 1503 break; 1504 case GIC_IRQ_TYPE_PARTITION: 1505 *hwirq = fwspec->param[1]; 1506 if (fwspec->param[1] >= 16) 1507 *hwirq += EPPI_BASE_INTID - 16; 1508 else 1509 *hwirq += 16; 1510 break; 1511 default: 1512 return -EINVAL; 1513 } 1514 1515 *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK; 1516 1517 /* 1518 * Make it clear that broken DTs are... broken. 1519 * Partitioned PPIs are an unfortunate exception. 1520 */ 1521 WARN_ON(*type == IRQ_TYPE_NONE && 1522 fwspec->param[0] != GIC_IRQ_TYPE_PARTITION); 1523 return 0; 1524 } 1525 1526 if (is_fwnode_irqchip(fwspec->fwnode)) { 1527 if(fwspec->param_count != 2) 1528 return -EINVAL; 1529 1530 if (fwspec->param[0] < 16) { 1531 pr_err(FW_BUG "Illegal GSI%d translation request\n", 1532 fwspec->param[0]); 1533 return -EINVAL; 1534 } 1535 1536 *hwirq = fwspec->param[0]; 1537 *type = fwspec->param[1]; 1538 1539 WARN_ON(*type == IRQ_TYPE_NONE); 1540 return 0; 1541 } 1542 1543 return -EINVAL; 1544 } 1545 1546 static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, 1547 unsigned int nr_irqs, void *arg) 1548 { 1549 int i, ret; 1550 irq_hw_number_t hwirq; 1551 unsigned int type = IRQ_TYPE_NONE; 1552 struct irq_fwspec *fwspec = arg; 1553 1554 ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type); 1555 if (ret) 1556 return ret; 1557 1558 for (i = 0; i < nr_irqs; i++) { 1559 ret = gic_irq_domain_map(domain, virq + i, hwirq + i); 1560 if (ret) 1561 return ret; 1562 } 1563 1564 return 0; 1565 } 1566 1567 static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq, 1568 unsigned int nr_irqs) 1569 { 1570 int i; 1571 1572 for (i = 0; i < nr_irqs; i++) { 1573 struct irq_data *d = irq_domain_get_irq_data(domain, virq + i); 1574 irq_set_handler(virq + i, NULL); 1575 irq_domain_reset_irq_data(d); 1576 } 1577 } 1578 1579 static bool fwspec_is_partitioned_ppi(struct irq_fwspec *fwspec, 1580 irq_hw_number_t hwirq) 1581 { 1582 enum gic_intid_range range; 1583 1584 if (!gic_data.ppi_descs) 1585 return false; 1586 1587 if (!is_of_node(fwspec->fwnode)) 1588 return false; 1589 1590 if (fwspec->param_count < 4 || !fwspec->param[3]) 1591 return false; 1592 1593 range = __get_intid_range(hwirq); 1594 if (range != PPI_RANGE && range != EPPI_RANGE) 1595 return false; 1596 1597 return true; 1598 } 1599 1600 static int gic_irq_domain_select(struct irq_domain *d, 1601 struct irq_fwspec *fwspec, 1602 enum irq_domain_bus_token bus_token) 1603 { 1604 unsigned int type, ret, ppi_idx; 1605 irq_hw_number_t hwirq; 1606 1607 /* Not for us */ 1608 if (fwspec->fwnode != d->fwnode) 
1609 return 0; 1610 1611 /* If this is not DT, then we have a single domain */ 1612 if (!is_of_node(fwspec->fwnode)) 1613 return 1; 1614 1615 ret = gic_irq_domain_translate(d, fwspec, &hwirq, &type); 1616 if (WARN_ON_ONCE(ret)) 1617 return 0; 1618 1619 if (!fwspec_is_partitioned_ppi(fwspec, hwirq)) 1620 return d == gic_data.domain; 1621 1622 /* 1623 * If this is a PPI and we have a 4th (non-null) parameter, 1624 * then we need to match the partition domain. 1625 */ 1626 ppi_idx = __gic_get_ppi_index(hwirq); 1627 return d == partition_get_domain(gic_data.ppi_descs[ppi_idx]); 1628 } 1629 1630 static const struct irq_domain_ops gic_irq_domain_ops = { 1631 .translate = gic_irq_domain_translate, 1632 .alloc = gic_irq_domain_alloc, 1633 .free = gic_irq_domain_free, 1634 .select = gic_irq_domain_select, 1635 }; 1636 1637 static int partition_domain_translate(struct irq_domain *d, 1638 struct irq_fwspec *fwspec, 1639 unsigned long *hwirq, 1640 unsigned int *type) 1641 { 1642 unsigned long ppi_intid; 1643 struct device_node *np; 1644 unsigned int ppi_idx; 1645 int ret; 1646 1647 if (!gic_data.ppi_descs) 1648 return -ENOMEM; 1649 1650 np = of_find_node_by_phandle(fwspec->param[3]); 1651 if (WARN_ON(!np)) 1652 return -EINVAL; 1653 1654 ret = gic_irq_domain_translate(d, fwspec, &ppi_intid, type); 1655 if (WARN_ON_ONCE(ret)) 1656 return 0; 1657 1658 ppi_idx = __gic_get_ppi_index(ppi_intid); 1659 ret = partition_translate_id(gic_data.ppi_descs[ppi_idx], 1660 of_node_to_fwnode(np)); 1661 if (ret < 0) 1662 return ret; 1663 1664 *hwirq = ret; 1665 *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK; 1666 1667 return 0; 1668 } 1669 1670 static const struct irq_domain_ops partition_domain_ops = { 1671 .translate = partition_domain_translate, 1672 .select = gic_irq_domain_select, 1673 }; 1674 1675 static bool gic_enable_quirk_msm8996(void *data) 1676 { 1677 struct gic_chip_data *d = data; 1678 1679 d->flags |= FLAGS_WORKAROUND_GICR_WAKER_MSM8996; 1680 1681 return true; 1682 } 1683 1684 static bool gic_enable_quirk_cavium_38539(void *data) 1685 { 1686 struct gic_chip_data *d = data; 1687 1688 d->flags |= FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539; 1689 1690 return true; 1691 } 1692 1693 static bool gic_enable_quirk_hip06_07(void *data) 1694 { 1695 struct gic_chip_data *d = data; 1696 1697 /* 1698 * HIP06 GICD_IIDR clashes with GIC-600 product number (despite 1699 * not being an actual ARM implementation). The saving grace is 1700 * that GIC-600 doesn't have ESPI, so nothing to do in that case. 1701 * HIP07 doesn't even have a proper IIDR, and still pretends to 1702 * have ESPI. In both cases, put them right. 1703 */ 1704 if (d->rdists.gicd_typer & GICD_TYPER_ESPI) { 1705 /* Zero both ESPI and the RES0 field next to it... */ 1706 d->rdists.gicd_typer &= ~GENMASK(9, 8); 1707 return true; 1708 } 1709 1710 return false; 1711 } 1712 1713 static const struct gic_quirk gic_quirks[] = { 1714 { 1715 .desc = "GICv3: Qualcomm MSM8996 broken firmware", 1716 .compatible = "qcom,msm8996-gic-v3", 1717 .init = gic_enable_quirk_msm8996, 1718 }, 1719 { 1720 .desc = "GICv3: HIP06 erratum 161010803", 1721 .iidr = 0x0204043b, 1722 .mask = 0xffffffff, 1723 .init = gic_enable_quirk_hip06_07, 1724 }, 1725 { 1726 .desc = "GICv3: HIP07 erratum 161010803", 1727 .iidr = 0x00000000, 1728 .mask = 0xffffffff, 1729 .init = gic_enable_quirk_hip06_07, 1730 }, 1731 { 1732 /* 1733 * Reserved register accesses generate a Synchronous 1734 * External Abort. 
This erratum applies to: 1735 * - ThunderX: CN88xx 1736 * - OCTEON TX: CN83xx, CN81xx 1737 * - OCTEON TX2: CN93xx, CN96xx, CN98xx, CNF95xx* 1738 */ 1739 .desc = "GICv3: Cavium erratum 38539", 1740 .iidr = 0xa000034c, 1741 .mask = 0xe8f00fff, 1742 .init = gic_enable_quirk_cavium_38539, 1743 }, 1744 { 1745 } 1746 }; 1747 1748 static void gic_enable_nmi_support(void) 1749 { 1750 int i; 1751 1752 if (!gic_prio_masking_enabled()) 1753 return; 1754 1755 ppi_nmi_refs = kcalloc(gic_data.ppi_nr, sizeof(*ppi_nmi_refs), GFP_KERNEL); 1756 if (!ppi_nmi_refs) 1757 return; 1758 1759 for (i = 0; i < gic_data.ppi_nr; i++) 1760 refcount_set(&ppi_nmi_refs[i], 0); 1761 1762 pr_info("Pseudo-NMIs enabled using %s ICC_PMR_EL1 synchronisation\n", 1763 gic_has_relaxed_pmr_sync() ? "relaxed" : "forced"); 1764 1765 /* 1766 * How priority values are used by the GIC depends on two things: 1767 * the security state of the GIC (controlled by the GICD_CTRL.DS bit) 1768 * and if Group 0 interrupts can be delivered to Linux in the non-secure 1769 * world as FIQs (controlled by the SCR_EL3.FIQ bit). These affect the 1770 * ICC_PMR_EL1 register and the priority that software assigns to 1771 * interrupts: 1772 * 1773 * GICD_CTRL.DS | SCR_EL3.FIQ | ICC_PMR_EL1 | Group 1 priority 1774 * ----------------------------------------------------------- 1775 * 1 | - | unchanged | unchanged 1776 * ----------------------------------------------------------- 1777 * 0 | 1 | non-secure | non-secure 1778 * ----------------------------------------------------------- 1779 * 0 | 0 | unchanged | non-secure 1780 * 1781 * where non-secure means that the value is right-shifted by one and the 1782 * MSB bit set, to make it fit in the non-secure priority range. 1783 * 1784 * In the first two cases, where ICC_PMR_EL1 and the interrupt priority 1785 * are both either modified or unchanged, we can use the same set of 1786 * priorities. 1787 * 1788 * In the last case, where only the interrupt priorities are modified to 1789 * be in the non-secure range, we use a different PMR value to mask IRQs 1790 * and the rest of the values that we use remain unchanged. 1791 */ 1792 if (gic_has_group0() && !gic_dist_security_disabled()) 1793 static_branch_enable(&gic_nonsecure_priorities); 1794 1795 static_branch_enable(&supports_pseudo_nmis); 1796 1797 if (static_branch_likely(&supports_deactivate_key)) 1798 gic_eoimode1_chip.flags |= IRQCHIP_SUPPORTS_NMI; 1799 else 1800 gic_chip.flags |= IRQCHIP_SUPPORTS_NMI; 1801 } 1802 1803 static int __init gic_init_bases(void __iomem *dist_base, 1804 struct redist_region *rdist_regs, 1805 u32 nr_redist_regions, 1806 u64 redist_stride, 1807 struct fwnode_handle *handle) 1808 { 1809 u32 typer; 1810 int err; 1811 1812 if (!is_hyp_mode_available()) 1813 static_branch_disable(&supports_deactivate_key); 1814 1815 if (static_branch_likely(&supports_deactivate_key)) 1816 pr_info("GIC: Using split EOI/Deactivate mode\n"); 1817 1818 gic_data.fwnode = handle; 1819 gic_data.dist_base = dist_base; 1820 gic_data.redist_regions = rdist_regs; 1821 gic_data.nr_redist_regions = nr_redist_regions; 1822 gic_data.redist_stride = redist_stride; 1823 1824 /* 1825 * Find out how many interrupts are supported. 
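	 *
	 * (For reference, derivation assumed from the GICv3 spec: GIC_LINE_NR
	 *  and GIC_ESPI_NR are computed from the GICD_TYPER value read below;
	 *  regular SPIs come from ITLinesNumber as 32 * (N + 1), capped at
	 *  1020, and the extended SPI count from the ESPI fields, as encoded
	 *  by the GICD_TYPER_SPIS/GICD_TYPER_ESPIS macros.)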
1826 */ 1827 typer = readl_relaxed(gic_data.dist_base + GICD_TYPER); 1828 gic_data.rdists.gicd_typer = typer; 1829 1830 gic_enable_quirks(readl_relaxed(gic_data.dist_base + GICD_IIDR), 1831 gic_quirks, &gic_data); 1832 1833 pr_info("%d SPIs implemented\n", GIC_LINE_NR - 32); 1834 pr_info("%d Extended SPIs implemented\n", GIC_ESPI_NR); 1835 1836 /* 1837 * ThunderX1 explodes on reading GICD_TYPER2, in violation of the 1838 * architecture spec (which says that reserved registers are RES0). 1839 */ 1840 if (!(gic_data.flags & FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539)) 1841 gic_data.rdists.gicd_typer2 = readl_relaxed(gic_data.dist_base + GICD_TYPER2); 1842 1843 gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops, 1844 &gic_data); 1845 gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist)); 1846 gic_data.rdists.has_rvpeid = true; 1847 gic_data.rdists.has_vlpis = true; 1848 gic_data.rdists.has_direct_lpi = true; 1849 gic_data.rdists.has_vpend_valid_dirty = true; 1850 1851 if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) { 1852 err = -ENOMEM; 1853 goto out_free; 1854 } 1855 1856 irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED); 1857 1858 gic_data.has_rss = !!(typer & GICD_TYPER_RSS); 1859 1860 if (typer & GICD_TYPER_MBIS) { 1861 err = mbi_init(handle, gic_data.domain); 1862 if (err) 1863 pr_err("Failed to initialize MBIs\n"); 1864 } 1865 1866 set_handle_irq(gic_handle_irq); 1867 1868 gic_update_rdist_properties(); 1869 1870 gic_dist_init(); 1871 gic_cpu_init(); 1872 gic_smp_init(); 1873 gic_cpu_pm_init(); 1874 1875 if (gic_dist_supports_lpis()) { 1876 its_init(handle, &gic_data.rdists, gic_data.domain); 1877 its_cpu_init(); 1878 its_lpi_memreserve_init(); 1879 } else { 1880 if (IS_ENABLED(CONFIG_ARM_GIC_V2M)) 1881 gicv2m_init(handle, gic_data.domain); 1882 } 1883 1884 gic_enable_nmi_support(); 1885 1886 return 0; 1887 1888 out_free: 1889 if (gic_data.domain) 1890 irq_domain_remove(gic_data.domain); 1891 free_percpu(gic_data.rdists.rdist); 1892 return err; 1893 } 1894 1895 static int __init gic_validate_dist_version(void __iomem *dist_base) 1896 { 1897 u32 reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK; 1898 1899 if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4) 1900 return -ENODEV; 1901 1902 return 0; 1903 } 1904 1905 /* Create all possible partitions at boot time */ 1906 static void __init gic_populate_ppi_partitions(struct device_node *gic_node) 1907 { 1908 struct device_node *parts_node, *child_part; 1909 int part_idx = 0, i; 1910 int nr_parts; 1911 struct partition_affinity *parts; 1912 1913 parts_node = of_get_child_by_name(gic_node, "ppi-partitions"); 1914 if (!parts_node) 1915 return; 1916 1917 gic_data.ppi_descs = kcalloc(gic_data.ppi_nr, sizeof(*gic_data.ppi_descs), GFP_KERNEL); 1918 if (!gic_data.ppi_descs) 1919 goto out_put_node; 1920 1921 nr_parts = of_get_child_count(parts_node); 1922 1923 if (!nr_parts) 1924 goto out_put_node; 1925 1926 parts = kcalloc(nr_parts, sizeof(*parts), GFP_KERNEL); 1927 if (WARN_ON(!parts)) 1928 goto out_put_node; 1929 1930 for_each_child_of_node(parts_node, child_part) { 1931 struct partition_affinity *part; 1932 int n; 1933 1934 part = &parts[part_idx]; 1935 1936 part->partition_id = of_node_to_fwnode(child_part); 1937 1938 pr_info("GIC: PPI partition %pOFn[%d] { ", 1939 child_part, part_idx); 1940 1941 n = of_property_count_elems_of_size(child_part, "affinity", 1942 sizeof(u32)); 1943 WARN_ON(n <= 0); 1944 1945 for (i = 0; i < n; i++) { 1946 int err, cpu; 1947 u32 
cpu_phandle; 1948 struct device_node *cpu_node; 1949 1950 err = of_property_read_u32_index(child_part, "affinity", 1951 i, &cpu_phandle); 1952 if (WARN_ON(err)) 1953 continue; 1954 1955 cpu_node = of_find_node_by_phandle(cpu_phandle); 1956 if (WARN_ON(!cpu_node)) 1957 continue; 1958 1959 cpu = of_cpu_node_to_id(cpu_node); 1960 if (WARN_ON(cpu < 0)) { 1961 of_node_put(cpu_node); 1962 continue; 1963 } 1964 1965 pr_cont("%pOF[%d] ", cpu_node, cpu); 1966 1967 cpumask_set_cpu(cpu, &part->mask); 1968 of_node_put(cpu_node); 1969 } 1970 1971 pr_cont("}\n"); 1972 part_idx++; 1973 } 1974 1975 for (i = 0; i < gic_data.ppi_nr; i++) { 1976 unsigned int irq; 1977 struct partition_desc *desc; 1978 struct irq_fwspec ppi_fwspec = { 1979 .fwnode = gic_data.fwnode, 1980 .param_count = 3, 1981 .param = { 1982 [0] = GIC_IRQ_TYPE_PARTITION, 1983 [1] = i, 1984 [2] = IRQ_TYPE_NONE, 1985 }, 1986 }; 1987 1988 irq = irq_create_fwspec_mapping(&ppi_fwspec); 1989 if (WARN_ON(!irq)) 1990 continue; 1991 desc = partition_create_desc(gic_data.fwnode, parts, nr_parts, 1992 irq, &partition_domain_ops); 1993 if (WARN_ON(!desc)) 1994 continue; 1995 1996 gic_data.ppi_descs[i] = desc; 1997 } 1998 1999 out_put_node: 2000 of_node_put(parts_node); 2001 } 2002 2003 static void __init gic_of_setup_kvm_info(struct device_node *node) 2004 { 2005 int ret; 2006 struct resource r; 2007 u32 gicv_idx; 2008 2009 gic_v3_kvm_info.type = GIC_V3; 2010 2011 gic_v3_kvm_info.maint_irq = irq_of_parse_and_map(node, 0); 2012 if (!gic_v3_kvm_info.maint_irq) 2013 return; 2014 2015 if (of_property_read_u32(node, "#redistributor-regions", 2016 &gicv_idx)) 2017 gicv_idx = 1; 2018 2019 gicv_idx += 3; /* Also skip GICD, GICC, GICH */ 2020 ret = of_address_to_resource(node, gicv_idx, &r); 2021 if (!ret) 2022 gic_v3_kvm_info.vcpu = r; 2023 2024 gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis; 2025 gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid; 2026 vgic_set_kvm_info(&gic_v3_kvm_info); 2027 } 2028 2029 static void gic_request_region(resource_size_t base, resource_size_t size, 2030 const char *name) 2031 { 2032 if (!request_mem_region(base, size, name)) 2033 pr_warn_once(FW_BUG "%s region %pa has overlapping address\n", 2034 name, &base); 2035 } 2036 2037 static void __iomem *gic_of_iomap(struct device_node *node, int idx, 2038 const char *name, struct resource *res) 2039 { 2040 void __iomem *base; 2041 int ret; 2042 2043 ret = of_address_to_resource(node, idx, res); 2044 if (ret) 2045 return IOMEM_ERR_PTR(ret); 2046 2047 gic_request_region(res->start, resource_size(res), name); 2048 base = of_iomap(node, idx); 2049 2050 return base ?: IOMEM_ERR_PTR(-ENOMEM); 2051 } 2052 2053 static int __init gic_of_init(struct device_node *node, struct device_node *parent) 2054 { 2055 void __iomem *dist_base; 2056 struct redist_region *rdist_regs; 2057 struct resource res; 2058 u64 redist_stride; 2059 u32 nr_redist_regions; 2060 int err, i; 2061 2062 dist_base = gic_of_iomap(node, 0, "GICD", &res); 2063 if (IS_ERR(dist_base)) { 2064 pr_err("%pOF: unable to map gic dist registers\n", node); 2065 return PTR_ERR(dist_base); 2066 } 2067 2068 err = gic_validate_dist_version(dist_base); 2069 if (err) { 2070 pr_err("%pOF: no distributor detected, giving up\n", node); 2071 goto out_unmap_dist; 2072 } 2073 2074 if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions)) 2075 nr_redist_regions = 1; 2076 2077 rdist_regs = kcalloc(nr_redist_regions, sizeof(*rdist_regs), 2078 GFP_KERNEL); 2079 if (!rdist_regs) { 2080 err = -ENOMEM; 2081 goto out_unmap_dist; 
static void __init gic_of_setup_kvm_info(struct device_node *node)
{
	int ret;
	struct resource r;
	u32 gicv_idx;

	gic_v3_kvm_info.type = GIC_V3;

	gic_v3_kvm_info.maint_irq = irq_of_parse_and_map(node, 0);
	if (!gic_v3_kvm_info.maint_irq)
		return;

	if (of_property_read_u32(node, "#redistributor-regions",
				 &gicv_idx))
		gicv_idx = 1;

	gicv_idx += 3;	/* Also skip GICD, GICC, GICH */
	ret = of_address_to_resource(node, gicv_idx, &r);
	if (!ret)
		gic_v3_kvm_info.vcpu = r;

	gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
	gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
	vgic_set_kvm_info(&gic_v3_kvm_info);
}

static void gic_request_region(resource_size_t base, resource_size_t size,
			       const char *name)
{
	if (!request_mem_region(base, size, name))
		pr_warn_once(FW_BUG "%s region %pa has overlapping address\n",
			     name, &base);
}

static void __iomem *gic_of_iomap(struct device_node *node, int idx,
				  const char *name, struct resource *res)
{
	void __iomem *base;
	int ret;

	ret = of_address_to_resource(node, idx, res);
	if (ret)
		return IOMEM_ERR_PTR(ret);

	gic_request_region(res->start, resource_size(res), name);
	base = of_iomap(node, idx);

	return base ?: IOMEM_ERR_PTR(-ENOMEM);
}

static int __init gic_of_init(struct device_node *node, struct device_node *parent)
{
	void __iomem *dist_base;
	struct redist_region *rdist_regs;
	struct resource res;
	u64 redist_stride;
	u32 nr_redist_regions;
	int err, i;

	dist_base = gic_of_iomap(node, 0, "GICD", &res);
	if (IS_ERR(dist_base)) {
		pr_err("%pOF: unable to map gic dist registers\n", node);
		return PTR_ERR(dist_base);
	}

	err = gic_validate_dist_version(dist_base);
	if (err) {
		pr_err("%pOF: no distributor detected, giving up\n", node);
		goto out_unmap_dist;
	}

	if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions))
		nr_redist_regions = 1;

	rdist_regs = kcalloc(nr_redist_regions, sizeof(*rdist_regs),
			     GFP_KERNEL);
	if (!rdist_regs) {
		err = -ENOMEM;
		goto out_unmap_dist;
	}

	for (i = 0; i < nr_redist_regions; i++) {
		rdist_regs[i].redist_base = gic_of_iomap(node, 1 + i, "GICR", &res);
		if (IS_ERR(rdist_regs[i].redist_base)) {
			pr_err("%pOF: couldn't map region %d\n", node, i);
			err = -ENODEV;
			goto out_unmap_rdist;
		}
		rdist_regs[i].phys_base = res.start;
	}

	if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
		redist_stride = 0;

	gic_enable_of_quirks(node, gic_quirks, &gic_data);

	err = gic_init_bases(dist_base, rdist_regs, nr_redist_regions,
			     redist_stride, &node->fwnode);
	if (err)
		goto out_unmap_rdist;

	gic_populate_ppi_partitions(node);

	if (static_branch_likely(&supports_deactivate_key))
		gic_of_setup_kvm_info(node);
	return 0;

out_unmap_rdist:
	for (i = 0; i < nr_redist_regions; i++)
		if (rdist_regs[i].redist_base && !IS_ERR(rdist_regs[i].redist_base))
			iounmap(rdist_regs[i].redist_base);
	kfree(rdist_regs);
out_unmap_dist:
	iounmap(dist_base);
	return err;
}

IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);

#ifdef CONFIG_ACPI
static struct
{
	void __iomem *dist_base;
	struct redist_region *redist_regs;
	u32 nr_redist_regions;
	bool single_redist;
	int enabled_rdists;
	u32 maint_irq;
	int maint_irq_mode;
	phys_addr_t vcpu_base;
} acpi_data __initdata;

static void __init
gic_acpi_register_redist(phys_addr_t phys_base, void __iomem *redist_base)
{
	static int count = 0;

	acpi_data.redist_regs[count].phys_base = phys_base;
	acpi_data.redist_regs[count].redist_base = redist_base;
	acpi_data.redist_regs[count].single_redist = acpi_data.single_redist;
	count++;
}

static int __init
gic_acpi_parse_madt_redist(union acpi_subtable_headers *header,
			   const unsigned long end)
{
	struct acpi_madt_generic_redistributor *redist =
		(struct acpi_madt_generic_redistributor *)header;
	void __iomem *redist_base;

	redist_base = ioremap(redist->base_address, redist->length);
	if (!redist_base) {
		pr_err("Couldn't map GICR region @%llx\n", redist->base_address);
		return -ENOMEM;
	}
	gic_request_region(redist->base_address, redist->length, "GICR");

	gic_acpi_register_redist(redist->base_address, redist_base);
	return 0;
}

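/*
 * When redistributors are described per CPU via GICC entries, firmware does
 * not give a region length, so it is derived from the GIC version: a GICv4
 * redistributor exposes four 64K frames (RD and SGI plus the VLPI and
 * reserved frames), while a GICv3 one only has the RD and SGI frames.
 */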
static int __init
gic_acpi_parse_madt_gicc(union acpi_subtable_headers *header,
			 const unsigned long end)
{
	struct acpi_madt_generic_interrupt *gicc =
		(struct acpi_madt_generic_interrupt *)header;
	u32 reg = readl_relaxed(acpi_data.dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
	u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2;
	void __iomem *redist_base;

	/* Skip GICC entries that are not marked as enabled; this is not an error */
	if (!(gicc->flags & ACPI_MADT_ENABLED))
		return 0;

	redist_base = ioremap(gicc->gicr_base_address, size);
	if (!redist_base)
		return -ENOMEM;
	gic_request_region(gicc->gicr_base_address, size, "GICR");

	gic_acpi_register_redist(gicc->gicr_base_address, redist_base);
	return 0;
}

static int __init gic_acpi_collect_gicr_base(void)
{
	acpi_tbl_entry_handler redist_parser;
	enum acpi_madt_type type;

	if (acpi_data.single_redist) {
		type = ACPI_MADT_TYPE_GENERIC_INTERRUPT;
		redist_parser = gic_acpi_parse_madt_gicc;
	} else {
		type = ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR;
		redist_parser = gic_acpi_parse_madt_redist;
	}

	/* Collect redistributor base addresses in GICR entries */
	if (acpi_table_parse_madt(type, redist_parser, 0) > 0)
		return 0;

	pr_info("No valid GICR entries exist\n");
	return -ENODEV;
}

static int __init gic_acpi_match_gicr(union acpi_subtable_headers *header,
				      const unsigned long end)
{
	/* Subtable presence means that redist exists, that's it */
	return 0;
}

static int __init gic_acpi_match_gicc(union acpi_subtable_headers *header,
				      const unsigned long end)
{
	struct acpi_madt_generic_interrupt *gicc =
		(struct acpi_madt_generic_interrupt *)header;

	/*
	 * If the GICC is enabled and has a valid GICR base address, then
	 * the GICR base is presented via GICC.
	 */
	if ((gicc->flags & ACPI_MADT_ENABLED) && gicc->gicr_base_address) {
		acpi_data.enabled_rdists++;
		return 0;
	}

	/*
	 * It is perfectly valid for firmware to pass a disabled GICC entry;
	 * the driver should not treat it as an error, so skip the entry
	 * instead of failing the probe.
	 */
	if (!(gicc->flags & ACPI_MADT_ENABLED))
		return 0;

	return -ENODEV;
}

static int __init gic_acpi_count_gicr_regions(void)
{
	int count;

	/*
	 * Count how many redistributor regions we have. Mixing the two
	 * redistributor descriptions is not allowed: the GICR and GICC
	 * subtables are mutually exclusive.
	 */
	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR,
				      gic_acpi_match_gicr, 0);
	if (count > 0) {
		acpi_data.single_redist = false;
		return count;
	}

	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
				      gic_acpi_match_gicc, 0);
	if (count > 0) {
		acpi_data.single_redist = true;
		count = acpi_data.enabled_rdists;
	}

	return count;
}

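/*
 * ACPI probe-time match callback: accept the MADT distributor entry only if
 * its declared GIC version matches the one this IRQCHIP_ACPI_DECLARE() entry
 * was registered for, and count the redistributor regions while we are at it
 * so that gic_acpi_init() knows how much to allocate.
 */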
static bool __init acpi_validate_gic_table(struct acpi_subtable_header *header,
					   struct acpi_probe_entry *ape)
{
	struct acpi_madt_generic_distributor *dist;
	int count;

	dist = (struct acpi_madt_generic_distributor *)header;
	if (dist->version != ape->driver_data)
		return false;

	/* We need to do that exercise anyway, the sooner the better */
	count = gic_acpi_count_gicr_regions();
	if (count <= 0)
		return false;

	acpi_data.nr_redist_regions = count;
	return true;
}

static int __init gic_acpi_parse_virt_madt_gicc(union acpi_subtable_headers *header,
						const unsigned long end)
{
	struct acpi_madt_generic_interrupt *gicc =
		(struct acpi_madt_generic_interrupt *)header;
	int maint_irq_mode;
	static int first_madt = true;

	/* Skip unusable CPUs */
	if (!(gicc->flags & ACPI_MADT_ENABLED))
		return 0;

	maint_irq_mode = (gicc->flags & ACPI_MADT_VGIC_IRQ_MODE) ?
		ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE;

	if (first_madt) {
		first_madt = false;

		acpi_data.maint_irq = gicc->vgic_interrupt;
		acpi_data.maint_irq_mode = maint_irq_mode;
		acpi_data.vcpu_base = gicc->gicv_base_address;

		return 0;
	}

	/*
	 * The maintenance interrupt and GICV should be the same for every CPU
	 */
	if ((acpi_data.maint_irq != gicc->vgic_interrupt) ||
	    (acpi_data.maint_irq_mode != maint_irq_mode) ||
	    (acpi_data.vcpu_base != gicc->gicv_base_address))
		return -EINVAL;

	return 0;
}

static bool __init gic_acpi_collect_virt_info(void)
{
	int count;

	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
				      gic_acpi_parse_virt_madt_gicc, 0);

	return (count > 0);
}

#define ACPI_GICV3_DIST_MEM_SIZE	(SZ_64K)
#define ACPI_GICV2_VCTRL_MEM_SIZE	(SZ_4K)
#define ACPI_GICV2_VCPU_MEM_SIZE	(SZ_8K)

static void __init gic_acpi_setup_kvm_info(void)
{
	int irq;

	if (!gic_acpi_collect_virt_info()) {
		pr_warn("Unable to get hardware information used for virtualization\n");
		return;
	}

	gic_v3_kvm_info.type = GIC_V3;

	irq = acpi_register_gsi(NULL, acpi_data.maint_irq,
				acpi_data.maint_irq_mode,
				ACPI_ACTIVE_HIGH);
	if (irq <= 0)
		return;

	gic_v3_kvm_info.maint_irq = irq;

	if (acpi_data.vcpu_base) {
		struct resource *vcpu = &gic_v3_kvm_info.vcpu;

		vcpu->flags = IORESOURCE_MEM;
		vcpu->start = acpi_data.vcpu_base;
		vcpu->end = vcpu->start + ACPI_GICV2_VCPU_MEM_SIZE - 1;
	}

	gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
	gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
	vgic_set_kvm_info(&gic_v3_kvm_info);
}

static struct fwnode_handle *gsi_domain_handle;

static struct fwnode_handle *gic_v3_get_gsi_domain_id(u32 gsi)
{
	return gsi_domain_handle;
}

static int __init
gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end)
{
	struct acpi_madt_generic_distributor *dist;
	size_t size;
	int i, err;

	/* Get distributor base address */
	dist = (struct acpi_madt_generic_distributor *)header;
	acpi_data.dist_base = ioremap(dist->base_address,
				      ACPI_GICV3_DIST_MEM_SIZE);
	if (!acpi_data.dist_base) {
		pr_err("Unable to map GICD registers\n");
		return -ENOMEM;
	}
	gic_request_region(dist->base_address, ACPI_GICV3_DIST_MEM_SIZE, "GICD");

	err = gic_validate_dist_version(acpi_data.dist_base);
	if (err) {
		pr_err("No distributor detected at @%p, giving up\n",
		       acpi_data.dist_base);
		goto out_dist_unmap;
	}

	size = sizeof(*acpi_data.redist_regs) * acpi_data.nr_redist_regions;
	acpi_data.redist_regs = kzalloc(size, GFP_KERNEL);
	if (!acpi_data.redist_regs) {
		err = -ENOMEM;
		goto out_dist_unmap;
	}

	err = gic_acpi_collect_gicr_base();
	if (err)
		goto out_redist_unmap;

	gsi_domain_handle = irq_domain_alloc_fwnode(&dist->base_address);
	if (!gsi_domain_handle) {
		err = -ENOMEM;
		goto out_redist_unmap;
	}

	err = gic_init_bases(acpi_data.dist_base, acpi_data.redist_regs,
			     acpi_data.nr_redist_regions, 0, gsi_domain_handle);
	if (err)
		goto out_fwhandle_free;

	acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, gic_v3_get_gsi_domain_id);

	if (static_branch_likely(&supports_deactivate_key))
		gic_acpi_setup_kvm_info();

	return 0;

out_fwhandle_free:
	irq_domain_free_fwnode(gsi_domain_handle);
out_redist_unmap:
	for (i = 0; i < acpi_data.nr_redist_regions; i++)
		if (acpi_data.redist_regs[i].redist_base)
			iounmap(acpi_data.redist_regs[i].redist_base);
	kfree(acpi_data.redist_regs);
out_dist_unmap:
	iounmap(acpi_data.dist_base);
	return err;
}

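/*
 * Register for MADT distributor entries that declare GICv3, GICv4, or no
 * version at all; in the last case acpi_validate_gic_table() still matches,
 * and the real version is probed from GICD_PIDR2 by gic_validate_dist_version().
 */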
IRQCHIP_ACPI_DECLARE(gic_v3, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V3,
		     gic_acpi_init);
IRQCHIP_ACPI_DECLARE(gic_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V4,
		     gic_acpi_init);
IRQCHIP_ACPI_DECLARE(gic_v3_or_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_NONE,
		     gic_acpi_init);
#endif