1 // SPDX-License-Identifier: GPL-2.0-only 2 /* 3 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved. 4 * Author: Marc Zyngier <marc.zyngier@arm.com> 5 */ 6 7 #define pr_fmt(fmt) "GICv3: " fmt 8 9 #include <linux/acpi.h> 10 #include <linux/cpu.h> 11 #include <linux/cpu_pm.h> 12 #include <linux/delay.h> 13 #include <linux/interrupt.h> 14 #include <linux/irqdomain.h> 15 #include <linux/kernel.h> 16 #include <linux/kstrtox.h> 17 #include <linux/of.h> 18 #include <linux/of_address.h> 19 #include <linux/of_irq.h> 20 #include <linux/percpu.h> 21 #include <linux/refcount.h> 22 #include <linux/slab.h> 23 #include <linux/iopoll.h> 24 25 #include <linux/irqchip.h> 26 #include <linux/irqchip/arm-gic-common.h> 27 #include <linux/irqchip/arm-gic-v3.h> 28 #include <linux/irqchip/arm-gic-v3-prio.h> 29 #include <linux/irqchip/irq-partition-percpu.h> 30 #include <linux/bitfield.h> 31 #include <linux/bits.h> 32 #include <linux/arm-smccc.h> 33 34 #include <asm/cputype.h> 35 #include <asm/exception.h> 36 #include <asm/smp_plat.h> 37 #include <asm/virt.h> 38 39 #include "irq-gic-common.h" 40 41 static u8 dist_prio_irq __ro_after_init = GICV3_PRIO_IRQ; 42 static u8 dist_prio_nmi __ro_after_init = GICV3_PRIO_NMI; 43 44 #define FLAGS_WORKAROUND_GICR_WAKER_MSM8996 (1ULL << 0) 45 #define FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539 (1ULL << 1) 46 #define FLAGS_WORKAROUND_ASR_ERRATUM_8601001 (1ULL << 2) 47 48 #define GIC_IRQ_TYPE_PARTITION (GIC_IRQ_TYPE_LPI + 1) 49 50 static struct cpumask broken_rdists __read_mostly __maybe_unused; 51 52 struct redist_region { 53 void __iomem *redist_base; 54 phys_addr_t phys_base; 55 bool single_redist; 56 }; 57 58 struct gic_chip_data { 59 struct fwnode_handle *fwnode; 60 phys_addr_t dist_phys_base; 61 void __iomem *dist_base; 62 struct redist_region *redist_regions; 63 struct rdists rdists; 64 struct irq_domain *domain; 65 u64 redist_stride; 66 u32 nr_redist_regions; 67 u64 flags; 68 bool has_rss; 69 unsigned int ppi_nr; 70 struct partition_desc **ppi_descs; 71 }; 72 73 #define T241_CHIPS_MAX 4 74 static void __iomem *t241_dist_base_alias[T241_CHIPS_MAX] __read_mostly; 75 static DEFINE_STATIC_KEY_FALSE(gic_nvidia_t241_erratum); 76 77 static DEFINE_STATIC_KEY_FALSE(gic_arm64_2941627_erratum); 78 79 static struct gic_chip_data gic_data __read_mostly; 80 static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key); 81 82 #define GIC_ID_NR (1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer)) 83 #define GIC_LINE_NR min(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U) 84 #define GIC_ESPI_NR GICD_TYPER_ESPIS(gic_data.rdists.gicd_typer) 85 86 /* 87 * There are 16 SGIs, though we only actually use 8 in Linux. The other 8 SGIs 88 * are potentially stolen by the secure side. Some code, especially code dealing 89 * with hwirq IDs, is simplified by accounting for all 16. 90 */ 91 #define SGI_NR 16 92 93 /* 94 * The behaviours of RPR and PMR registers differ depending on the value of 95 * SCR_EL3.FIQ, and the behaviour of non-secure priority registers of the 96 * distributor and redistributors depends on whether security is enabled in the 97 * GIC. 98 * 99 * When security is enabled, non-secure priority values from the (re)distributor 100 * are presented to the GIC CPUIF as follow: 101 * (GIC_(R)DIST_PRI[irq] >> 1) | 0x80; 102 * 103 * If SCR_EL3.FIQ == 1, the values written to/read from PMR and RPR at non-secure 104 * EL1 are subject to a similar operation thus matching the priorities presented 105 * from the (re)distributor when security is enabled. 
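 *
 * As a worked example of the write transformation above: a priority of
 * 0xa0 programmed into GIC_(R)DIST_PRI[irq] from the non-secure side is
 * presented to the GIC CPUIF as (0xa0 >> 1) | 0x80 == 0xd0.
 *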
 * When SCR_EL3.FIQ == 0, these values are unchanged by the GIC.
 *
 * see GICv3/GICv4 Architecture Specification (IHI0069D):
 * - section 4.8.1 Non-secure accesses to register fields for Secure interrupt
 *   priorities.
 * - Figure 4-7 Secure read of the priority field for a Non-secure Group 1
 *   interrupt.
 */
static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis);

static u32 gic_get_pribits(void)
{
	u32 pribits;

	pribits = gic_read_ctlr();
	pribits &= ICC_CTLR_EL1_PRI_BITS_MASK;
	pribits >>= ICC_CTLR_EL1_PRI_BITS_SHIFT;
	pribits++;

	return pribits;
}

static bool gic_has_group0(void)
{
	u32 val;
	u32 old_pmr;

	old_pmr = gic_read_pmr();

	/*
	 * Let's find out if Group0 is under control of EL3 or not by
	 * setting the highest possible, non-zero priority in PMR.
	 *
	 * If SCR_EL3.FIQ is set, the priority gets shifted down in
	 * order for the CPU interface to set bit 7, and keep the
	 * actual priority in the non-secure range. In the process, it
	 * loses the least significant bit and the actual priority
	 * becomes 0x80. Reading it back returns 0, indicating that
	 * we don't have access to Group0.
	 */
	gic_write_pmr(BIT(8 - gic_get_pribits()));
	val = gic_read_pmr();

	gic_write_pmr(old_pmr);

	return val != 0;
}

static inline bool gic_dist_security_disabled(void)
{
	return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS;
}

static bool cpus_have_security_disabled __ro_after_init;
static bool cpus_have_group0 __ro_after_init;

static void __init gic_prio_init(void)
{
	bool ds;

	ds = gic_dist_security_disabled();
	if (!ds) {
		u32 val;

		val = readl_relaxed(gic_data.dist_base + GICD_CTLR);
		val |= GICD_CTLR_DS;
		writel_relaxed(val, gic_data.dist_base + GICD_CTLR);

		ds = gic_dist_security_disabled();
		if (ds)
			pr_warn("Broken GIC integration, security disabled\n");
	}

	cpus_have_security_disabled = ds;
	cpus_have_group0 = gic_has_group0();

	/*
	 * How priority values are used by the GIC depends on two things:
	 * the security state of the GIC (controlled by the GICD_CTLR.DS bit)
	 * and whether Group 0 interrupts can be delivered to Linux in the
	 * non-secure world as FIQs (controlled by the SCR_EL3.FIQ bit). These
	 * affect the way priorities are presented in ICC_PMR_EL1 and in the
	 * distributor:
	 *
	 * GICD_CTLR.DS | SCR_EL3.FIQ | ICC_PMR_EL1 | Distributor
	 * -------------------------------------------------------
	 *      1       |      -      |  unchanged  |  unchanged
	 * -------------------------------------------------------
	 *      0       |      1      |  non-secure |  non-secure
	 * -------------------------------------------------------
	 *      0       |      0      |  unchanged  |  non-secure
	 *
	 * In the non-secure view reads and writes are modified:
	 *
	 * - A value written is right-shifted by one and the MSB is set,
	 *   forcing the priority into the non-secure range.
	 *
	 * - A value read is left-shifted by one.
	 *
	 * In the first two cases, where ICC_PMR_EL1 and the interrupt priority
	 * are both either modified or unchanged, we can use the same set of
	 * priorities.
	 *
	 * In the last case, where only the interrupt priorities are modified to
	 * be in the non-secure range, we program the non-secure values into
	 * the distributor to match the PMR values we want.
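	 *
	 * As a worked example (illustrative values): for the GIC to present a
	 * priority of 0xc0 to the CPU interface in this last case, the
	 * distributor has to be programmed with 0x80, since
	 * (0x80 >> 1) | 0x80 == 0xc0. This is what the __gicv3_prio_to_ns()
	 * conversion below is for.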
	 */
	if (cpus_have_group0 && !cpus_have_security_disabled) {
		dist_prio_irq = __gicv3_prio_to_ns(dist_prio_irq);
		dist_prio_nmi = __gicv3_prio_to_ns(dist_prio_nmi);
	}

	pr_info("GICD_CTLR.DS=%d, SCR_EL3.FIQ=%d\n",
		cpus_have_security_disabled,
		!cpus_have_group0);
}

/* rdist_nmi_refs[n] == number of cpus having the rdist interrupt n set as NMI */
static refcount_t *rdist_nmi_refs;

static struct gic_kvm_info gic_v3_kvm_info __initdata;
static DEFINE_PER_CPU(bool, has_rss);

#define MPIDR_RS(mpidr)			(((mpidr) & 0xF0UL) >> 4)
#define gic_data_rdist()		(this_cpu_ptr(gic_data.rdists.rdist))
#define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
#define gic_data_rdist_sgi_base()	(gic_data_rdist_rd_base() + SZ_64K)

/* Our default, arbitrary priority value. Linux only uses one anyway. */
#define DEFAULT_PMR_VALUE	0xf0

enum gic_intid_range {
	SGI_RANGE,
	PPI_RANGE,
	SPI_RANGE,
	EPPI_RANGE,
	ESPI_RANGE,
	LPI_RANGE,
	__INVALID_RANGE__
};

static enum gic_intid_range __get_intid_range(irq_hw_number_t hwirq)
{
	switch (hwirq) {
	case 0 ... 15:
		return SGI_RANGE;
	case 16 ... 31:
		return PPI_RANGE;
	case 32 ... 1019:
		return SPI_RANGE;
	case EPPI_BASE_INTID ... (EPPI_BASE_INTID + 63):
		return EPPI_RANGE;
	case ESPI_BASE_INTID ... (ESPI_BASE_INTID + 1023):
		return ESPI_RANGE;
	case 8192 ... GENMASK(23, 0):
		return LPI_RANGE;
	default:
		return __INVALID_RANGE__;
	}
}

static enum gic_intid_range get_intid_range(struct irq_data *d)
{
	return __get_intid_range(d->hwirq);
}

static inline bool gic_irq_in_rdist(struct irq_data *d)
{
	switch (get_intid_range(d)) {
	case SGI_RANGE:
	case PPI_RANGE:
	case EPPI_RANGE:
		return true;
	default:
		return false;
	}
}

static inline void __iomem *gic_dist_base_alias(struct irq_data *d)
{
	if (static_branch_unlikely(&gic_nvidia_t241_erratum)) {
		irq_hw_number_t hwirq = irqd_to_hwirq(d);
		u32 chip;

		/*
		 * For the erratum T241-FABRIC-4, read accesses to GICD_In{E}
		 * registers are directed to the chip that owns the SPI. The
		 * alias region can also be used for writes to the GICD_In{E}
		 * registers, except for GICD_ICENABLERn. Each chip has support
		 * for 320 {E}SPIs.
Mappings for all 4 chips: 295 * Chip0 = 32-351 296 * Chip1 = 352-671 297 * Chip2 = 672-991 298 * Chip3 = 4096-4415 299 */ 300 switch (__get_intid_range(hwirq)) { 301 case SPI_RANGE: 302 chip = (hwirq - 32) / 320; 303 break; 304 case ESPI_RANGE: 305 chip = 3; 306 break; 307 default: 308 unreachable(); 309 } 310 return t241_dist_base_alias[chip]; 311 } 312 313 return gic_data.dist_base; 314 } 315 316 static inline void __iomem *gic_dist_base(struct irq_data *d) 317 { 318 switch (get_intid_range(d)) { 319 case SGI_RANGE: 320 case PPI_RANGE: 321 case EPPI_RANGE: 322 /* SGI+PPI -> SGI_base for this CPU */ 323 return gic_data_rdist_sgi_base(); 324 325 case SPI_RANGE: 326 case ESPI_RANGE: 327 /* SPI -> dist_base */ 328 return gic_data.dist_base; 329 330 default: 331 return NULL; 332 } 333 } 334 335 static void gic_do_wait_for_rwp(void __iomem *base, u32 bit) 336 { 337 u32 val; 338 int ret; 339 340 ret = readl_relaxed_poll_timeout_atomic(base + GICD_CTLR, val, !(val & bit), 341 1, USEC_PER_SEC); 342 if (ret == -ETIMEDOUT) 343 pr_err_ratelimited("RWP timeout, gone fishing\n"); 344 } 345 346 /* Wait for completion of a distributor change */ 347 static void gic_dist_wait_for_rwp(void) 348 { 349 gic_do_wait_for_rwp(gic_data.dist_base, GICD_CTLR_RWP); 350 } 351 352 /* Wait for completion of a redistributor change */ 353 static void gic_redist_wait_for_rwp(void) 354 { 355 gic_do_wait_for_rwp(gic_data_rdist_rd_base(), GICR_CTLR_RWP); 356 } 357 358 static void gic_enable_redist(bool enable) 359 { 360 void __iomem *rbase; 361 u32 val; 362 int ret; 363 364 if (gic_data.flags & FLAGS_WORKAROUND_GICR_WAKER_MSM8996) 365 return; 366 367 rbase = gic_data_rdist_rd_base(); 368 369 val = readl_relaxed(rbase + GICR_WAKER); 370 if (enable) 371 /* Wake up this CPU redistributor */ 372 val &= ~GICR_WAKER_ProcessorSleep; 373 else 374 val |= GICR_WAKER_ProcessorSleep; 375 writel_relaxed(val, rbase + GICR_WAKER); 376 377 if (!enable) { /* Check that GICR_WAKER is writeable */ 378 val = readl_relaxed(rbase + GICR_WAKER); 379 if (!(val & GICR_WAKER_ProcessorSleep)) 380 return; /* No PM support in this redistributor */ 381 } 382 383 ret = readl_relaxed_poll_timeout_atomic(rbase + GICR_WAKER, val, 384 enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep), 385 1, USEC_PER_SEC); 386 if (ret == -ETIMEDOUT) { 387 pr_err_ratelimited("redistributor failed to %s...\n", 388 enable ? "wakeup" : "sleep"); 389 } 390 } 391 392 /* 393 * Routines to disable, enable, EOI and route interrupts 394 */ 395 static u32 convert_offset_index(struct irq_data *d, u32 offset, u32 *index) 396 { 397 switch (get_intid_range(d)) { 398 case SGI_RANGE: 399 case PPI_RANGE: 400 case SPI_RANGE: 401 *index = d->hwirq; 402 return offset; 403 case EPPI_RANGE: 404 /* 405 * Contrary to the ESPI range, the EPPI range is contiguous 406 * to the PPI range in the registers, so let's adjust the 407 * displacement accordingly. Consistency is overrated. 
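		 *
		 * As an illustrative example: the first EPPI, with hwirq ==
		 * EPPI_BASE_INTID, yields *index == 32, landing directly
		 * after the 16 SGIs and 16 PPIs in whatever register bank
		 * is being accessed.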
408 */ 409 *index = d->hwirq - EPPI_BASE_INTID + 32; 410 return offset; 411 case ESPI_RANGE: 412 *index = d->hwirq - ESPI_BASE_INTID; 413 switch (offset) { 414 case GICD_ISENABLER: 415 return GICD_ISENABLERnE; 416 case GICD_ICENABLER: 417 return GICD_ICENABLERnE; 418 case GICD_ISPENDR: 419 return GICD_ISPENDRnE; 420 case GICD_ICPENDR: 421 return GICD_ICPENDRnE; 422 case GICD_ISACTIVER: 423 return GICD_ISACTIVERnE; 424 case GICD_ICACTIVER: 425 return GICD_ICACTIVERnE; 426 case GICD_IPRIORITYR: 427 return GICD_IPRIORITYRnE; 428 case GICD_ICFGR: 429 return GICD_ICFGRnE; 430 case GICD_IROUTER: 431 return GICD_IROUTERnE; 432 default: 433 break; 434 } 435 break; 436 default: 437 break; 438 } 439 440 WARN_ON(1); 441 *index = d->hwirq; 442 return offset; 443 } 444 445 static int gic_peek_irq(struct irq_data *d, u32 offset) 446 { 447 void __iomem *base; 448 u32 index, mask; 449 450 offset = convert_offset_index(d, offset, &index); 451 mask = 1 << (index % 32); 452 453 if (gic_irq_in_rdist(d)) 454 base = gic_data_rdist_sgi_base(); 455 else 456 base = gic_dist_base_alias(d); 457 458 return !!(readl_relaxed(base + offset + (index / 32) * 4) & mask); 459 } 460 461 static void gic_poke_irq(struct irq_data *d, u32 offset) 462 { 463 void __iomem *base; 464 u32 index, mask; 465 466 offset = convert_offset_index(d, offset, &index); 467 mask = 1 << (index % 32); 468 469 if (gic_irq_in_rdist(d)) 470 base = gic_data_rdist_sgi_base(); 471 else 472 base = gic_data.dist_base; 473 474 writel_relaxed(mask, base + offset + (index / 32) * 4); 475 } 476 477 static void gic_mask_irq(struct irq_data *d) 478 { 479 gic_poke_irq(d, GICD_ICENABLER); 480 if (gic_irq_in_rdist(d)) 481 gic_redist_wait_for_rwp(); 482 else 483 gic_dist_wait_for_rwp(); 484 } 485 486 static void gic_eoimode1_mask_irq(struct irq_data *d) 487 { 488 gic_mask_irq(d); 489 /* 490 * When masking a forwarded interrupt, make sure it is 491 * deactivated as well. 492 * 493 * This ensures that an interrupt that is getting 494 * disabled/masked will not get "stuck", because there is 495 * noone to deactivate it (guest is being terminated). 496 */ 497 if (irqd_is_forwarded_to_vcpu(d)) 498 gic_poke_irq(d, GICD_ICACTIVER); 499 } 500 501 static void gic_unmask_irq(struct irq_data *d) 502 { 503 gic_poke_irq(d, GICD_ISENABLER); 504 } 505 506 static inline bool gic_supports_nmi(void) 507 { 508 return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && 509 static_branch_likely(&supports_pseudo_nmis); 510 } 511 512 static int gic_irq_set_irqchip_state(struct irq_data *d, 513 enum irqchip_irq_state which, bool val) 514 { 515 u32 reg; 516 517 if (d->hwirq >= 8192) /* SGI/PPI/SPI only */ 518 return -EINVAL; 519 520 switch (which) { 521 case IRQCHIP_STATE_PENDING: 522 reg = val ? GICD_ISPENDR : GICD_ICPENDR; 523 break; 524 525 case IRQCHIP_STATE_ACTIVE: 526 reg = val ? GICD_ISACTIVER : GICD_ICACTIVER; 527 break; 528 529 case IRQCHIP_STATE_MASKED: 530 if (val) { 531 gic_mask_irq(d); 532 return 0; 533 } 534 reg = GICD_ISENABLER; 535 break; 536 537 default: 538 return -EINVAL; 539 } 540 541 gic_poke_irq(d, reg); 542 543 /* 544 * Force read-back to guarantee that the active state has taken 545 * effect, and won't race with a guest-driven deactivation. 
546 */ 547 if (reg == GICD_ISACTIVER) 548 gic_peek_irq(d, reg); 549 return 0; 550 } 551 552 static int gic_irq_get_irqchip_state(struct irq_data *d, 553 enum irqchip_irq_state which, bool *val) 554 { 555 if (d->hwirq >= 8192) /* PPI/SPI only */ 556 return -EINVAL; 557 558 switch (which) { 559 case IRQCHIP_STATE_PENDING: 560 *val = gic_peek_irq(d, GICD_ISPENDR); 561 break; 562 563 case IRQCHIP_STATE_ACTIVE: 564 *val = gic_peek_irq(d, GICD_ISACTIVER); 565 break; 566 567 case IRQCHIP_STATE_MASKED: 568 *val = !gic_peek_irq(d, GICD_ISENABLER); 569 break; 570 571 default: 572 return -EINVAL; 573 } 574 575 return 0; 576 } 577 578 static void gic_irq_set_prio(struct irq_data *d, u8 prio) 579 { 580 void __iomem *base = gic_dist_base(d); 581 u32 offset, index; 582 583 offset = convert_offset_index(d, GICD_IPRIORITYR, &index); 584 585 writeb_relaxed(prio, base + offset + index); 586 } 587 588 static u32 __gic_get_ppi_index(irq_hw_number_t hwirq) 589 { 590 switch (__get_intid_range(hwirq)) { 591 case PPI_RANGE: 592 return hwirq - 16; 593 case EPPI_RANGE: 594 return hwirq - EPPI_BASE_INTID + 16; 595 default: 596 unreachable(); 597 } 598 } 599 600 static u32 __gic_get_rdist_index(irq_hw_number_t hwirq) 601 { 602 switch (__get_intid_range(hwirq)) { 603 case SGI_RANGE: 604 case PPI_RANGE: 605 return hwirq; 606 case EPPI_RANGE: 607 return hwirq - EPPI_BASE_INTID + 32; 608 default: 609 unreachable(); 610 } 611 } 612 613 static u32 gic_get_rdist_index(struct irq_data *d) 614 { 615 return __gic_get_rdist_index(d->hwirq); 616 } 617 618 static int gic_irq_nmi_setup(struct irq_data *d) 619 { 620 struct irq_desc *desc = irq_to_desc(d->irq); 621 622 if (!gic_supports_nmi()) 623 return -EINVAL; 624 625 if (gic_peek_irq(d, GICD_ISENABLER)) { 626 pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq); 627 return -EINVAL; 628 } 629 630 /* 631 * A secondary irq_chip should be in charge of LPI request, 632 * it should not be possible to get there 633 */ 634 if (WARN_ON(irqd_to_hwirq(d) >= 8192)) 635 return -EINVAL; 636 637 /* desc lock should already be held */ 638 if (gic_irq_in_rdist(d)) { 639 u32 idx = gic_get_rdist_index(d); 640 641 /* 642 * Setting up a percpu interrupt as NMI, only switch handler 643 * for first NMI 644 */ 645 if (!refcount_inc_not_zero(&rdist_nmi_refs[idx])) { 646 refcount_set(&rdist_nmi_refs[idx], 1); 647 desc->handle_irq = handle_percpu_devid_fasteoi_nmi; 648 } 649 } else { 650 desc->handle_irq = handle_fasteoi_nmi; 651 } 652 653 gic_irq_set_prio(d, dist_prio_nmi); 654 655 return 0; 656 } 657 658 static void gic_irq_nmi_teardown(struct irq_data *d) 659 { 660 struct irq_desc *desc = irq_to_desc(d->irq); 661 662 if (WARN_ON(!gic_supports_nmi())) 663 return; 664 665 if (gic_peek_irq(d, GICD_ISENABLER)) { 666 pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq); 667 return; 668 } 669 670 /* 671 * A secondary irq_chip should be in charge of LPI request, 672 * it should not be possible to get there 673 */ 674 if (WARN_ON(irqd_to_hwirq(d) >= 8192)) 675 return; 676 677 /* desc lock should already be held */ 678 if (gic_irq_in_rdist(d)) { 679 u32 idx = gic_get_rdist_index(d); 680 681 /* Tearing down NMI, only switch handler for last NMI */ 682 if (refcount_dec_and_test(&rdist_nmi_refs[idx])) 683 desc->handle_irq = handle_percpu_devid_irq; 684 } else { 685 desc->handle_irq = handle_fasteoi_irq; 686 } 687 688 gic_irq_set_prio(d, dist_prio_irq); 689 } 690 691 static bool gic_arm64_erratum_2941627_needed(struct irq_data *d) 692 { 693 enum gic_intid_range range; 694 695 if 
(!static_branch_unlikely(&gic_arm64_2941627_erratum)) 696 return false; 697 698 range = get_intid_range(d); 699 700 /* 701 * The workaround is needed if the IRQ is an SPI and 702 * the target cpu is different from the one we are 703 * executing on. 704 */ 705 return (range == SPI_RANGE || range == ESPI_RANGE) && 706 !cpumask_test_cpu(raw_smp_processor_id(), 707 irq_data_get_effective_affinity_mask(d)); 708 } 709 710 static void gic_eoi_irq(struct irq_data *d) 711 { 712 write_gicreg(irqd_to_hwirq(d), ICC_EOIR1_EL1); 713 isb(); 714 715 if (gic_arm64_erratum_2941627_needed(d)) { 716 /* 717 * Make sure the GIC stream deactivate packet 718 * issued by ICC_EOIR1_EL1 has completed before 719 * deactivating through GICD_IACTIVER. 720 */ 721 dsb(sy); 722 gic_poke_irq(d, GICD_ICACTIVER); 723 } 724 } 725 726 static void gic_eoimode1_eoi_irq(struct irq_data *d) 727 { 728 /* 729 * No need to deactivate an LPI, or an interrupt that 730 * is is getting forwarded to a vcpu. 731 */ 732 if (irqd_to_hwirq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d)) 733 return; 734 735 if (!gic_arm64_erratum_2941627_needed(d)) 736 gic_write_dir(irqd_to_hwirq(d)); 737 else 738 gic_poke_irq(d, GICD_ICACTIVER); 739 } 740 741 static int gic_set_type(struct irq_data *d, unsigned int type) 742 { 743 irq_hw_number_t irq = irqd_to_hwirq(d); 744 enum gic_intid_range range; 745 void __iomem *base; 746 u32 offset, index; 747 int ret; 748 749 range = get_intid_range(d); 750 751 /* Interrupt configuration for SGIs can't be changed */ 752 if (range == SGI_RANGE) 753 return type != IRQ_TYPE_EDGE_RISING ? -EINVAL : 0; 754 755 /* SPIs have restrictions on the supported types */ 756 if ((range == SPI_RANGE || range == ESPI_RANGE) && 757 type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING) 758 return -EINVAL; 759 760 if (gic_irq_in_rdist(d)) 761 base = gic_data_rdist_sgi_base(); 762 else 763 base = gic_dist_base_alias(d); 764 765 offset = convert_offset_index(d, GICD_ICFGR, &index); 766 767 ret = gic_configure_irq(index, type, base + offset); 768 if (ret && (range == PPI_RANGE || range == EPPI_RANGE)) { 769 /* Misconfigured PPIs are usually not fatal */ 770 pr_warn("GIC: PPI INTID%ld is secure or misconfigured\n", irq); 771 ret = 0; 772 } 773 774 return ret; 775 } 776 777 static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu) 778 { 779 if (get_intid_range(d) == SGI_RANGE) 780 return -EINVAL; 781 782 if (vcpu) 783 irqd_set_forwarded_to_vcpu(d); 784 else 785 irqd_clr_forwarded_to_vcpu(d); 786 return 0; 787 } 788 789 static u64 gic_cpu_to_affinity(int cpu) 790 { 791 u64 mpidr = cpu_logical_map(cpu); 792 u64 aff; 793 794 /* ASR8601 needs to have its affinities shifted down... */ 795 if (unlikely(gic_data.flags & FLAGS_WORKAROUND_ASR_ERRATUM_8601001)) 796 mpidr = (MPIDR_AFFINITY_LEVEL(mpidr, 1) | 797 (MPIDR_AFFINITY_LEVEL(mpidr, 2) << 8)); 798 799 aff = ((u64)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 | 800 MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 | 801 MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 | 802 MPIDR_AFFINITY_LEVEL(mpidr, 0)); 803 804 return aff; 805 } 806 807 static void gic_deactivate_unhandled(u32 irqnr) 808 { 809 if (static_branch_likely(&supports_deactivate_key)) { 810 if (irqnr < 8192) 811 gic_write_dir(irqnr); 812 } else { 813 write_gicreg(irqnr, ICC_EOIR1_EL1); 814 isb(); 815 } 816 } 817 818 /* 819 * Follow a read of the IAR with any HW maintenance that needs to happen prior 820 * to invoking the relevant IRQ handler. 
We must do two things: 821 * 822 * (1) Ensure instruction ordering between a read of IAR and subsequent 823 * instructions in the IRQ handler using an ISB. 824 * 825 * It is possible for the IAR to report an IRQ which was signalled *after* 826 * the CPU took an IRQ exception as multiple interrupts can race to be 827 * recognized by the GIC, earlier interrupts could be withdrawn, and/or 828 * later interrupts could be prioritized by the GIC. 829 * 830 * For devices which are tightly coupled to the CPU, such as PMUs, a 831 * context synchronization event is necessary to ensure that system 832 * register state is not stale, as these may have been indirectly written 833 * *after* exception entry. 834 * 835 * (2) Execute an interrupt priority drop when EOI mode 1 is in use. 836 */ 837 static inline void gic_complete_ack(u32 irqnr) 838 { 839 if (static_branch_likely(&supports_deactivate_key)) 840 write_gicreg(irqnr, ICC_EOIR1_EL1); 841 842 isb(); 843 } 844 845 static bool gic_rpr_is_nmi_prio(void) 846 { 847 if (!gic_supports_nmi()) 848 return false; 849 850 return unlikely(gic_read_rpr() == GICV3_PRIO_NMI); 851 } 852 853 static bool gic_irqnr_is_special(u32 irqnr) 854 { 855 return irqnr >= 1020 && irqnr <= 1023; 856 } 857 858 static void __gic_handle_irq(u32 irqnr, struct pt_regs *regs) 859 { 860 if (gic_irqnr_is_special(irqnr)) 861 return; 862 863 gic_complete_ack(irqnr); 864 865 if (generic_handle_domain_irq(gic_data.domain, irqnr)) { 866 WARN_ONCE(true, "Unexpected interrupt (irqnr %u)\n", irqnr); 867 gic_deactivate_unhandled(irqnr); 868 } 869 } 870 871 static void __gic_handle_nmi(u32 irqnr, struct pt_regs *regs) 872 { 873 if (gic_irqnr_is_special(irqnr)) 874 return; 875 876 gic_complete_ack(irqnr); 877 878 if (generic_handle_domain_nmi(gic_data.domain, irqnr)) { 879 WARN_ONCE(true, "Unexpected pseudo-NMI (irqnr %u)\n", irqnr); 880 gic_deactivate_unhandled(irqnr); 881 } 882 } 883 884 /* 885 * An exception has been taken from a context with IRQs enabled, and this could 886 * be an IRQ or an NMI. 887 * 888 * The entry code called us with DAIF.IF set to keep NMIs masked. We must clear 889 * DAIF.IF (and update ICC_PMR_EL1 to mask regular IRQs) prior to returning, 890 * after handling any NMI but before handling any IRQ. 891 * 892 * The entry code has performed IRQ entry, and if an NMI is detected we must 893 * perform NMI entry/exit around invoking the handler. 894 */ 895 static void __gic_handle_irq_from_irqson(struct pt_regs *regs) 896 { 897 bool is_nmi; 898 u32 irqnr; 899 900 irqnr = gic_read_iar(); 901 902 is_nmi = gic_rpr_is_nmi_prio(); 903 904 if (is_nmi) { 905 nmi_enter(); 906 __gic_handle_nmi(irqnr, regs); 907 nmi_exit(); 908 } 909 910 if (gic_prio_masking_enabled()) { 911 gic_pmr_mask_irqs(); 912 gic_arch_enable_irqs(); 913 } 914 915 if (!is_nmi) 916 __gic_handle_irq(irqnr, regs); 917 } 918 919 /* 920 * An exception has been taken from a context with IRQs disabled, which can only 921 * be an NMI. 922 * 923 * The entry code called us with DAIF.IF set to keep NMIs masked. We must leave 924 * DAIF.IF (and ICC_PMR_EL1) unchanged. 925 * 926 * The entry code has performed NMI entry. 927 */ 928 static void __gic_handle_irq_from_irqsoff(struct pt_regs *regs) 929 { 930 u64 pmr; 931 u32 irqnr; 932 933 /* 934 * We were in a context with IRQs disabled. However, the 935 * entry code has set PMR to a value that allows any 936 * interrupt to be acknowledged, and not just NMIs. 
This can 937 * lead to surprising effects if the NMI has been retired in 938 * the meantime, and that there is an IRQ pending. The IRQ 939 * would then be taken in NMI context, something that nobody 940 * wants to debug twice. 941 * 942 * Until we sort this, drop PMR again to a level that will 943 * actually only allow NMIs before reading IAR, and then 944 * restore it to what it was. 945 */ 946 pmr = gic_read_pmr(); 947 gic_pmr_mask_irqs(); 948 isb(); 949 irqnr = gic_read_iar(); 950 gic_write_pmr(pmr); 951 952 __gic_handle_nmi(irqnr, regs); 953 } 954 955 static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs) 956 { 957 if (unlikely(gic_supports_nmi() && !interrupts_enabled(regs))) 958 __gic_handle_irq_from_irqsoff(regs); 959 else 960 __gic_handle_irq_from_irqson(regs); 961 } 962 963 static void __init gic_dist_init(void) 964 { 965 unsigned int i; 966 u64 affinity; 967 void __iomem *base = gic_data.dist_base; 968 u32 val; 969 970 /* Disable the distributor */ 971 writel_relaxed(0, base + GICD_CTLR); 972 gic_dist_wait_for_rwp(); 973 974 /* 975 * Configure SPIs as non-secure Group-1. This will only matter 976 * if the GIC only has a single security state. This will not 977 * do the right thing if the kernel is running in secure mode, 978 * but that's not the intended use case anyway. 979 */ 980 for (i = 32; i < GIC_LINE_NR; i += 32) 981 writel_relaxed(~0, base + GICD_IGROUPR + i / 8); 982 983 /* Extended SPI range, not handled by the GICv2/GICv3 common code */ 984 for (i = 0; i < GIC_ESPI_NR; i += 32) { 985 writel_relaxed(~0U, base + GICD_ICENABLERnE + i / 8); 986 writel_relaxed(~0U, base + GICD_ICACTIVERnE + i / 8); 987 } 988 989 for (i = 0; i < GIC_ESPI_NR; i += 32) 990 writel_relaxed(~0U, base + GICD_IGROUPRnE + i / 8); 991 992 for (i = 0; i < GIC_ESPI_NR; i += 16) 993 writel_relaxed(0, base + GICD_ICFGRnE + i / 4); 994 995 for (i = 0; i < GIC_ESPI_NR; i += 4) 996 writel_relaxed(REPEAT_BYTE_U32(dist_prio_irq), 997 base + GICD_IPRIORITYRnE + i); 998 999 /* Now do the common stuff */ 1000 gic_dist_config(base, GIC_LINE_NR, dist_prio_irq); 1001 1002 val = GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1; 1003 if (gic_data.rdists.gicd_typer2 & GICD_TYPER2_nASSGIcap) { 1004 pr_info("Enabling SGIs without active state\n"); 1005 val |= GICD_CTLR_nASSGIreq; 1006 } 1007 1008 /* Enable distributor with ARE, Group1, and wait for it to drain */ 1009 writel_relaxed(val, base + GICD_CTLR); 1010 gic_dist_wait_for_rwp(); 1011 1012 /* 1013 * Set all global interrupts to the boot CPU only. ARE must be 1014 * enabled. 1015 */ 1016 affinity = gic_cpu_to_affinity(smp_processor_id()); 1017 for (i = 32; i < GIC_LINE_NR; i++) 1018 gic_write_irouter(affinity, base + GICD_IROUTER + i * 8); 1019 1020 for (i = 0; i < GIC_ESPI_NR; i++) 1021 gic_write_irouter(affinity, base + GICD_IROUTERnE + i * 8); 1022 } 1023 1024 static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *)) 1025 { 1026 int ret = -ENODEV; 1027 int i; 1028 1029 for (i = 0; i < gic_data.nr_redist_regions; i++) { 1030 void __iomem *ptr = gic_data.redist_regions[i].redist_base; 1031 u64 typer; 1032 u32 reg; 1033 1034 reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK; 1035 if (reg != GIC_PIDR2_ARCH_GICv3 && 1036 reg != GIC_PIDR2_ARCH_GICv4) { /* We're in trouble... 
*/ 1037 pr_warn("No redistributor present @%p\n", ptr); 1038 break; 1039 } 1040 1041 do { 1042 typer = gic_read_typer(ptr + GICR_TYPER); 1043 ret = fn(gic_data.redist_regions + i, ptr); 1044 if (!ret) 1045 return 0; 1046 1047 if (gic_data.redist_regions[i].single_redist) 1048 break; 1049 1050 if (gic_data.redist_stride) { 1051 ptr += gic_data.redist_stride; 1052 } else { 1053 ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */ 1054 if (typer & GICR_TYPER_VLPIS) 1055 ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */ 1056 } 1057 } while (!(typer & GICR_TYPER_LAST)); 1058 } 1059 1060 return ret ? -ENODEV : 0; 1061 } 1062 1063 static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr) 1064 { 1065 unsigned long mpidr; 1066 u64 typer; 1067 u32 aff; 1068 1069 /* 1070 * Convert affinity to a 32bit value that can be matched to 1071 * GICR_TYPER bits [63:32]. 1072 */ 1073 mpidr = gic_cpu_to_affinity(smp_processor_id()); 1074 1075 aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 | 1076 MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 | 1077 MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 | 1078 MPIDR_AFFINITY_LEVEL(mpidr, 0)); 1079 1080 typer = gic_read_typer(ptr + GICR_TYPER); 1081 if ((typer >> 32) == aff) { 1082 u64 offset = ptr - region->redist_base; 1083 raw_spin_lock_init(&gic_data_rdist()->rd_lock); 1084 gic_data_rdist_rd_base() = ptr; 1085 gic_data_rdist()->phys_base = region->phys_base + offset; 1086 1087 pr_info("CPU%d: found redistributor %lx region %d:%pa\n", 1088 smp_processor_id(), mpidr, 1089 (int)(region - gic_data.redist_regions), 1090 &gic_data_rdist()->phys_base); 1091 return 0; 1092 } 1093 1094 /* Try next one */ 1095 return 1; 1096 } 1097 1098 static int gic_populate_rdist(void) 1099 { 1100 if (gic_iterate_rdists(__gic_populate_rdist) == 0) 1101 return 0; 1102 1103 /* We couldn't even deal with ourselves... */ 1104 WARN(true, "CPU%d: mpidr %lx has no re-distributor!\n", 1105 smp_processor_id(), 1106 (unsigned long)cpu_logical_map(smp_processor_id())); 1107 return -ENODEV; 1108 } 1109 1110 static int __gic_update_rdist_properties(struct redist_region *region, 1111 void __iomem *ptr) 1112 { 1113 u64 typer = gic_read_typer(ptr + GICR_TYPER); 1114 u32 ctlr = readl_relaxed(ptr + GICR_CTLR); 1115 1116 /* Boot-time cleanup */ 1117 if ((typer & GICR_TYPER_VLPIS) && (typer & GICR_TYPER_RVPEID)) { 1118 u64 val; 1119 1120 /* Deactivate any present vPE */ 1121 val = gicr_read_vpendbaser(ptr + SZ_128K + GICR_VPENDBASER); 1122 if (val & GICR_VPENDBASER_Valid) 1123 gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast, 1124 ptr + SZ_128K + GICR_VPENDBASER); 1125 1126 /* Mark the VPE table as invalid */ 1127 val = gicr_read_vpropbaser(ptr + SZ_128K + GICR_VPROPBASER); 1128 val &= ~GICR_VPROPBASER_4_1_VALID; 1129 gicr_write_vpropbaser(val, ptr + SZ_128K + GICR_VPROPBASER); 1130 } 1131 1132 gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS); 1133 1134 /* 1135 * TYPER.RVPEID implies some form of DirectLPI, no matter what the 1136 * doc says... :-/ And CTLR.IR implies another subset of DirectLPI 1137 * that the ITS driver can make use of for LPIs (and not VLPIs). 1138 * 1139 * These are 3 different ways to express the same thing, depending 1140 * on the revision of the architecture and its relaxations over 1141 * time. Just group them under the 'direct_lpi' banner. 
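	 *
	 * Concretely, the three indications OR-ed together below are
	 * GICR_TYPER.DirectLPI, GICR_CTLR.IR and GICR_TYPER.RVPEID.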
	 */
	gic_data.rdists.has_rvpeid &= !!(typer & GICR_TYPER_RVPEID);
	gic_data.rdists.has_direct_lpi &= (!!(typer & GICR_TYPER_DirectLPIS) |
					   !!(ctlr & GICR_CTLR_IR) |
					   gic_data.rdists.has_rvpeid);
	gic_data.rdists.has_vpend_valid_dirty &= !!(typer & GICR_TYPER_DIRTY);

	/* Detect nonsensical configurations */
	if (WARN_ON_ONCE(gic_data.rdists.has_rvpeid && !gic_data.rdists.has_vlpis)) {
		gic_data.rdists.has_direct_lpi = false;
		gic_data.rdists.has_vlpis = false;
		gic_data.rdists.has_rvpeid = false;
	}

	gic_data.ppi_nr = min(GICR_TYPER_NR_PPIS(typer), gic_data.ppi_nr);

	return 1;
}

static void gic_update_rdist_properties(void)
{
	gic_data.ppi_nr = UINT_MAX;
	gic_iterate_rdists(__gic_update_rdist_properties);
	if (WARN_ON(gic_data.ppi_nr == UINT_MAX))
		gic_data.ppi_nr = 0;
	pr_info("GICv3 features: %d PPIs%s%s\n",
		gic_data.ppi_nr,
		gic_data.has_rss ? ", RSS" : "",
		gic_data.rdists.has_direct_lpi ? ", DirectLPI" : "");

	if (gic_data.rdists.has_vlpis)
		pr_info("GICv4 features: %s%s%s\n",
			gic_data.rdists.has_direct_lpi ? "DirectLPI " : "",
			gic_data.rdists.has_rvpeid ? "RVPEID " : "",
			gic_data.rdists.has_vpend_valid_dirty ? "Valid+Dirty " : "");
}

static void gic_cpu_sys_reg_enable(void)
{
	/*
	 * Need to check that the SRE bit has actually been set. If
	 * not, it means that SRE is disabled at EL2. We're going to
	 * die painfully, and there is nothing we can do about it.
	 *
	 * Kindly inform the luser.
	 */
	if (!gic_enable_sre())
		pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");
}

static void gic_cpu_sys_reg_init(void)
{
	int i, cpu = smp_processor_id();
	u64 mpidr = gic_cpu_to_affinity(cpu);
	u64 need_rss = MPIDR_RS(mpidr);
	bool group0;
	u32 pribits;

	pribits = gic_get_pribits();

	group0 = gic_has_group0();

	/* Set priority mask register */
	if (!gic_prio_masking_enabled()) {
		write_gicreg(DEFAULT_PMR_VALUE, ICC_PMR_EL1);
	} else if (gic_supports_nmi()) {
		/*
		 * Check that all CPUs use the same priority space.
		 *
		 * If there's a mismatch with the boot CPU, the system is
		 * likely to die as interrupt masking will not work properly on
		 * all CPUs.
		 */
		WARN_ON(group0 != cpus_have_group0);
		WARN_ON(gic_dist_security_disabled() != cpus_have_security_disabled);
	}

	/*
	 * Some firmwares hand over to the kernel with the BPR changed from
	 * its reset value (and with a value large enough to prevent
	 * any pre-emptive interrupts from working at all). Writing a zero
	 * to BPR restores its reset value.
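	 *
	 * (A write below the minimum BPR supported by the CPU interface is
	 * clamped up to that minimum, so writing zero is always safe here.)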
	 */
	gic_write_bpr1(0);

	if (static_branch_likely(&supports_deactivate_key)) {
		/* EOI drops priority only (mode 1) */
		gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop);
	} else {
		/* EOI deactivates interrupt too (mode 0) */
		gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);
	}

	/* Always whack Group0 before Group1 */
	if (group0) {
		switch (pribits) {
		case 8:
		case 7:
			write_gicreg(0, ICC_AP0R3_EL1);
			write_gicreg(0, ICC_AP0R2_EL1);
			fallthrough;
		case 6:
			write_gicreg(0, ICC_AP0R1_EL1);
			fallthrough;
		case 5:
		case 4:
			write_gicreg(0, ICC_AP0R0_EL1);
		}

		isb();
	}

	switch (pribits) {
	case 8:
	case 7:
		write_gicreg(0, ICC_AP1R3_EL1);
		write_gicreg(0, ICC_AP1R2_EL1);
		fallthrough;
	case 6:
		write_gicreg(0, ICC_AP1R1_EL1);
		fallthrough;
	case 5:
	case 4:
		write_gicreg(0, ICC_AP1R0_EL1);
	}

	isb();

	/* ... and let's hit the road... */
	gic_write_grpen1(1);

	/* Keep the RSS capability status in per_cpu variable */
	per_cpu(has_rss, cpu) = !!(gic_read_ctlr() & ICC_CTLR_EL1_RSS);

	/* Check that all CPUs are capable of sending SGIs to other CPUs */
	for_each_online_cpu(i) {
		bool have_rss = per_cpu(has_rss, i) && per_cpu(has_rss, cpu);

		need_rss |= MPIDR_RS(gic_cpu_to_affinity(i));
		if (need_rss && (!have_rss))
			pr_crit("CPU%d (%lx) can't SGI CPU%d (%lx), no RSS\n",
				cpu, (unsigned long)mpidr,
				i, (unsigned long)gic_cpu_to_affinity(i));
	}

	/*
	 * GIC spec says, when ICC_CTLR_EL1.RSS==1 and GICD_TYPER.RSS==0,
	 * writing ICC_ASGI1R_EL1 register with RS != 0 is a CONSTRAINED
	 * UNPREDICTABLE choice of:
	 * - The write is ignored.
	 * - The RS field is treated as 0.
	 */
	if (need_rss && (!gic_data.has_rss))
		pr_crit_once("RSS is required but GICD doesn't support it\n");
}

static bool gicv3_nolpi;

static int __init gicv3_nolpi_cfg(char *buf)
{
	return kstrtobool(buf, &gicv3_nolpi);
}
early_param("irqchip.gicv3_nolpi", gicv3_nolpi_cfg);

static int gic_dist_supports_lpis(void)
{
	return (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) &&
		!!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS) &&
		!gicv3_nolpi);
}

static void gic_cpu_init(void)
{
	void __iomem *rbase;
	int i;

	/* Register ourselves with the rest of the world */
	if (gic_populate_rdist())
		return;

	gic_enable_redist(true);

	WARN((gic_data.ppi_nr > 16 || GIC_ESPI_NR != 0) &&
	     !(gic_read_ctlr() & ICC_CTLR_EL1_ExtRange),
	     "Distributor has extended ranges, but CPU%d doesn't\n",
	     smp_processor_id());

	rbase = gic_data_rdist_sgi_base();

	/* Configure SGIs/PPIs as non-secure Group-1 */
	for (i = 0; i < gic_data.ppi_nr + SGI_NR; i += 32)
		writel_relaxed(~0, rbase + GICR_IGROUPR0 + i / 8);

	gic_cpu_config(rbase, gic_data.ppi_nr + SGI_NR, dist_prio_irq);
	gic_redist_wait_for_rwp();

	/* initialise system registers */
	gic_cpu_sys_reg_init();
}

#ifdef CONFIG_SMP

#define MPIDR_TO_SGI_RS(mpidr)		(MPIDR_RS(mpidr) << ICC_SGI1R_RS_SHIFT)
#define MPIDR_TO_SGI_CLUSTER_ID(mpidr)	((mpidr) & ~0xFUL)

/*
 * gic_starting_cpu() is called after the last point where cpuhp is allowed
 * to fail. So pre-check for problems earlier.
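 *
 * gic_check_rdist() below runs from a CPUHP_BP_PREPARE_DYN state, where a
 * non-zero return value still aborts the CPU bring-up cleanly.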
1351 */ 1352 static int gic_check_rdist(unsigned int cpu) 1353 { 1354 if (cpumask_test_cpu(cpu, &broken_rdists)) 1355 return -EINVAL; 1356 1357 return 0; 1358 } 1359 1360 static int gic_starting_cpu(unsigned int cpu) 1361 { 1362 gic_cpu_sys_reg_enable(); 1363 gic_cpu_init(); 1364 1365 if (gic_dist_supports_lpis()) 1366 its_cpu_init(); 1367 1368 return 0; 1369 } 1370 1371 static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask, 1372 unsigned long cluster_id) 1373 { 1374 int next_cpu, cpu = *base_cpu; 1375 unsigned long mpidr; 1376 u16 tlist = 0; 1377 1378 mpidr = gic_cpu_to_affinity(cpu); 1379 1380 while (cpu < nr_cpu_ids) { 1381 tlist |= 1 << (mpidr & 0xf); 1382 1383 next_cpu = cpumask_next(cpu, mask); 1384 if (next_cpu >= nr_cpu_ids) 1385 goto out; 1386 cpu = next_cpu; 1387 1388 mpidr = gic_cpu_to_affinity(cpu); 1389 1390 if (cluster_id != MPIDR_TO_SGI_CLUSTER_ID(mpidr)) { 1391 cpu--; 1392 goto out; 1393 } 1394 } 1395 out: 1396 *base_cpu = cpu; 1397 return tlist; 1398 } 1399 1400 #define MPIDR_TO_SGI_AFFINITY(cluster_id, level) \ 1401 (MPIDR_AFFINITY_LEVEL(cluster_id, level) \ 1402 << ICC_SGI1R_AFFINITY_## level ##_SHIFT) 1403 1404 static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq) 1405 { 1406 u64 val; 1407 1408 val = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3) | 1409 MPIDR_TO_SGI_AFFINITY(cluster_id, 2) | 1410 irq << ICC_SGI1R_SGI_ID_SHIFT | 1411 MPIDR_TO_SGI_AFFINITY(cluster_id, 1) | 1412 MPIDR_TO_SGI_RS(cluster_id) | 1413 tlist << ICC_SGI1R_TARGET_LIST_SHIFT); 1414 1415 pr_devel("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val); 1416 gic_write_sgi1r(val); 1417 } 1418 1419 static void gic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask) 1420 { 1421 int cpu; 1422 1423 if (WARN_ON(d->hwirq >= 16)) 1424 return; 1425 1426 /* 1427 * Ensure that stores to Normal memory are visible to the 1428 * other CPUs before issuing the IPI. 
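	 *
	 * The dsb(ishst) below orders those stores before the SGI is
	 * generated; the isb() at the end of this function then forces the
	 * ICC_SGI1R_EL1 writes themselves to take effect.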
	 */
	dsb(ishst);

	for_each_cpu(cpu, mask) {
		u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(gic_cpu_to_affinity(cpu));
		u16 tlist;

		tlist = gic_compute_target_list(&cpu, mask, cluster_id);
		gic_send_sgi(cluster_id, tlist, d->hwirq);
	}

	/* Force the above writes to ICC_SGI1R_EL1 to be executed */
	isb();
}

static void __init gic_smp_init(void)
{
	struct irq_fwspec sgi_fwspec = {
		.fwnode		= gic_data.fwnode,
		.param_count	= 1,
	};
	int base_sgi;

	cpuhp_setup_state_nocalls(CPUHP_BP_PREPARE_DYN,
				  "irqchip/arm/gicv3:checkrdist",
				  gic_check_rdist, NULL);

	cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
				  "irqchip/arm/gicv3:starting",
				  gic_starting_cpu, NULL);

	/* Register all 8 non-secure SGIs */
	base_sgi = irq_domain_alloc_irqs(gic_data.domain, 8, NUMA_NO_NODE, &sgi_fwspec);
	if (WARN_ON(base_sgi <= 0))
		return;

	set_smp_ipi_range(base_sgi, 8);
}

static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	unsigned int cpu;
	u32 offset, index;
	void __iomem *reg;
	int enabled;
	u64 val;

	if (force)
		cpu = cpumask_first(mask_val);
	else
		cpu = cpumask_any_and(mask_val, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	if (gic_irq_in_rdist(d))
		return -EINVAL;

	/* If interrupt was enabled, disable it first */
	enabled = gic_peek_irq(d, GICD_ISENABLER);
	if (enabled)
		gic_mask_irq(d);

	offset = convert_offset_index(d, GICD_IROUTER, &index);
	reg = gic_dist_base(d) + offset + (index * 8);
	val = gic_cpu_to_affinity(cpu);

	gic_write_irouter(val, reg);

	/*
	 * If the interrupt was enabled, enable it again. Otherwise,
	 * just wait for the distributor to have digested our changes.
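	 *
	 * As an illustrative example of the GICD_IROUTER value written above:
	 * a target CPU with MPIDR Aff3.Aff2.Aff1.Aff0 == 0.0.1.3 yields
	 * val == 0x103, with Interrupt_Routing_Mode (bit 31) left clear so
	 * that the SPI is delivered to exactly that PE.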
1502 */ 1503 if (enabled) 1504 gic_unmask_irq(d); 1505 1506 irq_data_update_effective_affinity(d, cpumask_of(cpu)); 1507 1508 return IRQ_SET_MASK_OK_DONE; 1509 } 1510 #else 1511 #define gic_set_affinity NULL 1512 #define gic_ipi_send_mask NULL 1513 #define gic_smp_init() do { } while(0) 1514 #endif 1515 1516 static int gic_retrigger(struct irq_data *data) 1517 { 1518 return !gic_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING, true); 1519 } 1520 1521 #ifdef CONFIG_CPU_PM 1522 static int gic_cpu_pm_notifier(struct notifier_block *self, 1523 unsigned long cmd, void *v) 1524 { 1525 if (cmd == CPU_PM_EXIT) { 1526 if (gic_dist_security_disabled()) 1527 gic_enable_redist(true); 1528 gic_cpu_sys_reg_enable(); 1529 gic_cpu_sys_reg_init(); 1530 } else if (cmd == CPU_PM_ENTER && gic_dist_security_disabled()) { 1531 gic_write_grpen1(0); 1532 gic_enable_redist(false); 1533 } 1534 return NOTIFY_OK; 1535 } 1536 1537 static struct notifier_block gic_cpu_pm_notifier_block = { 1538 .notifier_call = gic_cpu_pm_notifier, 1539 }; 1540 1541 static void gic_cpu_pm_init(void) 1542 { 1543 cpu_pm_register_notifier(&gic_cpu_pm_notifier_block); 1544 } 1545 1546 #else 1547 static inline void gic_cpu_pm_init(void) { } 1548 #endif /* CONFIG_CPU_PM */ 1549 1550 static struct irq_chip gic_chip = { 1551 .name = "GICv3", 1552 .irq_mask = gic_mask_irq, 1553 .irq_unmask = gic_unmask_irq, 1554 .irq_eoi = gic_eoi_irq, 1555 .irq_set_type = gic_set_type, 1556 .irq_set_affinity = gic_set_affinity, 1557 .irq_retrigger = gic_retrigger, 1558 .irq_get_irqchip_state = gic_irq_get_irqchip_state, 1559 .irq_set_irqchip_state = gic_irq_set_irqchip_state, 1560 .irq_nmi_setup = gic_irq_nmi_setup, 1561 .irq_nmi_teardown = gic_irq_nmi_teardown, 1562 .ipi_send_mask = gic_ipi_send_mask, 1563 .flags = IRQCHIP_SET_TYPE_MASKED | 1564 IRQCHIP_SKIP_SET_WAKE | 1565 IRQCHIP_MASK_ON_SUSPEND, 1566 }; 1567 1568 static struct irq_chip gic_eoimode1_chip = { 1569 .name = "GICv3", 1570 .irq_mask = gic_eoimode1_mask_irq, 1571 .irq_unmask = gic_unmask_irq, 1572 .irq_eoi = gic_eoimode1_eoi_irq, 1573 .irq_set_type = gic_set_type, 1574 .irq_set_affinity = gic_set_affinity, 1575 .irq_retrigger = gic_retrigger, 1576 .irq_get_irqchip_state = gic_irq_get_irqchip_state, 1577 .irq_set_irqchip_state = gic_irq_set_irqchip_state, 1578 .irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity, 1579 .irq_nmi_setup = gic_irq_nmi_setup, 1580 .irq_nmi_teardown = gic_irq_nmi_teardown, 1581 .ipi_send_mask = gic_ipi_send_mask, 1582 .flags = IRQCHIP_SET_TYPE_MASKED | 1583 IRQCHIP_SKIP_SET_WAKE | 1584 IRQCHIP_MASK_ON_SUSPEND, 1585 }; 1586 1587 static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, 1588 irq_hw_number_t hw) 1589 { 1590 struct irq_chip *chip = &gic_chip; 1591 struct irq_data *irqd = irq_desc_get_irq_data(irq_to_desc(irq)); 1592 1593 if (static_branch_likely(&supports_deactivate_key)) 1594 chip = &gic_eoimode1_chip; 1595 1596 switch (__get_intid_range(hw)) { 1597 case SGI_RANGE: 1598 case PPI_RANGE: 1599 case EPPI_RANGE: 1600 irq_set_percpu_devid(irq); 1601 irq_domain_set_info(d, irq, hw, chip, d->host_data, 1602 handle_percpu_devid_irq, NULL, NULL); 1603 break; 1604 1605 case SPI_RANGE: 1606 case ESPI_RANGE: 1607 irq_domain_set_info(d, irq, hw, chip, d->host_data, 1608 handle_fasteoi_irq, NULL, NULL); 1609 irq_set_probe(irq); 1610 irqd_set_single_target(irqd); 1611 break; 1612 1613 case LPI_RANGE: 1614 if (!gic_dist_supports_lpis()) 1615 return -EPERM; 1616 irq_domain_set_info(d, irq, hw, chip, d->host_data, 1617 handle_fasteoi_irq, NULL, NULL); 1618 break; 
1619 1620 default: 1621 return -EPERM; 1622 } 1623 1624 /* Prevents SW retriggers which mess up the ACK/EOI ordering */ 1625 irqd_set_handle_enforce_irqctx(irqd); 1626 return 0; 1627 } 1628 1629 static int gic_irq_domain_translate(struct irq_domain *d, 1630 struct irq_fwspec *fwspec, 1631 unsigned long *hwirq, 1632 unsigned int *type) 1633 { 1634 if (fwspec->param_count == 1 && fwspec->param[0] < 16) { 1635 *hwirq = fwspec->param[0]; 1636 *type = IRQ_TYPE_EDGE_RISING; 1637 return 0; 1638 } 1639 1640 if (is_of_node(fwspec->fwnode)) { 1641 if (fwspec->param_count < 3) 1642 return -EINVAL; 1643 1644 switch (fwspec->param[0]) { 1645 case 0: /* SPI */ 1646 *hwirq = fwspec->param[1] + 32; 1647 break; 1648 case 1: /* PPI */ 1649 *hwirq = fwspec->param[1] + 16; 1650 break; 1651 case 2: /* ESPI */ 1652 *hwirq = fwspec->param[1] + ESPI_BASE_INTID; 1653 break; 1654 case 3: /* EPPI */ 1655 *hwirq = fwspec->param[1] + EPPI_BASE_INTID; 1656 break; 1657 case GIC_IRQ_TYPE_LPI: /* LPI */ 1658 *hwirq = fwspec->param[1]; 1659 break; 1660 case GIC_IRQ_TYPE_PARTITION: 1661 *hwirq = fwspec->param[1]; 1662 if (fwspec->param[1] >= 16) 1663 *hwirq += EPPI_BASE_INTID - 16; 1664 else 1665 *hwirq += 16; 1666 break; 1667 default: 1668 return -EINVAL; 1669 } 1670 1671 *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK; 1672 1673 /* 1674 * Make it clear that broken DTs are... broken. 1675 * Partitioned PPIs are an unfortunate exception. 1676 */ 1677 WARN_ON(*type == IRQ_TYPE_NONE && 1678 fwspec->param[0] != GIC_IRQ_TYPE_PARTITION); 1679 return 0; 1680 } 1681 1682 if (is_fwnode_irqchip(fwspec->fwnode)) { 1683 if(fwspec->param_count != 2) 1684 return -EINVAL; 1685 1686 if (fwspec->param[0] < 16) { 1687 pr_err(FW_BUG "Illegal GSI%d translation request\n", 1688 fwspec->param[0]); 1689 return -EINVAL; 1690 } 1691 1692 *hwirq = fwspec->param[0]; 1693 *type = fwspec->param[1]; 1694 1695 WARN_ON(*type == IRQ_TYPE_NONE); 1696 return 0; 1697 } 1698 1699 return -EINVAL; 1700 } 1701 1702 static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, 1703 unsigned int nr_irqs, void *arg) 1704 { 1705 int i, ret; 1706 irq_hw_number_t hwirq; 1707 unsigned int type = IRQ_TYPE_NONE; 1708 struct irq_fwspec *fwspec = arg; 1709 1710 ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type); 1711 if (ret) 1712 return ret; 1713 1714 for (i = 0; i < nr_irqs; i++) { 1715 ret = gic_irq_domain_map(domain, virq + i, hwirq + i); 1716 if (ret) 1717 return ret; 1718 } 1719 1720 return 0; 1721 } 1722 1723 static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq, 1724 unsigned int nr_irqs) 1725 { 1726 int i; 1727 1728 for (i = 0; i < nr_irqs; i++) { 1729 struct irq_data *d = irq_domain_get_irq_data(domain, virq + i); 1730 irq_set_handler(virq + i, NULL); 1731 irq_domain_reset_irq_data(d); 1732 } 1733 } 1734 1735 static bool fwspec_is_partitioned_ppi(struct irq_fwspec *fwspec, 1736 irq_hw_number_t hwirq) 1737 { 1738 enum gic_intid_range range; 1739 1740 if (!gic_data.ppi_descs) 1741 return false; 1742 1743 if (!is_of_node(fwspec->fwnode)) 1744 return false; 1745 1746 if (fwspec->param_count < 4 || !fwspec->param[3]) 1747 return false; 1748 1749 range = __get_intid_range(hwirq); 1750 if (range != PPI_RANGE && range != EPPI_RANGE) 1751 return false; 1752 1753 return true; 1754 } 1755 1756 static int gic_irq_domain_select(struct irq_domain *d, 1757 struct irq_fwspec *fwspec, 1758 enum irq_domain_bus_token bus_token) 1759 { 1760 unsigned int type, ret, ppi_idx; 1761 irq_hw_number_t hwirq; 1762 1763 /* Not for us */ 1764 if 
(fwspec->fwnode != d->fwnode) 1765 return 0; 1766 1767 /* Handle pure domain searches */ 1768 if (!fwspec->param_count) 1769 return d->bus_token == bus_token; 1770 1771 /* If this is not DT, then we have a single domain */ 1772 if (!is_of_node(fwspec->fwnode)) 1773 return 1; 1774 1775 ret = gic_irq_domain_translate(d, fwspec, &hwirq, &type); 1776 if (WARN_ON_ONCE(ret)) 1777 return 0; 1778 1779 if (!fwspec_is_partitioned_ppi(fwspec, hwirq)) 1780 return d == gic_data.domain; 1781 1782 /* 1783 * If this is a PPI and we have a 4th (non-null) parameter, 1784 * then we need to match the partition domain. 1785 */ 1786 ppi_idx = __gic_get_ppi_index(hwirq); 1787 return d == partition_get_domain(gic_data.ppi_descs[ppi_idx]); 1788 } 1789 1790 static const struct irq_domain_ops gic_irq_domain_ops = { 1791 .translate = gic_irq_domain_translate, 1792 .alloc = gic_irq_domain_alloc, 1793 .free = gic_irq_domain_free, 1794 .select = gic_irq_domain_select, 1795 }; 1796 1797 static int partition_domain_translate(struct irq_domain *d, 1798 struct irq_fwspec *fwspec, 1799 unsigned long *hwirq, 1800 unsigned int *type) 1801 { 1802 unsigned long ppi_intid; 1803 struct device_node *np; 1804 unsigned int ppi_idx; 1805 int ret; 1806 1807 if (!gic_data.ppi_descs) 1808 return -ENOMEM; 1809 1810 np = of_find_node_by_phandle(fwspec->param[3]); 1811 if (WARN_ON(!np)) 1812 return -EINVAL; 1813 1814 ret = gic_irq_domain_translate(d, fwspec, &ppi_intid, type); 1815 if (WARN_ON_ONCE(ret)) 1816 return 0; 1817 1818 ppi_idx = __gic_get_ppi_index(ppi_intid); 1819 ret = partition_translate_id(gic_data.ppi_descs[ppi_idx], 1820 of_node_to_fwnode(np)); 1821 if (ret < 0) 1822 return ret; 1823 1824 *hwirq = ret; 1825 *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK; 1826 1827 return 0; 1828 } 1829 1830 static const struct irq_domain_ops partition_domain_ops = { 1831 .translate = partition_domain_translate, 1832 .select = gic_irq_domain_select, 1833 }; 1834 1835 static bool gic_enable_quirk_msm8996(void *data) 1836 { 1837 struct gic_chip_data *d = data; 1838 1839 d->flags |= FLAGS_WORKAROUND_GICR_WAKER_MSM8996; 1840 1841 return true; 1842 } 1843 1844 static bool gic_enable_quirk_cavium_38539(void *data) 1845 { 1846 struct gic_chip_data *d = data; 1847 1848 d->flags |= FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539; 1849 1850 return true; 1851 } 1852 1853 static bool gic_enable_quirk_hip06_07(void *data) 1854 { 1855 struct gic_chip_data *d = data; 1856 1857 /* 1858 * HIP06 GICD_IIDR clashes with GIC-600 product number (despite 1859 * not being an actual ARM implementation). The saving grace is 1860 * that GIC-600 doesn't have ESPI, so nothing to do in that case. 1861 * HIP07 doesn't even have a proper IIDR, and still pretends to 1862 * have ESPI. In both cases, put them right. 1863 */ 1864 if (d->rdists.gicd_typer & GICD_TYPER_ESPI) { 1865 /* Zero both ESPI and the RES0 field next to it... 
*/ 1866 d->rdists.gicd_typer &= ~GENMASK(9, 8); 1867 return true; 1868 } 1869 1870 return false; 1871 } 1872 1873 #define T241_CHIPN_MASK GENMASK_ULL(45, 44) 1874 #define T241_CHIP_GICDA_OFFSET 0x1580000 1875 #define SMCCC_SOC_ID_T241 0x036b0241 1876 1877 static bool gic_enable_quirk_nvidia_t241(void *data) 1878 { 1879 s32 soc_id = arm_smccc_get_soc_id_version(); 1880 unsigned long chip_bmask = 0; 1881 phys_addr_t phys; 1882 u32 i; 1883 1884 /* Check JEP106 code for NVIDIA T241 chip (036b:0241) */ 1885 if ((soc_id < 0) || (soc_id != SMCCC_SOC_ID_T241)) 1886 return false; 1887 1888 /* Find the chips based on GICR regions PHYS addr */ 1889 for (i = 0; i < gic_data.nr_redist_regions; i++) { 1890 chip_bmask |= BIT(FIELD_GET(T241_CHIPN_MASK, 1891 (u64)gic_data.redist_regions[i].phys_base)); 1892 } 1893 1894 if (hweight32(chip_bmask) < 3) 1895 return false; 1896 1897 /* Setup GICD alias regions */ 1898 for (i = 0; i < ARRAY_SIZE(t241_dist_base_alias); i++) { 1899 if (chip_bmask & BIT(i)) { 1900 phys = gic_data.dist_phys_base + T241_CHIP_GICDA_OFFSET; 1901 phys |= FIELD_PREP(T241_CHIPN_MASK, i); 1902 t241_dist_base_alias[i] = ioremap(phys, SZ_64K); 1903 WARN_ON_ONCE(!t241_dist_base_alias[i]); 1904 } 1905 } 1906 static_branch_enable(&gic_nvidia_t241_erratum); 1907 return true; 1908 } 1909 1910 static bool gic_enable_quirk_asr8601(void *data) 1911 { 1912 struct gic_chip_data *d = data; 1913 1914 d->flags |= FLAGS_WORKAROUND_ASR_ERRATUM_8601001; 1915 1916 return true; 1917 } 1918 1919 static bool gic_enable_quirk_arm64_2941627(void *data) 1920 { 1921 static_branch_enable(&gic_arm64_2941627_erratum); 1922 return true; 1923 } 1924 1925 static bool rd_set_non_coherent(void *data) 1926 { 1927 struct gic_chip_data *d = data; 1928 1929 d->rdists.flags |= RDIST_FLAGS_FORCE_NON_SHAREABLE; 1930 return true; 1931 } 1932 1933 static const struct gic_quirk gic_quirks[] = { 1934 { 1935 .desc = "GICv3: Qualcomm MSM8996 broken firmware", 1936 .compatible = "qcom,msm8996-gic-v3", 1937 .init = gic_enable_quirk_msm8996, 1938 }, 1939 { 1940 .desc = "GICv3: ASR erratum 8601001", 1941 .compatible = "asr,asr8601-gic-v3", 1942 .init = gic_enable_quirk_asr8601, 1943 }, 1944 { 1945 .desc = "GICv3: HIP06 erratum 161010803", 1946 .iidr = 0x0204043b, 1947 .mask = 0xffffffff, 1948 .init = gic_enable_quirk_hip06_07, 1949 }, 1950 { 1951 .desc = "GICv3: HIP07 erratum 161010803", 1952 .iidr = 0x00000000, 1953 .mask = 0xffffffff, 1954 .init = gic_enable_quirk_hip06_07, 1955 }, 1956 { 1957 /* 1958 * Reserved register accesses generate a Synchronous 1959 * External Abort. 
This erratum applies to: 1960 * - ThunderX: CN88xx 1961 * - OCTEON TX: CN83xx, CN81xx 1962 * - OCTEON TX2: CN93xx, CN96xx, CN98xx, CNF95xx* 1963 */ 1964 .desc = "GICv3: Cavium erratum 38539", 1965 .iidr = 0xa000034c, 1966 .mask = 0xe8f00fff, 1967 .init = gic_enable_quirk_cavium_38539, 1968 }, 1969 { 1970 .desc = "GICv3: NVIDIA erratum T241-FABRIC-4", 1971 .iidr = 0x0402043b, 1972 .mask = 0xffffffff, 1973 .init = gic_enable_quirk_nvidia_t241, 1974 }, 1975 { 1976 /* 1977 * GIC-700: 2941627 workaround - IP variant [0,1] 1978 * 1979 */ 1980 .desc = "GICv3: ARM64 erratum 2941627", 1981 .iidr = 0x0400043b, 1982 .mask = 0xff0e0fff, 1983 .init = gic_enable_quirk_arm64_2941627, 1984 }, 1985 { 1986 /* 1987 * GIC-700: 2941627 workaround - IP variant [2] 1988 */ 1989 .desc = "GICv3: ARM64 erratum 2941627", 1990 .iidr = 0x0402043b, 1991 .mask = 0xff0f0fff, 1992 .init = gic_enable_quirk_arm64_2941627, 1993 }, 1994 { 1995 .desc = "GICv3: non-coherent attribute", 1996 .property = "dma-noncoherent", 1997 .init = rd_set_non_coherent, 1998 }, 1999 { 2000 } 2001 }; 2002 2003 static void gic_enable_nmi_support(void) 2004 { 2005 int i; 2006 2007 if (!gic_prio_masking_enabled()) 2008 return; 2009 2010 rdist_nmi_refs = kcalloc(gic_data.ppi_nr + SGI_NR, 2011 sizeof(*rdist_nmi_refs), GFP_KERNEL); 2012 if (!rdist_nmi_refs) 2013 return; 2014 2015 for (i = 0; i < gic_data.ppi_nr + SGI_NR; i++) 2016 refcount_set(&rdist_nmi_refs[i], 0); 2017 2018 pr_info("Pseudo-NMIs enabled using %s ICC_PMR_EL1 synchronisation\n", 2019 gic_has_relaxed_pmr_sync() ? "relaxed" : "forced"); 2020 2021 static_branch_enable(&supports_pseudo_nmis); 2022 2023 if (static_branch_likely(&supports_deactivate_key)) 2024 gic_eoimode1_chip.flags |= IRQCHIP_SUPPORTS_NMI; 2025 else 2026 gic_chip.flags |= IRQCHIP_SUPPORTS_NMI; 2027 } 2028 2029 static int __init gic_init_bases(phys_addr_t dist_phys_base, 2030 void __iomem *dist_base, 2031 struct redist_region *rdist_regs, 2032 u32 nr_redist_regions, 2033 u64 redist_stride, 2034 struct fwnode_handle *handle) 2035 { 2036 u32 typer; 2037 int err; 2038 2039 if (!is_hyp_mode_available()) 2040 static_branch_disable(&supports_deactivate_key); 2041 2042 if (static_branch_likely(&supports_deactivate_key)) 2043 pr_info("GIC: Using split EOI/Deactivate mode\n"); 2044 2045 gic_data.fwnode = handle; 2046 gic_data.dist_phys_base = dist_phys_base; 2047 gic_data.dist_base = dist_base; 2048 gic_data.redist_regions = rdist_regs; 2049 gic_data.nr_redist_regions = nr_redist_regions; 2050 gic_data.redist_stride = redist_stride; 2051 2052 /* 2053 * Find out how many interrupts are supported. 2054 */ 2055 typer = readl_relaxed(gic_data.dist_base + GICD_TYPER); 2056 gic_data.rdists.gicd_typer = typer; 2057 2058 gic_enable_quirks(readl_relaxed(gic_data.dist_base + GICD_IIDR), 2059 gic_quirks, &gic_data); 2060 2061 pr_info("%d SPIs implemented\n", GIC_LINE_NR - 32); 2062 pr_info("%d Extended SPIs implemented\n", GIC_ESPI_NR); 2063 2064 /* 2065 * ThunderX1 explodes on reading GICD_TYPER2, in violation of the 2066 * architecture spec (which says that reserved registers are RES0). 
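	 *
	 * When that workaround is active we leave gicd_typer2 at zero, which
	 * also hides the optional features advertised there (such as
	 * GICD_TYPER2_nASSGIcap).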

static int __init gic_init_bases(phys_addr_t dist_phys_base,
				 void __iomem *dist_base,
				 struct redist_region *rdist_regs,
				 u32 nr_redist_regions,
				 u64 redist_stride,
				 struct fwnode_handle *handle)
{
	u32 typer;
	int err;

	if (!is_hyp_mode_available())
		static_branch_disable(&supports_deactivate_key);

	if (static_branch_likely(&supports_deactivate_key))
		pr_info("GIC: Using split EOI/Deactivate mode\n");

	gic_data.fwnode = handle;
	gic_data.dist_phys_base = dist_phys_base;
	gic_data.dist_base = dist_base;
	gic_data.redist_regions = rdist_regs;
	gic_data.nr_redist_regions = nr_redist_regions;
	gic_data.redist_stride = redist_stride;

	/*
	 * Find out how many interrupts are supported.
	 */
	typer = readl_relaxed(gic_data.dist_base + GICD_TYPER);
	gic_data.rdists.gicd_typer = typer;

	gic_enable_quirks(readl_relaxed(gic_data.dist_base + GICD_IIDR),
			  gic_quirks, &gic_data);

	pr_info("%d SPIs implemented\n", GIC_LINE_NR - 32);
	pr_info("%d Extended SPIs implemented\n", GIC_ESPI_NR);

	/*
	 * ThunderX1 explodes on reading GICD_TYPER2, in violation of the
	 * architecture spec (which says that reserved registers are RES0).
	 */
	if (!(gic_data.flags & FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539))
		gic_data.rdists.gicd_typer2 = readl_relaxed(gic_data.dist_base + GICD_TYPER2);

	gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops,
						 &gic_data);
	gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
	if (!static_branch_unlikely(&gic_nvidia_t241_erratum)) {
		/* Only advertise GICv4.x features when erratum T241-FABRIC-4 is not in effect */
		gic_data.rdists.has_rvpeid = true;
		gic_data.rdists.has_vlpis = true;
		gic_data.rdists.has_direct_lpi = true;
		gic_data.rdists.has_vpend_valid_dirty = true;
	}

	if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
		err = -ENOMEM;
		goto out_free;
	}

	irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED);

	gic_data.has_rss = !!(typer & GICD_TYPER_RSS);

	if (typer & GICD_TYPER_MBIS) {
		err = mbi_init(handle, gic_data.domain);
		if (err)
			pr_err("Failed to initialize MBIs\n");
	}

	set_handle_irq(gic_handle_irq);

	gic_update_rdist_properties();

	gic_cpu_sys_reg_enable();
	gic_prio_init();
	gic_dist_init();
	gic_cpu_init();
	gic_enable_nmi_support();
	gic_smp_init();
	gic_cpu_pm_init();

	if (gic_dist_supports_lpis()) {
		its_init(handle, &gic_data.rdists, gic_data.domain, dist_prio_irq);
		its_cpu_init();
		its_lpi_memreserve_init();
	} else {
		if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
			gicv2m_init(handle, gic_data.domain);
	}

	return 0;

out_free:
	if (gic_data.domain)
		irq_domain_remove(gic_data.domain);
	free_percpu(gic_data.rdists.rdist);
	return err;
}

static int __init gic_validate_dist_version(void __iomem *dist_base)
{
	u32 reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;

	if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4)
		return -ENODEV;

	return 0;
}
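
/*
 * Editor's note: GICD_PIDR2 bits [7:4] encode the GIC architecture
 * revision, which is what gic_validate_dist_version() above checks;
 * GIC_PIDR2_ARCH_GICv3 and GIC_PIDR2_ARCH_GICv4 are assumed to
 * correspond to revision values 0x3 and 0x4 in that field.
 */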

/* Create all possible partitions at boot time */
static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
{
	struct device_node *parts_node, *child_part;
	int part_idx = 0, i;
	int nr_parts;
	struct partition_affinity *parts;

	parts_node = of_get_child_by_name(gic_node, "ppi-partitions");
	if (!parts_node)
		return;

	gic_data.ppi_descs = kcalloc(gic_data.ppi_nr, sizeof(*gic_data.ppi_descs), GFP_KERNEL);
	if (!gic_data.ppi_descs)
		goto out_put_node;

	nr_parts = of_get_child_count(parts_node);

	if (!nr_parts)
		goto out_put_node;

	parts = kcalloc(nr_parts, sizeof(*parts), GFP_KERNEL);
	if (WARN_ON(!parts))
		goto out_put_node;

	for_each_child_of_node(parts_node, child_part) {
		struct partition_affinity *part;
		int n;

		part = &parts[part_idx];

		part->partition_id = of_node_to_fwnode(child_part);

		pr_info("GIC: PPI partition %pOFn[%d] { ",
			child_part, part_idx);

		n = of_property_count_elems_of_size(child_part, "affinity",
						    sizeof(u32));
		WARN_ON(n <= 0);

		for (i = 0; i < n; i++) {
			int err, cpu;
			u32 cpu_phandle;
			struct device_node *cpu_node;

			err = of_property_read_u32_index(child_part, "affinity",
							 i, &cpu_phandle);
			if (WARN_ON(err))
				continue;

			cpu_node = of_find_node_by_phandle(cpu_phandle);
			if (WARN_ON(!cpu_node))
				continue;

			cpu = of_cpu_node_to_id(cpu_node);
			if (WARN_ON(cpu < 0)) {
				of_node_put(cpu_node);
				continue;
			}

			pr_cont("%pOF[%d] ", cpu_node, cpu);

			cpumask_set_cpu(cpu, &part->mask);
			of_node_put(cpu_node);
		}

		pr_cont("}\n");
		part_idx++;
	}

	for (i = 0; i < gic_data.ppi_nr; i++) {
		unsigned int irq;
		struct partition_desc *desc;
		struct irq_fwspec ppi_fwspec = {
			.fwnode		= gic_data.fwnode,
			.param_count	= 3,
			.param		= {
				[0]	= GIC_IRQ_TYPE_PARTITION,
				[1]	= i,
				[2]	= IRQ_TYPE_NONE,
			},
		};

		irq = irq_create_fwspec_mapping(&ppi_fwspec);
		if (WARN_ON(!irq))
			continue;
		desc = partition_create_desc(gic_data.fwnode, parts, nr_parts,
					     irq, &partition_domain_ops);
		if (WARN_ON(!desc))
			continue;

		gic_data.ppi_descs[i] = desc;
	}

out_put_node:
	of_node_put(parts_node);
}
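
/*
 * Editor's note: an illustrative devicetree fragment (names and CPU
 * phandles are hypothetical) showing the layout consumed by
 * gic_populate_ppi_partitions() above:
 *
 *	ppi-partitions {
 *		part0: interrupt-partition-0 {
 *			affinity = <&cpu0 &cpu2>;
 *		};
 *		part1: interrupt-partition-1 {
 *			affinity = <&cpu1 &cpu3>;
 *		};
 *	};
 *
 * A device may then direct a PPI at one partition by referencing it in
 * its interrupt specifier, e.g.
 *	interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_LOW &part0>;
 */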

static void __init gic_of_setup_kvm_info(struct device_node *node, u32 nr_redist_regions)
{
	int ret;
	struct resource r;

	gic_v3_kvm_info.type = GIC_V3;

	gic_v3_kvm_info.maint_irq = irq_of_parse_and_map(node, 0);
	if (!gic_v3_kvm_info.maint_irq)
		return;

	/* Also skip GICD, GICC, GICH */
	ret = of_address_to_resource(node, nr_redist_regions + 3, &r);
	if (!ret)
		gic_v3_kvm_info.vcpu = r;

	gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
	gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
	vgic_set_kvm_info(&gic_v3_kvm_info);
}

static void gic_request_region(resource_size_t base, resource_size_t size,
			       const char *name)
{
	if (!request_mem_region(base, size, name))
		pr_warn_once(FW_BUG "%s region %pa has overlapping address\n",
			     name, &base);
}

static void __iomem *gic_of_iomap(struct device_node *node, int idx,
				  const char *name, struct resource *res)
{
	void __iomem *base;
	int ret;

	ret = of_address_to_resource(node, idx, res);
	if (ret)
		return IOMEM_ERR_PTR(ret);

	gic_request_region(res->start, resource_size(res), name);
	base = of_iomap(node, idx);

	return base ?: IOMEM_ERR_PTR(-ENOMEM);
}

static int __init gic_of_init(struct device_node *node, struct device_node *parent)
{
	phys_addr_t dist_phys_base;
	void __iomem *dist_base;
	struct redist_region *rdist_regs;
	struct resource res;
	u64 redist_stride;
	u32 nr_redist_regions;
	int err, i;

	dist_base = gic_of_iomap(node, 0, "GICD", &res);
	if (IS_ERR(dist_base)) {
		pr_err("%pOF: unable to map gic dist registers\n", node);
		return PTR_ERR(dist_base);
	}

	dist_phys_base = res.start;

	err = gic_validate_dist_version(dist_base);
	if (err) {
		pr_err("%pOF: no distributor detected, giving up\n", node);
		goto out_unmap_dist;
	}

	if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions))
		nr_redist_regions = 1;

	rdist_regs = kcalloc(nr_redist_regions, sizeof(*rdist_regs),
			     GFP_KERNEL);
	if (!rdist_regs) {
		err = -ENOMEM;
		goto out_unmap_dist;
	}

	for (i = 0; i < nr_redist_regions; i++) {
		rdist_regs[i].redist_base = gic_of_iomap(node, 1 + i, "GICR", &res);
		if (IS_ERR(rdist_regs[i].redist_base)) {
			pr_err("%pOF: couldn't map region %d\n", node, i);
			err = -ENODEV;
			goto out_unmap_rdist;
		}
		rdist_regs[i].phys_base = res.start;
	}

	if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
		redist_stride = 0;

	gic_enable_of_quirks(node, gic_quirks, &gic_data);

	err = gic_init_bases(dist_phys_base, dist_base, rdist_regs,
			     nr_redist_regions, redist_stride, &node->fwnode);
	if (err)
		goto out_unmap_rdist;

	gic_populate_ppi_partitions(node);

	if (static_branch_likely(&supports_deactivate_key))
		gic_of_setup_kvm_info(node, nr_redist_regions);
	return 0;

out_unmap_rdist:
	for (i = 0; i < nr_redist_regions; i++)
		if (rdist_regs[i].redist_base && !IS_ERR(rdist_regs[i].redist_base))
			iounmap(rdist_regs[i].redist_base);
	kfree(rdist_regs);
out_unmap_dist:
	iounmap(dist_base);
	return err;
}

IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);
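
/*
 * Editor's note: an illustrative "arm,gic-v3" node layout (addresses
 * are hypothetical) showing how the "reg" entries line up with the
 * indices used by gic_of_init() and gic_of_setup_kvm_info() above,
 * here with two redistributor regions:
 *
 *	#redistributor-regions = <2>;
 *	reg = <0x2f000000 0x10000>,	// index 0: GICD
 *	      <0x2f100000 0x200000>,	// index 1: GICR region 0
 *	      <0x2f300000 0x200000>,	// index 2: GICR region 1
 *	      <0x2c000000 0x2000>,	// index 3: GICC
 *	      <0x2c010000 0x2000>,	// index 4: GICH
 *	      <0x2c020000 0x2000>;	// index 5: GICV
 *
 * With nr_redist_regions == 2, the GICV frame used for KVM sits at
 * index 2 + 3 == 5, which is what the "+ 3" (skipping GICD, GICC and
 * GICH) in gic_of_setup_kvm_info() relies on.
 */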

#ifdef CONFIG_ACPI
static struct
{
	void __iomem *dist_base;
	struct redist_region *redist_regs;
	u32 nr_redist_regions;
	bool single_redist;
	int enabled_rdists;
	u32 maint_irq;
	int maint_irq_mode;
	phys_addr_t vcpu_base;
} acpi_data __initdata;

static void __init
gic_acpi_register_redist(phys_addr_t phys_base, void __iomem *redist_base)
{
	static int count = 0;

	acpi_data.redist_regs[count].phys_base = phys_base;
	acpi_data.redist_regs[count].redist_base = redist_base;
	acpi_data.redist_regs[count].single_redist = acpi_data.single_redist;
	count++;
}

static int __init
gic_acpi_parse_madt_redist(union acpi_subtable_headers *header,
			   const unsigned long end)
{
	struct acpi_madt_generic_redistributor *redist =
		(struct acpi_madt_generic_redistributor *)header;
	void __iomem *redist_base;

	redist_base = ioremap(redist->base_address, redist->length);
	if (!redist_base) {
		pr_err("Couldn't map GICR region @%llx\n", redist->base_address);
		return -ENOMEM;
	}

	if (acpi_get_madt_revision() >= 7 &&
	    (redist->flags & ACPI_MADT_GICR_NON_COHERENT))
		gic_data.rdists.flags |= RDIST_FLAGS_FORCE_NON_SHAREABLE;

	gic_request_region(redist->base_address, redist->length, "GICR");

	gic_acpi_register_redist(redist->base_address, redist_base);
	return 0;
}

static int __init
gic_acpi_parse_madt_gicc(union acpi_subtable_headers *header,
			 const unsigned long end)
{
	struct acpi_madt_generic_interrupt *gicc =
		(struct acpi_madt_generic_interrupt *)header;
	u32 reg = readl_relaxed(acpi_data.dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
	u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2;
	void __iomem *redist_base;

	/* Neither enabled nor online capable means it doesn't exist, skip it */
	if (!(gicc->flags & (ACPI_MADT_ENABLED | ACPI_MADT_GICC_ONLINE_CAPABLE)))
		return 0;

	/*
	 * Capable but disabled CPUs can be brought online later. What about
	 * the redistributor? ACPI doesn't want to say!
	 * Virtual hotplug systems can use the MADT's "always-on" GICR entries.
	 * Otherwise, prevent such CPUs from being brought online.
	 */
	if (!(gicc->flags & ACPI_MADT_ENABLED)) {
		int cpu = get_cpu_for_acpi_id(gicc->uid);

		pr_warn("CPU %u's redistributor is inaccessible: this CPU can't be brought online\n", cpu);
		if (cpu >= 0)
			cpumask_set_cpu(cpu, &broken_rdists);

		return 0;
	}

	redist_base = ioremap(gicc->gicr_base_address, size);
	if (!redist_base)
		return -ENOMEM;
	gic_request_region(gicc->gicr_base_address, size, "GICR");

	if (acpi_get_madt_revision() >= 7 &&
	    (gicc->flags & ACPI_MADT_GICC_NON_COHERENT))
		gic_data.rdists.flags |= RDIST_FLAGS_FORCE_NON_SHAREABLE;

	gic_acpi_register_redist(gicc->gicr_base_address, redist_base);
	return 0;
}
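
/*
 * Editor's note: the per-CPU mapping size chosen in
 * gic_acpi_parse_madt_gicc() above follows the redistributor frame
 * layout: a GICv3 redistributor exposes two 64K frames (RD_base and
 * SGI_base), while a GICv4 one exposes four (adding VLPI_base and a
 * reserved frame), hence SZ_64K * 2 versus SZ_64K * 4.
 */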

static int __init gic_acpi_collect_gicr_base(void)
{
	acpi_tbl_entry_handler redist_parser;
	enum acpi_madt_type type;

	if (acpi_data.single_redist) {
		type = ACPI_MADT_TYPE_GENERIC_INTERRUPT;
		redist_parser = gic_acpi_parse_madt_gicc;
	} else {
		type = ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR;
		redist_parser = gic_acpi_parse_madt_redist;
	}

	/* Collect redistributor base addresses in GICR entries */
	if (acpi_table_parse_madt(type, redist_parser, 0) > 0)
		return 0;

	pr_info("No valid GICR entries exist\n");
	return -ENODEV;
}

static int __init gic_acpi_match_gicr(union acpi_subtable_headers *header,
				      const unsigned long end)
{
	/* Subtable presence means that the redistributor exists, that's it */
	return 0;
}

static int __init gic_acpi_match_gicc(union acpi_subtable_headers *header,
				      const unsigned long end)
{
	struct acpi_madt_generic_interrupt *gicc =
		(struct acpi_madt_generic_interrupt *)header;

	/*
	 * If the GICC is enabled and has a valid GICR base address, then the
	 * GICR base is presented via the GICC. The redistributor is only
	 * known to be accessible if the GICC is marked as enabled. If this
	 * bit is not set, we'd need to add the redistributor at runtime,
	 * which isn't supported.
	 */
	if (gicc->flags & ACPI_MADT_ENABLED && gicc->gicr_base_address)
		acpi_data.enabled_rdists++;

	return 0;
}

static int __init gic_acpi_count_gicr_regions(void)
{
	int count;

	/*
	 * Count how many redistributor regions we have. Mixing redistributor
	 * descriptions is not allowed: the GICR and GICC subtables have to be
	 * mutually exclusive.
	 */
	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR,
				      gic_acpi_match_gicr, 0);
	if (count > 0) {
		acpi_data.single_redist = false;
		return count;
	}

	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
				      gic_acpi_match_gicc, 0);
	if (count > 0) {
		acpi_data.single_redist = true;
		count = acpi_data.enabled_rdists;
	}

	return count;
}

static bool __init acpi_validate_gic_table(struct acpi_subtable_header *header,
					   struct acpi_probe_entry *ape)
{
	struct acpi_madt_generic_distributor *dist;
	int count;

	dist = (struct acpi_madt_generic_distributor *)header;
	if (dist->version != ape->driver_data)
		return false;

	/* We need to do that exercise anyway, the sooner the better */
	count = gic_acpi_count_gicr_regions();
	if (count <= 0)
		return false;

	acpi_data.nr_redist_regions = count;
	return true;
}

static int __init gic_acpi_parse_virt_madt_gicc(union acpi_subtable_headers *header,
						const unsigned long end)
{
	struct acpi_madt_generic_interrupt *gicc =
		(struct acpi_madt_generic_interrupt *)header;
	int maint_irq_mode;
	static int first_madt = true;

	if (!(gicc->flags &
	      (ACPI_MADT_ENABLED | ACPI_MADT_GICC_ONLINE_CAPABLE)))
		return 0;

	maint_irq_mode = (gicc->flags & ACPI_MADT_VGIC_IRQ_MODE) ?
		ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE;

	if (first_madt) {
		first_madt = false;

		acpi_data.maint_irq = gicc->vgic_interrupt;
		acpi_data.maint_irq_mode = maint_irq_mode;
		acpi_data.vcpu_base = gicc->gicv_base_address;

		return 0;
	}

	/*
	 * The maintenance interrupt and GICV must be the same for every CPU.
	 */
	if ((acpi_data.maint_irq != gicc->vgic_interrupt) ||
	    (acpi_data.maint_irq_mode != maint_irq_mode) ||
	    (acpi_data.vcpu_base != gicc->gicv_base_address))
		return -EINVAL;

	return 0;
}

static bool __init gic_acpi_collect_virt_info(void)
{
	int count;

	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
				      gic_acpi_parse_virt_madt_gicc, 0);

	return (count > 0);
}

#define ACPI_GICV3_DIST_MEM_SIZE	(SZ_64K)
#define ACPI_GICV2_VCTRL_MEM_SIZE	(SZ_4K)
#define ACPI_GICV2_VCPU_MEM_SIZE	(SZ_8K)

static void __init gic_acpi_setup_kvm_info(void)
{
	int irq;

	if (!gic_acpi_collect_virt_info()) {
		pr_warn("Unable to get hardware information used for virtualization\n");
		return;
	}

	gic_v3_kvm_info.type = GIC_V3;

	irq = acpi_register_gsi(NULL, acpi_data.maint_irq,
				acpi_data.maint_irq_mode,
				ACPI_ACTIVE_HIGH);
	if (irq <= 0)
		return;

	gic_v3_kvm_info.maint_irq = irq;

	if (acpi_data.vcpu_base) {
		struct resource *vcpu = &gic_v3_kvm_info.vcpu;

		vcpu->flags = IORESOURCE_MEM;
		vcpu->start = acpi_data.vcpu_base;
		vcpu->end = vcpu->start + ACPI_GICV2_VCPU_MEM_SIZE - 1;
	}

	gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
	gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
	vgic_set_kvm_info(&gic_v3_kvm_info);
}

static struct fwnode_handle *gsi_domain_handle;

static struct fwnode_handle *gic_v3_get_gsi_domain_id(u32 gsi)
{
	return gsi_domain_handle;
}

static int __init
gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end)
{
	struct acpi_madt_generic_distributor *dist;
	size_t size;
	int i, err;

	/* Get distributor base address */
	dist = (struct acpi_madt_generic_distributor *)header;
	acpi_data.dist_base = ioremap(dist->base_address,
				      ACPI_GICV3_DIST_MEM_SIZE);
	if (!acpi_data.dist_base) {
		pr_err("Unable to map GICD registers\n");
		return -ENOMEM;
	}
	gic_request_region(dist->base_address, ACPI_GICV3_DIST_MEM_SIZE, "GICD");

	err = gic_validate_dist_version(acpi_data.dist_base);
	if (err) {
		pr_err("No distributor detected at @%p, giving up\n",
		       acpi_data.dist_base);
		goto out_dist_unmap;
	}

	size = sizeof(*acpi_data.redist_regs) * acpi_data.nr_redist_regions;
	acpi_data.redist_regs = kzalloc(size, GFP_KERNEL);
	if (!acpi_data.redist_regs) {
		err = -ENOMEM;
		goto out_dist_unmap;
	}

	err = gic_acpi_collect_gicr_base();
	if (err)
		goto out_redist_unmap;

	gsi_domain_handle = irq_domain_alloc_fwnode(&dist->base_address);
	if (!gsi_domain_handle) {
		err = -ENOMEM;
		goto out_redist_unmap;
	}

	err = gic_init_bases(dist->base_address, acpi_data.dist_base,
			     acpi_data.redist_regs, acpi_data.nr_redist_regions,
			     0, gsi_domain_handle);
	if (err)
		goto out_fwhandle_free;

	acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, gic_v3_get_gsi_domain_id);

	if (static_branch_likely(&supports_deactivate_key))
		gic_acpi_setup_kvm_info();

	return 0;

out_fwhandle_free:
	irq_domain_free_fwnode(gsi_domain_handle);
out_redist_unmap:
	for (i = 0; i < acpi_data.nr_redist_regions; i++)
		if (acpi_data.redist_regs[i].redist_base)
			iounmap(acpi_data.redist_regs[i].redist_base);
	kfree(acpi_data.redist_regs);
out_dist_unmap:
	iounmap(acpi_data.dist_base);
	return err;
}
IRQCHIP_ACPI_DECLARE(gic_v3, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V3,
		     gic_acpi_init);
IRQCHIP_ACPI_DECLARE(gic_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V4,
		     gic_acpi_init);
IRQCHIP_ACPI_DECLARE(gic_v3_or_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_NONE,
		     gic_acpi_init);
#endif