1 /* 2 * Copyright (C) 2002 ARM Limited, All Rights Reserved. 3 * 4 * This program is free software; you can redistribute it and/or modify 5 * it under the terms of the GNU General Public License version 2 as 6 * published by the Free Software Foundation. 7 * 8 * Interrupt architecture for the GIC: 9 * 10 * o There is one Interrupt Distributor, which receives interrupts 11 * from system devices and sends them to the Interrupt Controllers. 12 * 13 * o There is one CPU Interface per CPU, which sends interrupts sent 14 * by the Distributor, and interrupts generated locally, to the 15 * associated CPU. The base address of the CPU interface is usually 16 * aliased so that the same address points to different chips depending 17 * on the CPU it is accessed from. 18 * 19 * Note that IRQs 0-31 are special - they are local to each CPU. 20 * As such, the enable set/clear, pending set/clear and active bit 21 * registers are banked per-cpu for these sources. 22 */ 23 #include <linux/init.h> 24 #include <linux/kernel.h> 25 #include <linux/err.h> 26 #include <linux/module.h> 27 #include <linux/list.h> 28 #include <linux/smp.h> 29 #include <linux/cpu.h> 30 #include <linux/cpu_pm.h> 31 #include <linux/cpumask.h> 32 #include <linux/io.h> 33 #include <linux/of.h> 34 #include <linux/of_address.h> 35 #include <linux/of_irq.h> 36 #include <linux/acpi.h> 37 #include <linux/irqdomain.h> 38 #include <linux/interrupt.h> 39 #include <linux/percpu.h> 40 #include <linux/slab.h> 41 #include <linux/irqchip.h> 42 #include <linux/irqchip/chained_irq.h> 43 #include <linux/irqchip/arm-gic.h> 44 45 #include <asm/cputype.h> 46 #include <asm/irq.h> 47 #include <asm/exception.h> 48 #include <asm/smp_plat.h> 49 #include <asm/virt.h> 50 51 #include "irq-gic-common.h" 52 53 #ifdef CONFIG_ARM64 54 #include <asm/cpufeature.h> 55 56 static void gic_check_cpu_features(void) 57 { 58 WARN_TAINT_ONCE(cpus_have_cap(ARM64_HAS_SYSREG_GIC_CPUIF), 59 TAINT_CPU_OUT_OF_SPEC, 60 "GICv3 system registers enabled, broken firmware!\n"); 61 } 62 #else 63 #define gic_check_cpu_features() do { } while(0) 64 #endif 65 66 union gic_base { 67 void __iomem *common_base; 68 void __percpu * __iomem *percpu_base; 69 }; 70 71 struct gic_chip_data { 72 union gic_base dist_base; 73 union gic_base cpu_base; 74 #ifdef CONFIG_CPU_PM 75 u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)]; 76 u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)]; 77 u32 saved_spi_target[DIV_ROUND_UP(1020, 4)]; 78 u32 __percpu *saved_ppi_enable; 79 u32 __percpu *saved_ppi_conf; 80 #endif 81 struct irq_domain *domain; 82 unsigned int gic_irqs; 83 #ifdef CONFIG_GIC_NON_BANKED 84 void __iomem *(*get_base)(union gic_base *); 85 #endif 86 }; 87 88 static DEFINE_RAW_SPINLOCK(irq_controller_lock); 89 90 /* 91 * The GIC mapping of CPU interfaces does not necessarily match 92 * the logical CPU numbering. Let's use a mapping as returned 93 * by the GIC itself. 94 */ 95 #define NR_GIC_CPU_IF 8 96 static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly; 97 98 static struct static_key supports_deactivate = STATIC_KEY_INIT_TRUE; 99 100 #ifndef MAX_GIC_NR 101 #define MAX_GIC_NR 1 102 #endif 103 104 static struct gic_chip_data gic_data[MAX_GIC_NR] __read_mostly; 105 106 #ifdef CONFIG_GIC_NON_BANKED 107 static void __iomem *gic_get_percpu_base(union gic_base *base) 108 { 109 return raw_cpu_read(*base->percpu_base); 110 } 111 112 static void __iomem *gic_get_common_base(union gic_base *base) 113 { 114 return base->common_base; 115 } 116 117 static inline void __iomem *gic_data_dist_base(struct gic_chip_data *data) 118 { 119 return data->get_base(&data->dist_base); 120 } 121 122 static inline void __iomem *gic_data_cpu_base(struct gic_chip_data *data) 123 { 124 return data->get_base(&data->cpu_base); 125 } 126 127 static inline void gic_set_base_accessor(struct gic_chip_data *data, 128 void __iomem *(*f)(union gic_base *)) 129 { 130 data->get_base = f; 131 } 132 #else 133 #define gic_data_dist_base(d) ((d)->dist_base.common_base) 134 #define gic_data_cpu_base(d) ((d)->cpu_base.common_base) 135 #define gic_set_base_accessor(d, f) 136 #endif 137 138 static inline void __iomem *gic_dist_base(struct irq_data *d) 139 { 140 struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d); 141 return gic_data_dist_base(gic_data); 142 } 143 144 static inline void __iomem *gic_cpu_base(struct irq_data *d) 145 { 146 struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d); 147 return gic_data_cpu_base(gic_data); 148 } 149 150 static inline unsigned int gic_irq(struct irq_data *d) 151 { 152 return d->hwirq; 153 } 154 155 static inline bool cascading_gic_irq(struct irq_data *d) 156 { 157 void *data = irq_data_get_irq_handler_data(d); 158 159 /* 160 * If handler_data is set, this is a cascading interrupt, and 161 * it cannot possibly be forwarded. 162 */ 163 return data != NULL; 164 } 165 166 /* 167 * Routines to acknowledge, disable and enable interrupts 168 */ 169 static void gic_poke_irq(struct irq_data *d, u32 offset) 170 { 171 u32 mask = 1 << (gic_irq(d) % 32); 172 writel_relaxed(mask, gic_dist_base(d) + offset + (gic_irq(d) / 32) * 4); 173 } 174 175 static int gic_peek_irq(struct irq_data *d, u32 offset) 176 { 177 u32 mask = 1 << (gic_irq(d) % 32); 178 return !!(readl_relaxed(gic_dist_base(d) + offset + (gic_irq(d) / 32) * 4) & mask); 179 } 180 181 static void gic_mask_irq(struct irq_data *d) 182 { 183 gic_poke_irq(d, GIC_DIST_ENABLE_CLEAR); 184 } 185 186 static void gic_eoimode1_mask_irq(struct irq_data *d) 187 { 188 gic_mask_irq(d); 189 /* 190 * When masking a forwarded interrupt, make sure it is 191 * deactivated as well. 192 * 193 * This ensures that an interrupt that is getting 194 * disabled/masked will not get "stuck", because there is 195 * noone to deactivate it (guest is being terminated). 196 */ 197 if (irqd_is_forwarded_to_vcpu(d)) 198 gic_poke_irq(d, GIC_DIST_ACTIVE_CLEAR); 199 } 200 201 static void gic_unmask_irq(struct irq_data *d) 202 { 203 gic_poke_irq(d, GIC_DIST_ENABLE_SET); 204 } 205 206 static void gic_eoi_irq(struct irq_data *d) 207 { 208 writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI); 209 } 210 211 static void gic_eoimode1_eoi_irq(struct irq_data *d) 212 { 213 /* Do not deactivate an IRQ forwarded to a vcpu. */ 214 if (irqd_is_forwarded_to_vcpu(d)) 215 return; 216 217 writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_DEACTIVATE); 218 } 219 220 static int gic_irq_set_irqchip_state(struct irq_data *d, 221 enum irqchip_irq_state which, bool val) 222 { 223 u32 reg; 224 225 switch (which) { 226 case IRQCHIP_STATE_PENDING: 227 reg = val ? GIC_DIST_PENDING_SET : GIC_DIST_PENDING_CLEAR; 228 break; 229 230 case IRQCHIP_STATE_ACTIVE: 231 reg = val ? GIC_DIST_ACTIVE_SET : GIC_DIST_ACTIVE_CLEAR; 232 break; 233 234 case IRQCHIP_STATE_MASKED: 235 reg = val ? GIC_DIST_ENABLE_CLEAR : GIC_DIST_ENABLE_SET; 236 break; 237 238 default: 239 return -EINVAL; 240 } 241 242 gic_poke_irq(d, reg); 243 return 0; 244 } 245 246 static int gic_irq_get_irqchip_state(struct irq_data *d, 247 enum irqchip_irq_state which, bool *val) 248 { 249 switch (which) { 250 case IRQCHIP_STATE_PENDING: 251 *val = gic_peek_irq(d, GIC_DIST_PENDING_SET); 252 break; 253 254 case IRQCHIP_STATE_ACTIVE: 255 *val = gic_peek_irq(d, GIC_DIST_ACTIVE_SET); 256 break; 257 258 case IRQCHIP_STATE_MASKED: 259 *val = !gic_peek_irq(d, GIC_DIST_ENABLE_SET); 260 break; 261 262 default: 263 return -EINVAL; 264 } 265 266 return 0; 267 } 268 269 static int gic_set_type(struct irq_data *d, unsigned int type) 270 { 271 void __iomem *base = gic_dist_base(d); 272 unsigned int gicirq = gic_irq(d); 273 274 /* Interrupt configuration for SGIs can't be changed */ 275 if (gicirq < 16) 276 return -EINVAL; 277 278 /* SPIs have restrictions on the supported types */ 279 if (gicirq >= 32 && type != IRQ_TYPE_LEVEL_HIGH && 280 type != IRQ_TYPE_EDGE_RISING) 281 return -EINVAL; 282 283 return gic_configure_irq(gicirq, type, base, NULL); 284 } 285 286 static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu) 287 { 288 /* Only interrupts on the primary GIC can be forwarded to a vcpu. */ 289 if (cascading_gic_irq(d)) 290 return -EINVAL; 291 292 if (vcpu) 293 irqd_set_forwarded_to_vcpu(d); 294 else 295 irqd_clr_forwarded_to_vcpu(d); 296 return 0; 297 } 298 299 #ifdef CONFIG_SMP 300 static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, 301 bool force) 302 { 303 void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3); 304 unsigned int cpu, shift = (gic_irq(d) % 4) * 8; 305 u32 val, mask, bit; 306 unsigned long flags; 307 308 if (!force) 309 cpu = cpumask_any_and(mask_val, cpu_online_mask); 310 else 311 cpu = cpumask_first(mask_val); 312 313 if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids) 314 return -EINVAL; 315 316 raw_spin_lock_irqsave(&irq_controller_lock, flags); 317 mask = 0xff << shift; 318 bit = gic_cpu_map[cpu] << shift; 319 val = readl_relaxed(reg) & ~mask; 320 writel_relaxed(val | bit, reg); 321 raw_spin_unlock_irqrestore(&irq_controller_lock, flags); 322 323 return IRQ_SET_MASK_OK; 324 } 325 #endif 326 327 static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs) 328 { 329 u32 irqstat, irqnr; 330 struct gic_chip_data *gic = &gic_data[0]; 331 void __iomem *cpu_base = gic_data_cpu_base(gic); 332 333 do { 334 irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK); 335 irqnr = irqstat & GICC_IAR_INT_ID_MASK; 336 337 if (likely(irqnr > 15 && irqnr < 1021)) { 338 if (static_key_true(&supports_deactivate)) 339 writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI); 340 handle_domain_irq(gic->domain, irqnr, regs); 341 continue; 342 } 343 if (irqnr < 16) { 344 writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI); 345 if (static_key_true(&supports_deactivate)) 346 writel_relaxed(irqstat, cpu_base + GIC_CPU_DEACTIVATE); 347 #ifdef CONFIG_SMP 348 handle_IPI(irqnr, regs); 349 #endif 350 continue; 351 } 352 break; 353 } while (1); 354 } 355 356 static void gic_handle_cascade_irq(struct irq_desc *desc) 357 { 358 struct gic_chip_data *chip_data = irq_desc_get_handler_data(desc); 359 struct irq_chip *chip = irq_desc_get_chip(desc); 360 unsigned int cascade_irq, gic_irq; 361 unsigned long status; 362 363 chained_irq_enter(chip, desc); 364 365 raw_spin_lock(&irq_controller_lock); 366 status = readl_relaxed(gic_data_cpu_base(chip_data) + GIC_CPU_INTACK); 367 raw_spin_unlock(&irq_controller_lock); 368 369 gic_irq = (status & GICC_IAR_INT_ID_MASK); 370 if (gic_irq == GICC_INT_SPURIOUS) 371 goto out; 372 373 cascade_irq = irq_find_mapping(chip_data->domain, gic_irq); 374 if (unlikely(gic_irq < 32 || gic_irq > 1020)) 375 handle_bad_irq(desc); 376 else 377 generic_handle_irq(cascade_irq); 378 379 out: 380 chained_irq_exit(chip, desc); 381 } 382 383 static struct irq_chip gic_chip = { 384 .name = "GIC", 385 .irq_mask = gic_mask_irq, 386 .irq_unmask = gic_unmask_irq, 387 .irq_eoi = gic_eoi_irq, 388 .irq_set_type = gic_set_type, 389 #ifdef CONFIG_SMP 390 .irq_set_affinity = gic_set_affinity, 391 #endif 392 .irq_get_irqchip_state = gic_irq_get_irqchip_state, 393 .irq_set_irqchip_state = gic_irq_set_irqchip_state, 394 .flags = IRQCHIP_SET_TYPE_MASKED | 395 IRQCHIP_SKIP_SET_WAKE | 396 IRQCHIP_MASK_ON_SUSPEND, 397 }; 398 399 static struct irq_chip gic_eoimode1_chip = { 400 .name = "GICv2", 401 .irq_mask = gic_eoimode1_mask_irq, 402 .irq_unmask = gic_unmask_irq, 403 .irq_eoi = gic_eoimode1_eoi_irq, 404 .irq_set_type = gic_set_type, 405 #ifdef CONFIG_SMP 406 .irq_set_affinity = gic_set_affinity, 407 #endif 408 .irq_get_irqchip_state = gic_irq_get_irqchip_state, 409 .irq_set_irqchip_state = gic_irq_set_irqchip_state, 410 .irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity, 411 .flags = IRQCHIP_SET_TYPE_MASKED | 412 IRQCHIP_SKIP_SET_WAKE | 413 IRQCHIP_MASK_ON_SUSPEND, 414 }; 415 416 void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq) 417 { 418 if (gic_nr >= MAX_GIC_NR) 419 BUG(); 420 irq_set_chained_handler_and_data(irq, gic_handle_cascade_irq, 421 &gic_data[gic_nr]); 422 } 423 424 static u8 gic_get_cpumask(struct gic_chip_data *gic) 425 { 426 void __iomem *base = gic_data_dist_base(gic); 427 u32 mask, i; 428 429 for (i = mask = 0; i < 32; i += 4) { 430 mask = readl_relaxed(base + GIC_DIST_TARGET + i); 431 mask |= mask >> 16; 432 mask |= mask >> 8; 433 if (mask) 434 break; 435 } 436 437 if (!mask && num_possible_cpus() > 1) 438 pr_crit("GIC CPU mask not found - kernel will fail to boot.\n"); 439 440 return mask; 441 } 442 443 static void gic_cpu_if_up(struct gic_chip_data *gic) 444 { 445 void __iomem *cpu_base = gic_data_cpu_base(gic); 446 u32 bypass = 0; 447 u32 mode = 0; 448 449 if (static_key_true(&supports_deactivate)) 450 mode = GIC_CPU_CTRL_EOImodeNS; 451 452 /* 453 * Preserve bypass disable bits to be written back later 454 */ 455 bypass = readl(cpu_base + GIC_CPU_CTRL); 456 bypass &= GICC_DIS_BYPASS_MASK; 457 458 writel_relaxed(bypass | mode | GICC_ENABLE, cpu_base + GIC_CPU_CTRL); 459 } 460 461 462 static void __init gic_dist_init(struct gic_chip_data *gic) 463 { 464 unsigned int i; 465 u32 cpumask; 466 unsigned int gic_irqs = gic->gic_irqs; 467 void __iomem *base = gic_data_dist_base(gic); 468 469 writel_relaxed(GICD_DISABLE, base + GIC_DIST_CTRL); 470 471 /* 472 * Set all global interrupts to this CPU only. 473 */ 474 cpumask = gic_get_cpumask(gic); 475 cpumask |= cpumask << 8; 476 cpumask |= cpumask << 16; 477 for (i = 32; i < gic_irqs; i += 4) 478 writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4); 479 480 gic_dist_config(base, gic_irqs, NULL); 481 482 writel_relaxed(GICD_ENABLE, base + GIC_DIST_CTRL); 483 } 484 485 static void gic_cpu_init(struct gic_chip_data *gic) 486 { 487 void __iomem *dist_base = gic_data_dist_base(gic); 488 void __iomem *base = gic_data_cpu_base(gic); 489 unsigned int cpu_mask, cpu = smp_processor_id(); 490 int i; 491 492 /* 493 * Setting up the CPU map is only relevant for the primary GIC 494 * because any nested/secondary GICs do not directly interface 495 * with the CPU(s). 496 */ 497 if (gic == &gic_data[0]) { 498 /* 499 * Get what the GIC says our CPU mask is. 500 */ 501 BUG_ON(cpu >= NR_GIC_CPU_IF); 502 cpu_mask = gic_get_cpumask(gic); 503 gic_cpu_map[cpu] = cpu_mask; 504 505 /* 506 * Clear our mask from the other map entries in case they're 507 * still undefined. 508 */ 509 for (i = 0; i < NR_GIC_CPU_IF; i++) 510 if (i != cpu) 511 gic_cpu_map[i] &= ~cpu_mask; 512 } 513 514 gic_cpu_config(dist_base, NULL); 515 516 writel_relaxed(GICC_INT_PRI_THRESHOLD, base + GIC_CPU_PRIMASK); 517 gic_cpu_if_up(gic); 518 } 519 520 int gic_cpu_if_down(unsigned int gic_nr) 521 { 522 void __iomem *cpu_base; 523 u32 val = 0; 524 525 if (gic_nr >= MAX_GIC_NR) 526 return -EINVAL; 527 528 cpu_base = gic_data_cpu_base(&gic_data[gic_nr]); 529 val = readl(cpu_base + GIC_CPU_CTRL); 530 val &= ~GICC_ENABLE; 531 writel_relaxed(val, cpu_base + GIC_CPU_CTRL); 532 533 return 0; 534 } 535 536 #ifdef CONFIG_CPU_PM 537 /* 538 * Saves the GIC distributor registers during suspend or idle. Must be called 539 * with interrupts disabled but before powering down the GIC. After calling 540 * this function, no interrupts will be delivered by the GIC, and another 541 * platform-specific wakeup source must be enabled. 542 */ 543 static void gic_dist_save(unsigned int gic_nr) 544 { 545 unsigned int gic_irqs; 546 void __iomem *dist_base; 547 int i; 548 549 if (gic_nr >= MAX_GIC_NR) 550 BUG(); 551 552 gic_irqs = gic_data[gic_nr].gic_irqs; 553 dist_base = gic_data_dist_base(&gic_data[gic_nr]); 554 555 if (!dist_base) 556 return; 557 558 for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++) 559 gic_data[gic_nr].saved_spi_conf[i] = 560 readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4); 561 562 for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++) 563 gic_data[gic_nr].saved_spi_target[i] = 564 readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4); 565 566 for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) 567 gic_data[gic_nr].saved_spi_enable[i] = 568 readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4); 569 } 570 571 /* 572 * Restores the GIC distributor registers during resume or when coming out of 573 * idle. Must be called before enabling interrupts. If a level interrupt 574 * that occured while the GIC was suspended is still present, it will be 575 * handled normally, but any edge interrupts that occured will not be seen by 576 * the GIC and need to be handled by the platform-specific wakeup source. 577 */ 578 static void gic_dist_restore(unsigned int gic_nr) 579 { 580 unsigned int gic_irqs; 581 unsigned int i; 582 void __iomem *dist_base; 583 584 if (gic_nr >= MAX_GIC_NR) 585 BUG(); 586 587 gic_irqs = gic_data[gic_nr].gic_irqs; 588 dist_base = gic_data_dist_base(&gic_data[gic_nr]); 589 590 if (!dist_base) 591 return; 592 593 writel_relaxed(GICD_DISABLE, dist_base + GIC_DIST_CTRL); 594 595 for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++) 596 writel_relaxed(gic_data[gic_nr].saved_spi_conf[i], 597 dist_base + GIC_DIST_CONFIG + i * 4); 598 599 for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++) 600 writel_relaxed(GICD_INT_DEF_PRI_X4, 601 dist_base + GIC_DIST_PRI + i * 4); 602 603 for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++) 604 writel_relaxed(gic_data[gic_nr].saved_spi_target[i], 605 dist_base + GIC_DIST_TARGET + i * 4); 606 607 for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++) 608 writel_relaxed(gic_data[gic_nr].saved_spi_enable[i], 609 dist_base + GIC_DIST_ENABLE_SET + i * 4); 610 611 writel_relaxed(GICD_ENABLE, dist_base + GIC_DIST_CTRL); 612 } 613 614 static void gic_cpu_save(unsigned int gic_nr) 615 { 616 int i; 617 u32 *ptr; 618 void __iomem *dist_base; 619 void __iomem *cpu_base; 620 621 if (gic_nr >= MAX_GIC_NR) 622 BUG(); 623 624 dist_base = gic_data_dist_base(&gic_data[gic_nr]); 625 cpu_base = gic_data_cpu_base(&gic_data[gic_nr]); 626 627 if (!dist_base || !cpu_base) 628 return; 629 630 ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_enable); 631 for (i = 0; i < DIV_ROUND_UP(32, 32); i++) 632 ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4); 633 634 ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf); 635 for (i = 0; i < DIV_ROUND_UP(32, 16); i++) 636 ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4); 637 638 } 639 640 static void gic_cpu_restore(unsigned int gic_nr) 641 { 642 int i; 643 u32 *ptr; 644 void __iomem *dist_base; 645 void __iomem *cpu_base; 646 647 if (gic_nr >= MAX_GIC_NR) 648 BUG(); 649 650 dist_base = gic_data_dist_base(&gic_data[gic_nr]); 651 cpu_base = gic_data_cpu_base(&gic_data[gic_nr]); 652 653 if (!dist_base || !cpu_base) 654 return; 655 656 ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_enable); 657 for (i = 0; i < DIV_ROUND_UP(32, 32); i++) 658 writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4); 659 660 ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf); 661 for (i = 0; i < DIV_ROUND_UP(32, 16); i++) 662 writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4); 663 664 for (i = 0; i < DIV_ROUND_UP(32, 4); i++) 665 writel_relaxed(GICD_INT_DEF_PRI_X4, 666 dist_base + GIC_DIST_PRI + i * 4); 667 668 writel_relaxed(GICC_INT_PRI_THRESHOLD, cpu_base + GIC_CPU_PRIMASK); 669 gic_cpu_if_up(&gic_data[gic_nr]); 670 } 671 672 static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v) 673 { 674 int i; 675 676 for (i = 0; i < MAX_GIC_NR; i++) { 677 #ifdef CONFIG_GIC_NON_BANKED 678 /* Skip over unused GICs */ 679 if (!gic_data[i].get_base) 680 continue; 681 #endif 682 switch (cmd) { 683 case CPU_PM_ENTER: 684 gic_cpu_save(i); 685 break; 686 case CPU_PM_ENTER_FAILED: 687 case CPU_PM_EXIT: 688 gic_cpu_restore(i); 689 break; 690 case CPU_CLUSTER_PM_ENTER: 691 gic_dist_save(i); 692 break; 693 case CPU_CLUSTER_PM_ENTER_FAILED: 694 case CPU_CLUSTER_PM_EXIT: 695 gic_dist_restore(i); 696 break; 697 } 698 } 699 700 return NOTIFY_OK; 701 } 702 703 static struct notifier_block gic_notifier_block = { 704 .notifier_call = gic_notifier, 705 }; 706 707 static void __init gic_pm_init(struct gic_chip_data *gic) 708 { 709 gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4, 710 sizeof(u32)); 711 BUG_ON(!gic->saved_ppi_enable); 712 713 gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4, 714 sizeof(u32)); 715 BUG_ON(!gic->saved_ppi_conf); 716 717 if (gic == &gic_data[0]) 718 cpu_pm_register_notifier(&gic_notifier_block); 719 } 720 #else 721 static void __init gic_pm_init(struct gic_chip_data *gic) 722 { 723 } 724 #endif 725 726 #ifdef CONFIG_SMP 727 static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq) 728 { 729 int cpu; 730 unsigned long flags, map = 0; 731 732 raw_spin_lock_irqsave(&irq_controller_lock, flags); 733 734 /* Convert our logical CPU mask into a physical one. */ 735 for_each_cpu(cpu, mask) 736 map |= gic_cpu_map[cpu]; 737 738 /* 739 * Ensure that stores to Normal memory are visible to the 740 * other CPUs before they observe us issuing the IPI. 741 */ 742 dmb(ishst); 743 744 /* this always happens on GIC0 */ 745 writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT); 746 747 raw_spin_unlock_irqrestore(&irq_controller_lock, flags); 748 } 749 #endif 750 751 #ifdef CONFIG_BL_SWITCHER 752 /* 753 * gic_send_sgi - send a SGI directly to given CPU interface number 754 * 755 * cpu_id: the ID for the destination CPU interface 756 * irq: the IPI number to send a SGI for 757 */ 758 void gic_send_sgi(unsigned int cpu_id, unsigned int irq) 759 { 760 BUG_ON(cpu_id >= NR_GIC_CPU_IF); 761 cpu_id = 1 << cpu_id; 762 /* this always happens on GIC0 */ 763 writel_relaxed((cpu_id << 16) | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT); 764 } 765 766 /* 767 * gic_get_cpu_id - get the CPU interface ID for the specified CPU 768 * 769 * @cpu: the logical CPU number to get the GIC ID for. 770 * 771 * Return the CPU interface ID for the given logical CPU number, 772 * or -1 if the CPU number is too large or the interface ID is 773 * unknown (more than one bit set). 774 */ 775 int gic_get_cpu_id(unsigned int cpu) 776 { 777 unsigned int cpu_bit; 778 779 if (cpu >= NR_GIC_CPU_IF) 780 return -1; 781 cpu_bit = gic_cpu_map[cpu]; 782 if (cpu_bit & (cpu_bit - 1)) 783 return -1; 784 return __ffs(cpu_bit); 785 } 786 787 /* 788 * gic_migrate_target - migrate IRQs to another CPU interface 789 * 790 * @new_cpu_id: the CPU target ID to migrate IRQs to 791 * 792 * Migrate all peripheral interrupts with a target matching the current CPU 793 * to the interface corresponding to @new_cpu_id. The CPU interface mapping 794 * is also updated. Targets to other CPU interfaces are unchanged. 795 * This must be called with IRQs locally disabled. 796 */ 797 void gic_migrate_target(unsigned int new_cpu_id) 798 { 799 unsigned int cur_cpu_id, gic_irqs, gic_nr = 0; 800 void __iomem *dist_base; 801 int i, ror_val, cpu = smp_processor_id(); 802 u32 val, cur_target_mask, active_mask; 803 804 if (gic_nr >= MAX_GIC_NR) 805 BUG(); 806 807 dist_base = gic_data_dist_base(&gic_data[gic_nr]); 808 if (!dist_base) 809 return; 810 gic_irqs = gic_data[gic_nr].gic_irqs; 811 812 cur_cpu_id = __ffs(gic_cpu_map[cpu]); 813 cur_target_mask = 0x01010101 << cur_cpu_id; 814 ror_val = (cur_cpu_id - new_cpu_id) & 31; 815 816 raw_spin_lock(&irq_controller_lock); 817 818 /* Update the target interface for this logical CPU */ 819 gic_cpu_map[cpu] = 1 << new_cpu_id; 820 821 /* 822 * Find all the peripheral interrupts targetting the current 823 * CPU interface and migrate them to the new CPU interface. 824 * We skip DIST_TARGET 0 to 7 as they are read-only. 825 */ 826 for (i = 8; i < DIV_ROUND_UP(gic_irqs, 4); i++) { 827 val = readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4); 828 active_mask = val & cur_target_mask; 829 if (active_mask) { 830 val &= ~active_mask; 831 val |= ror32(active_mask, ror_val); 832 writel_relaxed(val, dist_base + GIC_DIST_TARGET + i*4); 833 } 834 } 835 836 raw_spin_unlock(&irq_controller_lock); 837 838 /* 839 * Now let's migrate and clear any potential SGIs that might be 840 * pending for us (cur_cpu_id). Since GIC_DIST_SGI_PENDING_SET 841 * is a banked register, we can only forward the SGI using 842 * GIC_DIST_SOFTINT. The original SGI source is lost but Linux 843 * doesn't use that information anyway. 844 * 845 * For the same reason we do not adjust SGI source information 846 * for previously sent SGIs by us to other CPUs either. 847 */ 848 for (i = 0; i < 16; i += 4) { 849 int j; 850 val = readl_relaxed(dist_base + GIC_DIST_SGI_PENDING_SET + i); 851 if (!val) 852 continue; 853 writel_relaxed(val, dist_base + GIC_DIST_SGI_PENDING_CLEAR + i); 854 for (j = i; j < i + 4; j++) { 855 if (val & 0xff) 856 writel_relaxed((1 << (new_cpu_id + 16)) | j, 857 dist_base + GIC_DIST_SOFTINT); 858 val >>= 8; 859 } 860 } 861 } 862 863 /* 864 * gic_get_sgir_physaddr - get the physical address for the SGI register 865 * 866 * REturn the physical address of the SGI register to be used 867 * by some early assembly code when the kernel is not yet available. 868 */ 869 static unsigned long gic_dist_physaddr; 870 871 unsigned long gic_get_sgir_physaddr(void) 872 { 873 if (!gic_dist_physaddr) 874 return 0; 875 return gic_dist_physaddr + GIC_DIST_SOFTINT; 876 } 877 878 void __init gic_init_physaddr(struct device_node *node) 879 { 880 struct resource res; 881 if (of_address_to_resource(node, 0, &res) == 0) { 882 gic_dist_physaddr = res.start; 883 pr_info("GIC physical location is %#lx\n", gic_dist_physaddr); 884 } 885 } 886 887 #else 888 #define gic_init_physaddr(node) do { } while (0) 889 #endif 890 891 static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, 892 irq_hw_number_t hw) 893 { 894 struct irq_chip *chip = &gic_chip; 895 896 if (static_key_true(&supports_deactivate)) { 897 if (d->host_data == (void *)&gic_data[0]) 898 chip = &gic_eoimode1_chip; 899 } 900 901 if (hw < 32) { 902 irq_set_percpu_devid(irq); 903 irq_domain_set_info(d, irq, hw, chip, d->host_data, 904 handle_percpu_devid_irq, NULL, NULL); 905 irq_set_status_flags(irq, IRQ_NOAUTOEN); 906 } else { 907 irq_domain_set_info(d, irq, hw, chip, d->host_data, 908 handle_fasteoi_irq, NULL, NULL); 909 irq_set_probe(irq); 910 } 911 return 0; 912 } 913 914 static void gic_irq_domain_unmap(struct irq_domain *d, unsigned int irq) 915 { 916 } 917 918 static int gic_irq_domain_translate(struct irq_domain *d, 919 struct irq_fwspec *fwspec, 920 unsigned long *hwirq, 921 unsigned int *type) 922 { 923 if (is_of_node(fwspec->fwnode)) { 924 if (fwspec->param_count < 3) 925 return -EINVAL; 926 927 /* Get the interrupt number and add 16 to skip over SGIs */ 928 *hwirq = fwspec->param[1] + 16; 929 930 /* 931 * For SPIs, we need to add 16 more to get the GIC irq 932 * ID number 933 */ 934 if (!fwspec->param[0]) 935 *hwirq += 16; 936 937 *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK; 938 return 0; 939 } 940 941 if (fwspec->fwnode->type == FWNODE_IRQCHIP) { 942 if(fwspec->param_count != 2) 943 return -EINVAL; 944 945 *hwirq = fwspec->param[0]; 946 *type = fwspec->param[1]; 947 return 0; 948 } 949 950 return -EINVAL; 951 } 952 953 #ifdef CONFIG_SMP 954 static int gic_secondary_init(struct notifier_block *nfb, unsigned long action, 955 void *hcpu) 956 { 957 if (action == CPU_STARTING || action == CPU_STARTING_FROZEN) 958 gic_cpu_init(&gic_data[0]); 959 return NOTIFY_OK; 960 } 961 962 /* 963 * Notifier for enabling the GIC CPU interface. Set an arbitrarily high 964 * priority because the GIC needs to be up before the ARM generic timers. 965 */ 966 static struct notifier_block gic_cpu_notifier = { 967 .notifier_call = gic_secondary_init, 968 .priority = 100, 969 }; 970 #endif 971 972 static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, 973 unsigned int nr_irqs, void *arg) 974 { 975 int i, ret; 976 irq_hw_number_t hwirq; 977 unsigned int type = IRQ_TYPE_NONE; 978 struct irq_fwspec *fwspec = arg; 979 980 ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type); 981 if (ret) 982 return ret; 983 984 for (i = 0; i < nr_irqs; i++) 985 gic_irq_domain_map(domain, virq + i, hwirq + i); 986 987 return 0; 988 } 989 990 static const struct irq_domain_ops gic_irq_domain_hierarchy_ops = { 991 .translate = gic_irq_domain_translate, 992 .alloc = gic_irq_domain_alloc, 993 .free = irq_domain_free_irqs_top, 994 }; 995 996 static const struct irq_domain_ops gic_irq_domain_ops = { 997 .map = gic_irq_domain_map, 998 .unmap = gic_irq_domain_unmap, 999 }; 1000 1001 static void __init __gic_init_bases(unsigned int gic_nr, int irq_start, 1002 void __iomem *dist_base, void __iomem *cpu_base, 1003 u32 percpu_offset, struct fwnode_handle *handle) 1004 { 1005 irq_hw_number_t hwirq_base; 1006 struct gic_chip_data *gic; 1007 int gic_irqs, irq_base, i; 1008 1009 BUG_ON(gic_nr >= MAX_GIC_NR); 1010 1011 gic_check_cpu_features(); 1012 1013 gic = &gic_data[gic_nr]; 1014 #ifdef CONFIG_GIC_NON_BANKED 1015 if (percpu_offset) { /* Frankein-GIC without banked registers... */ 1016 unsigned int cpu; 1017 1018 gic->dist_base.percpu_base = alloc_percpu(void __iomem *); 1019 gic->cpu_base.percpu_base = alloc_percpu(void __iomem *); 1020 if (WARN_ON(!gic->dist_base.percpu_base || 1021 !gic->cpu_base.percpu_base)) { 1022 free_percpu(gic->dist_base.percpu_base); 1023 free_percpu(gic->cpu_base.percpu_base); 1024 return; 1025 } 1026 1027 for_each_possible_cpu(cpu) { 1028 u32 mpidr = cpu_logical_map(cpu); 1029 u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0); 1030 unsigned long offset = percpu_offset * core_id; 1031 *per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset; 1032 *per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset; 1033 } 1034 1035 gic_set_base_accessor(gic, gic_get_percpu_base); 1036 } else 1037 #endif 1038 { /* Normal, sane GIC... */ 1039 WARN(percpu_offset, 1040 "GIC_NON_BANKED not enabled, ignoring %08x offset!", 1041 percpu_offset); 1042 gic->dist_base.common_base = dist_base; 1043 gic->cpu_base.common_base = cpu_base; 1044 gic_set_base_accessor(gic, gic_get_common_base); 1045 } 1046 1047 /* 1048 * Find out how many interrupts are supported. 1049 * The GIC only supports up to 1020 interrupt sources. 1050 */ 1051 gic_irqs = readl_relaxed(gic_data_dist_base(gic) + GIC_DIST_CTR) & 0x1f; 1052 gic_irqs = (gic_irqs + 1) * 32; 1053 if (gic_irqs > 1020) 1054 gic_irqs = 1020; 1055 gic->gic_irqs = gic_irqs; 1056 1057 if (handle) { /* DT/ACPI */ 1058 gic->domain = irq_domain_create_linear(handle, gic_irqs, 1059 &gic_irq_domain_hierarchy_ops, 1060 gic); 1061 } else { /* Legacy support */ 1062 /* 1063 * For primary GICs, skip over SGIs. 1064 * For secondary GICs, skip over PPIs, too. 1065 */ 1066 if (gic_nr == 0 && (irq_start & 31) > 0) { 1067 hwirq_base = 16; 1068 if (irq_start != -1) 1069 irq_start = (irq_start & ~31) + 16; 1070 } else { 1071 hwirq_base = 32; 1072 } 1073 1074 gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */ 1075 1076 irq_base = irq_alloc_descs(irq_start, 16, gic_irqs, 1077 numa_node_id()); 1078 if (IS_ERR_VALUE(irq_base)) { 1079 WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n", 1080 irq_start); 1081 irq_base = irq_start; 1082 } 1083 1084 gic->domain = irq_domain_add_legacy(NULL, gic_irqs, irq_base, 1085 hwirq_base, &gic_irq_domain_ops, gic); 1086 } 1087 1088 if (WARN_ON(!gic->domain)) 1089 return; 1090 1091 if (gic_nr == 0) { 1092 /* 1093 * Initialize the CPU interface map to all CPUs. 1094 * It will be refined as each CPU probes its ID. 1095 * This is only necessary for the primary GIC. 1096 */ 1097 for (i = 0; i < NR_GIC_CPU_IF; i++) 1098 gic_cpu_map[i] = 0xff; 1099 #ifdef CONFIG_SMP 1100 set_smp_cross_call(gic_raise_softirq); 1101 register_cpu_notifier(&gic_cpu_notifier); 1102 #endif 1103 set_handle_irq(gic_handle_irq); 1104 if (static_key_true(&supports_deactivate)) 1105 pr_info("GIC: Using split EOI/Deactivate mode\n"); 1106 } 1107 1108 gic_dist_init(gic); 1109 gic_cpu_init(gic); 1110 gic_pm_init(gic); 1111 } 1112 1113 void __init gic_init(unsigned int gic_nr, int irq_start, 1114 void __iomem *dist_base, void __iomem *cpu_base) 1115 { 1116 /* 1117 * Non-DT/ACPI systems won't run a hypervisor, so let's not 1118 * bother with these... 1119 */ 1120 static_key_slow_dec(&supports_deactivate); 1121 __gic_init_bases(gic_nr, irq_start, dist_base, cpu_base, 0, NULL); 1122 } 1123 1124 #ifdef CONFIG_OF 1125 static int gic_cnt __initdata; 1126 1127 static bool gic_check_eoimode(struct device_node *node, void __iomem **base) 1128 { 1129 struct resource cpuif_res; 1130 1131 of_address_to_resource(node, 1, &cpuif_res); 1132 1133 if (!is_hyp_mode_available()) 1134 return false; 1135 if (resource_size(&cpuif_res) < SZ_8K) 1136 return false; 1137 if (resource_size(&cpuif_res) == SZ_128K) { 1138 u32 val_low, val_high; 1139 1140 /* 1141 * Verify that we have the first 4kB of a GIC400 1142 * aliased over the first 64kB by checking the 1143 * GICC_IIDR register on both ends. 1144 */ 1145 val_low = readl_relaxed(*base + GIC_CPU_IDENT); 1146 val_high = readl_relaxed(*base + GIC_CPU_IDENT + 0xf000); 1147 if ((val_low & 0xffff0fff) != 0x0202043B || 1148 val_low != val_high) 1149 return false; 1150 1151 /* 1152 * Move the base up by 60kB, so that we have a 8kB 1153 * contiguous region, which allows us to use GICC_DIR 1154 * at its normal offset. Please pass me that bucket. 1155 */ 1156 *base += 0xf000; 1157 cpuif_res.start += 0xf000; 1158 pr_warn("GIC: Adjusting CPU interface base to %pa", 1159 &cpuif_res.start); 1160 } 1161 1162 return true; 1163 } 1164 1165 static int __init 1166 gic_of_init(struct device_node *node, struct device_node *parent) 1167 { 1168 void __iomem *cpu_base; 1169 void __iomem *dist_base; 1170 u32 percpu_offset; 1171 int irq; 1172 1173 if (WARN_ON(!node)) 1174 return -ENODEV; 1175 1176 dist_base = of_iomap(node, 0); 1177 WARN(!dist_base, "unable to map gic dist registers\n"); 1178 1179 cpu_base = of_iomap(node, 1); 1180 WARN(!cpu_base, "unable to map gic cpu registers\n"); 1181 1182 /* 1183 * Disable split EOI/Deactivate if either HYP is not available 1184 * or the CPU interface is too small. 1185 */ 1186 if (gic_cnt == 0 && !gic_check_eoimode(node, &cpu_base)) 1187 static_key_slow_dec(&supports_deactivate); 1188 1189 if (of_property_read_u32(node, "cpu-offset", &percpu_offset)) 1190 percpu_offset = 0; 1191 1192 __gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset, 1193 &node->fwnode); 1194 if (!gic_cnt) 1195 gic_init_physaddr(node); 1196 1197 if (parent) { 1198 irq = irq_of_parse_and_map(node, 0); 1199 gic_cascade_irq(gic_cnt, irq); 1200 } 1201 1202 if (IS_ENABLED(CONFIG_ARM_GIC_V2M)) 1203 gicv2m_of_init(node, gic_data[gic_cnt].domain); 1204 1205 gic_cnt++; 1206 return 0; 1207 } 1208 IRQCHIP_DECLARE(gic_400, "arm,gic-400", gic_of_init); 1209 IRQCHIP_DECLARE(arm11mp_gic, "arm,arm11mp-gic", gic_of_init); 1210 IRQCHIP_DECLARE(arm1176jzf_dc_gic, "arm,arm1176jzf-devchip-gic", gic_of_init); 1211 IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init); 1212 IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init); 1213 IRQCHIP_DECLARE(cortex_a7_gic, "arm,cortex-a7-gic", gic_of_init); 1214 IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init); 1215 IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init); 1216 IRQCHIP_DECLARE(pl390, "arm,pl390", gic_of_init); 1217 1218 #endif 1219 1220 #ifdef CONFIG_ACPI 1221 static phys_addr_t cpu_phy_base __initdata; 1222 1223 static int __init 1224 gic_acpi_parse_madt_cpu(struct acpi_subtable_header *header, 1225 const unsigned long end) 1226 { 1227 struct acpi_madt_generic_interrupt *processor; 1228 phys_addr_t gic_cpu_base; 1229 static int cpu_base_assigned; 1230 1231 processor = (struct acpi_madt_generic_interrupt *)header; 1232 1233 if (BAD_MADT_GICC_ENTRY(processor, end)) 1234 return -EINVAL; 1235 1236 /* 1237 * There is no support for non-banked GICv1/2 register in ACPI spec. 1238 * All CPU interface addresses have to be the same. 1239 */ 1240 gic_cpu_base = processor->base_address; 1241 if (cpu_base_assigned && gic_cpu_base != cpu_phy_base) 1242 return -EINVAL; 1243 1244 cpu_phy_base = gic_cpu_base; 1245 cpu_base_assigned = 1; 1246 return 0; 1247 } 1248 1249 /* The things you have to do to just *count* something... */ 1250 static int __init acpi_dummy_func(struct acpi_subtable_header *header, 1251 const unsigned long end) 1252 { 1253 return 0; 1254 } 1255 1256 static bool __init acpi_gic_redist_is_present(void) 1257 { 1258 return acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR, 1259 acpi_dummy_func, 0) > 0; 1260 } 1261 1262 static bool __init gic_validate_dist(struct acpi_subtable_header *header, 1263 struct acpi_probe_entry *ape) 1264 { 1265 struct acpi_madt_generic_distributor *dist; 1266 dist = (struct acpi_madt_generic_distributor *)header; 1267 1268 return (dist->version == ape->driver_data && 1269 (dist->version != ACPI_MADT_GIC_VERSION_NONE || 1270 !acpi_gic_redist_is_present())); 1271 } 1272 1273 #define ACPI_GICV2_DIST_MEM_SIZE (SZ_4K) 1274 #define ACPI_GIC_CPU_IF_MEM_SIZE (SZ_8K) 1275 1276 static int __init gic_v2_acpi_init(struct acpi_subtable_header *header, 1277 const unsigned long end) 1278 { 1279 struct acpi_madt_generic_distributor *dist; 1280 void __iomem *cpu_base, *dist_base; 1281 struct fwnode_handle *domain_handle; 1282 int count; 1283 1284 /* Collect CPU base addresses */ 1285 count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT, 1286 gic_acpi_parse_madt_cpu, 0); 1287 if (count <= 0) { 1288 pr_err("No valid GICC entries exist\n"); 1289 return -EINVAL; 1290 } 1291 1292 cpu_base = ioremap(cpu_phy_base, ACPI_GIC_CPU_IF_MEM_SIZE); 1293 if (!cpu_base) { 1294 pr_err("Unable to map GICC registers\n"); 1295 return -ENOMEM; 1296 } 1297 1298 dist = (struct acpi_madt_generic_distributor *)header; 1299 dist_base = ioremap(dist->base_address, ACPI_GICV2_DIST_MEM_SIZE); 1300 if (!dist_base) { 1301 pr_err("Unable to map GICD registers\n"); 1302 iounmap(cpu_base); 1303 return -ENOMEM; 1304 } 1305 1306 /* 1307 * Disable split EOI/Deactivate if HYP is not available. ACPI 1308 * guarantees that we'll always have a GICv2, so the CPU 1309 * interface will always be the right size. 1310 */ 1311 if (!is_hyp_mode_available()) 1312 static_key_slow_dec(&supports_deactivate); 1313 1314 /* 1315 * Initialize GIC instance zero (no multi-GIC support). 1316 */ 1317 domain_handle = irq_domain_alloc_fwnode(dist_base); 1318 if (!domain_handle) { 1319 pr_err("Unable to allocate domain handle\n"); 1320 iounmap(cpu_base); 1321 iounmap(dist_base); 1322 return -ENOMEM; 1323 } 1324 1325 __gic_init_bases(0, -1, dist_base, cpu_base, 0, domain_handle); 1326 1327 acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, domain_handle); 1328 return 0; 1329 } 1330 IRQCHIP_ACPI_DECLARE(gic_v2, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR, 1331 gic_validate_dist, ACPI_MADT_GIC_VERSION_V2, 1332 gic_v2_acpi_init); 1333 IRQCHIP_ACPI_DECLARE(gic_v2_maybe, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR, 1334 gic_validate_dist, ACPI_MADT_GIC_VERSION_NONE, 1335 gic_v2_acpi_init); 1336 #endif 1337