/*
 * Copyright (C) 2002 ARM Limited, All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Interrupt architecture for the GIC:
 *
 * o There is one Interrupt Distributor, which receives interrupts
 *   from system devices and sends them to the Interrupt Controllers.
 *
 * o There is one CPU Interface per CPU, which sends interrupts sent
 *   by the Distributor, and interrupts generated locally, to the
 *   associated CPU. The base address of the CPU interface is usually
 *   aliased so that the same address points to different chips depending
 *   on the CPU it is accessed from.
 *
 * Note that IRQs 0-31 are special - they are local to each CPU.
 * As such, the enable set/clear, pending set/clear and active bit
 * registers are banked per-cpu for these sources.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/cpumask.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/acpi.h>
#include <linux/irqdomain.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/slab.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/irqchip/arm-gic-acpi.h>

#include <asm/cputype.h>
#include <asm/irq.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>

#include "irq-gic-common.h"

union gic_base {
	void __iomem *common_base;
	void __percpu * __iomem *percpu_base;
};

struct gic_chip_data {
	union gic_base dist_base;
	union gic_base cpu_base;
#ifdef CONFIG_CPU_PM
	u32 saved_spi_enable[DIV_ROUND_UP(1020, 32)];
	u32 saved_spi_conf[DIV_ROUND_UP(1020, 16)];
	u32 saved_spi_target[DIV_ROUND_UP(1020, 4)];
	u32 __percpu *saved_ppi_enable;
	u32 __percpu *saved_ppi_conf;
#endif
	struct irq_domain *domain;
	unsigned int gic_irqs;
#ifdef CONFIG_GIC_NON_BANKED
	void __iomem *(*get_base)(union gic_base *);
#endif
};

static DEFINE_RAW_SPINLOCK(irq_controller_lock);

/*
 * The GIC mapping of CPU interfaces does not necessarily match
 * the logical CPU numbering.  Let's use a mapping as returned
 * by the GIC itself.
 */
#define NR_GIC_CPU_IF 8
static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;

static struct static_key supports_deactivate = STATIC_KEY_INIT_TRUE;

#ifndef MAX_GIC_NR
#define MAX_GIC_NR	1
#endif

static struct gic_chip_data gic_data[MAX_GIC_NR] __read_mostly;

#ifdef CONFIG_GIC_NON_BANKED
static void __iomem *gic_get_percpu_base(union gic_base *base)
{
	return raw_cpu_read(*base->percpu_base);
}

static void __iomem *gic_get_common_base(union gic_base *base)
{
	return base->common_base;
}

static inline void __iomem *gic_data_dist_base(struct gic_chip_data *data)
{
	return data->get_base(&data->dist_base);
}

static inline void __iomem *gic_data_cpu_base(struct gic_chip_data *data)
{
	return data->get_base(&data->cpu_base);
}

static inline void gic_set_base_accessor(struct gic_chip_data *data,
					 void __iomem *(*f)(union gic_base *))
{
	data->get_base = f;
}
#else
#define gic_data_dist_base(d)	((d)->dist_base.common_base)
#define gic_data_cpu_base(d)	((d)->cpu_base.common_base)
#define gic_set_base_accessor(d, f)
#endif

static inline void __iomem *gic_dist_base(struct irq_data *d)
{
	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
	return gic_data_dist_base(gic_data);
}

static inline void __iomem *gic_cpu_base(struct irq_data *d)
{
	struct gic_chip_data *gic_data = irq_data_get_irq_chip_data(d);
	return gic_data_cpu_base(gic_data);
}

static inline unsigned int gic_irq(struct irq_data *d)
{
	return d->hwirq;
}

static inline bool cascading_gic_irq(struct irq_data *d)
{
	void *data = irq_data_get_irq_handler_data(d);

	/*
	 * If handler_data is set, this is a cascading interrupt, and
	 * it cannot possibly be forwarded.
	 */
	return data != NULL;
}

/*
 * Routines to acknowledge, disable and enable interrupts
 */
static void gic_poke_irq(struct irq_data *d, u32 offset)
{
	u32 mask = 1 << (gic_irq(d) % 32);
	writel_relaxed(mask, gic_dist_base(d) + offset + (gic_irq(d) / 32) * 4);
}

static int gic_peek_irq(struct irq_data *d, u32 offset)
{
	u32 mask = 1 << (gic_irq(d) % 32);
	return !!(readl_relaxed(gic_dist_base(d) + offset + (gic_irq(d) / 32) * 4) & mask);
}

static void gic_mask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GIC_DIST_ENABLE_CLEAR);
}

static void gic_eoimode1_mask_irq(struct irq_data *d)
{
	gic_mask_irq(d);
	/*
	 * When masking a forwarded interrupt, make sure it is
	 * deactivated as well.
	 *
	 * This ensures that an interrupt that is getting
	 * disabled/masked will not get "stuck", because there is
	 * no one to deactivate it (guest is being terminated).
	 */
	if (irqd_is_forwarded_to_vcpu(d))
		gic_poke_irq(d, GIC_DIST_ACTIVE_CLEAR);
}

static void gic_unmask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GIC_DIST_ENABLE_SET);
}

static void gic_eoi_irq(struct irq_data *d)
{
	writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
}
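
/*
 * Split EOI/Deactivate variant: with GIC_CPU_CTRL_EOImodeNS set (see
 * gic_cpu_if_up), the GIC_CPU_EOI write done in gic_handle_irq() only
 * performs the priority drop; the interrupt remains active until it is
 * explicitly deactivated through GIC_CPU_DEACTIVATE.
 */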
static void gic_eoimode1_eoi_irq(struct irq_data *d)
{
	/* Do not deactivate an IRQ forwarded to a vcpu. */
	if (irqd_is_forwarded_to_vcpu(d))
		return;

	writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_DEACTIVATE);
}

static int gic_irq_set_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which, bool val)
{
	u32 reg;

	switch (which) {
	case IRQCHIP_STATE_PENDING:
		reg = val ? GIC_DIST_PENDING_SET : GIC_DIST_PENDING_CLEAR;
		break;

	case IRQCHIP_STATE_ACTIVE:
		reg = val ? GIC_DIST_ACTIVE_SET : GIC_DIST_ACTIVE_CLEAR;
		break;

	case IRQCHIP_STATE_MASKED:
		reg = val ? GIC_DIST_ENABLE_CLEAR : GIC_DIST_ENABLE_SET;
		break;

	default:
		return -EINVAL;
	}

	gic_poke_irq(d, reg);
	return 0;
}

static int gic_irq_get_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which, bool *val)
{
	switch (which) {
	case IRQCHIP_STATE_PENDING:
		*val = gic_peek_irq(d, GIC_DIST_PENDING_SET);
		break;

	case IRQCHIP_STATE_ACTIVE:
		*val = gic_peek_irq(d, GIC_DIST_ACTIVE_SET);
		break;

	case IRQCHIP_STATE_MASKED:
		*val = !gic_peek_irq(d, GIC_DIST_ENABLE_SET);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

static int gic_set_type(struct irq_data *d, unsigned int type)
{
	void __iomem *base = gic_dist_base(d);
	unsigned int gicirq = gic_irq(d);

	/* Interrupt configuration for SGIs can't be changed */
	if (gicirq < 16)
		return -EINVAL;

	/* SPIs have restrictions on the supported types */
	if (gicirq >= 32 && type != IRQ_TYPE_LEVEL_HIGH &&
	    type != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;

	return gic_configure_irq(gicirq, type, base, NULL);
}

static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
{
	/* Only interrupts on the primary GIC can be forwarded to a vcpu. */
	if (cascading_gic_irq(d))
		return -EINVAL;

	if (vcpu)
		irqd_set_forwarded_to_vcpu(d);
	else
		irqd_clr_forwarded_to_vcpu(d);
	return 0;
}

#ifdef CONFIG_SMP
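/*
 * GIC_DIST_TARGET (GICD_ITARGETSR) holds one byte per interrupt, with one
 * bit per CPU interface.  The 32-bit register covering hwirq lives at byte
 * offset (hwirq & ~3), and the byte for hwirq sits (hwirq % 4) * 8 bits
 * into it, hence the read-modify-write below.
 */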
static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	void __iomem *reg = gic_dist_base(d) + GIC_DIST_TARGET + (gic_irq(d) & ~3);
	unsigned int cpu, shift = (gic_irq(d) % 4) * 8;
	u32 val, mask, bit;
	unsigned long flags;

	if (!force)
		cpu = cpumask_any_and(mask_val, cpu_online_mask);
	else
		cpu = cpumask_first(mask_val);

	if (cpu >= NR_GIC_CPU_IF || cpu >= nr_cpu_ids)
		return -EINVAL;

	raw_spin_lock_irqsave(&irq_controller_lock, flags);
	mask = 0xff << shift;
	bit = gic_cpu_map[cpu] << shift;
	val = readl_relaxed(reg) & ~mask;
	writel_relaxed(val | bit, reg);
	raw_spin_unlock_irqrestore(&irq_controller_lock, flags);

	return IRQ_SET_MASK_OK;
}
#endif

static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
	u32 irqstat, irqnr;
	struct gic_chip_data *gic = &gic_data[0];
	void __iomem *cpu_base = gic_data_cpu_base(gic);

	do {
		irqstat = readl_relaxed(cpu_base + GIC_CPU_INTACK);
		irqnr = irqstat & GICC_IAR_INT_ID_MASK;

		if (likely(irqnr > 15 && irqnr < 1021)) {
			if (static_key_true(&supports_deactivate))
				writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
			handle_domain_irq(gic->domain, irqnr, regs);
			continue;
		}
		if (irqnr < 16) {
			writel_relaxed(irqstat, cpu_base + GIC_CPU_EOI);
			if (static_key_true(&supports_deactivate))
				writel_relaxed(irqstat, cpu_base + GIC_CPU_DEACTIVATE);
#ifdef CONFIG_SMP
			handle_IPI(irqnr, regs);
#endif
			continue;
		}
		break;
	} while (1);
}

static void gic_handle_cascade_irq(struct irq_desc *desc)
{
	struct gic_chip_data *chip_data = irq_desc_get_handler_data(desc);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned int cascade_irq, gic_irq;
	unsigned long status;

	chained_irq_enter(chip, desc);

	raw_spin_lock(&irq_controller_lock);
	status = readl_relaxed(gic_data_cpu_base(chip_data) + GIC_CPU_INTACK);
	raw_spin_unlock(&irq_controller_lock);

	gic_irq = (status & GICC_IAR_INT_ID_MASK);
	if (gic_irq == GICC_INT_SPURIOUS)
		goto out;

	cascade_irq = irq_find_mapping(chip_data->domain, gic_irq);
	if (unlikely(gic_irq < 32 || gic_irq > 1020))
		handle_bad_irq(desc);
	else
		generic_handle_irq(cascade_irq);

 out:
	chained_irq_exit(chip, desc);
}

static struct irq_chip gic_chip = {
	.name			= "GIC",
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoi_irq,
	.irq_set_type		= gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	= gic_set_affinity,
#endif
	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
	.flags			= IRQCHIP_SET_TYPE_MASKED |
				  IRQCHIP_SKIP_SET_WAKE |
				  IRQCHIP_MASK_ON_SUSPEND,
};
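
/*
 * Alternative irq_chip installed by gic_irq_domain_map() for the root GIC
 * when supports_deactivate is enabled: masking also deactivates forwarded
 * interrupts, and .irq_eoi performs the deactivation (the priority drop
 * already happened in gic_handle_irq).
 */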
static struct irq_chip gic_eoimode1_chip = {
	.name			= "GICv2",
	.irq_mask		= gic_eoimode1_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoimode1_eoi_irq,
	.irq_set_type		= gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	= gic_set_affinity,
#endif
	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
	.irq_set_vcpu_affinity	= gic_irq_set_vcpu_affinity,
	.flags			= IRQCHIP_SET_TYPE_MASKED |
				  IRQCHIP_SKIP_SET_WAKE |
				  IRQCHIP_MASK_ON_SUSPEND,
};

void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
{
	if (gic_nr >= MAX_GIC_NR)
		BUG();
	irq_set_chained_handler_and_data(irq, gic_handle_cascade_irq,
					 &gic_data[gic_nr]);
}

static u8 gic_get_cpumask(struct gic_chip_data *gic)
{
	void __iomem *base = gic_data_dist_base(gic);
	u32 mask, i;

	for (i = mask = 0; i < 32; i += 4) {
		mask = readl_relaxed(base + GIC_DIST_TARGET + i);
		mask |= mask >> 16;
		mask |= mask >> 8;
		if (mask)
			break;
	}

	if (!mask && num_possible_cpus() > 1)
		pr_crit("GIC CPU mask not found - kernel will fail to boot.\n");

	return mask;
}

static void gic_cpu_if_up(struct gic_chip_data *gic)
{
	void __iomem *cpu_base = gic_data_cpu_base(gic);
	u32 bypass = 0;
	u32 mode = 0;

	if (static_key_true(&supports_deactivate))
		mode = GIC_CPU_CTRL_EOImodeNS;

	/*
	 * Preserve bypass disable bits to be written back later
	 */
	bypass = readl(cpu_base + GIC_CPU_CTRL);
	bypass &= GICC_DIS_BYPASS_MASK;

	writel_relaxed(bypass | mode | GICC_ENABLE, cpu_base + GIC_CPU_CTRL);
}


static void __init gic_dist_init(struct gic_chip_data *gic)
{
	unsigned int i;
	u32 cpumask;
	unsigned int gic_irqs = gic->gic_irqs;
	void __iomem *base = gic_data_dist_base(gic);

	writel_relaxed(GICD_DISABLE, base + GIC_DIST_CTRL);

	/*
	 * Set all global interrupts to this CPU only.
	 */
	cpumask = gic_get_cpumask(gic);
	cpumask |= cpumask << 8;
	cpumask |= cpumask << 16;
	for (i = 32; i < gic_irqs; i += 4)
		writel_relaxed(cpumask, base + GIC_DIST_TARGET + i * 4 / 4);

	gic_dist_config(base, gic_irqs, NULL);

	writel_relaxed(GICD_ENABLE, base + GIC_DIST_CTRL);
}

static void gic_cpu_init(struct gic_chip_data *gic)
{
	void __iomem *dist_base = gic_data_dist_base(gic);
	void __iomem *base = gic_data_cpu_base(gic);
	unsigned int cpu_mask, cpu = smp_processor_id();
	int i;

	/*
	 * Setting up the CPU map is only relevant for the primary GIC
	 * because any nested/secondary GICs do not directly interface
	 * with the CPU(s).
	 */
	if (gic == &gic_data[0]) {
		/*
		 * Get what the GIC says our CPU mask is.
		 */
		BUG_ON(cpu >= NR_GIC_CPU_IF);
		cpu_mask = gic_get_cpumask(gic);
		gic_cpu_map[cpu] = cpu_mask;

		/*
		 * Clear our mask from the other map entries in case they're
		 * still undefined.
		 */
		for (i = 0; i < NR_GIC_CPU_IF; i++)
			if (i != cpu)
				gic_cpu_map[i] &= ~cpu_mask;
	}

	gic_cpu_config(dist_base, NULL);

	writel_relaxed(GICC_INT_PRI_THRESHOLD, base + GIC_CPU_PRIMASK);
	gic_cpu_if_up(gic);
}

int gic_cpu_if_down(unsigned int gic_nr)
{
	void __iomem *cpu_base;
	u32 val = 0;

	if (gic_nr >= MAX_GIC_NR)
		return -EINVAL;

	cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);
	val = readl(cpu_base + GIC_CPU_CTRL);
	val &= ~GICC_ENABLE;
	writel_relaxed(val, cpu_base + GIC_CPU_CTRL);

	return 0;
}
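
/*
 * CPU power management: save the distributor SPI state and the banked
 * per-cpu PPI/SGI state before a low-power state and restore it on the
 * way out, driven by the cpu_pm notifier registered in gic_pm_init().
 */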
#ifdef CONFIG_CPU_PM
/*
 * Saves the GIC distributor registers during suspend or idle.  Must be called
 * with interrupts disabled but before powering down the GIC.  After calling
 * this function, no interrupts will be delivered by the GIC, and another
 * platform-specific wakeup source must be enabled.
 */
static void gic_dist_save(unsigned int gic_nr)
{
	unsigned int gic_irqs;
	void __iomem *dist_base;
	int i;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	gic_irqs = gic_data[gic_nr].gic_irqs;
	dist_base = gic_data_dist_base(&gic_data[gic_nr]);

	if (!dist_base)
		return;

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
		gic_data[gic_nr].saved_spi_conf[i] =
			readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		gic_data[gic_nr].saved_spi_target[i] =
			readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
		gic_data[gic_nr].saved_spi_enable[i] =
			readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);
}

/*
 * Restores the GIC distributor registers during resume or when coming out of
 * idle.  Must be called before enabling interrupts.  If a level interrupt
 * that occurred while the GIC was suspended is still present, it will be
 * handled normally, but any edge interrupts that occurred will not be seen by
 * the GIC and need to be handled by the platform-specific wakeup source.
 */
static void gic_dist_restore(unsigned int gic_nr)
{
	unsigned int gic_irqs;
	unsigned int i;
	void __iomem *dist_base;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	gic_irqs = gic_data[gic_nr].gic_irqs;
	dist_base = gic_data_dist_base(&gic_data[gic_nr]);

	if (!dist_base)
		return;

	writel_relaxed(GICD_DISABLE, dist_base + GIC_DIST_CTRL);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 16); i++)
		writel_relaxed(gic_data[gic_nr].saved_spi_conf[i],
			dist_base + GIC_DIST_CONFIG + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		writel_relaxed(GICD_INT_DEF_PRI_X4,
			dist_base + GIC_DIST_PRI + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 4); i++)
		writel_relaxed(gic_data[gic_nr].saved_spi_target[i],
			dist_base + GIC_DIST_TARGET + i * 4);

	for (i = 0; i < DIV_ROUND_UP(gic_irqs, 32); i++)
		writel_relaxed(gic_data[gic_nr].saved_spi_enable[i],
			dist_base + GIC_DIST_ENABLE_SET + i * 4);

	writel_relaxed(GICD_ENABLE, dist_base + GIC_DIST_CTRL);
}

static void gic_cpu_save(unsigned int gic_nr)
{
	int i;
	u32 *ptr;
	void __iomem *dist_base;
	void __iomem *cpu_base;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
	cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);

	if (!dist_base || !cpu_base)
		return;

	ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_ENABLE_SET + i * 4);

	ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
		ptr[i] = readl_relaxed(dist_base + GIC_DIST_CONFIG + i * 4);

}
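
/*
 * Restore the banked per-cpu PPI/SGI enable and configuration registers,
 * reset the SGI/PPI priorities to their default and bring the CPU
 * interface back up.
 */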
static void gic_cpu_restore(unsigned int gic_nr)
{
	int i;
	u32 *ptr;
	void __iomem *dist_base;
	void __iomem *cpu_base;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
	cpu_base = gic_data_cpu_base(&gic_data[gic_nr]);

	if (!dist_base || !cpu_base)
		return;

	ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_enable);
	for (i = 0; i < DIV_ROUND_UP(32, 32); i++)
		writel_relaxed(ptr[i], dist_base + GIC_DIST_ENABLE_SET + i * 4);

	ptr = raw_cpu_ptr(gic_data[gic_nr].saved_ppi_conf);
	for (i = 0; i < DIV_ROUND_UP(32, 16); i++)
		writel_relaxed(ptr[i], dist_base + GIC_DIST_CONFIG + i * 4);

	for (i = 0; i < DIV_ROUND_UP(32, 4); i++)
		writel_relaxed(GICD_INT_DEF_PRI_X4,
			       dist_base + GIC_DIST_PRI + i * 4);

	writel_relaxed(GICC_INT_PRI_THRESHOLD, cpu_base + GIC_CPU_PRIMASK);
	gic_cpu_if_up(&gic_data[gic_nr]);
}

static int gic_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
	int i;

	for (i = 0; i < MAX_GIC_NR; i++) {
#ifdef CONFIG_GIC_NON_BANKED
		/* Skip over unused GICs */
		if (!gic_data[i].get_base)
			continue;
#endif
		switch (cmd) {
		case CPU_PM_ENTER:
			gic_cpu_save(i);
			break;
		case CPU_PM_ENTER_FAILED:
		case CPU_PM_EXIT:
			gic_cpu_restore(i);
			break;
		case CPU_CLUSTER_PM_ENTER:
			gic_dist_save(i);
			break;
		case CPU_CLUSTER_PM_ENTER_FAILED:
		case CPU_CLUSTER_PM_EXIT:
			gic_dist_restore(i);
			break;
		}
	}

	return NOTIFY_OK;
}

static struct notifier_block gic_notifier_block = {
	.notifier_call = gic_notifier,
};

static void __init gic_pm_init(struct gic_chip_data *gic)
{
	gic->saved_ppi_enable = __alloc_percpu(DIV_ROUND_UP(32, 32) * 4,
		sizeof(u32));
	BUG_ON(!gic->saved_ppi_enable);

	gic->saved_ppi_conf = __alloc_percpu(DIV_ROUND_UP(32, 16) * 4,
		sizeof(u32));
	BUG_ON(!gic->saved_ppi_conf);

	if (gic == &gic_data[0])
		cpu_pm_register_notifier(&gic_notifier_block);
}
#else
static void __init gic_pm_init(struct gic_chip_data *gic)
{
}
#endif
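
/*
 * SGIs are raised by writing GIC_DIST_SOFTINT (GICD_SGIR): the CPU target
 * list goes in bits [23:16] and the SGI number in bits [3:0].  The target
 * list is built from gic_cpu_map so that logical CPU numbers are converted
 * into physical CPU interface bits.
 */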
#ifdef CONFIG_SMP
static void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
	int cpu;
	unsigned long flags, map = 0;

	raw_spin_lock_irqsave(&irq_controller_lock, flags);

	/* Convert our logical CPU mask into a physical one. */
	for_each_cpu(cpu, mask)
		map |= gic_cpu_map[cpu];

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before they observe us issuing the IPI.
	 */
	dmb(ishst);

	/* this always happens on GIC0 */
	writel_relaxed(map << 16 | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);

	raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
}
#endif

#ifdef CONFIG_BL_SWITCHER
/*
 * gic_send_sgi - send a SGI directly to given CPU interface number
 *
 * cpu_id: the ID for the destination CPU interface
 * irq: the IPI number to send a SGI for
 */
void gic_send_sgi(unsigned int cpu_id, unsigned int irq)
{
	BUG_ON(cpu_id >= NR_GIC_CPU_IF);
	cpu_id = 1 << cpu_id;
	/* this always happens on GIC0 */
	writel_relaxed((cpu_id << 16) | irq, gic_data_dist_base(&gic_data[0]) + GIC_DIST_SOFTINT);
}

/*
 * gic_get_cpu_id - get the CPU interface ID for the specified CPU
 *
 * @cpu: the logical CPU number to get the GIC ID for.
 *
 * Return the CPU interface ID for the given logical CPU number,
 * or -1 if the CPU number is too large or the interface ID is
 * unknown (more than one bit set).
 */
int gic_get_cpu_id(unsigned int cpu)
{
	unsigned int cpu_bit;

	if (cpu >= NR_GIC_CPU_IF)
		return -1;
	cpu_bit = gic_cpu_map[cpu];
	if (cpu_bit & (cpu_bit - 1))
		return -1;
	return __ffs(cpu_bit);
}

/*
 * gic_migrate_target - migrate IRQs to another CPU interface
 *
 * @new_cpu_id: the CPU target ID to migrate IRQs to
 *
 * Migrate all peripheral interrupts with a target matching the current CPU
 * to the interface corresponding to @new_cpu_id.  The CPU interface mapping
 * is also updated.  Targets to other CPU interfaces are unchanged.
 * This must be called with IRQs locally disabled.
 */
void gic_migrate_target(unsigned int new_cpu_id)
{
	unsigned int cur_cpu_id, gic_irqs, gic_nr = 0;
	void __iomem *dist_base;
	int i, ror_val, cpu = smp_processor_id();
	u32 val, cur_target_mask, active_mask;

	if (gic_nr >= MAX_GIC_NR)
		BUG();

	dist_base = gic_data_dist_base(&gic_data[gic_nr]);
	if (!dist_base)
		return;
	gic_irqs = gic_data[gic_nr].gic_irqs;

	cur_cpu_id = __ffs(gic_cpu_map[cpu]);
	cur_target_mask = 0x01010101 << cur_cpu_id;
	ror_val = (cur_cpu_id - new_cpu_id) & 31;

	raw_spin_lock(&irq_controller_lock);

	/* Update the target interface for this logical CPU */
	gic_cpu_map[cpu] = 1 << new_cpu_id;

	/*
	 * Find all the peripheral interrupts targeting the current
	 * CPU interface and migrate them to the new CPU interface.
	 * We skip DIST_TARGET 0 to 7 as they are read-only.
	 */
	for (i = 8; i < DIV_ROUND_UP(gic_irqs, 4); i++) {
		val = readl_relaxed(dist_base + GIC_DIST_TARGET + i * 4);
		active_mask = val & cur_target_mask;
		if (active_mask) {
			val &= ~active_mask;
			val |= ror32(active_mask, ror_val);
			writel_relaxed(val, dist_base + GIC_DIST_TARGET + i*4);
		}
	}

	raw_spin_unlock(&irq_controller_lock);

	/*
	 * Now let's migrate and clear any potential SGIs that might be
	 * pending for us (cur_cpu_id).  Since GIC_DIST_SGI_PENDING_SET
	 * is a banked register, we can only forward the SGI using
	 * GIC_DIST_SOFTINT.  The original SGI source is lost but Linux
	 * doesn't use that information anyway.
	 *
	 * For the same reason we do not adjust SGI source information
	 * for previously sent SGIs by us to other CPUs either.
	 */
	for (i = 0; i < 16; i += 4) {
		int j;
		val = readl_relaxed(dist_base + GIC_DIST_SGI_PENDING_SET + i);
		if (!val)
			continue;
		writel_relaxed(val, dist_base + GIC_DIST_SGI_PENDING_CLEAR + i);
		for (j = i; j < i + 4; j++) {
			if (val & 0xff)
				writel_relaxed((1 << (new_cpu_id + 16)) | j,
					       dist_base + GIC_DIST_SOFTINT);
			val >>= 8;
		}
	}
}

/*
 * gic_get_sgir_physaddr - get the physical address for the SGI register
 *
 * Return the physical address of the SGI register to be used
 * by some early assembly code when the kernel is not yet available.
 */
static unsigned long gic_dist_physaddr;

unsigned long gic_get_sgir_physaddr(void)
{
	if (!gic_dist_physaddr)
		return 0;
	return gic_dist_physaddr + GIC_DIST_SOFTINT;
}

void __init gic_init_physaddr(struct device_node *node)
{
	struct resource res;
	if (of_address_to_resource(node, 0, &res) == 0) {
		gic_dist_physaddr = res.start;
		pr_info("GIC physical location is %#lx\n", gic_dist_physaddr);
	}
}

#else
#define gic_init_physaddr(node)  do { } while (0)
#endif

static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
			      irq_hw_number_t hw)
{
	struct irq_chip *chip = &gic_chip;

	if (static_key_true(&supports_deactivate)) {
		if (d->host_data == (void *)&gic_data[0])
			chip = &gic_eoimode1_chip;
	}

	if (hw < 32) {
		irq_set_percpu_devid(irq);
		irq_domain_set_info(d, irq, hw, chip, d->host_data,
				    handle_percpu_devid_irq, NULL, NULL);
		irq_set_status_flags(irq, IRQ_NOAUTOEN);
	} else {
		irq_domain_set_info(d, irq, hw, chip, d->host_data,
				    handle_fasteoi_irq, NULL, NULL);
		irq_set_probe(irq);
	}
	return 0;
}

static void gic_irq_domain_unmap(struct irq_domain *d, unsigned int irq)
{
}
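
/*
 * Device tree interrupt specifiers for the GIC use three cells: cell 0
 * selects SPI (0) or PPI (1), cell 1 is the interrupt number within that
 * space, and cell 2 carries the trigger flags.  PPIs start at hwirq 16 and
 * SPIs at hwirq 32, hence the offsets applied below.
 */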
static int gic_irq_domain_xlate(struct irq_domain *d,
				struct device_node *controller,
				const u32 *intspec, unsigned int intsize,
				unsigned long *out_hwirq, unsigned int *out_type)
{
	unsigned long ret = 0;

	if (d->of_node != controller)
		return -EINVAL;
	if (intsize < 3)
		return -EINVAL;

	/* Get the interrupt number and add 16 to skip over SGIs */
	*out_hwirq = intspec[1] + 16;

	/* For SPIs, we need to add 16 more to get the GIC irq ID number */
	if (!intspec[0])
		*out_hwirq += 16;

	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;

	return ret;
}

#ifdef CONFIG_SMP
static int gic_secondary_init(struct notifier_block *nfb, unsigned long action,
			      void *hcpu)
{
	if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
		gic_cpu_init(&gic_data[0]);
	return NOTIFY_OK;
}

/*
 * Notifier for enabling the GIC CPU interface. Set an arbitrarily high
 * priority because the GIC needs to be up before the ARM generic timers.
 */
static struct notifier_block gic_cpu_notifier = {
	.notifier_call = gic_secondary_init,
	.priority = 100,
};
#endif

static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	int i, ret;
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	struct of_phandle_args *irq_data = arg;

	ret = gic_irq_domain_xlate(domain, irq_data->np, irq_data->args,
				   irq_data->args_count, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++)
		gic_irq_domain_map(domain, virq + i, hwirq + i);

	return 0;
}

static const struct irq_domain_ops gic_irq_domain_hierarchy_ops = {
	.xlate = gic_irq_domain_xlate,
	.alloc = gic_irq_domain_alloc,
	.free = irq_domain_free_irqs_top,
};

static const struct irq_domain_ops gic_irq_domain_ops = {
	.map = gic_irq_domain_map,
	.unmap = gic_irq_domain_unmap,
	.xlate = gic_irq_domain_xlate,
};

static void __init __gic_init_bases(unsigned int gic_nr, int irq_start,
				    void __iomem *dist_base, void __iomem *cpu_base,
				    u32 percpu_offset, struct device_node *node)
{
	irq_hw_number_t hwirq_base;
	struct gic_chip_data *gic;
	int gic_irqs, irq_base, i;

	BUG_ON(gic_nr >= MAX_GIC_NR);

	gic = &gic_data[gic_nr];
#ifdef CONFIG_GIC_NON_BANKED
	if (percpu_offset) { /* Franken-GIC without banked registers... */
		unsigned int cpu;

		gic->dist_base.percpu_base = alloc_percpu(void __iomem *);
		gic->cpu_base.percpu_base = alloc_percpu(void __iomem *);
		if (WARN_ON(!gic->dist_base.percpu_base ||
			    !gic->cpu_base.percpu_base)) {
			free_percpu(gic->dist_base.percpu_base);
			free_percpu(gic->cpu_base.percpu_base);
			return;
		}

		for_each_possible_cpu(cpu) {
			u32 mpidr = cpu_logical_map(cpu);
			u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
			unsigned long offset = percpu_offset * core_id;
			*per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset;
			*per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset;
		}

		gic_set_base_accessor(gic, gic_get_percpu_base);
	} else
#endif
	{			/* Normal, sane GIC... */
		WARN(percpu_offset,
		     "GIC_NON_BANKED not enabled, ignoring %08x offset!",
		     percpu_offset);
		gic->dist_base.common_base = dist_base;
		gic->cpu_base.common_base = cpu_base;
		gic_set_base_accessor(gic, gic_get_common_base);
	}

	/*
	 * Find out how many interrupts are supported.
	 * The GIC only supports up to 1020 interrupt sources.
	 */
	gic_irqs = readl_relaxed(gic_data_dist_base(gic) + GIC_DIST_CTR) & 0x1f;
	gic_irqs = (gic_irqs + 1) * 32;
	if (gic_irqs > 1020)
		gic_irqs = 1020;
	gic->gic_irqs = gic_irqs;

	if (node) {		/* DT case */
		gic->domain = irq_domain_add_linear(node, gic_irqs,
						    &gic_irq_domain_hierarchy_ops,
						    gic);
	} else {		/* Non-DT case */
		/*
		 * For primary GICs, skip over SGIs.
		 * For secondary GICs, skip over PPIs, too.
		 */
		if (gic_nr == 0 && (irq_start & 31) > 0) {
			hwirq_base = 16;
			if (irq_start != -1)
				irq_start = (irq_start & ~31) + 16;
		} else {
			hwirq_base = 32;
		}

		gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */

		irq_base = irq_alloc_descs(irq_start, 16, gic_irqs,
					   numa_node_id());
		if (IS_ERR_VALUE(irq_base)) {
			WARN(1, "Cannot allocate irq_descs @ IRQ%d, assuming pre-allocated\n",
			     irq_start);
			irq_base = irq_start;
		}

		gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base,
					hwirq_base, &gic_irq_domain_ops, gic);
	}

	if (WARN_ON(!gic->domain))
		return;

	if (gic_nr == 0) {
		/*
		 * Initialize the CPU interface map to all CPUs.
		 * It will be refined as each CPU probes its ID.
		 * This is only necessary for the primary GIC.
		 */
		for (i = 0; i < NR_GIC_CPU_IF; i++)
			gic_cpu_map[i] = 0xff;
#ifdef CONFIG_SMP
		set_smp_cross_call(gic_raise_softirq);
		register_cpu_notifier(&gic_cpu_notifier);
#endif
		set_handle_irq(gic_handle_irq);
		if (static_key_true(&supports_deactivate))
			pr_info("GIC: Using split EOI/Deactivate mode\n");
	}

	gic_dist_init(gic);
	gic_cpu_init(gic);
	gic_pm_init(gic);
}

void __init gic_init_bases(unsigned int gic_nr, int irq_start,
			   void __iomem *dist_base, void __iomem *cpu_base,
			   u32 percpu_offset, struct device_node *node)
{
	/*
	 * Non-DT/ACPI systems won't run a hypervisor, so let's not
	 * bother with these...
	 */
	static_key_slow_dec(&supports_deactivate);
	__gic_init_bases(gic_nr, irq_start, dist_base, cpu_base,
			 percpu_offset, node);
}

#ifdef CONFIG_OF
static int gic_cnt __initdata;

static bool gic_check_eoimode(struct device_node *node, void __iomem **base)
{
	struct resource cpuif_res;

	of_address_to_resource(node, 1, &cpuif_res);

	if (!is_hyp_mode_available())
		return false;
	if (resource_size(&cpuif_res) < SZ_8K)
		return false;
	if (resource_size(&cpuif_res) == SZ_128K) {
		u32 val_low, val_high;

		/*
		 * Verify that we have the first 4kB of a GIC400
		 * aliased over the first 64kB by checking the
		 * GICC_IIDR register on both ends.
		 */
		val_low = readl_relaxed(*base + GIC_CPU_IDENT);
		val_high = readl_relaxed(*base + GIC_CPU_IDENT + 0xf000);
		if ((val_low & 0xffff0fff) != 0x0202043B ||
		    val_low != val_high)
			return false;

		/*
		 * Move the base up by 60kB, so that we have an 8kB
		 * contiguous region, which allows us to use GICC_DIR
		 * at its normal offset. Please pass me that bucket.
		 */
		*base += 0xf000;
		cpuif_res.start += 0xf000;
		pr_warn("GIC: Adjusting CPU interface base to %pa",
			&cpuif_res.start);
	}

	return true;
}

static int __init
gic_of_init(struct device_node *node, struct device_node *parent)
{
	void __iomem *cpu_base;
	void __iomem *dist_base;
	u32 percpu_offset;
	int irq;

	if (WARN_ON(!node))
		return -ENODEV;

	dist_base = of_iomap(node, 0);
	WARN(!dist_base, "unable to map gic dist registers\n");

	cpu_base = of_iomap(node, 1);
	WARN(!cpu_base, "unable to map gic cpu registers\n");

	/*
	 * Disable split EOI/Deactivate if either HYP is not available
	 * or the CPU interface is too small.
	 */
	if (gic_cnt == 0 && !gic_check_eoimode(node, &cpu_base))
		static_key_slow_dec(&supports_deactivate);

	if (of_property_read_u32(node, "cpu-offset", &percpu_offset))
		percpu_offset = 0;

	__gic_init_bases(gic_cnt, -1, dist_base, cpu_base, percpu_offset, node);
	if (!gic_cnt)
		gic_init_physaddr(node);

	if (parent) {
		irq = irq_of_parse_and_map(node, 0);
		gic_cascade_irq(gic_cnt, irq);
	}

	if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
		gicv2m_of_init(node, gic_data[gic_cnt].domain);

	gic_cnt++;
	return 0;
}
IRQCHIP_DECLARE(gic_400, "arm,gic-400", gic_of_init);
IRQCHIP_DECLARE(arm11mp_gic, "arm,arm11mp-gic", gic_of_init);
IRQCHIP_DECLARE(arm1176jzf_dc_gic, "arm,arm1176jzf-devchip-gic", gic_of_init);
IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init);
IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init);
IRQCHIP_DECLARE(cortex_a7_gic, "arm,cortex-a7-gic", gic_of_init);
IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init);
IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init);

#endif

#ifdef CONFIG_ACPI
static phys_addr_t dist_phy_base, cpu_phy_base __initdata;

static int __init
gic_acpi_parse_madt_cpu(struct acpi_subtable_header *header,
			const unsigned long end)
{
	struct acpi_madt_generic_interrupt *processor;
	phys_addr_t gic_cpu_base;
	static int cpu_base_assigned;

	processor = (struct acpi_madt_generic_interrupt *)header;

	if (BAD_MADT_GICC_ENTRY(processor, end))
		return -EINVAL;

	/*
	 * There is no support for non-banked GICv1/2 registers in the ACPI
	 * spec.  All CPU interface addresses have to be the same.
	 */
	gic_cpu_base = processor->base_address;
	if (cpu_base_assigned && gic_cpu_base != cpu_phy_base)
		return -EINVAL;

	cpu_phy_base = gic_cpu_base;
	cpu_base_assigned = 1;
	return 0;
}

static int __init
gic_acpi_parse_madt_distributor(struct acpi_subtable_header *header,
				const unsigned long end)
{
	struct acpi_madt_generic_distributor *dist;

	dist = (struct acpi_madt_generic_distributor *)header;

	if (BAD_MADT_ENTRY(dist, end))
		return -EINVAL;

	dist_phy_base = dist->base_address;
	return 0;
}

int __init
gic_v2_acpi_init(struct acpi_table_header *table)
{
	void __iomem *cpu_base, *dist_base;
	int count;

	/* Collect CPU base addresses */
	count = acpi_parse_entries(ACPI_SIG_MADT,
				   sizeof(struct acpi_table_madt),
				   gic_acpi_parse_madt_cpu, table,
				   ACPI_MADT_TYPE_GENERIC_INTERRUPT, 0);
	if (count <= 0) {
		pr_err("No valid GICC entries exist\n");
		return -EINVAL;
	}

	/*
	 * Find the distributor base address.  We expect one distributor entry
	 * since the ACPI 5.1 spec supports neither multi-GIC instances nor
	 * GIC cascades.
	 */
	count = acpi_parse_entries(ACPI_SIG_MADT,
				   sizeof(struct acpi_table_madt),
				   gic_acpi_parse_madt_distributor, table,
				   ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR, 0);
	if (count <= 0) {
		pr_err("No valid GICD entries exist\n");
		return -EINVAL;
	} else if (count > 1) {
		pr_err("More than one GICD entry detected\n");
		return -EINVAL;
	}

	cpu_base = ioremap(cpu_phy_base, ACPI_GIC_CPU_IF_MEM_SIZE);
	if (!cpu_base) {
		pr_err("Unable to map GICC registers\n");
		return -ENOMEM;
	}

	dist_base = ioremap(dist_phy_base, ACPI_GICV2_DIST_MEM_SIZE);
	if (!dist_base) {
		pr_err("Unable to map GICD registers\n");
		iounmap(cpu_base);
		return -ENOMEM;
	}

	/*
	 * Disable split EOI/Deactivate if HYP is not available. ACPI
	 * guarantees that we'll always have a GICv2, so the CPU
	 * interface will always be the right size.
	 */
	if (!is_hyp_mode_available())
		static_key_slow_dec(&supports_deactivate);

	/*
	 * Initialize GIC instance zero (no multi-GIC support).  Also, set GIC
	 * as the default IRQ domain to allow for GSI registration and GSI to
	 * IRQ number translation (see acpi_register_gsi() and acpi_gsi_to_irq()).
	 */
	__gic_init_bases(0, -1, dist_base, cpu_base, 0, NULL);
	irq_set_default_host(gic_data[0].domain);

	acpi_irq_model = ACPI_IRQ_MODEL_GIC;
	return 0;
}
#endif