1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
4 * Author: Marc Zyngier <marc.zyngier@arm.com>
5 */
6
7 #define pr_fmt(fmt) "GICv3: " fmt
8
9 #include <linux/acpi.h>
10 #include <linux/cpu.h>
11 #include <linux/cpu_pm.h>
12 #include <linux/delay.h>
13 #include <linux/interrupt.h>
14 #include <linux/irqdomain.h>
15 #include <linux/kernel.h>
16 #include <linux/kstrtox.h>
17 #include <linux/of.h>
18 #include <linux/of_address.h>
19 #include <linux/of_irq.h>
20 #include <linux/percpu.h>
21 #include <linux/refcount.h>
22 #include <linux/slab.h>
23 #include <linux/iopoll.h>
24
25 #include <linux/irqchip.h>
26 #include <linux/irqchip/arm-gic-common.h>
27 #include <linux/irqchip/arm-gic-v3.h>
28 #include <linux/irqchip/arm-gic-v3-prio.h>
29 #include <linux/irqchip/irq-partition-percpu.h>
30 #include <linux/bitfield.h>
31 #include <linux/bits.h>
32 #include <linux/arm-smccc.h>
33
34 #include <asm/cputype.h>
35 #include <asm/exception.h>
36 #include <asm/smp_plat.h>
37 #include <asm/virt.h>
38
39 #include "irq-gic-common.h"
40
41 static u8 dist_prio_irq __ro_after_init = GICV3_PRIO_IRQ;
42 static u8 dist_prio_nmi __ro_after_init = GICV3_PRIO_NMI;
43
44 #define FLAGS_WORKAROUND_GICR_WAKER_MSM8996 (1ULL << 0)
45 #define FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539 (1ULL << 1)
46 #define FLAGS_WORKAROUND_ASR_ERRATUM_8601001 (1ULL << 2)
47
48 #define GIC_IRQ_TYPE_PARTITION (GIC_IRQ_TYPE_LPI + 1)
49
50 static struct cpumask broken_rdists __read_mostly __maybe_unused;
51
52 struct redist_region {
53 void __iomem *redist_base;
54 phys_addr_t phys_base;
55 bool single_redist;
56 };
57
58 struct gic_chip_data {
59 struct fwnode_handle *fwnode;
60 phys_addr_t dist_phys_base;
61 void __iomem *dist_base;
62 struct redist_region *redist_regions;
63 struct rdists rdists;
64 struct irq_domain *domain;
65 u64 redist_stride;
66 u32 nr_redist_regions;
67 u64 flags;
68 bool has_rss;
69 unsigned int ppi_nr;
70 struct partition_desc **ppi_descs;
71 };
72
73 #define T241_CHIPS_MAX 4
74 static void __iomem *t241_dist_base_alias[T241_CHIPS_MAX] __read_mostly;
75 static DEFINE_STATIC_KEY_FALSE(gic_nvidia_t241_erratum);
76
77 static DEFINE_STATIC_KEY_FALSE(gic_arm64_2941627_erratum);
78
79 static struct gic_chip_data gic_data __read_mostly;
80 static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);
81
82 #define GIC_ID_NR (1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer))
83 #define GIC_LINE_NR min(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U)
84 #define GIC_ESPI_NR GICD_TYPER_ESPIS(gic_data.rdists.gicd_typer)
85
86 /*
87 * There are 16 SGIs, though we only actually use 8 in Linux. The other 8 SGIs
88 * are potentially stolen by the secure side. Some code, especially code dealing
89 * with hwirq IDs, is simplified by accounting for all 16.
90 */
91 #define SGI_NR 16
92
93 /*
94 * The behaviours of RPR and PMR registers differ depending on the value of
95 * SCR_EL3.FIQ, and the behaviour of non-secure priority registers of the
96 * distributor and redistributors depends on whether security is enabled in the
97 * GIC.
98 *
99 * When security is enabled, non-secure priority values from the (re)distributor
100 * are presented to the GIC CPUIF as follows:
101 * (GIC_(R)DIST_PRI[irq] >> 1) | 0x80;
102 *
103 * If SCR_EL3.FIQ == 1, the values written to/read from PMR and RPR at non-secure
104 * EL1 are subject to a similar operation thus matching the priorities presented
105 * from the (re)distributor when security is enabled. When SCR_EL3.FIQ == 0,
106 * these values are unchanged by the GIC.
107 *
108 * see GICv3/GICv4 Architecture Specification (IHI0069D):
109 * - section 4.8.1 Non-secure accesses to register fields for Secure interrupt
110 * priorities.
111 * - Figure 4-7 Secure read of the priority field for a Non-secure Group 1
112 * interrupt.
113 */
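/*
 * A worked example of the presentation rule above (derived purely from
 * the formula quoted here, not from any extra architectural detail):
 * with security enabled, a (re)distributor priority of 0x40 is seen by
 * the CPU interface as (0x40 >> 1) | 0x80 = 0xa0.
 */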
114 static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis);
115
116 static u32 gic_get_pribits(void)
117 {
118 u32 pribits;
119
120 pribits = gic_read_ctlr();
121 pribits &= ICC_CTLR_EL1_PRI_BITS_MASK;
122 pribits >>= ICC_CTLR_EL1_PRI_BITS_SHIFT;
123 pribits++;
124
125 return pribits;
126 }
127
128 static bool gic_has_group0(void)
129 {
130 u32 val;
131 u32 old_pmr;
132
133 old_pmr = gic_read_pmr();
134
135 /*
136 * Let's find out if Group0 is under control of EL3 or not by
137 * setting the highest possible, non-zero priority in PMR.
138 *
139 * If SCR_EL3.FIQ is set, the priority gets shifted down in
140 * order for the CPU interface to set bit 7, and keep the
141 * actual priority in the non-secure range. In the process, it
142 * loses the least significant bit and the actual priority
143 * becomes 0x80. Reading it back returns 0, indicating that
144 * we don't have access to Group0.
145 */
146 gic_write_pmr(BIT(8 - gic_get_pribits()));
147 val = gic_read_pmr();
148
149 gic_write_pmr(old_pmr);
150
151 return val != 0;
152 }
153
154 static inline bool gic_dist_security_disabled(void)
155 {
156 return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS;
157 }
158
159 static bool cpus_have_security_disabled __ro_after_init;
160 static bool cpus_have_group0 __ro_after_init;
161
162 static void __init gic_prio_init(void)
163 {
164 cpus_have_security_disabled = gic_dist_security_disabled();
165 cpus_have_group0 = gic_has_group0();
166
167 /*
168 * How priority values are used by the GIC depends on two things:
169 * the security state of the GIC (controlled by the GICD_CTRL.DS bit)
170 * and if Group 0 interrupts can be delivered to Linux in the non-secure
171 * world as FIQs (controlled by the SCR_EL3.FIQ bit). These affect the
172 * way priorities are presented in ICC_PMR_EL1 and in the distributor:
173 *
174 * GICD_CTRL.DS | SCR_EL3.FIQ | ICC_PMR_EL1 | Distributor
175 * -------------------------------------------------------
176 * 1 | - | unchanged | unchanged
177 * -------------------------------------------------------
178 * 0 | 1 | non-secure | non-secure
179 * -------------------------------------------------------
180 * 0 | 0 | unchanged | non-secure
181 *
182 * In the non-secure view reads and writes are modified:
183 *
184 * - A value written is right-shifted by one and the MSB is set,
185 * forcing the priority into the non-secure range.
186 *
187 * - A value read is left-shifted by one.
188 *
189 * In the first two cases, where ICC_PMR_EL1 and the interrupt priority
190 * are both either modified or unchanged, we can use the same set of
191 * priorities.
192 *
193 * In the last case, where only the interrupt priorities are modified to
194 * be in the non-secure range, we program the non-secure values into
195 * the distributor to match the PMR values we want.
196 */
197 if (cpus_have_group0 && !cpus_have_security_disabled) {
198 dist_prio_irq = __gicv3_prio_to_ns(dist_prio_irq);
199 dist_prio_nmi = __gicv3_prio_to_ns(dist_prio_nmi);
200 }
201
202 pr_info("GICD_CTRL.DS=%d, SCR_EL3.FIQ=%d\n",
203 cpus_have_security_disabled,
204 !cpus_have_group0);
205 }
206
207 /* rdist_nmi_refs[n] == number of cpus having the rdist interrupt n set as NMI */
208 static refcount_t *rdist_nmi_refs;
209
210 static struct gic_kvm_info gic_v3_kvm_info __initdata;
211 static DEFINE_PER_CPU(bool, has_rss);
212
213 #define MPIDR_RS(mpidr) (((mpidr) & 0xF0UL) >> 4)
214 #define gic_data_rdist() (this_cpu_ptr(gic_data.rdists.rdist))
215 #define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
216 #define gic_data_rdist_sgi_base() (gic_data_rdist_rd_base() + SZ_64K)
217
218 /* Our default, arbitrary priority value. Linux only uses one anyway. */
219 #define DEFAULT_PMR_VALUE 0xf0
220
221 enum gic_intid_range {
222 SGI_RANGE,
223 PPI_RANGE,
224 SPI_RANGE,
225 EPPI_RANGE,
226 ESPI_RANGE,
227 LPI_RANGE,
228 __INVALID_RANGE__
229 };
230
231 static enum gic_intid_range __get_intid_range(irq_hw_number_t hwirq)
232 {
233 switch (hwirq) {
234 case 0 ... 15:
235 return SGI_RANGE;
236 case 16 ... 31:
237 return PPI_RANGE;
238 case 32 ... 1019:
239 return SPI_RANGE;
240 case EPPI_BASE_INTID ... (EPPI_BASE_INTID + 63):
241 return EPPI_RANGE;
242 case ESPI_BASE_INTID ... (ESPI_BASE_INTID + 1023):
243 return ESPI_RANGE;
244 case 8192 ... GENMASK(23, 0):
245 return LPI_RANGE;
246 default:
247 return __INVALID_RANGE__;
248 }
249 }
250
251 static enum gic_intid_range get_intid_range(struct irq_data *d)
252 {
253 return __get_intid_range(d->hwirq);
254 }
255
256 static inline bool gic_irq_in_rdist(struct irq_data *d)
257 {
258 switch (get_intid_range(d)) {
259 case SGI_RANGE:
260 case PPI_RANGE:
261 case EPPI_RANGE:
262 return true;
263 default:
264 return false;
265 }
266 }
267
268 static inline void __iomem *gic_dist_base_alias(struct irq_data *d)
269 {
270 if (static_branch_unlikely(&gic_nvidia_t241_erratum)) {
271 irq_hw_number_t hwirq = irqd_to_hwirq(d);
272 u32 chip;
273
274 /*
275 * For the erratum T241-FABRIC-4, read accesses to GICD_In{E}
276 * registers are directed to the chip that owns the SPI. The
277 * alias region can also be used for writes to the
278 * GICD_In{E} except GICD_ICENABLERn. Each chip has support
279 * for 320 {E}SPIs. Mappings for all 4 chips:
280 * Chip0 = 32-351
281 * Chip1 = 352-671
282 * Chip2 = 672-991
283 * Chip3 = 4096-4415
284 */
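/*
 * Worked example of the mapping above: SPI hwirq 360 belongs to
 * Chip1, since (360 - 32) / 320 = 1; any ESPI is always owned by
 * Chip3.
 */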
285 switch (__get_intid_range(hwirq)) {
286 case SPI_RANGE:
287 chip = (hwirq - 32) / 320;
288 break;
289 case ESPI_RANGE:
290 chip = 3;
291 break;
292 default:
293 unreachable();
294 }
295 return t241_dist_base_alias[chip];
296 }
297
298 return gic_data.dist_base;
299 }
300
301 static inline void __iomem *gic_dist_base(struct irq_data *d)
302 {
303 switch (get_intid_range(d)) {
304 case SGI_RANGE:
305 case PPI_RANGE:
306 case EPPI_RANGE:
307 /* SGI+PPI -> SGI_base for this CPU */
308 return gic_data_rdist_sgi_base();
309
310 case SPI_RANGE:
311 case ESPI_RANGE:
312 /* SPI -> dist_base */
313 return gic_data.dist_base;
314
315 default:
316 return NULL;
317 }
318 }
319
320 static void gic_do_wait_for_rwp(void __iomem *base, u32 bit)
321 {
322 u32 val;
323 int ret;
324
325 ret = readl_relaxed_poll_timeout_atomic(base + GICD_CTLR, val, !(val & bit),
326 1, USEC_PER_SEC);
327 if (ret == -ETIMEDOUT)
328 pr_err_ratelimited("RWP timeout, gone fishing\n");
329 }
330
331 /* Wait for completion of a distributor change */
332 static void gic_dist_wait_for_rwp(void)
333 {
334 gic_do_wait_for_rwp(gic_data.dist_base, GICD_CTLR_RWP);
335 }
336
337 /* Wait for completion of a redistributor change */
338 static void gic_redist_wait_for_rwp(void)
339 {
340 gic_do_wait_for_rwp(gic_data_rdist_rd_base(), GICR_CTLR_RWP);
341 }
342
343 static void gic_enable_redist(bool enable)
344 {
345 void __iomem *rbase;
346 u32 val;
347 int ret;
348
349 if (gic_data.flags & FLAGS_WORKAROUND_GICR_WAKER_MSM8996)
350 return;
351
352 rbase = gic_data_rdist_rd_base();
353
354 val = readl_relaxed(rbase + GICR_WAKER);
355 if (enable)
356 /* Wake up this CPU redistributor */
357 val &= ~GICR_WAKER_ProcessorSleep;
358 else
359 val |= GICR_WAKER_ProcessorSleep;
360 writel_relaxed(val, rbase + GICR_WAKER);
361
362 if (!enable) { /* Check that GICR_WAKER is writeable */
363 val = readl_relaxed(rbase + GICR_WAKER);
364 if (!(val & GICR_WAKER_ProcessorSleep))
365 return; /* No PM support in this redistributor */
366 }
367
368 ret = readl_relaxed_poll_timeout_atomic(rbase + GICR_WAKER, val,
369 enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep),
370 1, USEC_PER_SEC);
371 if (ret == -ETIMEDOUT) {
372 pr_err_ratelimited("redistributor failed to %s...\n",
373 enable ? "wakeup" : "sleep");
374 }
375 }
376
377 /*
378 * Routines to disable, enable, EOI and route interrupts
379 */
380 static u32 convert_offset_index(struct irq_data *d, u32 offset, u32 *index)
381 {
382 switch (get_intid_range(d)) {
383 case SGI_RANGE:
384 case PPI_RANGE:
385 case SPI_RANGE:
386 *index = d->hwirq;
387 return offset;
388 case EPPI_RANGE:
389 /*
390 * Contrary to the ESPI range, the EPPI range is contiguous
391 * to the PPI range in the registers, so let's adjust the
392 * displacement accordingly. Consistency is overrated.
393 */
394 *index = d->hwirq - EPPI_BASE_INTID + 32;
395 return offset;
396 case ESPI_RANGE:
397 *index = d->hwirq - ESPI_BASE_INTID;
398 switch (offset) {
399 case GICD_ISENABLER:
400 return GICD_ISENABLERnE;
401 case GICD_ICENABLER:
402 return GICD_ICENABLERnE;
403 case GICD_ISPENDR:
404 return GICD_ISPENDRnE;
405 case GICD_ICPENDR:
406 return GICD_ICPENDRnE;
407 case GICD_ISACTIVER:
408 return GICD_ISACTIVERnE;
409 case GICD_ICACTIVER:
410 return GICD_ICACTIVERnE;
411 case GICD_IPRIORITYR:
412 return GICD_IPRIORITYRnE;
413 case GICD_ICFGR:
414 return GICD_ICFGRnE;
415 case GICD_IROUTER:
416 return GICD_IROUTERnE;
417 default:
418 break;
419 }
420 break;
421 default:
422 break;
423 }
424
425 WARN_ON(1);
426 *index = d->hwirq;
427 return offset;
428 }
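/*
 * Example of the translation above (values follow directly from the
 * code): an ESPI with hwirq == ESPI_BASE_INTID + 5 turns GICD_ISENABLER
 * into GICD_ISENABLERnE with index 5, while a regular SPI keeps the
 * offset unchanged and gets index == hwirq.
 */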
429
430 static int gic_peek_irq(struct irq_data *d, u32 offset)
431 {
432 void __iomem *base;
433 u32 index, mask;
434
435 offset = convert_offset_index(d, offset, &index);
436 mask = 1 << (index % 32);
437
438 if (gic_irq_in_rdist(d))
439 base = gic_data_rdist_sgi_base();
440 else
441 base = gic_dist_base_alias(d);
442
443 return !!(readl_relaxed(base + offset + (index / 32) * 4) & mask);
444 }
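/*
 * The offset/mask arithmetic above follows the usual 32-interrupts-per-
 * register layout: e.g. index 37 selects the word at (37 / 32) * 4 = 4
 * bytes into the register bank, with bit mask 1 << (37 % 32) = 1 << 5.
 */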
445
446 static void gic_poke_irq(struct irq_data *d, u32 offset)
447 {
448 void __iomem *base;
449 u32 index, mask;
450
451 offset = convert_offset_index(d, offset, &index);
452 mask = 1 << (index % 32);
453
454 if (gic_irq_in_rdist(d))
455 base = gic_data_rdist_sgi_base();
456 else
457 base = gic_data.dist_base;
458
459 writel_relaxed(mask, base + offset + (index / 32) * 4);
460 }
461
462 static void gic_mask_irq(struct irq_data *d)
463 {
464 gic_poke_irq(d, GICD_ICENABLER);
465 if (gic_irq_in_rdist(d))
466 gic_redist_wait_for_rwp();
467 else
468 gic_dist_wait_for_rwp();
469 }
470
471 static void gic_eoimode1_mask_irq(struct irq_data *d)
472 {
473 gic_mask_irq(d);
474 /*
475 * When masking a forwarded interrupt, make sure it is
476 * deactivated as well.
477 *
478 * This ensures that an interrupt that is getting
479 * disabled/masked will not get "stuck", because there is
480 * no one to deactivate it (guest is being terminated).
481 */
482 if (irqd_is_forwarded_to_vcpu(d))
483 gic_poke_irq(d, GICD_ICACTIVER);
484 }
485
486 static void gic_unmask_irq(struct irq_data *d)
487 {
488 gic_poke_irq(d, GICD_ISENABLER);
489 }
490
491 static inline bool gic_supports_nmi(void)
492 {
493 return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
494 static_branch_likely(&supports_pseudo_nmis);
495 }
496
497 static int gic_irq_set_irqchip_state(struct irq_data *d,
498 enum irqchip_irq_state which, bool val)
499 {
500 u32 reg;
501
502 if (d->hwirq >= 8192) /* SGI/PPI/SPI only */
503 return -EINVAL;
504
505 switch (which) {
506 case IRQCHIP_STATE_PENDING:
507 reg = val ? GICD_ISPENDR : GICD_ICPENDR;
508 break;
509
510 case IRQCHIP_STATE_ACTIVE:
511 reg = val ? GICD_ISACTIVER : GICD_ICACTIVER;
512 break;
513
514 case IRQCHIP_STATE_MASKED:
515 if (val) {
516 gic_mask_irq(d);
517 return 0;
518 }
519 reg = GICD_ISENABLER;
520 break;
521
522 default:
523 return -EINVAL;
524 }
525
526 gic_poke_irq(d, reg);
527
528 /*
529 * Force read-back to guarantee that the active state has taken
530 * effect, and won't race with a guest-driven deactivation.
531 */
532 if (reg == GICD_ISACTIVER)
533 gic_peek_irq(d, reg);
534 return 0;
535 }
536
537 static int gic_irq_get_irqchip_state(struct irq_data *d,
538 enum irqchip_irq_state which, bool *val)
539 {
540 if (d->hwirq >= 8192) /* SGI/PPI/SPI only */
541 return -EINVAL;
542
543 switch (which) {
544 case IRQCHIP_STATE_PENDING:
545 *val = gic_peek_irq(d, GICD_ISPENDR);
546 break;
547
548 case IRQCHIP_STATE_ACTIVE:
549 *val = gic_peek_irq(d, GICD_ISACTIVER);
550 break;
551
552 case IRQCHIP_STATE_MASKED:
553 *val = !gic_peek_irq(d, GICD_ISENABLER);
554 break;
555
556 default:
557 return -EINVAL;
558 }
559
560 return 0;
561 }
562
563 static void gic_irq_set_prio(struct irq_data *d, u8 prio)
564 {
565 void __iomem *base = gic_dist_base(d);
566 u32 offset, index;
567
568 offset = convert_offset_index(d, GICD_IPRIORITYR, &index);
569
570 writeb_relaxed(prio, base + offset + index);
571 }
572
573 static u32 __gic_get_ppi_index(irq_hw_number_t hwirq)
574 {
575 switch (__get_intid_range(hwirq)) {
576 case PPI_RANGE:
577 return hwirq - 16;
578 case EPPI_RANGE:
579 return hwirq - EPPI_BASE_INTID + 16;
580 default:
581 unreachable();
582 }
583 }
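/*
 * Example indices produced above: PPI hwirq 20 maps to index 4, and
 * EPPI hwirq EPPI_BASE_INTID + 3 maps to index 19, since the 16 legacy
 * PPIs come first in the linear PPI index space.
 */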
584
585 static u32 __gic_get_rdist_index(irq_hw_number_t hwirq)
586 {
587 switch (__get_intid_range(hwirq)) {
588 case SGI_RANGE:
589 case PPI_RANGE:
590 return hwirq;
591 case EPPI_RANGE:
592 return hwirq - EPPI_BASE_INTID + 32;
593 default:
594 unreachable();
595 }
596 }
597
598 static u32 gic_get_rdist_index(struct irq_data *d)
599 {
600 return __gic_get_rdist_index(d->hwirq);
601 }
602
603 static int gic_irq_nmi_setup(struct irq_data *d)
604 {
605 struct irq_desc *desc = irq_to_desc(d->irq);
606
607 if (!gic_supports_nmi())
608 return -EINVAL;
609
610 if (gic_peek_irq(d, GICD_ISENABLER)) {
611 pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq);
612 return -EINVAL;
613 }
614
615 /*
616 * A secondary irq_chip should be in charge of LPI requests;
617 * it should not be possible to get here.
618 */
619 if (WARN_ON(irqd_to_hwirq(d) >= 8192))
620 return -EINVAL;
621
622 /* desc lock should already be held */
623 if (gic_irq_in_rdist(d)) {
624 u32 idx = gic_get_rdist_index(d);
625
626 /*
627 * When setting up a percpu interrupt as NMI, only switch the
628 * handler for the first NMI
629 */
630 if (!refcount_inc_not_zero(&rdist_nmi_refs[idx])) {
631 refcount_set(&rdist_nmi_refs[idx], 1);
632 desc->handle_irq = handle_percpu_devid_fasteoi_nmi;
633 }
634 } else {
635 desc->handle_irq = handle_fasteoi_nmi;
636 }
637
638 gic_irq_set_prio(d, dist_prio_nmi);
639
640 return 0;
641 }
642
643 static void gic_irq_nmi_teardown(struct irq_data *d)
644 {
645 struct irq_desc *desc = irq_to_desc(d->irq);
646
647 if (WARN_ON(!gic_supports_nmi()))
648 return;
649
650 if (gic_peek_irq(d, GICD_ISENABLER)) {
651 pr_err("Cannot clear NMI property of enabled IRQ %u\n", d->irq);
652 return;
653 }
654
655 /*
656 * A secondary irq_chip should be in charge of LPI requests;
657 * it should not be possible to get here.
658 */
659 if (WARN_ON(irqd_to_hwirq(d) >= 8192))
660 return;
661
662 /* desc lock should already be held */
663 if (gic_irq_in_rdist(d)) {
664 u32 idx = gic_get_rdist_index(d);
665
666 /* Tearing down NMI, only switch handler for last NMI */
667 if (refcount_dec_and_test(&rdist_nmi_refs[idx]))
668 desc->handle_irq = handle_percpu_devid_irq;
669 } else {
670 desc->handle_irq = handle_fasteoi_irq;
671 }
672
673 gic_irq_set_prio(d, dist_prio_irq);
674 }
675
676 static bool gic_arm64_erratum_2941627_needed(struct irq_data *d)
677 {
678 enum gic_intid_range range;
679
680 if (!static_branch_unlikely(&gic_arm64_2941627_erratum))
681 return false;
682
683 range = get_intid_range(d);
684
685 /*
686 * The workaround is needed if the IRQ is an SPI and
687 * the target cpu is different from the one we are
688 * executing on.
689 */
690 return (range == SPI_RANGE || range == ESPI_RANGE) &&
691 !cpumask_test_cpu(raw_smp_processor_id(),
692 irq_data_get_effective_affinity_mask(d));
693 }
694
695 static void gic_eoi_irq(struct irq_data *d)
696 {
697 write_gicreg(irqd_to_hwirq(d), ICC_EOIR1_EL1);
698 isb();
699
700 if (gic_arm64_erratum_2941627_needed(d)) {
701 /*
702 * Make sure the GIC stream deactivate packet
703 * issued by ICC_EOIR1_EL1 has completed before
704 * deactivating through GICD_ICACTIVER.
705 */
706 dsb(sy);
707 gic_poke_irq(d, GICD_ICACTIVER);
708 }
709 }
710
711 static void gic_eoimode1_eoi_irq(struct irq_data *d)
712 {
713 /*
714 * No need to deactivate an LPI, or an interrupt that
715 * is getting forwarded to a vcpu.
716 */
717 if (irqd_to_hwirq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d))
718 return;
719
720 if (!gic_arm64_erratum_2941627_needed(d))
721 gic_write_dir(irqd_to_hwirq(d));
722 else
723 gic_poke_irq(d, GICD_ICACTIVER);
724 }
725
726 static int gic_set_type(struct irq_data *d, unsigned int type)
727 {
728 irq_hw_number_t irq = irqd_to_hwirq(d);
729 enum gic_intid_range range;
730 void __iomem *base;
731 u32 offset, index;
732 int ret;
733
734 range = get_intid_range(d);
735
736 /* Interrupt configuration for SGIs can't be changed */
737 if (range == SGI_RANGE)
738 return type != IRQ_TYPE_EDGE_RISING ? -EINVAL : 0;
739
740 /* SPIs have restrictions on the supported types */
741 if ((range == SPI_RANGE || range == ESPI_RANGE) &&
742 type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
743 return -EINVAL;
744
745 if (gic_irq_in_rdist(d))
746 base = gic_data_rdist_sgi_base();
747 else
748 base = gic_dist_base_alias(d);
749
750 offset = convert_offset_index(d, GICD_ICFGR, &index);
751
752 ret = gic_configure_irq(index, type, base + offset);
753 if (ret && (range == PPI_RANGE || range == EPPI_RANGE)) {
754 /* Misconfigured PPIs are usually not fatal */
755 pr_warn("GIC: PPI INTID%ld is secure or misconfigured\n", irq);
756 ret = 0;
757 }
758
759 return ret;
760 }
761
762 static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
763 {
764 if (get_intid_range(d) == SGI_RANGE)
765 return -EINVAL;
766
767 if (vcpu)
768 irqd_set_forwarded_to_vcpu(d);
769 else
770 irqd_clr_forwarded_to_vcpu(d);
771 return 0;
772 }
773
774 static u64 gic_cpu_to_affinity(int cpu)
775 {
776 u64 mpidr = cpu_logical_map(cpu);
777 u64 aff;
778
779 /* ASR8601 needs to have its affinities shifted down... */
780 if (unlikely(gic_data.flags & FLAGS_WORKAROUND_ASR_ERRATUM_8601001))
781 mpidr = (MPIDR_AFFINITY_LEVEL(mpidr, 1) |
782 (MPIDR_AFFINITY_LEVEL(mpidr, 2) << 8));
783
784 aff = ((u64)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
785 MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
786 MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
787 MPIDR_AFFINITY_LEVEL(mpidr, 0));
788
789 return aff;
790 }
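/*
 * Example of the packing above: an MPIDR with Aff3=1, Aff2=2, Aff1=3
 * and Aff0=4 yields aff = (1ULL << 32) | (2 << 16) | (3 << 8) | 4,
 * i.e. 0x0000000100020304.
 */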
791
792 static void gic_deactivate_unhandled(u32 irqnr)
793 {
794 if (static_branch_likely(&supports_deactivate_key)) {
795 if (irqnr < 8192)
796 gic_write_dir(irqnr);
797 } else {
798 write_gicreg(irqnr, ICC_EOIR1_EL1);
799 isb();
800 }
801 }
802
803 /*
804 * Follow a read of the IAR with any HW maintenance that needs to happen prior
805 * to invoking the relevant IRQ handler. We must do two things:
806 *
807 * (1) Ensure instruction ordering between a read of IAR and subsequent
808 * instructions in the IRQ handler using an ISB.
809 *
810 * It is possible for the IAR to report an IRQ which was signalled *after*
811 * the CPU took an IRQ exception as multiple interrupts can race to be
812 * recognized by the GIC, earlier interrupts could be withdrawn, and/or
813 * later interrupts could be prioritized by the GIC.
814 *
815 * For devices which are tightly coupled to the CPU, such as PMUs, a
816 * context synchronization event is necessary to ensure that system
817 * register state is not stale, as these may have been indirectly written
818 * *after* exception entry.
819 *
820 * (2) Deactivate the interrupt when EOI mode 1 is in use.
821 */
822 static inline void gic_complete_ack(u32 irqnr)
823 {
824 if (static_branch_likely(&supports_deactivate_key))
825 write_gicreg(irqnr, ICC_EOIR1_EL1);
826
827 isb();
828 }
829
830 static bool gic_rpr_is_nmi_prio(void)
831 {
832 if (!gic_supports_nmi())
833 return false;
834
835 return unlikely(gic_read_rpr() == GICV3_PRIO_NMI);
836 }
837
838 static bool gic_irqnr_is_special(u32 irqnr)
839 {
840 return irqnr >= 1020 && irqnr <= 1023;
841 }
842
843 static void __gic_handle_irq(u32 irqnr, struct pt_regs *regs)
844 {
845 if (gic_irqnr_is_special(irqnr))
846 return;
847
848 gic_complete_ack(irqnr);
849
850 if (generic_handle_domain_irq(gic_data.domain, irqnr)) {
851 WARN_ONCE(true, "Unexpected interrupt (irqnr %u)\n", irqnr);
852 gic_deactivate_unhandled(irqnr);
853 }
854 }
855
856 static void __gic_handle_nmi(u32 irqnr, struct pt_regs *regs)
857 {
858 if (gic_irqnr_is_special(irqnr))
859 return;
860
861 gic_complete_ack(irqnr);
862
863 if (generic_handle_domain_nmi(gic_data.domain, irqnr)) {
864 WARN_ONCE(true, "Unexpected pseudo-NMI (irqnr %u)\n", irqnr);
865 gic_deactivate_unhandled(irqnr);
866 }
867 }
868
869 /*
870 * An exception has been taken from a context with IRQs enabled, and this could
871 * be an IRQ or an NMI.
872 *
873 * The entry code called us with DAIF.IF set to keep NMIs masked. We must clear
874 * DAIF.IF (and update ICC_PMR_EL1 to mask regular IRQs) prior to returning,
875 * after handling any NMI but before handling any IRQ.
876 *
877 * The entry code has performed IRQ entry, and if an NMI is detected we must
878 * perform NMI entry/exit around invoking the handler.
879 */
880 static void __gic_handle_irq_from_irqson(struct pt_regs *regs)
881 {
882 bool is_nmi;
883 u32 irqnr;
884
885 irqnr = gic_read_iar();
886
887 is_nmi = gic_rpr_is_nmi_prio();
888
889 if (is_nmi) {
890 nmi_enter();
891 __gic_handle_nmi(irqnr, regs);
892 nmi_exit();
893 }
894
895 if (gic_prio_masking_enabled()) {
896 gic_pmr_mask_irqs();
897 gic_arch_enable_irqs();
898 }
899
900 if (!is_nmi)
901 __gic_handle_irq(irqnr, regs);
902 }
903
904 /*
905 * An exception has been taken from a context with IRQs disabled, which can only
906 * be an NMI.
907 *
908 * The entry code called us with DAIF.IF set to keep NMIs masked. We must leave
909 * DAIF.IF (and ICC_PMR_EL1) unchanged.
910 *
911 * The entry code has performed NMI entry.
912 */
913 static void __gic_handle_irq_from_irqsoff(struct pt_regs *regs)
914 {
915 u64 pmr;
916 u32 irqnr;
917
918 /*
919 * We were in a context with IRQs disabled. However, the
920 * entry code has set PMR to a value that allows any
921 * interrupt to be acknowledged, and not just NMIs. This can
922 * lead to surprising effects if the NMI has been retired in
923 * the meantime and an IRQ is pending. The IRQ
924 * would then be taken in NMI context, something that nobody
925 * wants to debug twice.
926 *
927 * Until we sort this, drop PMR again to a level that will
928 * actually only allow NMIs before reading IAR, and then
929 * restore it to what it was.
930 */
931 pmr = gic_read_pmr();
932 gic_pmr_mask_irqs();
933 isb();
934 irqnr = gic_read_iar();
935 gic_write_pmr(pmr);
936
937 __gic_handle_nmi(irqnr, regs);
938 }
939
940 static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
941 {
942 if (unlikely(gic_supports_nmi() && !interrupts_enabled(regs)))
943 __gic_handle_irq_from_irqsoff(regs);
944 else
945 __gic_handle_irq_from_irqson(regs);
946 }
947
948 static void __init gic_dist_init(void)
949 {
950 unsigned int i;
951 u64 affinity;
952 void __iomem *base = gic_data.dist_base;
953 u32 val;
954
955 /* Disable the distributor */
956 writel_relaxed(0, base + GICD_CTLR);
957 gic_dist_wait_for_rwp();
958
959 /*
960 * Configure SPIs as non-secure Group-1. This will only matter
961 * if the GIC only has a single security state. This will not
962 * do the right thing if the kernel is running in secure mode,
963 * but that's not the intended use case anyway.
964 */
965 for (i = 32; i < GIC_LINE_NR; i += 32)
966 writel_relaxed(~0, base + GICD_IGROUPR + i / 8);
967
968 /* Extended SPI range, not handled by the GICv2/GICv3 common code */
969 for (i = 0; i < GIC_ESPI_NR; i += 32) {
970 writel_relaxed(~0U, base + GICD_ICENABLERnE + i / 8);
971 writel_relaxed(~0U, base + GICD_ICACTIVERnE + i / 8);
972 }
973
974 for (i = 0; i < GIC_ESPI_NR; i += 32)
975 writel_relaxed(~0U, base + GICD_IGROUPRnE + i / 8);
976
977 for (i = 0; i < GIC_ESPI_NR; i += 16)
978 writel_relaxed(0, base + GICD_ICFGRnE + i / 4);
979
980 for (i = 0; i < GIC_ESPI_NR; i += 4)
981 writel_relaxed(REPEAT_BYTE_U32(dist_prio_irq),
982 base + GICD_IPRIORITYRnE + i);
983
984 /* Now do the common stuff */
985 gic_dist_config(base, GIC_LINE_NR, dist_prio_irq);
986
987 val = GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1;
988 if (gic_data.rdists.gicd_typer2 & GICD_TYPER2_nASSGIcap) {
989 pr_info("Enabling SGIs without active state\n");
990 val |= GICD_CTLR_nASSGIreq;
991 }
992
993 /* Enable distributor with ARE, Group1, and wait for it to drain */
994 writel_relaxed(val, base + GICD_CTLR);
995 gic_dist_wait_for_rwp();
996
997 /*
998 * Set all global interrupts to the boot CPU only. ARE must be
999 * enabled.
1000 */
1001 affinity = gic_cpu_to_affinity(smp_processor_id());
1002 for (i = 32; i < GIC_LINE_NR; i++)
1003 gic_write_irouter(affinity, base + GICD_IROUTER + i * 8);
1004
1005 for (i = 0; i < GIC_ESPI_NR; i++)
1006 gic_write_irouter(affinity, base + GICD_IROUTERnE + i * 8);
1007 }
1008
1009 static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *))
1010 {
1011 int ret = -ENODEV;
1012 int i;
1013
1014 for (i = 0; i < gic_data.nr_redist_regions; i++) {
1015 void __iomem *ptr = gic_data.redist_regions[i].redist_base;
1016 u64 typer;
1017 u32 reg;
1018
1019 reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK;
1020 if (reg != GIC_PIDR2_ARCH_GICv3 &&
1021 reg != GIC_PIDR2_ARCH_GICv4) { /* We're in trouble... */
1022 pr_warn("No redistributor present @%p\n", ptr);
1023 break;
1024 }
1025
1026 do {
1027 typer = gic_read_typer(ptr + GICR_TYPER);
1028 ret = fn(gic_data.redist_regions + i, ptr);
1029 if (!ret)
1030 return 0;
1031
1032 if (gic_data.redist_regions[i].single_redist)
1033 break;
1034
1035 if (gic_data.redist_stride) {
1036 ptr += gic_data.redist_stride;
1037 } else {
1038 ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */
1039 if (typer & GICR_TYPER_VLPIS)
1040 ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */
1041 }
1042 } while (!(typer & GICR_TYPER_LAST));
1043 }
1044
1045 return ret ? -ENODEV : 0;
1046 }
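/*
 * Stepping example for the walk above: with no redist_stride provided,
 * each redistributor occupies two 64K frames (RD_base + SGI_base),
 * i.e. 128K, growing to 256K when GICR_TYPER.VLPIS adds the VLPI and
 * reserved frames.
 */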
1047
1048 static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr)
1049 {
1050 unsigned long mpidr;
1051 u64 typer;
1052 u32 aff;
1053
1054 /*
1055 * Convert affinity to a 32bit value that can be matched to
1056 * GICR_TYPER bits [63:32].
1057 */
1058 mpidr = gic_cpu_to_affinity(smp_processor_id());
1059
1060 aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
1061 MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
1062 MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
1063 MPIDR_AFFINITY_LEVEL(mpidr, 0));
1064
1065 typer = gic_read_typer(ptr + GICR_TYPER);
1066 if ((typer >> 32) == aff) {
1067 u64 offset = ptr - region->redist_base;
1068 raw_spin_lock_init(&gic_data_rdist()->rd_lock);
1069 gic_data_rdist_rd_base() = ptr;
1070 gic_data_rdist()->phys_base = region->phys_base + offset;
1071
1072 pr_info("CPU%d: found redistributor %lx region %d:%pa\n",
1073 smp_processor_id(), mpidr,
1074 (int)(region - gic_data.redist_regions),
1075 &gic_data_rdist()->phys_base);
1076 return 0;
1077 }
1078
1079 /* Try next one */
1080 return 1;
1081 }
1082
1083 static int gic_populate_rdist(void)
1084 {
1085 if (gic_iterate_rdists(__gic_populate_rdist) == 0)
1086 return 0;
1087
1088 /* We couldn't even deal with ourselves... */
1089 WARN(true, "CPU%d: mpidr %lx has no re-distributor!\n",
1090 smp_processor_id(),
1091 (unsigned long)cpu_logical_map(smp_processor_id()));
1092 return -ENODEV;
1093 }
1094
1095 static int __gic_update_rdist_properties(struct redist_region *region,
1096 void __iomem *ptr)
1097 {
1098 u64 typer = gic_read_typer(ptr + GICR_TYPER);
1099 u32 ctlr = readl_relaxed(ptr + GICR_CTLR);
1100
1101 /* Boot-time cleanup */
1102 if ((typer & GICR_TYPER_VLPIS) && (typer & GICR_TYPER_RVPEID)) {
1103 u64 val;
1104
1105 /* Deactivate any present vPE */
1106 val = gicr_read_vpendbaser(ptr + SZ_128K + GICR_VPENDBASER);
1107 if (val & GICR_VPENDBASER_Valid)
1108 gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast,
1109 ptr + SZ_128K + GICR_VPENDBASER);
1110
1111 /* Mark the VPE table as invalid */
1112 val = gicr_read_vpropbaser(ptr + SZ_128K + GICR_VPROPBASER);
1113 val &= ~GICR_VPROPBASER_4_1_VALID;
1114 gicr_write_vpropbaser(val, ptr + SZ_128K + GICR_VPROPBASER);
1115 }
1116
1117 gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS);
1118
1119 /*
1120 * TYPER.RVPEID implies some form of DirectLPI, no matter what the
1121 * doc says... :-/ And CTLR.IR implies another subset of DirectLPI
1122 * that the ITS driver can make use of for LPIs (and not VLPIs).
1123 *
1124 * These are 3 different ways to express the same thing, depending
1125 * on the revision of the architecture and its relaxations over
1126 * time. Just group them under the 'direct_lpi' banner.
1127 */
1128 gic_data.rdists.has_rvpeid &= !!(typer & GICR_TYPER_RVPEID);
1129 gic_data.rdists.has_direct_lpi &= (!!(typer & GICR_TYPER_DirectLPIS) |
1130 !!(ctlr & GICR_CTLR_IR) |
1131 gic_data.rdists.has_rvpeid);
1132 gic_data.rdists.has_vpend_valid_dirty &= !!(typer & GICR_TYPER_DIRTY);
1133
1134 /* Detect non-sensical configurations */
1135 if (WARN_ON_ONCE(gic_data.rdists.has_rvpeid && !gic_data.rdists.has_vlpis)) {
1136 gic_data.rdists.has_direct_lpi = false;
1137 gic_data.rdists.has_vlpis = false;
1138 gic_data.rdists.has_rvpeid = false;
1139 }
1140
1141 gic_data.ppi_nr = min(GICR_TYPER_NR_PPIS(typer), gic_data.ppi_nr);
1142
1143 return 1;
1144 }
1145
1146 static void gic_update_rdist_properties(void)
1147 {
1148 gic_data.ppi_nr = UINT_MAX;
1149 gic_iterate_rdists(__gic_update_rdist_properties);
1150 if (WARN_ON(gic_data.ppi_nr == UINT_MAX))
1151 gic_data.ppi_nr = 0;
1152 pr_info("GICv3 features: %d PPIs%s%s\n",
1153 gic_data.ppi_nr,
1154 gic_data.has_rss ? ", RSS" : "",
1155 gic_data.rdists.has_direct_lpi ? ", DirectLPI" : "");
1156
1157 if (gic_data.rdists.has_vlpis)
1158 pr_info("GICv4 features: %s%s%s\n",
1159 gic_data.rdists.has_direct_lpi ? "DirectLPI " : "",
1160 gic_data.rdists.has_rvpeid ? "RVPEID " : "",
1161 gic_data.rdists.has_vpend_valid_dirty ? "Valid+Dirty " : "");
1162 }
1163
1164 static void gic_cpu_sys_reg_enable(void)
1165 {
1166 /*
1167 * Need to check that the SRE bit has actually been set. If
1168 * not, it means that SRE is disabled at EL2. We're going to
1169 * die painfully, and there is nothing we can do about it.
1170 *
1171 * Kindly inform the luser.
1172 */
1173 if (!gic_enable_sre())
1174 pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");
1175
1176 }
1177
1178 static void gic_cpu_sys_reg_init(void)
1179 {
1180 int i, cpu = smp_processor_id();
1181 u64 mpidr = gic_cpu_to_affinity(cpu);
1182 u64 need_rss = MPIDR_RS(mpidr);
1183 bool group0;
1184 u32 pribits;
1185
1186 pribits = gic_get_pribits();
1187
1188 group0 = gic_has_group0();
1189
1190 /* Set priority mask register */
1191 if (!gic_prio_masking_enabled()) {
1192 write_gicreg(DEFAULT_PMR_VALUE, ICC_PMR_EL1);
1193 } else if (gic_supports_nmi()) {
1194 /*
1195 * Check that all CPUs use the same priority space.
1196 *
1197 * If there's a mismatch with the boot CPU, the system is
1198 * likely to die as interrupt masking will not work properly on
1199 * all CPUs.
1200 */
1201 WARN_ON(group0 != cpus_have_group0);
1202 WARN_ON(gic_dist_security_disabled() != cpus_have_security_disabled);
1203 }
1204
1205 /*
1206 * Some firmwares hand over to the kernel with the BPR changed from
1207 * its reset value (and with a value large enough to prevent
1208 * any pre-emptive interrupts from working at all). Writing a zero
1209 * to BPR restores its reset value.
1210 */
1211 gic_write_bpr1(0);
1212
1213 if (static_branch_likely(&supports_deactivate_key)) {
1214 /* EOI drops priority only (mode 1) */
1215 gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop);
1216 } else {
1217 /* EOI deactivates interrupt too (mode 0) */
1218 gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);
1219 }
1220
1221 /* Always whack Group0 before Group1 */
1222 if (group0) {
1223 switch(pribits) {
1224 case 8:
1225 case 7:
1226 write_gicreg(0, ICC_AP0R3_EL1);
1227 write_gicreg(0, ICC_AP0R2_EL1);
1228 fallthrough;
1229 case 6:
1230 write_gicreg(0, ICC_AP0R1_EL1);
1231 fallthrough;
1232 case 5:
1233 case 4:
1234 write_gicreg(0, ICC_AP0R0_EL1);
1235 }
1236
1237 isb();
1238 }
1239
1240 switch(pribits) {
1241 case 8:
1242 case 7:
1243 write_gicreg(0, ICC_AP1R3_EL1);
1244 write_gicreg(0, ICC_AP1R2_EL1);
1245 fallthrough;
1246 case 6:
1247 write_gicreg(0, ICC_AP1R1_EL1);
1248 fallthrough;
1249 case 5:
1250 case 4:
1251 write_gicreg(0, ICC_AP1R0_EL1);
1252 }
1253
1254 isb();
1255
1256 /* ... and let's hit the road... */
1257 gic_write_grpen1(1);
1258
1259 /* Keep the RSS capability status in per_cpu variable */
1260 per_cpu(has_rss, cpu) = !!(gic_read_ctlr() & ICC_CTLR_EL1_RSS);
1261
1262 /* Check that all the CPUs are capable of sending SGIs to other CPUs */
1263 for_each_online_cpu(i) {
1264 bool have_rss = per_cpu(has_rss, i) && per_cpu(has_rss, cpu);
1265
1266 need_rss |= MPIDR_RS(gic_cpu_to_affinity(i));
1267 if (need_rss && (!have_rss))
1268 pr_crit("CPU%d (%lx) can't SGI CPU%d (%lx), no RSS\n",
1269 cpu, (unsigned long)mpidr,
1270 i, (unsigned long)gic_cpu_to_affinity(i));
1271 }
1272
1273 /*
1274 * The GIC spec says that when ICC_CTLR_EL1.RSS==1 and GICD_TYPER.RSS==0,
1275 * writing the ICC_ASGI1R_EL1 register with RS != 0 is a CONSTRAINED
1276 * UNPREDICTABLE choice of:
1277 * - The write is ignored.
1278 * - The RS field is treated as 0.
1279 */
1280 if (need_rss && (!gic_data.has_rss))
1281 pr_crit_once("RSS is required but GICD doesn't support it\n");
1282 }
1283
1284 static bool gicv3_nolpi;
1285
1286 static int __init gicv3_nolpi_cfg(char *buf)
1287 {
1288 return kstrtobool(buf, &gicv3_nolpi);
1289 }
1290 early_param("irqchip.gicv3_nolpi", gicv3_nolpi_cfg);
1291
1292 static int gic_dist_supports_lpis(void)
1293 {
1294 return (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) &&
1295 !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS) &&
1296 !gicv3_nolpi);
1297 }
1298
1299 static void gic_cpu_init(void)
1300 {
1301 void __iomem *rbase;
1302 int i;
1303
1304 /* Register ourselves with the rest of the world */
1305 if (gic_populate_rdist())
1306 return;
1307
1308 gic_enable_redist(true);
1309
1310 WARN((gic_data.ppi_nr > 16 || GIC_ESPI_NR != 0) &&
1311 !(gic_read_ctlr() & ICC_CTLR_EL1_ExtRange),
1312 "Distributor has extended ranges, but CPU%d doesn't\n",
1313 smp_processor_id());
1314
1315 rbase = gic_data_rdist_sgi_base();
1316
1317 /* Configure SGIs/PPIs as non-secure Group-1 */
1318 for (i = 0; i < gic_data.ppi_nr + SGI_NR; i += 32)
1319 writel_relaxed(~0, rbase + GICR_IGROUPR0 + i / 8);
1320
1321 gic_cpu_config(rbase, gic_data.ppi_nr + SGI_NR, dist_prio_irq);
1322 gic_redist_wait_for_rwp();
1323
1324 /* initialise system registers */
1325 gic_cpu_sys_reg_init();
1326 }
1327
1328 #ifdef CONFIG_SMP
1329
1330 #define MPIDR_TO_SGI_RS(mpidr) (MPIDR_RS(mpidr) << ICC_SGI1R_RS_SHIFT)
1331 #define MPIDR_TO_SGI_CLUSTER_ID(mpidr) ((mpidr) & ~0xFUL)
1332
1333 /*
1334 * gic_starting_cpu() is called after the last point where cpuhp is allowed
1335 * to fail. So pre-check for problems earlier.
1336 */
1337 static int gic_check_rdist(unsigned int cpu)
1338 {
1339 if (cpumask_test_cpu(cpu, &broken_rdists))
1340 return -EINVAL;
1341
1342 return 0;
1343 }
1344
1345 static int gic_starting_cpu(unsigned int cpu)
1346 {
1347 gic_cpu_sys_reg_enable();
1348 gic_cpu_init();
1349
1350 if (gic_dist_supports_lpis())
1351 its_cpu_init();
1352
1353 return 0;
1354 }
1355
1356 static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
1357 unsigned long cluster_id)
1358 {
1359 int next_cpu, cpu = *base_cpu;
1360 unsigned long mpidr;
1361 u16 tlist = 0;
1362
1363 mpidr = gic_cpu_to_affinity(cpu);
1364
1365 while (cpu < nr_cpu_ids) {
1366 tlist |= 1 << (mpidr & 0xf);
1367
1368 next_cpu = cpumask_next(cpu, mask);
1369 if (next_cpu >= nr_cpu_ids)
1370 goto out;
1371 cpu = next_cpu;
1372
1373 mpidr = gic_cpu_to_affinity(cpu);
1374
1375 if (cluster_id != MPIDR_TO_SGI_CLUSTER_ID(mpidr)) {
1376 cpu--;
1377 goto out;
1378 }
1379 }
1380 out:
1381 *base_cpu = cpu;
1382 return tlist;
1383 }
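/*
 * Example of the computation above: if CPUs 0-3 share one cluster and
 * have Aff0 values 0, 1, 2 and 3, a mask covering all four produces
 * tlist = 0x000f, with *base_cpu left at the last CPU consumed from
 * that cluster.
 */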
1384
1385 #define MPIDR_TO_SGI_AFFINITY(cluster_id, level) \
1386 (MPIDR_AFFINITY_LEVEL(cluster_id, level) \
1387 << ICC_SGI1R_AFFINITY_## level ##_SHIFT)
1388
1389 static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
1390 {
1391 u64 val;
1392
1393 val = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3) |
1394 MPIDR_TO_SGI_AFFINITY(cluster_id, 2) |
1395 irq << ICC_SGI1R_SGI_ID_SHIFT |
1396 MPIDR_TO_SGI_AFFINITY(cluster_id, 1) |
1397 MPIDR_TO_SGI_RS(cluster_id) |
1398 tlist << ICC_SGI1R_TARGET_LIST_SHIFT);
1399
1400 pr_devel("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
1401 gic_write_sgi1r(val);
1402 }
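/*
 * The value composed above packs the SGI number, the Aff3/Aff2/Aff1
 * fields of the target cluster, the range selector and the 16-bit
 * target list into a single 64-bit ICC_SGI1R_EL1 write; the exact bit
 * positions come from the architectural layout defined in the GICv3
 * header, so they are not repeated here.
 */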
1403
1404 static void gic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
1405 {
1406 int cpu;
1407
1408 if (WARN_ON(d->hwirq >= 16))
1409 return;
1410
1411 /*
1412 * Ensure that stores to Normal memory are visible to the
1413 * other CPUs before issuing the IPI.
1414 */
1415 dsb(ishst);
1416
1417 for_each_cpu(cpu, mask) {
1418 u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(gic_cpu_to_affinity(cpu));
1419 u16 tlist;
1420
1421 tlist = gic_compute_target_list(&cpu, mask, cluster_id);
1422 gic_send_sgi(cluster_id, tlist, d->hwirq);
1423 }
1424
1425 /* Force the above writes to ICC_SGI1R_EL1 to be executed */
1426 isb();
1427 }
1428
1429 static void __init gic_smp_init(void)
1430 {
1431 struct irq_fwspec sgi_fwspec = {
1432 .fwnode = gic_data.fwnode,
1433 .param_count = 1,
1434 };
1435 int base_sgi;
1436
1437 cpuhp_setup_state_nocalls(CPUHP_BP_PREPARE_DYN,
1438 "irqchip/arm/gicv3:checkrdist",
1439 gic_check_rdist, NULL);
1440
1441 cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
1442 "irqchip/arm/gicv3:starting",
1443 gic_starting_cpu, NULL);
1444
1445 /* Register all 8 non-secure SGIs */
1446 base_sgi = irq_domain_alloc_irqs(gic_data.domain, 8, NUMA_NO_NODE, &sgi_fwspec);
1447 if (WARN_ON(base_sgi <= 0))
1448 return;
1449
1450 set_smp_ipi_range(base_sgi, 8);
1451 }
1452
1453 static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
1454 bool force)
1455 {
1456 unsigned int cpu;
1457 u32 offset, index;
1458 void __iomem *reg;
1459 int enabled;
1460 u64 val;
1461
1462 if (force)
1463 cpu = cpumask_first(mask_val);
1464 else
1465 cpu = cpumask_any_and(mask_val, cpu_online_mask);
1466
1467 if (cpu >= nr_cpu_ids)
1468 return -EINVAL;
1469
1470 if (gic_irq_in_rdist(d))
1471 return -EINVAL;
1472
1473 /* If interrupt was enabled, disable it first */
1474 enabled = gic_peek_irq(d, GICD_ISENABLER);
1475 if (enabled)
1476 gic_mask_irq(d);
1477
1478 offset = convert_offset_index(d, GICD_IROUTER, &index);
1479 reg = gic_dist_base(d) + offset + (index * 8);
1480 val = gic_cpu_to_affinity(cpu);
1481
1482 gic_write_irouter(val, reg);
1483
1484 /*
1485 * If the interrupt was enabled, enable it again. Otherwise,
1486 * just wait for the distributor to have digested our changes.
1487 */
1488 if (enabled)
1489 gic_unmask_irq(d);
1490
1491 irq_data_update_effective_affinity(d, cpumask_of(cpu));
1492
1493 return IRQ_SET_MASK_OK_DONE;
1494 }
1495 #else
1496 #define gic_set_affinity NULL
1497 #define gic_ipi_send_mask NULL
1498 #define gic_smp_init() do { } while(0)
1499 #endif
1500
1501 static int gic_retrigger(struct irq_data *data)
1502 {
1503 return !gic_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING, true);
1504 }
1505
1506 #ifdef CONFIG_CPU_PM
1507 static int gic_cpu_pm_notifier(struct notifier_block *self,
1508 unsigned long cmd, void *v)
1509 {
1510 if (cmd == CPU_PM_EXIT) {
1511 if (gic_dist_security_disabled())
1512 gic_enable_redist(true);
1513 gic_cpu_sys_reg_enable();
1514 gic_cpu_sys_reg_init();
1515 } else if (cmd == CPU_PM_ENTER && gic_dist_security_disabled()) {
1516 gic_write_grpen1(0);
1517 gic_enable_redist(false);
1518 }
1519 return NOTIFY_OK;
1520 }
1521
1522 static struct notifier_block gic_cpu_pm_notifier_block = {
1523 .notifier_call = gic_cpu_pm_notifier,
1524 };
1525
1526 static void gic_cpu_pm_init(void)
1527 {
1528 cpu_pm_register_notifier(&gic_cpu_pm_notifier_block);
1529 }
1530
1531 #else
1532 static inline void gic_cpu_pm_init(void) { }
1533 #endif /* CONFIG_CPU_PM */
1534
1535 static struct irq_chip gic_chip = {
1536 .name = "GICv3",
1537 .irq_mask = gic_mask_irq,
1538 .irq_unmask = gic_unmask_irq,
1539 .irq_eoi = gic_eoi_irq,
1540 .irq_set_type = gic_set_type,
1541 .irq_set_affinity = gic_set_affinity,
1542 .irq_retrigger = gic_retrigger,
1543 .irq_get_irqchip_state = gic_irq_get_irqchip_state,
1544 .irq_set_irqchip_state = gic_irq_set_irqchip_state,
1545 .irq_nmi_setup = gic_irq_nmi_setup,
1546 .irq_nmi_teardown = gic_irq_nmi_teardown,
1547 .ipi_send_mask = gic_ipi_send_mask,
1548 .flags = IRQCHIP_SET_TYPE_MASKED |
1549 IRQCHIP_SKIP_SET_WAKE |
1550 IRQCHIP_MASK_ON_SUSPEND,
1551 };
1552
1553 static struct irq_chip gic_eoimode1_chip = {
1554 .name = "GICv3",
1555 .irq_mask = gic_eoimode1_mask_irq,
1556 .irq_unmask = gic_unmask_irq,
1557 .irq_eoi = gic_eoimode1_eoi_irq,
1558 .irq_set_type = gic_set_type,
1559 .irq_set_affinity = gic_set_affinity,
1560 .irq_retrigger = gic_retrigger,
1561 .irq_get_irqchip_state = gic_irq_get_irqchip_state,
1562 .irq_set_irqchip_state = gic_irq_set_irqchip_state,
1563 .irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity,
1564 .irq_nmi_setup = gic_irq_nmi_setup,
1565 .irq_nmi_teardown = gic_irq_nmi_teardown,
1566 .ipi_send_mask = gic_ipi_send_mask,
1567 .flags = IRQCHIP_SET_TYPE_MASKED |
1568 IRQCHIP_SKIP_SET_WAKE |
1569 IRQCHIP_MASK_ON_SUSPEND,
1570 };
1571
1572 static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
1573 irq_hw_number_t hw)
1574 {
1575 struct irq_chip *chip = &gic_chip;
1576 struct irq_data *irqd = irq_desc_get_irq_data(irq_to_desc(irq));
1577
1578 if (static_branch_likely(&supports_deactivate_key))
1579 chip = &gic_eoimode1_chip;
1580
1581 switch (__get_intid_range(hw)) {
1582 case SGI_RANGE:
1583 case PPI_RANGE:
1584 case EPPI_RANGE:
1585 irq_set_percpu_devid(irq);
1586 irq_domain_set_info(d, irq, hw, chip, d->host_data,
1587 handle_percpu_devid_irq, NULL, NULL);
1588 break;
1589
1590 case SPI_RANGE:
1591 case ESPI_RANGE:
1592 irq_domain_set_info(d, irq, hw, chip, d->host_data,
1593 handle_fasteoi_irq, NULL, NULL);
1594 irq_set_probe(irq);
1595 irqd_set_single_target(irqd);
1596 break;
1597
1598 case LPI_RANGE:
1599 if (!gic_dist_supports_lpis())
1600 return -EPERM;
1601 irq_domain_set_info(d, irq, hw, chip, d->host_data,
1602 handle_fasteoi_irq, NULL, NULL);
1603 break;
1604
1605 default:
1606 return -EPERM;
1607 }
1608
1609 /* Prevents SW retriggers which mess up the ACK/EOI ordering */
1610 irqd_set_handle_enforce_irqctx(irqd);
1611 return 0;
1612 }
1613
1614 static int gic_irq_domain_translate(struct irq_domain *d,
1615 struct irq_fwspec *fwspec,
1616 unsigned long *hwirq,
1617 unsigned int *type)
1618 {
1619 if (fwspec->param_count == 1 && fwspec->param[0] < 16) {
1620 *hwirq = fwspec->param[0];
1621 *type = IRQ_TYPE_EDGE_RISING;
1622 return 0;
1623 }
1624
1625 if (is_of_node(fwspec->fwnode)) {
1626 if (fwspec->param_count < 3)
1627 return -EINVAL;
1628
1629 switch (fwspec->param[0]) {
1630 case 0: /* SPI */
1631 *hwirq = fwspec->param[1] + 32;
1632 break;
1633 case 1: /* PPI */
1634 *hwirq = fwspec->param[1] + 16;
1635 break;
1636 case 2: /* ESPI */
1637 *hwirq = fwspec->param[1] + ESPI_BASE_INTID;
1638 break;
1639 case 3: /* EPPI */
1640 *hwirq = fwspec->param[1] + EPPI_BASE_INTID;
1641 break;
1642 case GIC_IRQ_TYPE_LPI: /* LPI */
1643 *hwirq = fwspec->param[1];
1644 break;
1645 case GIC_IRQ_TYPE_PARTITION:
1646 *hwirq = fwspec->param[1];
1647 if (fwspec->param[1] >= 16)
1648 *hwirq += EPPI_BASE_INTID - 16;
1649 else
1650 *hwirq += 16;
1651 break;
1652 default:
1653 return -EINVAL;
1654 }
1655
1656 *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
1657
1658 /*
1659 * Make it clear that broken DTs are... broken.
1660 * Partitioned PPIs are an unfortunate exception.
1661 */
1662 WARN_ON(*type == IRQ_TYPE_NONE &&
1663 fwspec->param[0] != GIC_IRQ_TYPE_PARTITION);
1664 return 0;
1665 }
1666
1667 if (is_fwnode_irqchip(fwspec->fwnode)) {
1668 if (fwspec->param_count != 2)
1669 return -EINVAL;
1670
1671 if (fwspec->param[0] < 16) {
1672 pr_err(FW_BUG "Illegal GSI%d translation request\n",
1673 fwspec->param[0]);
1674 return -EINVAL;
1675 }
1676
1677 *hwirq = fwspec->param[0];
1678 *type = fwspec->param[1];
1679
1680 WARN_ON(*type == IRQ_TYPE_NONE);
1681 return 0;
1682 }
1683
1684 return -EINVAL;
1685 }
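/*
 * Worked example of the DT translation above: a devicetree specifier
 * <0 14 IRQ_TYPE_LEVEL_HIGH> (an SPI) yields hwirq = 14 + 32 = 46, and
 * <1 9 IRQ_TYPE_LEVEL_HIGH> (a PPI) yields hwirq = 9 + 16 = 25.
 */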
1686
1687 static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
1688 unsigned int nr_irqs, void *arg)
1689 {
1690 int i, ret;
1691 irq_hw_number_t hwirq;
1692 unsigned int type = IRQ_TYPE_NONE;
1693 struct irq_fwspec *fwspec = arg;
1694
1695 ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type);
1696 if (ret)
1697 return ret;
1698
1699 for (i = 0; i < nr_irqs; i++) {
1700 ret = gic_irq_domain_map(domain, virq + i, hwirq + i);
1701 if (ret)
1702 return ret;
1703 }
1704
1705 return 0;
1706 }
1707
1708 static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
1709 unsigned int nr_irqs)
1710 {
1711 int i;
1712
1713 for (i = 0; i < nr_irqs; i++) {
1714 struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
1715 irq_set_handler(virq + i, NULL);
1716 irq_domain_reset_irq_data(d);
1717 }
1718 }
1719
1720 static bool fwspec_is_partitioned_ppi(struct irq_fwspec *fwspec,
1721 irq_hw_number_t hwirq)
1722 {
1723 enum gic_intid_range range;
1724
1725 if (!gic_data.ppi_descs)
1726 return false;
1727
1728 if (!is_of_node(fwspec->fwnode))
1729 return false;
1730
1731 if (fwspec->param_count < 4 || !fwspec->param[3])
1732 return false;
1733
1734 range = __get_intid_range(hwirq);
1735 if (range != PPI_RANGE && range != EPPI_RANGE)
1736 return false;
1737
1738 return true;
1739 }
1740
1741 static int gic_irq_domain_select(struct irq_domain *d,
1742 struct irq_fwspec *fwspec,
1743 enum irq_domain_bus_token bus_token)
1744 {
1745 unsigned int type, ret, ppi_idx;
1746 irq_hw_number_t hwirq;
1747
1748 /* Not for us */
1749 if (fwspec->fwnode != d->fwnode)
1750 return 0;
1751
1752 /* Handle pure domain searches */
1753 if (!fwspec->param_count)
1754 return d->bus_token == bus_token;
1755
1756 /* If this is not DT, then we have a single domain */
1757 if (!is_of_node(fwspec->fwnode))
1758 return 1;
1759
1760 ret = gic_irq_domain_translate(d, fwspec, &hwirq, &type);
1761 if (WARN_ON_ONCE(ret))
1762 return 0;
1763
1764 if (!fwspec_is_partitioned_ppi(fwspec, hwirq))
1765 return d == gic_data.domain;
1766
1767 /*
1768 * If this is a PPI and we have a 4th (non-null) parameter,
1769 * then we need to match the partition domain.
1770 */
1771 ppi_idx = __gic_get_ppi_index(hwirq);
1772 return d == partition_get_domain(gic_data.ppi_descs[ppi_idx]);
1773 }
1774
1775 static const struct irq_domain_ops gic_irq_domain_ops = {
1776 .translate = gic_irq_domain_translate,
1777 .alloc = gic_irq_domain_alloc,
1778 .free = gic_irq_domain_free,
1779 .select = gic_irq_domain_select,
1780 };
1781
1782 static int partition_domain_translate(struct irq_domain *d,
1783 struct irq_fwspec *fwspec,
1784 unsigned long *hwirq,
1785 unsigned int *type)
1786 {
1787 unsigned long ppi_intid;
1788 struct device_node *np;
1789 unsigned int ppi_idx;
1790 int ret;
1791
1792 if (!gic_data.ppi_descs)
1793 return -ENOMEM;
1794
1795 np = of_find_node_by_phandle(fwspec->param[3]);
1796 if (WARN_ON(!np))
1797 return -EINVAL;
1798
1799 ret = gic_irq_domain_translate(d, fwspec, &ppi_intid, type);
1800 if (WARN_ON_ONCE(ret))
1801 return 0;
1802
1803 ppi_idx = __gic_get_ppi_index(ppi_intid);
1804 ret = partition_translate_id(gic_data.ppi_descs[ppi_idx],
1805 of_node_to_fwnode(np));
1806 if (ret < 0)
1807 return ret;
1808
1809 *hwirq = ret;
1810 *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
1811
1812 return 0;
1813 }
1814
1815 static const struct irq_domain_ops partition_domain_ops = {
1816 .translate = partition_domain_translate,
1817 .select = gic_irq_domain_select,
1818 };
1819
1820 static bool gic_enable_quirk_msm8996(void *data)
1821 {
1822 struct gic_chip_data *d = data;
1823
1824 d->flags |= FLAGS_WORKAROUND_GICR_WAKER_MSM8996;
1825
1826 return true;
1827 }
1828
1829 static bool gic_enable_quirk_cavium_38539(void *data)
1830 {
1831 struct gic_chip_data *d = data;
1832
1833 d->flags |= FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539;
1834
1835 return true;
1836 }
1837
1838 static bool gic_enable_quirk_hip06_07(void *data)
1839 {
1840 struct gic_chip_data *d = data;
1841
1842 /*
1843 * HIP06 GICD_IIDR clashes with GIC-600 product number (despite
1844 * not being an actual ARM implementation). The saving grace is
1845 * that GIC-600 doesn't have ESPI, so nothing to do in that case.
1846 * HIP07 doesn't even have a proper IIDR, and still pretends to
1847 * have ESPI. In both cases, put them right.
1848 */
1849 if (d->rdists.gicd_typer & GICD_TYPER_ESPI) {
1850 /* Zero both ESPI and the RES0 field next to it... */
1851 d->rdists.gicd_typer &= ~GENMASK(9, 8);
1852 return true;
1853 }
1854
1855 return false;
1856 }
1857
1858 #define T241_CHIPN_MASK GENMASK_ULL(45, 44)
1859 #define T241_CHIP_GICDA_OFFSET 0x1580000
1860 #define SMCCC_SOC_ID_T241 0x036b0241
1861
1862 static bool gic_enable_quirk_nvidia_t241(void *data)
1863 {
1864 s32 soc_id = arm_smccc_get_soc_id_version();
1865 unsigned long chip_bmask = 0;
1866 phys_addr_t phys;
1867 u32 i;
1868
1869 /* Check JEP106 code for NVIDIA T241 chip (036b:0241) */
1870 if ((soc_id < 0) || (soc_id != SMCCC_SOC_ID_T241))
1871 return false;
1872
1873 /* Find the chips based on GICR regions PHYS addr */
1874 for (i = 0; i < gic_data.nr_redist_regions; i++) {
1875 chip_bmask |= BIT(FIELD_GET(T241_CHIPN_MASK,
1876 (u64)gic_data.redist_regions[i].phys_base));
1877 }
1878
1879 if (hweight32(chip_bmask) < 3)
1880 return false;
1881
1882 /* Setup GICD alias regions */
1883 for (i = 0; i < ARRAY_SIZE(t241_dist_base_alias); i++) {
1884 if (chip_bmask & BIT(i)) {
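			/* The per-chip GICD alias sits at a fixed offset, tagged with the chip number */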
1885 phys = gic_data.dist_phys_base + T241_CHIP_GICDA_OFFSET;
1886 phys |= FIELD_PREP(T241_CHIPN_MASK, i);
1887 t241_dist_base_alias[i] = ioremap(phys, SZ_64K);
1888 WARN_ON_ONCE(!t241_dist_base_alias[i]);
1889 }
1890 }
1891 static_branch_enable(&gic_nvidia_t241_erratum);
1892 return true;
1893 }
1894
1895 static bool gic_enable_quirk_asr8601(void *data)
1896 {
1897 struct gic_chip_data *d = data;
1898
1899 d->flags |= FLAGS_WORKAROUND_ASR_ERRATUM_8601001;
1900
1901 return true;
1902 }
1903
1904 static bool gic_enable_quirk_arm64_2941627(void *data)
1905 {
1906 static_branch_enable(&gic_arm64_2941627_erratum);
1907 return true;
1908 }
1909
1910 static bool rd_set_non_coherent(void *data)
1911 {
1912 struct gic_chip_data *d = data;
1913
1914 d->rdists.flags |= RDIST_FLAGS_FORCE_NON_SHAREABLE;
1915 return true;
1916 }
1917
1918 static const struct gic_quirk gic_quirks[] = {
1919 {
1920 .desc = "GICv3: Qualcomm MSM8996 broken firmware",
1921 .compatible = "qcom,msm8996-gic-v3",
1922 .init = gic_enable_quirk_msm8996,
1923 },
1924 {
1925 .desc = "GICv3: ASR erratum 8601001",
1926 .compatible = "asr,asr8601-gic-v3",
1927 .init = gic_enable_quirk_asr8601,
1928 },
1929 {
1930 .desc = "GICv3: HIP06 erratum 161010803",
1931 .iidr = 0x0204043b,
1932 .mask = 0xffffffff,
1933 .init = gic_enable_quirk_hip06_07,
1934 },
1935 {
1936 .desc = "GICv3: HIP07 erratum 161010803",
1937 .iidr = 0x00000000,
1938 .mask = 0xffffffff,
1939 .init = gic_enable_quirk_hip06_07,
1940 },
1941 {
1942 /*
1943 * Reserved register accesses generate a Synchronous
1944 * External Abort. This erratum applies to:
1945 * - ThunderX: CN88xx
1946 * - OCTEON TX: CN83xx, CN81xx
1947 * - OCTEON TX2: CN93xx, CN96xx, CN98xx, CNF95xx*
1948 */
1949 .desc = "GICv3: Cavium erratum 38539",
1950 .iidr = 0xa000034c,
1951 .mask = 0xe8f00fff,
1952 .init = gic_enable_quirk_cavium_38539,
1953 },
1954 {
1955 .desc = "GICv3: NVIDIA erratum T241-FABRIC-4",
1956 .iidr = 0x0402043b,
1957 .mask = 0xffffffff,
1958 .init = gic_enable_quirk_nvidia_t241,
1959 },
1960 {
1961 /*
1962 * GIC-700: 2941627 workaround - IP variant [0,1]
1964 */
1965 .desc = "GICv3: ARM64 erratum 2941627",
1966 .iidr = 0x0400043b,
1967 .mask = 0xff0e0fff,
1968 .init = gic_enable_quirk_arm64_2941627,
1969 },
1970 {
1971 /*
1972 * GIC-700: 2941627 workaround - IP variant [2]
1973 */
1974 .desc = "GICv3: ARM64 erratum 2941627",
1975 .iidr = 0x0402043b,
1976 .mask = 0xff0f0fff,
1977 .init = gic_enable_quirk_arm64_2941627,
1978 },
1979 {
1980 .desc = "GICv3: non-coherent attribute",
1981 .property = "dma-noncoherent",
1982 .init = rd_set_non_coherent,
1983 },
1984 {
1985 }
1986 };
1987
1988 static void gic_enable_nmi_support(void)
1989 {
1990 int i;
1991
1992 if (!gic_prio_masking_enabled())
1993 return;
1994
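	/* One refcount per PPI/SGI, tracking NMI setup of these per-CPU interrupts across CPUs */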
1995 rdist_nmi_refs = kcalloc(gic_data.ppi_nr + SGI_NR,
1996 sizeof(*rdist_nmi_refs), GFP_KERNEL);
1997 if (!rdist_nmi_refs)
1998 return;
1999
2000 for (i = 0; i < gic_data.ppi_nr + SGI_NR; i++)
2001 refcount_set(&rdist_nmi_refs[i], 0);
2002
2003 pr_info("Pseudo-NMIs enabled using %s ICC_PMR_EL1 synchronisation\n",
2004 gic_has_relaxed_pmr_sync() ? "relaxed" : "forced");
2005
2006 static_branch_enable(&supports_pseudo_nmis);
2007
2008 if (static_branch_likely(&supports_deactivate_key))
2009 gic_eoimode1_chip.flags |= IRQCHIP_SUPPORTS_NMI;
2010 else
2011 gic_chip.flags |= IRQCHIP_SUPPORTS_NMI;
2012 }
2013
2014 static int __init gic_init_bases(phys_addr_t dist_phys_base,
2015 void __iomem *dist_base,
2016 struct redist_region *rdist_regs,
2017 u32 nr_redist_regions,
2018 u64 redist_stride,
2019 struct fwnode_handle *handle)
2020 {
2021 u32 typer;
2022 int err;
2023
2024 if (!is_hyp_mode_available())
2025 static_branch_disable(&supports_deactivate_key);
2026
2027 if (static_branch_likely(&supports_deactivate_key))
2028 pr_info("GIC: Using split EOI/Deactivate mode\n");
2029
2030 gic_data.fwnode = handle;
2031 gic_data.dist_phys_base = dist_phys_base;
2032 gic_data.dist_base = dist_base;
2033 gic_data.redist_regions = rdist_regs;
2034 gic_data.nr_redist_regions = nr_redist_regions;
2035 gic_data.redist_stride = redist_stride;
2036
2037 /*
2038 * Find out how many interrupts are supported.
2039 */
2040 typer = readl_relaxed(gic_data.dist_base + GICD_TYPER);
2041 gic_data.rdists.gicd_typer = typer;
2042
2043 gic_enable_quirks(readl_relaxed(gic_data.dist_base + GICD_IIDR),
2044 gic_quirks, &gic_data);
2045
2046 pr_info("%d SPIs implemented\n", GIC_LINE_NR - 32);
2047 pr_info("%d Extended SPIs implemented\n", GIC_ESPI_NR);
2048
2049 /*
2050 * ThunderX1 explodes on reading GICD_TYPER2, in violation of the
2051 * architecture spec (which says that reserved registers are RES0).
2052 */
2053 if (!(gic_data.flags & FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539))
2054 gic_data.rdists.gicd_typer2 = readl_relaxed(gic_data.dist_base + GICD_TYPER2);
2055
2056 gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops,
2057 &gic_data);
2058 gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
2059 if (!static_branch_unlikely(&gic_nvidia_t241_erratum)) {
2060 /* Enable GICv4.x features only when erratum T241-FABRIC-4 is not in effect */
2061 gic_data.rdists.has_rvpeid = true;
2062 gic_data.rdists.has_vlpis = true;
2063 gic_data.rdists.has_direct_lpi = true;
2064 gic_data.rdists.has_vpend_valid_dirty = true;
2065 }
2066
2067 if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
2068 err = -ENOMEM;
2069 goto out_free;
2070 }
2071
2072 irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED);
2073
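	/* RSS: SGIs may target affinity level 0 values 0-255 instead of only 0-15 */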
2074 gic_data.has_rss = !!(typer & GICD_TYPER_RSS);
2075
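	/* MBIS: the distributor supports message-based (MSI) signalling of SPIs */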
2076 if (typer & GICD_TYPER_MBIS) {
2077 err = mbi_init(handle, gic_data.domain);
2078 if (err)
2079 pr_err("Failed to initialize MBIs\n");
2080 }
2081
2082 set_handle_irq(gic_handle_irq);
2083
2084 gic_update_rdist_properties();
2085
2086 gic_cpu_sys_reg_enable();
2087 gic_prio_init();
2088 gic_dist_init();
2089 gic_cpu_init();
2090 gic_enable_nmi_support();
2091 gic_smp_init();
2092 gic_cpu_pm_init();
2093
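	/* Prefer the ITS for MSIs when LPIs are supported; otherwise fall back to GICv2m */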
2094 if (gic_dist_supports_lpis()) {
2095 its_init(handle, &gic_data.rdists, gic_data.domain, dist_prio_irq);
2096 its_cpu_init();
2097 its_lpi_memreserve_init();
2098 } else {
2099 if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
2100 gicv2m_init(handle, gic_data.domain);
2101 }
2102
2103 return 0;
2104
2105 out_free:
2106 if (gic_data.domain)
2107 irq_domain_remove(gic_data.domain);
2108 free_percpu(gic_data.rdists.rdist);
2109 return err;
2110 }
2111
2112 static int __init gic_validate_dist_version(void __iomem *dist_base)
2113 {
2114 u32 reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
2115
2116 if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4)
2117 return -ENODEV;
2118
2119 return 0;
2120 }
2121
2122 /* Create all possible partitions at boot time */
2123 static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
2124 {
2125 struct device_node *parts_node, *child_part;
2126 int part_idx = 0, i;
2127 int nr_parts;
2128 struct partition_affinity *parts;
2129
2130 parts_node = of_get_child_by_name(gic_node, "ppi-partitions");
2131 if (!parts_node)
2132 return;
2133
2134 gic_data.ppi_descs = kcalloc(gic_data.ppi_nr, sizeof(*gic_data.ppi_descs), GFP_KERNEL);
2135 if (!gic_data.ppi_descs)
2136 goto out_put_node;
2137
2138 nr_parts = of_get_child_count(parts_node);
2139
2140 if (!nr_parts)
2141 goto out_put_node;
2142
2143 parts = kcalloc(nr_parts, sizeof(*parts), GFP_KERNEL);
2144 if (WARN_ON(!parts))
2145 goto out_put_node;
2146
2147 for_each_child_of_node(parts_node, child_part) {
2148 struct partition_affinity *part;
2149 int n;
2150
2151 part = &parts[part_idx];
2152
2153 part->partition_id = of_node_to_fwnode(child_part);
2154
2155 pr_info("GIC: PPI partition %pOFn[%d] { ",
2156 child_part, part_idx);
2157
2158 n = of_property_count_elems_of_size(child_part, "affinity",
2159 sizeof(u32));
2160 WARN_ON(n <= 0);
2161
2162 for (i = 0; i < n; i++) {
2163 int err, cpu;
2164 u32 cpu_phandle;
2165 struct device_node *cpu_node;
2166
2167 err = of_property_read_u32_index(child_part, "affinity",
2168 i, &cpu_phandle);
2169 if (WARN_ON(err))
2170 continue;
2171
2172 cpu_node = of_find_node_by_phandle(cpu_phandle);
2173 if (WARN_ON(!cpu_node))
2174 continue;
2175
2176 cpu = of_cpu_node_to_id(cpu_node);
2177 if (WARN_ON(cpu < 0)) {
2178 of_node_put(cpu_node);
2179 continue;
2180 }
2181
2182 pr_cont("%pOF[%d] ", cpu_node, cpu);
2183
2184 cpumask_set_cpu(cpu, &part->mask);
2185 of_node_put(cpu_node);
2186 }
2187
2188 pr_cont("}\n");
2189 part_idx++;
2190 }
2191
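	/* Create one partition descriptor (and its backing interrupt) per PPI */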
2192 for (i = 0; i < gic_data.ppi_nr; i++) {
2193 unsigned int irq;
2194 struct partition_desc *desc;
2195 struct irq_fwspec ppi_fwspec = {
2196 .fwnode = gic_data.fwnode,
2197 .param_count = 3,
2198 .param = {
2199 [0] = GIC_IRQ_TYPE_PARTITION,
2200 [1] = i,
2201 [2] = IRQ_TYPE_NONE,
2202 },
2203 };
2204
2205 irq = irq_create_fwspec_mapping(&ppi_fwspec);
2206 if (WARN_ON(!irq))
2207 continue;
2208 desc = partition_create_desc(gic_data.fwnode, parts, nr_parts,
2209 irq, &partition_domain_ops);
2210 if (WARN_ON(!desc))
2211 continue;
2212
2213 gic_data.ppi_descs[i] = desc;
2214 }
2215
2216 out_put_node:
2217 of_node_put(parts_node);
2218 }
2219
2220 static void __init gic_of_setup_kvm_info(struct device_node *node, u32 nr_redist_regions)
2221 {
2222 int ret;
2223 struct resource r;
2224
2225 gic_v3_kvm_info.type = GIC_V3;
2226
2227 gic_v3_kvm_info.maint_irq = irq_of_parse_and_map(node, 0);
2228 if (!gic_v3_kvm_info.maint_irq)
2229 return;
2230
2231 /* Also skip GICD, GICC, GICH */
2232 ret = of_address_to_resource(node, nr_redist_regions + 3, &r);
2233 if (!ret)
2234 gic_v3_kvm_info.vcpu = r;
2235
2236 gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
2237 gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
2238 vgic_set_kvm_info(&gic_v3_kvm_info);
2239 }
2240
2241 static void gic_request_region(resource_size_t base, resource_size_t size,
2242 const char *name)
2243 {
2244 if (!request_mem_region(base, size, name))
2245 pr_warn_once(FW_BUG "%s region %pa has overlapping address\n",
2246 name, &base);
2247 }
2248
2249 static void __iomem *gic_of_iomap(struct device_node *node, int idx,
2250 const char *name, struct resource *res)
2251 {
2252 void __iomem *base;
2253 int ret;
2254
2255 ret = of_address_to_resource(node, idx, res);
2256 if (ret)
2257 return IOMEM_ERR_PTR(ret);
2258
2259 gic_request_region(res->start, resource_size(res), name);
2260 base = of_iomap(node, idx);
2261
2262 return base ?: IOMEM_ERR_PTR(-ENOMEM);
2263 }
2264
2265 static int __init gic_of_init(struct device_node *node, struct device_node *parent)
2266 {
2267 phys_addr_t dist_phys_base;
2268 void __iomem *dist_base;
2269 struct redist_region *rdist_regs;
2270 struct resource res;
2271 u64 redist_stride;
2272 u32 nr_redist_regions;
2273 int err, i;
2274
2275 dist_base = gic_of_iomap(node, 0, "GICD", &res);
2276 if (IS_ERR(dist_base)) {
2277 pr_err("%pOF: unable to map gic dist registers\n", node);
2278 return PTR_ERR(dist_base);
2279 }
2280
2281 dist_phys_base = res.start;
2282
2283 err = gic_validate_dist_version(dist_base);
2284 if (err) {
2285 pr_err("%pOF: no distributor detected, giving up\n", node);
2286 goto out_unmap_dist;
2287 }
2288
2289 if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions))
2290 nr_redist_regions = 1;
2291
2292 rdist_regs = kcalloc(nr_redist_regions, sizeof(*rdist_regs),
2293 GFP_KERNEL);
2294 if (!rdist_regs) {
2295 err = -ENOMEM;
2296 goto out_unmap_dist;
2297 }
2298
2299 for (i = 0; i < nr_redist_regions; i++) {
2300 rdist_regs[i].redist_base = gic_of_iomap(node, 1 + i, "GICR", &res);
2301 if (IS_ERR(rdist_regs[i].redist_base)) {
2302 pr_err("%pOF: couldn't map region %d\n", node, i);
2303 err = -ENODEV;
2304 goto out_unmap_rdist;
2305 }
2306 rdist_regs[i].phys_base = res.start;
2307 }
2308
2309 if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
2310 redist_stride = 0;
2311
2312 gic_enable_of_quirks(node, gic_quirks, &gic_data);
2313
2314 err = gic_init_bases(dist_phys_base, dist_base, rdist_regs,
2315 nr_redist_regions, redist_stride, &node->fwnode);
2316 if (err)
2317 goto out_unmap_rdist;
2318
2319 gic_populate_ppi_partitions(node);
2320
2321 if (static_branch_likely(&supports_deactivate_key))
2322 gic_of_setup_kvm_info(node, nr_redist_regions);
2323 return 0;
2324
2325 out_unmap_rdist:
2326 for (i = 0; i < nr_redist_regions; i++)
2327 if (rdist_regs[i].redist_base && !IS_ERR(rdist_regs[i].redist_base))
2328 iounmap(rdist_regs[i].redist_base);
2329 kfree(rdist_regs);
2330 out_unmap_dist:
2331 iounmap(dist_base);
2332 return err;
2333 }
2334
2335 IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);
2336
2337 #ifdef CONFIG_ACPI
2338 static struct
2339 {
2340 void __iomem *dist_base;
2341 struct redist_region *redist_regs;
2342 u32 nr_redist_regions;
2343 bool single_redist;
2344 int enabled_rdists;
2345 u32 maint_irq;
2346 int maint_irq_mode;
2347 phys_addr_t vcpu_base;
2348 } acpi_data __initdata;
2349
2350 static void __init
2351 gic_acpi_register_redist(phys_addr_t phys_base, void __iomem *redist_base)
2352 {
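	/* 'count' persists across calls and indexes the next free redist_regs slot */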
2353 static int count = 0;
2354
2355 acpi_data.redist_regs[count].phys_base = phys_base;
2356 acpi_data.redist_regs[count].redist_base = redist_base;
2357 acpi_data.redist_regs[count].single_redist = acpi_data.single_redist;
2358 count++;
2359 }
2360
2361 static int __init
2362 gic_acpi_parse_madt_redist(union acpi_subtable_headers *header,
2363 const unsigned long end)
2364 {
2365 struct acpi_madt_generic_redistributor *redist =
2366 (struct acpi_madt_generic_redistributor *)header;
2367 void __iomem *redist_base;
2368
2369 redist_base = ioremap(redist->base_address, redist->length);
2370 if (!redist_base) {
2371 pr_err("Couldn't map GICR region @%llx\n", redist->base_address);
2372 return -ENOMEM;
2373 }
2374
2375 if (acpi_get_madt_revision() >= 7 &&
2376 (redist->flags & ACPI_MADT_GICR_NON_COHERENT))
2377 gic_data.rdists.flags |= RDIST_FLAGS_FORCE_NON_SHAREABLE;
2378
2379 gic_request_region(redist->base_address, redist->length, "GICR");
2380
2381 gic_acpi_register_redist(redist->base_address, redist_base);
2382 return 0;
2383 }
2384
2385 static int __init
2386 gic_acpi_parse_madt_gicc(union acpi_subtable_headers *header,
2387 const unsigned long end)
2388 {
2389 struct acpi_madt_generic_interrupt *gicc =
2390 (struct acpi_madt_generic_interrupt *)header;
2391 u32 reg = readl_relaxed(acpi_data.dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
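	/* A GICv4 redistributor exposes four 64K register frames; GICv3 exposes two */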
2392 u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2;
2393 void __iomem *redist_base;
2394
2395 /* Neither enabled nor online capable means it doesn't exist; skip it */
2396 if (!(gicc->flags & (ACPI_MADT_ENABLED | ACPI_MADT_GICC_ONLINE_CAPABLE)))
2397 return 0;
2398
2399 /*
2400 * Capable but disabled CPUs can be brought online later, but ACPI does
2401 * not say whether their redistributor is accessible at that point.
2402 * Virtual hotplug systems can use the MADT's "always-on" GICR entries.
2403 * Otherwise, prevent such CPUs from being brought online.
2404 */
2405 if (!(gicc->flags & ACPI_MADT_ENABLED)) {
2406 int cpu = get_cpu_for_acpi_id(gicc->uid);
2407
2408 pr_warn("CPU %u's redistributor is inaccessible: this CPU can't be brought online\n", cpu);
2409 if (cpu >= 0)
2410 cpumask_set_cpu(cpu, &broken_rdists);
2411 return 0;
2412 }
2413
2414 redist_base = ioremap(gicc->gicr_base_address, size);
2415 if (!redist_base)
2416 return -ENOMEM;
2417 gic_request_region(gicc->gicr_base_address, size, "GICR");
2418
2419 if (acpi_get_madt_revision() >= 7 &&
2420 (gicc->flags & ACPI_MADT_GICC_NON_COHERENT))
2421 gic_data.rdists.flags |= RDIST_FLAGS_FORCE_NON_SHAREABLE;
2422
2423 gic_acpi_register_redist(gicc->gicr_base_address, redist_base);
2424 return 0;
2425 }
2426
2427 static int __init gic_acpi_collect_gicr_base(void)
2428 {
2429 acpi_tbl_entry_handler redist_parser;
2430 enum acpi_madt_type type;
2431
2432 if (acpi_data.single_redist) {
2433 type = ACPI_MADT_TYPE_GENERIC_INTERRUPT;
2434 redist_parser = gic_acpi_parse_madt_gicc;
2435 } else {
2436 type = ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR;
2437 redist_parser = gic_acpi_parse_madt_redist;
2438 }
2439
2440 /* Collect redistributor base addresses in GICR entries */
2441 if (acpi_table_parse_madt(type, redist_parser, 0) > 0)
2442 return 0;
2443
2444 pr_info("No valid GICR entries exist\n");
2445 return -ENODEV;
2446 }
2447
2448 static int __init gic_acpi_match_gicr(union acpi_subtable_headers *header,
2449 const unsigned long end)
2450 {
2451 /* Subtable presence means that a redistributor exists; that's all we need to know */
2452 return 0;
2453 }
2454
2455 static int __init gic_acpi_match_gicc(union acpi_subtable_headers *header,
2456 const unsigned long end)
2457 {
2458 struct acpi_madt_generic_interrupt *gicc =
2459 (struct acpi_madt_generic_interrupt *)header;
2460
2461 /*
2462 * If the GICC is enabled and has a valid GICR base address, then the
2463 * GICR base is presented via the GICC. The redistributor is only known
2464 * to be accessible if the GICC is marked as enabled. If this bit is not
2465 * set, we'd need to add the redistributor at runtime, which isn't
2466 * supported.
2467 */
2468 if (gicc->flags & ACPI_MADT_ENABLED && gicc->gicr_base_address)
2469 acpi_data.enabled_rdists++;
2470
2471 return 0;
2472 }
2473
2474 static int __init gic_acpi_count_gicr_regions(void)
2475 {
2476 int count;
2477
2478 /*
2479 * Count how many redistributor regions we have. Mixing redistributor
2480 * descriptions is not allowed: GICR and GICC subtables have to be
2481 * mutually exclusive.
2482 */
2483 count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR,
2484 gic_acpi_match_gicr, 0);
2485 if (count > 0) {
2486 acpi_data.single_redist = false;
2487 return count;
2488 }
2489
2490 count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
2491 gic_acpi_match_gicc, 0);
2492 if (count > 0) {
2493 acpi_data.single_redist = true;
2494 count = acpi_data.enabled_rdists;
2495 }
2496
2497 return count;
2498 }
2499
2500 static bool __init acpi_validate_gic_table(struct acpi_subtable_header *header,
2501 struct acpi_probe_entry *ape)
2502 {
2503 struct acpi_madt_generic_distributor *dist;
2504 int count;
2505
2506 dist = (struct acpi_madt_generic_distributor *)header;
2507 if (dist->version != ape->driver_data)
2508 return false;
2509
2510 /* We need to count the GICR regions anyway; the sooner the better */
2511 count = gic_acpi_count_gicr_regions();
2512 if (count <= 0)
2513 return false;
2514
2515 acpi_data.nr_redist_regions = count;
2516 return true;
2517 }
2518
2519 static int __init gic_acpi_parse_virt_madt_gicc(union acpi_subtable_headers *header,
2520 const unsigned long end)
2521 {
2522 struct acpi_madt_generic_interrupt *gicc =
2523 (struct acpi_madt_generic_interrupt *)header;
2524 int maint_irq_mode;
2525 static int first_madt = true;
2526
2527 if (!(gicc->flags &
2528 (ACPI_MADT_ENABLED | ACPI_MADT_GICC_ONLINE_CAPABLE)))
2529 return 0;
2530
2531 maint_irq_mode = (gicc->flags & ACPI_MADT_VGIC_IRQ_MODE) ?
2532 ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE;
2533
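	/* Record the first GICC's values; all subsequent entries must match them */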
2534 if (first_madt) {
2535 first_madt = false;
2536
2537 acpi_data.maint_irq = gicc->vgic_interrupt;
2538 acpi_data.maint_irq_mode = maint_irq_mode;
2539 acpi_data.vcpu_base = gicc->gicv_base_address;
2540
2541 return 0;
2542 }
2543
2544 /*
2545 * The maintenance interrupt and GICV should be the same for every CPU
2546 */
2547 if ((acpi_data.maint_irq != gicc->vgic_interrupt) ||
2548 (acpi_data.maint_irq_mode != maint_irq_mode) ||
2549 (acpi_data.vcpu_base != gicc->gicv_base_address))
2550 return -EINVAL;
2551
2552 return 0;
2553 }
2554
2555 static bool __init gic_acpi_collect_virt_info(void)
2556 {
2557 int count;
2558
2559 count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
2560 gic_acpi_parse_virt_madt_gicc, 0);
2561
2562 return (count > 0);
2563 }
2564
2565 #define ACPI_GICV3_DIST_MEM_SIZE (SZ_64K)
2566 #define ACPI_GICV2_VCTRL_MEM_SIZE (SZ_4K)
2567 #define ACPI_GICV2_VCPU_MEM_SIZE (SZ_8K)
2568
2569 static void __init gic_acpi_setup_kvm_info(void)
2570 {
2571 int irq;
2572
2573 if (!gic_acpi_collect_virt_info()) {
2574 pr_warn("Unable to get hardware information used for virtualization\n");
2575 return;
2576 }
2577
2578 gic_v3_kvm_info.type = GIC_V3;
2579
2580 irq = acpi_register_gsi(NULL, acpi_data.maint_irq,
2581 acpi_data.maint_irq_mode,
2582 ACPI_ACTIVE_HIGH);
2583 if (irq <= 0)
2584 return;
2585
2586 gic_v3_kvm_info.maint_irq = irq;
2587
2588 if (acpi_data.vcpu_base) {
2589 struct resource *vcpu = &gic_v3_kvm_info.vcpu;
2590
2591 vcpu->flags = IORESOURCE_MEM;
2592 vcpu->start = acpi_data.vcpu_base;
2593 vcpu->end = vcpu->start + ACPI_GICV2_VCPU_MEM_SIZE - 1;
2594 }
2595
2596 gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
2597 gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
2598 vgic_set_kvm_info(&gic_v3_kvm_info);
2599 }
2600
2601 static struct fwnode_handle *gsi_domain_handle;
2602
2603 static struct fwnode_handle *gic_v3_get_gsi_domain_id(u32 gsi)
2604 {
2605 return gsi_domain_handle;
2606 }
2607
2608 static int __init
2609 gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end)
2610 {
2611 struct acpi_madt_generic_distributor *dist;
2612 size_t size;
2613 int i, err;
2614
2615 /* Get distributor base address */
2616 dist = (struct acpi_madt_generic_distributor *)header;
2617 acpi_data.dist_base = ioremap(dist->base_address,
2618 ACPI_GICV3_DIST_MEM_SIZE);
2619 if (!acpi_data.dist_base) {
2620 pr_err("Unable to map GICD registers\n");
2621 return -ENOMEM;
2622 }
2623 gic_request_region(dist->base_address, ACPI_GICV3_DIST_MEM_SIZE, "GICD");
2624
2625 err = gic_validate_dist_version(acpi_data.dist_base);
2626 if (err) {
2627 pr_err("No distributor detected at @%p, giving up\n",
2628 acpi_data.dist_base);
2629 goto out_dist_unmap;
2630 }
2631
2632 size = sizeof(*acpi_data.redist_regs) * acpi_data.nr_redist_regions;
2633 acpi_data.redist_regs = kzalloc(size, GFP_KERNEL);
2634 if (!acpi_data.redist_regs) {
2635 err = -ENOMEM;
2636 goto out_dist_unmap;
2637 }
2638
2639 err = gic_acpi_collect_gicr_base();
2640 if (err)
2641 goto out_redist_unmap;
2642
2643 gsi_domain_handle = irq_domain_alloc_fwnode(&dist->base_address);
2644 if (!gsi_domain_handle) {
2645 err = -ENOMEM;
2646 goto out_redist_unmap;
2647 }
2648
2649 err = gic_init_bases(dist->base_address, acpi_data.dist_base,
2650 acpi_data.redist_regs, acpi_data.nr_redist_regions,
2651 0, gsi_domain_handle);
2652 if (err)
2653 goto out_fwhandle_free;
2654
2655 acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, gic_v3_get_gsi_domain_id);
2656
2657 if (static_branch_likely(&supports_deactivate_key))
2658 gic_acpi_setup_kvm_info();
2659
2660 return 0;
2661
2662 out_fwhandle_free:
2663 irq_domain_free_fwnode(gsi_domain_handle);
2664 out_redist_unmap:
2665 for (i = 0; i < acpi_data.nr_redist_regions; i++)
2666 if (acpi_data.redist_regs[i].redist_base)
2667 iounmap(acpi_data.redist_regs[i].redist_base);
2668 kfree(acpi_data.redist_regs);
2669 out_dist_unmap:
2670 iounmap(acpi_data.dist_base);
2671 return err;
2672 }
2673 IRQCHIP_ACPI_DECLARE(gic_v3, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
2674 acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V3,
2675 gic_acpi_init);
2676 IRQCHIP_ACPI_DECLARE(gic_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
2677 acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V4,
2678 gic_acpi_init);
2679 IRQCHIP_ACPI_DECLARE(gic_v3_or_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
2680 acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_NONE,
2681 gic_acpi_init);
2682 #endif
2683