// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#define pr_fmt(fmt)	"GICv3: " fmt

#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/kstrtox.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/iopoll.h>

#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-common.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/irqchip/arm-gic-v3-prio.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/arm-smccc.h>

#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>

#include "irq-gic-common.h"

static u8 dist_prio_irq __ro_after_init = GICV3_PRIO_IRQ;
static u8 dist_prio_nmi __ro_after_init = GICV3_PRIO_NMI;

#define FLAGS_WORKAROUND_GICR_WAKER_MSM8996	(1ULL << 0)
#define FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539	(1ULL << 1)
#define FLAGS_WORKAROUND_ASR_ERRATUM_8601001	(1ULL << 2)
#define FLAGS_WORKAROUND_INSECURE		(1ULL << 3)

static struct cpumask broken_rdists __read_mostly __maybe_unused;

struct redist_region {
	void __iomem		*redist_base;
	phys_addr_t		phys_base;
	bool			single_redist;
};

struct gic_chip_data {
	struct fwnode_handle	*fwnode;
	phys_addr_t		dist_phys_base;
	void __iomem		*dist_base;
	struct redist_region	*redist_regions;
	struct rdists		rdists;
	struct irq_domain	*domain;
	u64			redist_stride;
	u32			nr_redist_regions;
	u64			flags;
	bool			has_rss;
	unsigned int		ppi_nr;
	struct partition_affinity *parts;
	unsigned int		nr_parts;
};

struct partition_affinity {
	cpumask_t		mask;
	struct fwnode_handle	*partition_id;
};

#define T241_CHIPS_MAX		4
static void __iomem *t241_dist_base_alias[T241_CHIPS_MAX] __read_mostly;
static DEFINE_STATIC_KEY_FALSE(gic_nvidia_t241_erratum);

static DEFINE_STATIC_KEY_FALSE(gic_arm64_2941627_erratum);

static struct gic_chip_data gic_data __read_mostly;
static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);

#define GIC_ID_NR	(1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer))
#define GIC_LINE_NR	min(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U)
#define GIC_ESPI_NR	GICD_TYPER_ESPIS(gic_data.rdists.gicd_typer)

static bool nmi_support_forbidden;

/*
 * There are 16 SGIs, though we only actually use 8 in Linux. The other 8 SGIs
 * are potentially stolen by the secure side. Some code, especially code dealing
 * with hwirq IDs, is simplified by accounting for all 16.
 */
#define SGI_NR		16
/*
 * The behaviours of RPR and PMR registers differ depending on the value of
 * SCR_EL3.FIQ, and the behaviour of non-secure priority registers of the
 * distributor and redistributors depends on whether security is enabled in the
 * GIC.
 *
 * When security is enabled, non-secure priority values from the (re)distributor
 * are presented to the GIC CPUIF as follows:
 *     (GIC_(R)DIST_PRI[irq] >> 1) | 0x80;
 *
 * If SCR_EL3.FIQ == 1, the values written to/read from PMR and RPR at non-secure
 * EL1 are subject to a similar operation thus matching the priorities presented
 * from the (re)distributor when security is enabled. When SCR_EL3.FIQ == 0,
 * these values are unchanged by the GIC.
 *
 * see GICv3/GICv4 Architecture Specification (IHI0069D):
 * - section 4.8.1 Non-secure accesses to register fields for Secure interrupt
 *   priorities.
 * - Figure 4-7 Secure read of the priority field for a Non-secure Group 1
 *   interrupt.
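 *
 * Worked example (illustrative): a priority of 0xa0 programmed into the
 * (re)distributor is presented to the CPU interface as
 * (0xa0 >> 1) | 0x80 == 0xd0.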
 */
static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis);

static u32 gic_get_pribits(void)
{
	u32 pribits;

	pribits = gic_read_ctlr();
	pribits &= ICC_CTLR_EL1_PRI_BITS_MASK;
	pribits >>= ICC_CTLR_EL1_PRI_BITS_SHIFT;
	pribits++;

	return pribits;
}

static bool gic_has_group0(void)
{
	u32 val;
	u32 old_pmr;

	old_pmr = gic_read_pmr();

	/*
	 * Let's find out if Group0 is under control of EL3 or not by
	 * setting the highest possible, non-zero priority in PMR.
	 *
	 * If SCR_EL3.FIQ is set, the priority gets shifted down in
	 * order for the CPU interface to set bit 7, and keep the
	 * actual priority in the non-secure range. In the process, it
	 * loses the least significant bit and the actual priority
	 * becomes 0x80. Reading it back returns 0, indicating that
	 * we don't have access to Group0.
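	 *
	 * Worked example (illustrative, assuming 5 implemented priority
	 * bits): we write BIT(3) == 0x08. The shifted value 0x84 has
	 * bit 2 below the implemented range, so only 0x80 survives,
	 * and the read-back yields 0.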
	 */
	gic_write_pmr(BIT(8 - gic_get_pribits()));
	val = gic_read_pmr();

	gic_write_pmr(old_pmr);

	return val != 0;
}

static inline bool gic_dist_security_disabled(void)
{
	return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS;
}

static bool cpus_have_security_disabled __ro_after_init;
static bool cpus_have_group0 __ro_after_init;

static void __init gic_prio_init(void)
{
	bool ds;

	cpus_have_group0 = gic_has_group0();

	ds = gic_dist_security_disabled();
	if ((gic_data.flags & FLAGS_WORKAROUND_INSECURE) && !ds) {
		if (cpus_have_group0) {
			u32 val;

			val = readl_relaxed(gic_data.dist_base + GICD_CTLR);
			val |= GICD_CTLR_DS;
			writel_relaxed(val, gic_data.dist_base + GICD_CTLR);

			ds = gic_dist_security_disabled();
			if (ds)
				pr_warn("Broken GIC integration, security disabled\n");
		} else {
			pr_warn("Broken GIC integration, pNMI forbidden\n");
			nmi_support_forbidden = true;
		}
	}

	cpus_have_security_disabled = ds;

	/*
	 * How priority values are used by the GIC depends on two things:
	 * the security state of the GIC (controlled by the GICD_CTLR.DS bit)
	 * and if Group 0 interrupts can be delivered to Linux in the non-secure
	 * world as FIQs (controlled by the SCR_EL3.FIQ bit). These affect the
	 * way priorities are presented in ICC_PMR_EL1 and in the distributor:
	 *
	 * GICD_CTLR.DS | SCR_EL3.FIQ | ICC_PMR_EL1 | Distributor
	 * -------------------------------------------------------
	 *      1       |      -      |  unchanged  | unchanged
	 * -------------------------------------------------------
	 *      0       |      1      |  non-secure | non-secure
	 * -------------------------------------------------------
	 *      0       |      0      |  unchanged  | non-secure
	 *
	 * In the non-secure view reads and writes are modified:
	 *
	 * - A value written is right-shifted by one and the MSB is set,
	 *   forcing the priority into the non-secure range.
	 *
	 * - A value read is left-shifted by one.
	 *
	 * In the first two cases, where ICC_PMR_EL1 and the interrupt priority
	 * are both either modified or unchanged, we can use the same set of
	 * priorities.
	 *
	 * In the last case, where only the interrupt priorities are modified to
	 * be in the non-secure range, we program the non-secure values into
	 * the distributor to match the PMR values we want.
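	 *
	 * For example (illustrative): for an effective priority of 0xc0,
	 * the distributor is programmed with the non-secure value 0x80,
	 * which the GIC presents as (0x80 >> 1) | 0x80 == 0xc0.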
	 */
	if (cpus_have_group0 && !cpus_have_security_disabled) {
		dist_prio_irq = __gicv3_prio_to_ns(dist_prio_irq);
		dist_prio_nmi = __gicv3_prio_to_ns(dist_prio_nmi);
	}

	pr_info("GICD_CTLR.DS=%d, SCR_EL3.FIQ=%d\n",
		cpus_have_security_disabled,
		!cpus_have_group0);
}

static struct gic_kvm_info gic_v3_kvm_info __initdata;
static DEFINE_PER_CPU(bool, has_rss);

#define MPIDR_RS(mpidr)			(((mpidr) & 0xF0UL) >> 4)
#define gic_data_rdist()		(this_cpu_ptr(gic_data.rdists.rdist))
#define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
#define gic_data_rdist_sgi_base()	(gic_data_rdist_rd_base() + SZ_64K)
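
/*
 * Illustrative example: MPIDR_RS() extracts the SGI range selector from
 * Aff0 bits [7:4]. A CPU with Aff0 == 0x23 lives in range 2, so SGIs
 * reach it with RS == 2 and bit 3 set in the target list.
 */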

/* Our default, arbitrary priority value. Linux only uses one anyway. */
#define DEFAULT_PMR_VALUE	0xf0

enum gic_intid_range {
	SGI_RANGE,
	PPI_RANGE,
	SPI_RANGE,
	EPPI_RANGE,
	ESPI_RANGE,
	LPI_RANGE,
	__INVALID_RANGE__
};

static enum gic_intid_range __get_intid_range(irq_hw_number_t hwirq)
{
	switch (hwirq) {
	case 0 ... 15:
		return SGI_RANGE;
	case 16 ... 31:
		return PPI_RANGE;
	case 32 ... 1019:
		return SPI_RANGE;
	case EPPI_BASE_INTID ... (EPPI_BASE_INTID + 63):
		return EPPI_RANGE;
	case ESPI_BASE_INTID ... (ESPI_BASE_INTID + 1023):
		return ESPI_RANGE;
	case 8192 ... GENMASK(23, 0):
		return LPI_RANGE;
	default:
		return __INVALID_RANGE__;
	}
}

static enum gic_intid_range get_intid_range(struct irq_data *d)
{
	return __get_intid_range(d->hwirq);
}

static inline bool gic_irq_in_rdist(struct irq_data *d)
{
	switch (get_intid_range(d)) {
	case SGI_RANGE:
	case PPI_RANGE:
	case EPPI_RANGE:
		return true;
	default:
		return false;
	}
}

static inline void __iomem *gic_dist_base_alias(struct irq_data *d)
{
	if (static_branch_unlikely(&gic_nvidia_t241_erratum)) {
		irq_hw_number_t hwirq = irqd_to_hwirq(d);
		u32 chip;

		/*
		 * For the erratum T241-FABRIC-4, read accesses to GICD_In{E}
		 * registers are directed to the chip that owns the SPI. The
		 * alias region can also be used for writes to the GICD_In{E}
		 * registers, except GICD_ICENABLERn. Each chip has support
		 * for 320 {E}SPIs. Mappings for all 4 chips:
		 *    Chip0 = 32-351
		 *    Chip1 = 352-671
		 *    Chip2 = 672-991
		 *    Chip3 = 4096-4415
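		 *
		 * For example, SPI hwirq 700 resolves to chip
		 * (700 - 32) / 320 == 2.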
		 */
		switch (__get_intid_range(hwirq)) {
		case SPI_RANGE:
			chip = (hwirq - 32) / 320;
			break;
		case ESPI_RANGE:
			chip = 3;
			break;
		default:
			unreachable();
		}
		return t241_dist_base_alias[chip];
	}

	return gic_data.dist_base;
}

static inline void __iomem *gic_dist_base(struct irq_data *d)
{
	switch (get_intid_range(d)) {
	case SGI_RANGE:
	case PPI_RANGE:
	case EPPI_RANGE:
		/* SGI+PPI -> SGI_base for this CPU */
		return gic_data_rdist_sgi_base();

	case SPI_RANGE:
	case ESPI_RANGE:
		/* SPI -> dist_base */
		return gic_data.dist_base;

	default:
		return NULL;
	}
}

static void gic_do_wait_for_rwp(void __iomem *base, u32 bit)
{
	u32 val;
	int ret;

	ret = readl_relaxed_poll_timeout_atomic(base + GICD_CTLR, val, !(val & bit),
						1, USEC_PER_SEC);
	if (ret == -ETIMEDOUT)
		pr_err_ratelimited("RWP timeout, gone fishing\n");
}

/* Wait for completion of a distributor change */
static void gic_dist_wait_for_rwp(void)
{
	gic_do_wait_for_rwp(gic_data.dist_base, GICD_CTLR_RWP);
}

/* Wait for completion of a redistributor change */
static void gic_redist_wait_for_rwp(void)
{
	gic_do_wait_for_rwp(gic_data_rdist_rd_base(), GICR_CTLR_RWP);
}

static void gic_enable_redist(bool enable)
{
	void __iomem *rbase;
	u32 val;
	int ret;

	if (gic_data.flags & FLAGS_WORKAROUND_GICR_WAKER_MSM8996)
		return;

	rbase = gic_data_rdist_rd_base();

	val = readl_relaxed(rbase + GICR_WAKER);
	if (enable)
		/* Wake up this CPU redistributor */
		val &= ~GICR_WAKER_ProcessorSleep;
	else
		val |= GICR_WAKER_ProcessorSleep;
	writel_relaxed(val, rbase + GICR_WAKER);

	if (!enable) {		/* Check that GICR_WAKER is writeable */
		val = readl_relaxed(rbase + GICR_WAKER);
		if (!(val & GICR_WAKER_ProcessorSleep))
			return;	/* No PM support in this redistributor */
	}

	ret = readl_relaxed_poll_timeout_atomic(rbase + GICR_WAKER, val,
						enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep),
						1, USEC_PER_SEC);
	if (ret == -ETIMEDOUT) {
		pr_err_ratelimited("redistributor failed to %s...\n",
				   enable ? "wakeup" : "sleep");
	}
}

/*
 * Routines to disable, enable, EOI and route interrupts
 */
static u32 convert_offset_index(struct irq_data *d, u32 offset, u32 *index)
{
	switch (get_intid_range(d)) {
	case SGI_RANGE:
	case PPI_RANGE:
	case SPI_RANGE:
		*index = d->hwirq;
		return offset;
	case EPPI_RANGE:
		/*
		 * Contrary to the ESPI range, the EPPI range is contiguous
		 * to the PPI range in the registers, so let's adjust the
		 * displacement accordingly. Consistency is overrated.
		 */
		*index = d->hwirq - EPPI_BASE_INTID + 32;
		return offset;
	case ESPI_RANGE:
		*index = d->hwirq - ESPI_BASE_INTID;
		switch (offset) {
		case GICD_ISENABLER:
			return GICD_ISENABLERnE;
		case GICD_ICENABLER:
			return GICD_ICENABLERnE;
		case GICD_ISPENDR:
			return GICD_ISPENDRnE;
		case GICD_ICPENDR:
			return GICD_ICPENDRnE;
		case GICD_ISACTIVER:
			return GICD_ISACTIVERnE;
		case GICD_ICACTIVER:
			return GICD_ICACTIVERnE;
		case GICD_IPRIORITYR:
			return GICD_IPRIORITYRnE;
		case GICD_ICFGR:
			return GICD_ICFGRnE;
		case GICD_IROUTER:
			return GICD_IROUTERnE;
		default:
			break;
		}
		break;
	default:
		break;
	}

	WARN_ON(1);
	*index = d->hwirq;
	return offset;
}

static int gic_peek_irq(struct irq_data *d, u32 offset)
{
	void __iomem *base;
	u32 index, mask;

	offset = convert_offset_index(d, offset, &index);
	mask = 1 << (index % 32);

	if (gic_irq_in_rdist(d))
		base = gic_data_rdist_sgi_base();
	else
		base = gic_dist_base_alias(d);

	return !!(readl_relaxed(base + offset + (index / 32) * 4) & mask);
}

static void gic_poke_irq(struct irq_data *d, u32 offset)
{
	void __iomem *base;
	u32 index, mask;

	offset = convert_offset_index(d, offset, &index);
	mask = 1 << (index % 32);

	if (gic_irq_in_rdist(d))
		base = gic_data_rdist_sgi_base();
	else
		base = gic_data.dist_base;

	writel_relaxed(mask, base + offset + (index / 32) * 4);
}

static void gic_mask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GICD_ICENABLER);
	if (gic_irq_in_rdist(d))
		gic_redist_wait_for_rwp();
	else
		gic_dist_wait_for_rwp();
}

static void gic_eoimode1_mask_irq(struct irq_data *d)
{
	gic_mask_irq(d);
	/*
	 * When masking a forwarded interrupt, make sure it is
	 * deactivated as well.
	 *
	 * This ensures that an interrupt that is getting
	 * disabled/masked will not get "stuck", because there is
	 * no one to deactivate it (guest is being terminated).
	 */
	if (irqd_is_forwarded_to_vcpu(d))
		gic_poke_irq(d, GICD_ICACTIVER);
}

static void gic_unmask_irq(struct irq_data *d)
{
	gic_poke_irq(d, GICD_ISENABLER);
}

static inline bool gic_supports_nmi(void)
{
	return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
	       static_branch_likely(&supports_pseudo_nmis);
}

static int gic_irq_set_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which, bool val)
{
	u32 reg;

	if (d->hwirq >= 8192) /* SGI/PPI/SPI only */
		return -EINVAL;

	switch (which) {
	case IRQCHIP_STATE_PENDING:
		reg = val ? GICD_ISPENDR : GICD_ICPENDR;
		break;

	case IRQCHIP_STATE_ACTIVE:
		reg = val ? GICD_ISACTIVER : GICD_ICACTIVER;
		break;

	case IRQCHIP_STATE_MASKED:
		if (val) {
			gic_mask_irq(d);
			return 0;
		}
		reg = GICD_ISENABLER;
		break;

	default:
		return -EINVAL;
	}

	gic_poke_irq(d, reg);

	/*
	 * Force read-back to guarantee that the active state has taken
	 * effect, and won't race with a guest-driven deactivation.
	 */
	if (reg == GICD_ISACTIVER)
		gic_peek_irq(d, reg);
	return 0;
}

static int gic_irq_get_irqchip_state(struct irq_data *d,
				     enum irqchip_irq_state which, bool *val)
{
	if (d->hwirq >= 8192) /* SGI/PPI/SPI only */
		return -EINVAL;

	switch (which) {
	case IRQCHIP_STATE_PENDING:
		*val = gic_peek_irq(d, GICD_ISPENDR);
		break;

	case IRQCHIP_STATE_ACTIVE:
		*val = gic_peek_irq(d, GICD_ISACTIVER);
		break;

	case IRQCHIP_STATE_MASKED:
		*val = !gic_peek_irq(d, GICD_ISENABLER);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

static void gic_irq_set_prio(struct irq_data *d, u8 prio)
{
	void __iomem *base = gic_dist_base(d);
	u32 offset, index;

	offset = convert_offset_index(d, GICD_IPRIORITYR, &index);

	writeb_relaxed(prio, base + offset + index);
}

static int gic_irq_nmi_setup(struct irq_data *d)
{
	struct irq_desc *desc = irq_to_desc(d->irq);

	if (!gic_supports_nmi())
		return -EINVAL;

	if (gic_peek_irq(d, GICD_ISENABLER)) {
		pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq);
		return -EINVAL;
	}

	/*
	 * A secondary irq_chip should be in charge of LPI requests;
	 * it should not be possible to get here.
	 */
	if (WARN_ON(irqd_to_hwirq(d) >= 8192))
		return -EINVAL;

	/* desc lock should already be held */
	if (!gic_irq_in_rdist(d))
		desc->handle_irq = handle_fasteoi_nmi;

	gic_irq_set_prio(d, dist_prio_nmi);

	return 0;
}

static void gic_irq_nmi_teardown(struct irq_data *d)
{
	struct irq_desc *desc = irq_to_desc(d->irq);

	if (WARN_ON(!gic_supports_nmi()))
		return;

	if (gic_peek_irq(d, GICD_ISENABLER)) {
		pr_err("Cannot clear NMI property of enabled IRQ %u\n", d->irq);
		return;
	}

	/*
	 * A secondary irq_chip should be in charge of LPI requests;
	 * it should not be possible to get here.
	 */
	if (WARN_ON(irqd_to_hwirq(d) >= 8192))
		return;

	/* desc lock should already be held */
	if (!gic_irq_in_rdist(d))
		desc->handle_irq = handle_fasteoi_irq;

	gic_irq_set_prio(d, dist_prio_irq);
}

static bool gic_arm64_erratum_2941627_needed(struct irq_data *d)
{
	enum gic_intid_range range;

	if (!static_branch_unlikely(&gic_arm64_2941627_erratum))
		return false;

	range = get_intid_range(d);

	/*
	 * The workaround is needed if the IRQ is an SPI and
	 * the target cpu is different from the one we are
	 * executing on.
	 */
	return (range == SPI_RANGE || range == ESPI_RANGE) &&
	       !cpumask_test_cpu(raw_smp_processor_id(),
				 irq_data_get_effective_affinity_mask(d));
}

static void gic_eoi_irq(struct irq_data *d)
{
	write_gicreg(irqd_to_hwirq(d), ICC_EOIR1_EL1);
	isb();

	if (gic_arm64_erratum_2941627_needed(d)) {
		/*
		 * Make sure the GIC stream deactivate packet
		 * issued by ICC_EOIR1_EL1 has completed before
		 * deactivating through GICD_ICACTIVER.
		 */
		dsb(sy);
		gic_poke_irq(d, GICD_ICACTIVER);
	}
}

static void gic_eoimode1_eoi_irq(struct irq_data *d)
{
	/*
	 * No need to deactivate an LPI, or an interrupt that
	 * is getting forwarded to a vcpu.
	 */
	if (irqd_to_hwirq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d))
		return;

	if (!gic_arm64_erratum_2941627_needed(d))
		gic_write_dir(irqd_to_hwirq(d));
	else
		gic_poke_irq(d, GICD_ICACTIVER);
}

static int gic_set_type(struct irq_data *d, unsigned int type)
{
	irq_hw_number_t irq = irqd_to_hwirq(d);
	enum gic_intid_range range;
	void __iomem *base;
	u32 offset, index;
	int ret;

	range = get_intid_range(d);

	/* Interrupt configuration for SGIs can't be changed */
	if (range == SGI_RANGE)
		return type != IRQ_TYPE_EDGE_RISING ? -EINVAL : 0;

	/* SPIs have restrictions on the supported types */
	if ((range == SPI_RANGE || range == ESPI_RANGE) &&
	    type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
		return -EINVAL;

	if (gic_irq_in_rdist(d))
		base = gic_data_rdist_sgi_base();
	else
		base = gic_dist_base_alias(d);

	offset = convert_offset_index(d, GICD_ICFGR, &index);

	ret = gic_configure_irq(index, type, base + offset);
	if (ret && (range == PPI_RANGE || range == EPPI_RANGE)) {
		/* Misconfigured PPIs are usually not fatal */
		pr_warn("GIC: PPI INTID%ld is secure or misconfigured\n", irq);
		ret = 0;
	}

	return ret;
}

static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
{
	if (get_intid_range(d) == SGI_RANGE)
		return -EINVAL;

	if (vcpu)
		irqd_set_forwarded_to_vcpu(d);
	else
		irqd_clr_forwarded_to_vcpu(d);
	return 0;
}

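/*
 * Pack an MPIDR into the Aff3.Aff2.Aff1.Aff0 layout used by GICD_IROUTER
 * and GICR_TYPER. Illustrative example: an MPIDR with Aff2 == 1,
 * Aff1 == 2, Aff0 == 3 yields 0x00010203.
 */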
static u64 gic_cpu_to_affinity(int cpu)
{
	u64 mpidr = cpu_logical_map(cpu);
	u64 aff;

	/* ASR8601 needs to have its affinities shifted down... */
	if (unlikely(gic_data.flags & FLAGS_WORKAROUND_ASR_ERRATUM_8601001))
		mpidr = (MPIDR_AFFINITY_LEVEL(mpidr, 1) |
			 (MPIDR_AFFINITY_LEVEL(mpidr, 2) << 8));

	aff = ((u64)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8  |
	       MPIDR_AFFINITY_LEVEL(mpidr, 0));

	return aff;
}

static void gic_deactivate_unhandled(u32 irqnr)
{
	if (static_branch_likely(&supports_deactivate_key)) {
		if (irqnr < 8192)
			gic_write_dir(irqnr);
	} else {
		write_gicreg(irqnr, ICC_EOIR1_EL1);
		isb();
	}
}

/*
 * Follow a read of the IAR with any HW maintenance that needs to happen prior
 * to invoking the relevant IRQ handler. We must do two things:
 *
 * (1) Ensure instruction ordering between a read of IAR and subsequent
 *     instructions in the IRQ handler using an ISB.
 *
 *     It is possible for the IAR to report an IRQ which was signalled *after*
 *     the CPU took an IRQ exception as multiple interrupts can race to be
 *     recognized by the GIC, earlier interrupts could be withdrawn, and/or
 *     later interrupts could be prioritized by the GIC.
 *
 *     For devices which are tightly coupled to the CPU, such as PMUs, a
 *     context synchronization event is necessary to ensure that system
 *     register state is not stale, as these may have been indirectly written
 *     *after* exception entry.
 *
 * (2) Execute an interrupt priority drop when EOI mode 1 is in use.
 */
static inline void gic_complete_ack(u32 irqnr)
{
	if (static_branch_likely(&supports_deactivate_key))
		write_gicreg(irqnr, ICC_EOIR1_EL1);

	isb();
}

static bool gic_rpr_is_nmi_prio(void)
{
	if (!gic_supports_nmi())
		return false;

	return unlikely(gic_read_rpr() == GICV3_PRIO_NMI);
}

static bool gic_irqnr_is_special(u32 irqnr)
{
	return irqnr >= 1020 && irqnr <= 1023;
}

static void __gic_handle_irq(u32 irqnr, struct pt_regs *regs)
{
	if (gic_irqnr_is_special(irqnr))
		return;

	gic_complete_ack(irqnr);

	if (generic_handle_domain_irq(gic_data.domain, irqnr)) {
		WARN_ONCE(true, "Unexpected interrupt (irqnr %u)\n", irqnr);
		gic_deactivate_unhandled(irqnr);
	}
}

static void __gic_handle_nmi(u32 irqnr, struct pt_regs *regs)
{
	if (gic_irqnr_is_special(irqnr))
		return;

	gic_complete_ack(irqnr);

	if (generic_handle_domain_nmi(gic_data.domain, irqnr)) {
		WARN_ONCE(true, "Unexpected pseudo-NMI (irqnr %u)\n", irqnr);
		gic_deactivate_unhandled(irqnr);
	}
}

/*
 * An exception has been taken from a context with IRQs enabled, and this could
 * be an IRQ or an NMI.
 *
 * The entry code called us with DAIF.IF set to keep NMIs masked. We must clear
 * DAIF.IF (and update ICC_PMR_EL1 to mask regular IRQs) prior to returning,
 * after handling any NMI but before handling any IRQ.
 *
 * The entry code has performed IRQ entry, and if an NMI is detected we must
 * perform NMI entry/exit around invoking the handler.
 */
static void __gic_handle_irq_from_irqson(struct pt_regs *regs)
{
	bool is_nmi;
	u32 irqnr;

	irqnr = gic_read_iar();

	is_nmi = gic_rpr_is_nmi_prio();

	if (is_nmi) {
		nmi_enter();
		__gic_handle_nmi(irqnr, regs);
		nmi_exit();
	}

	if (gic_prio_masking_enabled()) {
		gic_pmr_mask_irqs();
		gic_arch_enable_irqs();
	}

	if (!is_nmi)
		__gic_handle_irq(irqnr, regs);
}

/*
 * An exception has been taken from a context with IRQs disabled, which can only
 * be an NMI.
 *
 * The entry code called us with DAIF.IF set to keep NMIs masked. We must leave
 * DAIF.IF (and ICC_PMR_EL1) unchanged.
 *
 * The entry code has performed NMI entry.
 */
static void __gic_handle_irq_from_irqsoff(struct pt_regs *regs)
{
	u64 pmr;
	u32 irqnr;

	/*
	 * We were in a context with IRQs disabled. However, the
	 * entry code has set PMR to a value that allows any
	 * interrupt to be acknowledged, and not just NMIs. This can
	 * lead to surprising effects if the NMI has been retired in
	 * the meantime, and there is an IRQ pending. The IRQ
	 * would then be taken in NMI context, something that nobody
	 * wants to debug twice.
	 *
	 * Until we sort this, drop PMR again to a level that will
	 * actually only allow NMIs before reading IAR, and then
	 * restore it to what it was.
	 */
	pmr = gic_read_pmr();
	gic_pmr_mask_irqs();
	isb();
	irqnr = gic_read_iar();
	gic_write_pmr(pmr);

	__gic_handle_nmi(irqnr, regs);
}

static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
	if (unlikely(gic_supports_nmi() && !interrupts_enabled(regs)))
		__gic_handle_irq_from_irqsoff(regs);
	else
		__gic_handle_irq_from_irqson(regs);
}

static void __init gic_dist_init(void)
{
	unsigned int i;
	u64 affinity;
	void __iomem *base = gic_data.dist_base;
	u32 val;

	/* Disable the distributor */
	writel_relaxed(0, base + GICD_CTLR);
	gic_dist_wait_for_rwp();

	/*
	 * Configure SPIs as non-secure Group-1. This will only matter
	 * if the GIC only has a single security state. This will not
	 * do the right thing if the kernel is running in secure mode,
	 * but that's not the intended use case anyway.
	 */
	for (i = 32; i < GIC_LINE_NR; i += 32)
		writel_relaxed(~0, base + GICD_IGROUPR + i / 8);

	/* Extended SPI range, not handled by the GICv2/GICv3 common code */
	for (i = 0; i < GIC_ESPI_NR; i += 32) {
		writel_relaxed(~0U, base + GICD_ICENABLERnE + i / 8);
		writel_relaxed(~0U, base + GICD_ICACTIVERnE + i / 8);
	}

	for (i = 0; i < GIC_ESPI_NR; i += 32)
		writel_relaxed(~0U, base + GICD_IGROUPRnE + i / 8);

	for (i = 0; i < GIC_ESPI_NR; i += 16)
		writel_relaxed(0, base + GICD_ICFGRnE + i / 4);

	for (i = 0; i < GIC_ESPI_NR; i += 4)
		writel_relaxed(REPEAT_BYTE_U32(dist_prio_irq),
			       base + GICD_IPRIORITYRnE + i);

	/* Now do the common stuff */
	gic_dist_config(base, GIC_LINE_NR, dist_prio_irq);

	val = GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1;
	if (gic_data.rdists.gicd_typer2 & GICD_TYPER2_nASSGIcap) {
		pr_info("Enabling SGIs without active state\n");
		val |= GICD_CTLR_nASSGIreq;
	}

	/* Enable distributor with ARE, Group1, and wait for it to drain */
	writel_relaxed(val, base + GICD_CTLR);
	gic_dist_wait_for_rwp();

	/*
	 * Set all global interrupts to the boot CPU only. ARE must be
	 * enabled.
	 */
	affinity = gic_cpu_to_affinity(smp_processor_id());
	for (i = 32; i < GIC_LINE_NR; i++)
		gic_write_irouter(affinity, base + GICD_IROUTER + i * 8);

	for (i = 0; i < GIC_ESPI_NR; i++)
		gic_write_irouter(affinity, base + GICD_IROUTERnE + i * 8);
}

static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *))
{
	int ret = -ENODEV;
	int i;

	for (i = 0; i < gic_data.nr_redist_regions; i++) {
		void __iomem *ptr = gic_data.redist_regions[i].redist_base;
		u64 typer;
		u32 reg;

		reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK;
		if (reg != GIC_PIDR2_ARCH_GICv3 &&
		    reg != GIC_PIDR2_ARCH_GICv4) { /* We're in trouble... */
			pr_warn("No redistributor present @%p\n", ptr);
			break;
		}

		do {
			typer = gic_read_typer(ptr + GICR_TYPER);
			ret = fn(gic_data.redist_regions + i, ptr);
			if (!ret)
				return 0;

			if (gic_data.redist_regions[i].single_redist)
				break;

			if (gic_data.redist_stride) {
				ptr += gic_data.redist_stride;
			} else {
				ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */
				if (typer & GICR_TYPER_VLPIS)
					ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */
			}
		} while (!(typer & GICR_TYPER_LAST));
	}

	return ret ? -ENODEV : 0;
}

static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr)
{
	unsigned long mpidr;
	u64 typer;
	u32 aff;

	/*
	 * Convert affinity to a 32-bit value that can be matched to
	 * GICR_TYPER bits [63:32].
	 */
	mpidr = gic_cpu_to_affinity(smp_processor_id());

	aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
	       MPIDR_AFFINITY_LEVEL(mpidr, 0));

	typer = gic_read_typer(ptr + GICR_TYPER);
	if ((typer >> 32) == aff) {
		u64 offset = ptr - region->redist_base;

		raw_spin_lock_init(&gic_data_rdist()->rd_lock);
		gic_data_rdist_rd_base() = ptr;
		gic_data_rdist()->phys_base = region->phys_base + offset;

		pr_info("CPU%d: found redistributor %lx region %d:%pa\n",
			smp_processor_id(), mpidr,
			(int)(region - gic_data.redist_regions),
			&gic_data_rdist()->phys_base);
		return 0;
	}

	/* Try next one */
	return 1;
}

static int gic_populate_rdist(void)
{
	if (gic_iterate_rdists(__gic_populate_rdist) == 0)
		return 0;

	/* We couldn't even deal with ourselves... */
	WARN(true, "CPU%d: mpidr %lx has no re-distributor!\n",
	     smp_processor_id(),
	     (unsigned long)cpu_logical_map(smp_processor_id()));
	return -ENODEV;
}

static int __gic_update_rdist_properties(struct redist_region *region,
					 void __iomem *ptr)
{
	u64 typer = gic_read_typer(ptr + GICR_TYPER);
	u32 ctlr = readl_relaxed(ptr + GICR_CTLR);

	/* Boot-time cleanup */
	if ((typer & GICR_TYPER_VLPIS) && (typer & GICR_TYPER_RVPEID)) {
		u64 val;

		/* Deactivate any present vPE */
		val = gicr_read_vpendbaser(ptr + SZ_128K + GICR_VPENDBASER);
		if (val & GICR_VPENDBASER_Valid)
			gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast,
					      ptr + SZ_128K + GICR_VPENDBASER);

		/* Mark the VPE table as invalid */
		val = gicr_read_vpropbaser(ptr + SZ_128K + GICR_VPROPBASER);
		val &= ~GICR_VPROPBASER_4_1_VALID;
		gicr_write_vpropbaser(val, ptr + SZ_128K + GICR_VPROPBASER);
	}

	gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS);

	/*
	 * TYPER.RVPEID implies some form of DirectLPI, no matter what the
	 * doc says... :-/ And CTLR.IR implies another subset of DirectLPI
	 * that the ITS driver can make use of for LPIs (and not VLPIs).
	 *
	 * These are 3 different ways to express the same thing, depending
	 * on the revision of the architecture and its relaxations over
	 * time. Just group them under the 'direct_lpi' banner.
	 */
	gic_data.rdists.has_rvpeid &= !!(typer & GICR_TYPER_RVPEID);
	gic_data.rdists.has_direct_lpi &= (!!(typer & GICR_TYPER_DirectLPIS) |
					   !!(ctlr & GICR_CTLR_IR) |
					   gic_data.rdists.has_rvpeid);
	gic_data.rdists.has_vpend_valid_dirty &= !!(typer & GICR_TYPER_DIRTY);

	/* Detect non-sensical configurations */
	if (WARN_ON_ONCE(gic_data.rdists.has_rvpeid && !gic_data.rdists.has_vlpis)) {
		gic_data.rdists.has_direct_lpi = false;
		gic_data.rdists.has_vlpis = false;
		gic_data.rdists.has_rvpeid = false;
	}

	gic_data.ppi_nr = min(GICR_TYPER_NR_PPIS(typer), gic_data.ppi_nr);

	return 1;
}

static void gic_update_rdist_properties(void)
{
	gic_data.ppi_nr = UINT_MAX;
	gic_iterate_rdists(__gic_update_rdist_properties);
	if (WARN_ON(gic_data.ppi_nr == UINT_MAX))
		gic_data.ppi_nr = 0;
	pr_info("GICv3 features: %d PPIs%s%s\n",
		gic_data.ppi_nr,
		gic_data.has_rss ? ", RSS" : "",
		gic_data.rdists.has_direct_lpi ? ", DirectLPI" : "");

	if (gic_data.rdists.has_vlpis)
		pr_info("GICv4 features: %s%s%s\n",
			gic_data.rdists.has_direct_lpi ? "DirectLPI " : "",
			gic_data.rdists.has_rvpeid ? "RVPEID " : "",
			gic_data.rdists.has_vpend_valid_dirty ? "Valid+Dirty " : "");
}

static void gic_cpu_sys_reg_enable(void)
{
	/*
	 * Need to check that the SRE bit has actually been set. If
	 * not, it means that SRE is disabled at EL2. We're going to
	 * die painfully, and there is nothing we can do about it.
	 *
	 * Kindly inform the luser.
	 */
	if (!gic_enable_sre())
		pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");
}

static void gic_cpu_sys_reg_init(void)
{
	int i, cpu = smp_processor_id();
	u64 mpidr = gic_cpu_to_affinity(cpu);
	u64 need_rss = MPIDR_RS(mpidr);
	bool group0;
	u32 pribits;

	pribits = gic_get_pribits();

	group0 = gic_has_group0();

	/* Set priority mask register */
	if (!gic_prio_masking_enabled()) {
		write_gicreg(DEFAULT_PMR_VALUE, ICC_PMR_EL1);
	} else if (gic_supports_nmi()) {
		/*
		 * Check that all CPUs use the same priority space.
		 *
		 * If there's a mismatch with the boot CPU, the system is
		 * likely to die as interrupt masking will not work properly on
		 * all CPUs.
		 */
		WARN_ON(group0 != cpus_have_group0);
		WARN_ON(gic_dist_security_disabled() != cpus_have_security_disabled);
	}

	/*
	 * Some firmwares hand over to the kernel with the BPR changed from
	 * its reset value (and with a value large enough to prevent
	 * any pre-emptive interrupts from working at all). Writing a zero
	 * to BPR restores its reset value.
	 */
	gic_write_bpr1(0);

	if (static_branch_likely(&supports_deactivate_key)) {
		/* EOI drops priority only (mode 1) */
		gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop);
	} else {
		/* EOI deactivates interrupt too (mode 0) */
		gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);
	}

	/* Always whack Group0 before Group1 */
	if (group0) {
		switch (pribits) {
		case 8:
		case 7:
			write_gicreg(0, ICC_AP0R3_EL1);
			write_gicreg(0, ICC_AP0R2_EL1);
			fallthrough;
		case 6:
			write_gicreg(0, ICC_AP0R1_EL1);
			fallthrough;
		case 5:
		case 4:
			write_gicreg(0, ICC_AP0R0_EL1);
		}

		isb();
	}

	switch (pribits) {
	case 8:
	case 7:
		write_gicreg(0, ICC_AP1R3_EL1);
		write_gicreg(0, ICC_AP1R2_EL1);
		fallthrough;
	case 6:
		write_gicreg(0, ICC_AP1R1_EL1);
		fallthrough;
	case 5:
	case 4:
		write_gicreg(0, ICC_AP1R0_EL1);
	}

	isb();

	/* ... and let's hit the road... */
	gic_write_grpen1(1);

	/* Keep the RSS capability status in per_cpu variable */
	per_cpu(has_rss, cpu) = !!(gic_read_ctlr() & ICC_CTLR_EL1_RSS);

	/* Check that all CPUs are capable of sending SGIs to other CPUs */
	for_each_online_cpu(i) {
		bool have_rss = per_cpu(has_rss, i) && per_cpu(has_rss, cpu);

		need_rss |= MPIDR_RS(gic_cpu_to_affinity(i));
		if (need_rss && (!have_rss))
			pr_crit("CPU%d (%lx) can't SGI CPU%d (%lx), no RSS\n",
				cpu, (unsigned long)mpidr,
				i, (unsigned long)gic_cpu_to_affinity(i));
	}

	/*
	 * The GIC spec says that, when ICC_CTLR_EL1.RSS==1 and GICD_TYPER.RSS==0,
	 * writing the ICC_ASGI1R_EL1 register with RS != 0 is a CONSTRAINED
	 * UNPREDICTABLE choice of:
	 *   - The write is ignored.
	 *   - The RS field is treated as 0.
	 */
	if (need_rss && (!gic_data.has_rss))
		pr_crit_once("RSS is required but GICD doesn't support it\n");
}

static bool gicv3_nolpi;

static int __init gicv3_nolpi_cfg(char *buf)
{
	return kstrtobool(buf, &gicv3_nolpi);
}
early_param("irqchip.gicv3_nolpi", gicv3_nolpi_cfg);

static int gic_dist_supports_lpis(void)
{
	return (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) &&
		!!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS) &&
		!gicv3_nolpi);
}

static void gic_cpu_init(void)
{
	void __iomem *rbase;
	int i;

	/* Register ourselves with the rest of the world */
	if (gic_populate_rdist())
		return;

	gic_enable_redist(true);

	WARN((gic_data.ppi_nr > 16 || GIC_ESPI_NR != 0) &&
	     !(gic_read_ctlr() & ICC_CTLR_EL1_ExtRange),
	     "Distributor has extended ranges, but CPU%d doesn't\n",
	     smp_processor_id());

	rbase = gic_data_rdist_sgi_base();

	/* Configure SGIs/PPIs as non-secure Group-1 */
	for (i = 0; i < gic_data.ppi_nr + SGI_NR; i += 32)
		writel_relaxed(~0, rbase + GICR_IGROUPR0 + i / 8);

	gic_cpu_config(rbase, gic_data.ppi_nr + SGI_NR, dist_prio_irq);
	gic_redist_wait_for_rwp();

	/* initialise system registers */
	gic_cpu_sys_reg_init();
}

#ifdef CONFIG_SMP

#define MPIDR_TO_SGI_RS(mpidr)		(MPIDR_RS(mpidr) << ICC_SGI1R_RS_SHIFT)
#define MPIDR_TO_SGI_CLUSTER_ID(mpidr)	((mpidr) & ~0xFUL)

/*
 * gic_starting_cpu() is called after the last point where cpuhp is allowed
 * to fail, so pre-check for problems earlier.
 */
static int gic_check_rdist(unsigned int cpu)
{
	if (cpumask_test_cpu(cpu, &broken_rdists))
		return -EINVAL;

	return 0;
}

static int gic_starting_cpu(unsigned int cpu)
{
	gic_cpu_sys_reg_enable();
	gic_cpu_init();

	if (gic_dist_supports_lpis())
		its_cpu_init();

	return 0;
}

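/*
 * Build the 16-bit SGI target list for the cluster containing *base_cpu,
 * advancing *base_cpu to the last CPU handled. Illustrative example:
 * four CPUs of one cluster with Aff0 == 0..3, all present in the mask,
 * yield tlist == 0xf.
 */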
static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
				   unsigned long cluster_id)
{
	int next_cpu, cpu = *base_cpu;
	unsigned long mpidr;
	u16 tlist = 0;

	mpidr = gic_cpu_to_affinity(cpu);

	while (cpu < nr_cpu_ids) {
		tlist |= 1 << (mpidr & 0xf);

		next_cpu = cpumask_next(cpu, mask);
		if (next_cpu >= nr_cpu_ids)
			goto out;
		cpu = next_cpu;

		mpidr = gic_cpu_to_affinity(cpu);

		if (cluster_id != MPIDR_TO_SGI_CLUSTER_ID(mpidr)) {
			cpu--;
			goto out;
		}
	}
out:
	*base_cpu = cpu;
	return tlist;
}

#define MPIDR_TO_SGI_AFFINITY(cluster_id, level) \
	(MPIDR_AFFINITY_LEVEL(cluster_id, level) \
		<< ICC_SGI1R_AFFINITY_## level ##_SHIFT)

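/*
 * Illustrative example: SGI 5 sent to the cluster with Aff3.Aff2.Aff1 ==
 * 0.0.1 and target list 0b11 composes ICC_SGI1R_EL1 == 0x05010003
 * (Aff1 at bit 16, the SGI INTID at bit 24, the target list in bits [15:0]).
 */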
static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
{
	u64 val;

	val = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3)	|
	       MPIDR_TO_SGI_AFFINITY(cluster_id, 2)	|
	       irq << ICC_SGI1R_SGI_ID_SHIFT		|
	       MPIDR_TO_SGI_AFFINITY(cluster_id, 1)	|
	       MPIDR_TO_SGI_RS(cluster_id)		|
	       tlist << ICC_SGI1R_TARGET_LIST_SHIFT);

	pr_devel("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
	gic_write_sgi1r(val);
}

static void gic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
{
	int cpu;

	if (WARN_ON(d->hwirq >= 16))
		return;

	/*
	 * Ensure that stores to Normal memory are visible to the
	 * other CPUs before issuing the IPI.
	 */
	dsb(ishst);

	for_each_cpu(cpu, mask) {
		u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(gic_cpu_to_affinity(cpu));
		u16 tlist;

		tlist = gic_compute_target_list(&cpu, mask, cluster_id);
		gic_send_sgi(cluster_id, tlist, d->hwirq);
	}

	/* Force the above writes to ICC_SGI1R_EL1 to be executed */
	isb();
}

static void __init gic_smp_init(void)
{
	struct irq_fwspec sgi_fwspec = {
		.fwnode		= gic_data.fwnode,
		.param_count	= 1,
	};
	int base_sgi;

	cpuhp_setup_state_nocalls(CPUHP_BP_PREPARE_DYN,
				  "irqchip/arm/gicv3:checkrdist",
				  gic_check_rdist, NULL);

	cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
				  "irqchip/arm/gicv3:starting",
				  gic_starting_cpu, NULL);

	/* Register all 8 non-secure SGIs */
	base_sgi = irq_domain_alloc_irqs(gic_data.domain, 8, NUMA_NO_NODE, &sgi_fwspec);
	if (WARN_ON(base_sgi <= 0))
		return;

	set_smp_ipi_range(base_sgi, 8);
}

static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
			    bool force)
{
	unsigned int cpu;
	u32 offset, index;
	void __iomem *reg;
	int enabled;
	u64 val;

	if (force)
		cpu = cpumask_first(mask_val);
	else
		cpu = cpumask_any_and(mask_val, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	if (gic_irq_in_rdist(d))
		return -EINVAL;

	/* If interrupt was enabled, disable it first */
	enabled = gic_peek_irq(d, GICD_ISENABLER);
	if (enabled)
		gic_mask_irq(d);

	offset = convert_offset_index(d, GICD_IROUTER, &index);
	reg = gic_dist_base(d) + offset + (index * 8);
	val = gic_cpu_to_affinity(cpu);

	gic_write_irouter(val, reg);

	/*
	 * If the interrupt was enabled, enable it again. Otherwise,
	 * just wait for the distributor to have digested our changes.
	 */
	if (enabled)
		gic_unmask_irq(d);

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return IRQ_SET_MASK_OK_DONE;
}
#else
#define gic_set_affinity	NULL
#define gic_ipi_send_mask	NULL
#define gic_smp_init()		do { } while (0)
#endif

static int gic_retrigger(struct irq_data *data)
{
	return !gic_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING, true);
}

#ifdef CONFIG_CPU_PM
static int gic_cpu_pm_notifier(struct notifier_block *self,
			       unsigned long cmd, void *v)
{
	if (cmd == CPU_PM_EXIT || cmd == CPU_PM_ENTER_FAILED) {
		if (gic_dist_security_disabled())
			gic_enable_redist(true);
		gic_cpu_sys_reg_enable();
		gic_cpu_sys_reg_init();
	} else if (cmd == CPU_PM_ENTER && gic_dist_security_disabled()) {
		gic_write_grpen1(0);
		gic_enable_redist(false);
	}
	return NOTIFY_OK;
}

static struct notifier_block gic_cpu_pm_notifier_block = {
	.notifier_call = gic_cpu_pm_notifier,
};

static void gic_cpu_pm_init(void)
{
	cpu_pm_register_notifier(&gic_cpu_pm_notifier_block);
}

#else
static inline void gic_cpu_pm_init(void) { }
#endif /* CONFIG_CPU_PM */

static struct irq_chip gic_chip = {
	.name			= "GICv3",
	.irq_mask		= gic_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_set_affinity	= gic_set_affinity,
	.irq_retrigger		= gic_retrigger,
	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
	.irq_nmi_setup		= gic_irq_nmi_setup,
	.irq_nmi_teardown	= gic_irq_nmi_teardown,
	.ipi_send_mask		= gic_ipi_send_mask,
	.flags			= IRQCHIP_SET_TYPE_MASKED |
				  IRQCHIP_SKIP_SET_WAKE |
				  IRQCHIP_MASK_ON_SUSPEND,
};

static struct irq_chip gic_eoimode1_chip = {
	.name			= "GICv3",
	.irq_mask		= gic_eoimode1_mask_irq,
	.irq_unmask		= gic_unmask_irq,
	.irq_eoi		= gic_eoimode1_eoi_irq,
	.irq_set_type		= gic_set_type,
	.irq_set_affinity	= gic_set_affinity,
	.irq_retrigger		= gic_retrigger,
	.irq_get_irqchip_state	= gic_irq_get_irqchip_state,
	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
	.irq_set_vcpu_affinity	= gic_irq_set_vcpu_affinity,
	.irq_nmi_setup		= gic_irq_nmi_setup,
	.irq_nmi_teardown	= gic_irq_nmi_teardown,
	.ipi_send_mask		= gic_ipi_send_mask,
	.flags			= IRQCHIP_SET_TYPE_MASKED |
				  IRQCHIP_SKIP_SET_WAKE |
				  IRQCHIP_MASK_ON_SUSPEND,
};

static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
			      irq_hw_number_t hw)
{
	struct irq_chip *chip = &gic_chip;
	struct irq_data *irqd = irq_desc_get_irq_data(irq_to_desc(irq));

	if (static_branch_likely(&supports_deactivate_key))
		chip = &gic_eoimode1_chip;

	switch (__get_intid_range(hw)) {
	case SGI_RANGE:
	case PPI_RANGE:
	case EPPI_RANGE:
		irq_set_percpu_devid(irq);
		irq_domain_set_info(d, irq, hw, chip, d->host_data,
				    handle_percpu_devid_irq, NULL, NULL);
		break;

	case SPI_RANGE:
	case ESPI_RANGE:
		irq_domain_set_info(d, irq, hw, chip, d->host_data,
				    handle_fasteoi_irq, NULL, NULL);
		irq_set_probe(irq);
		irqd_set_single_target(irqd);
		break;

	case LPI_RANGE:
		if (!gic_dist_supports_lpis())
			return -EPERM;
		irq_domain_set_info(d, irq, hw, chip, d->host_data,
				    handle_fasteoi_irq, NULL, NULL);
		break;

	default:
		return -EPERM;
	}

	/* Prevents SW retriggers which mess up the ACK/EOI ordering */
	irqd_set_handle_enforce_irqctx(irqd);
	return 0;
}

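/*
 * Illustrative example of the DT translation below: the three-cell
 * specifier <0 40 IRQ_TYPE_LEVEL_HIGH> (i.e. GIC_SPI 40) maps to
 * hwirq 40 + 32 == 72.
 */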
static int gic_irq_domain_translate(struct irq_domain *d,
				    struct irq_fwspec *fwspec,
				    unsigned long *hwirq,
				    unsigned int *type)
{
	if (fwspec->param_count == 1 && fwspec->param[0] < 16) {
		*hwirq = fwspec->param[0];
		*type = IRQ_TYPE_EDGE_RISING;
		return 0;
	}

	if (is_of_node(fwspec->fwnode)) {
		if (fwspec->param_count < 3)
			return -EINVAL;

		switch (fwspec->param[0]) {
		case 0:			/* SPI */
			*hwirq = fwspec->param[1] + 32;
			break;
		case 1:			/* PPI */
			*hwirq = fwspec->param[1] + 16;
			break;
		case 2:			/* ESPI */
			*hwirq = fwspec->param[1] + ESPI_BASE_INTID;
			break;
		case 3:			/* EPPI */
			*hwirq = fwspec->param[1] + EPPI_BASE_INTID;
			break;
		case GIC_IRQ_TYPE_LPI:	/* LPI */
			*hwirq = fwspec->param[1];
			break;
		default:
			return -EINVAL;
		}

		*type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;

		/*
		 * Make it clear that broken DTs are... broken.
		 */
		WARN_ON(*type == IRQ_TYPE_NONE);
		return 0;
	}

	if (is_fwnode_irqchip(fwspec->fwnode)) {
		if (fwspec->param_count != 2)
			return -EINVAL;

		if (fwspec->param[0] < 16) {
			pr_err(FW_BUG "Illegal GSI%d translation request\n",
			       fwspec->param[0]);
			return -EINVAL;
		}

		*hwirq = fwspec->param[0];
		*type = fwspec->param[1];

		WARN_ON(*type == IRQ_TYPE_NONE);
		return 0;
	}

	return -EINVAL;
}

static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	int i, ret;
	irq_hw_number_t hwirq;
	unsigned int type = IRQ_TYPE_NONE;
	struct irq_fwspec *fwspec = arg;

	ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		ret = gic_irq_domain_map(domain, virq + i, hwirq + i);
		if (ret)
			return ret;
	}

	return 0;
}

static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs)
{
	int i;

	for (i = 0; i < nr_irqs; i++) {
		struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);

		irq_set_handler(virq + i, NULL);
		irq_domain_reset_irq_data(d);
	}
}

static int gic_irq_domain_select(struct irq_domain *d,
				 struct irq_fwspec *fwspec,
				 enum irq_domain_bus_token bus_token)
{
	irq_hw_number_t hwirq;
	unsigned int type;
	int ret;

	/* Not for us */
	if (fwspec->fwnode != d->fwnode)
		return 0;

	/* Handle pure domain searches */
	if (!fwspec->param_count)
		return d->bus_token == bus_token;

	/* If this is not DT, then we have a single domain */
	if (!is_of_node(fwspec->fwnode))
		return 1;

	ret = gic_irq_domain_translate(d, fwspec, &hwirq, &type);
	if (WARN_ON_ONCE(ret))
		return 0;

	return d == gic_data.domain;
}

static int gic_irq_get_fwspec_info(struct irq_fwspec *fwspec, struct irq_fwspec_info *info)
{
	const struct cpumask *mask = NULL;

	info->flags = 0;
	info->affinity = NULL;

	/* ACPI is not capable of describing PPI affinity -- yet */
	if (!is_of_node(fwspec->fwnode))
		return 0;

	/* If the specifier provides an affinity, use it */
	if (fwspec->param_count == 4 && fwspec->param[3]) {
		struct fwnode_handle *fw;

		switch (fwspec->param[0]) {
		case 1:			/* PPI */
		case 3:			/* EPPI */
			break;
		default:
			return 0;
		}

		fw = of_fwnode_handle(of_find_node_by_phandle(fwspec->param[3]));
		if (!fw)
			return -ENOENT;

		for (int i = 0; i < gic_data.nr_parts; i++) {
			if (gic_data.parts[i].partition_id == fw) {
				mask = &gic_data.parts[i].mask;
				break;
			}
		}

		if (!mask)
			return -ENOENT;
	} else {
		mask = cpu_possible_mask;
	}

	info->affinity = mask;
	info->flags = IRQ_FWSPEC_INFO_AFFINITY_VALID;

	return 0;
}

static const struct irq_domain_ops gic_irq_domain_ops = {
	.translate = gic_irq_domain_translate,
	.alloc = gic_irq_domain_alloc,
	.free = gic_irq_domain_free,
	.select = gic_irq_domain_select,
	.get_fwspec_info = gic_irq_get_fwspec_info,
};

static bool gic_enable_quirk_msm8996(void *data)
{
	struct gic_chip_data *d = data;

	d->flags |= FLAGS_WORKAROUND_GICR_WAKER_MSM8996;

	return true;
}

static bool gic_enable_quirk_cavium_38539(void *data)
{
	struct gic_chip_data *d = data;

	d->flags |= FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539;

	return true;
}

static bool gic_enable_quirk_hip06_07(void *data)
{
	struct gic_chip_data *d = data;

	/*
	 * HIP06 GICD_IIDR clashes with GIC-600 product number (despite
	 * not being an actual ARM implementation). The saving grace is
	 * that GIC-600 doesn't have ESPI, so nothing to do in that case.
	 * HIP07 doesn't even have a proper IIDR, and still pretends to
	 * have ESPI. In both cases, put them right.
	 */
	if (d->rdists.gicd_typer & GICD_TYPER_ESPI) {
		/* Zero both ESPI and the RES0 field next to it... */
		d->rdists.gicd_typer &= ~GENMASK(9, 8);
		return true;
	}

	return false;
}

#define T241_CHIPN_MASK		GENMASK_ULL(45, 44)
#define T241_CHIP_GICDA_OFFSET	0x1580000
#define SMCCC_SOC_ID_T241	0x036b0241

static bool gic_enable_quirk_nvidia_t241(void *data)
{
	s32 soc_id = arm_smccc_get_soc_id_version();
	unsigned long chip_bmask = 0;
	phys_addr_t phys;
	u32 i;

	/* Check JEP106 code for NVIDIA T241 chip (036b:0241) */
	if ((soc_id < 0) || (soc_id != SMCCC_SOC_ID_T241))
		return false;

	/* Find the chips based on GICR regions PHYS addr */
	for (i = 0; i < gic_data.nr_redist_regions; i++) {
		chip_bmask |= BIT(FIELD_GET(T241_CHIPN_MASK,
				  (u64)gic_data.redist_regions[i].phys_base));
	}

	if (hweight32(chip_bmask) < 3)
		return false;

	/* Setup GICD alias regions */
	for (i = 0; i < ARRAY_SIZE(t241_dist_base_alias); i++) {
		if (chip_bmask & BIT(i)) {
			phys = gic_data.dist_phys_base + T241_CHIP_GICDA_OFFSET;
			phys |= FIELD_PREP(T241_CHIPN_MASK, i);
			t241_dist_base_alias[i] = ioremap(phys, SZ_64K);
			WARN_ON_ONCE(!t241_dist_base_alias[i]);
		}
	}
	static_branch_enable(&gic_nvidia_t241_erratum);
	return true;
}

static bool gic_enable_quirk_asr8601(void *data)
{
	struct gic_chip_data *d = data;

	d->flags |= FLAGS_WORKAROUND_ASR_ERRATUM_8601001;

	return true;
}

static bool gic_enable_quirk_arm64_2941627(void *data)
{
	static_branch_enable(&gic_arm64_2941627_erratum);
	return true;
}

static bool gic_enable_quirk_rk3399(void *data)
{
	struct gic_chip_data *d = data;

	if (of_machine_is_compatible("rockchip,rk3399")) {
		d->flags |= FLAGS_WORKAROUND_INSECURE;
		return true;
	}

	return false;
}

static bool rd_set_non_coherent(void *data)
{
	struct gic_chip_data *d = data;

	d->rdists.flags |= RDIST_FLAGS_FORCE_NON_SHAREABLE;
	return true;
}

static const struct gic_quirk gic_quirks[] = {
	{
		.desc	= "GICv3: Qualcomm MSM8996 broken firmware",
		.compatible = "qcom,msm8996-gic-v3",
		.init	= gic_enable_quirk_msm8996,
	},
	{
		.desc	= "GICv3: ASR erratum 8601001",
		.compatible = "asr,asr8601-gic-v3",
		.init	= gic_enable_quirk_asr8601,
	},
	{
		.desc	= "GICv3: HIP06 erratum 161010803",
		.iidr	= 0x0204043b,
		.mask	= 0xffffffff,
		.init	= gic_enable_quirk_hip06_07,
	},
	{
		.desc	= "GICv3: HIP07 erratum 161010803",
		.iidr	= 0x00000000,
		.mask	= 0xffffffff,
		.init	= gic_enable_quirk_hip06_07,
	},
	{
		/*
		 * Reserved register accesses generate a Synchronous
		 * External Abort. This erratum applies to:
		 * - ThunderX: CN88xx
		 * - OCTEON TX: CN83xx, CN81xx
		 * - OCTEON TX2: CN93xx, CN96xx, CN98xx, CNF95xx*
		 */
		.desc	= "GICv3: Cavium erratum 38539",
		.iidr	= 0xa000034c,
		.mask	= 0xe8f00fff,
		.init	= gic_enable_quirk_cavium_38539,
	},
	{
		.desc	= "GICv3: NVIDIA erratum T241-FABRIC-4",
		.iidr	= 0x0402043b,
		.mask	= 0xffffffff,
		.init	= gic_enable_quirk_nvidia_t241,
	},
	{
		/*
		 * GIC-700: 2941627 workaround - IP variant [0,1]
		 */
		.desc	= "GICv3: ARM64 erratum 2941627",
		.iidr	= 0x0400043b,
		.mask	= 0xff0e0fff,
		.init	= gic_enable_quirk_arm64_2941627,
	},
	{
		/*
		 * GIC-700: 2941627 workaround - IP variant [2]
		 */
		.desc	= "GICv3: ARM64 erratum 2941627",
		.iidr	= 0x0402043b,
		.mask	= 0xff0f0fff,
		.init	= gic_enable_quirk_arm64_2941627,
	},
	{
		.desc	= "GICv3: non-coherent attribute",
		.property = "dma-noncoherent",
		.init	= rd_set_non_coherent,
	},
	{
		.desc	= "GICv3: Insecure RK3399 integration",
		.iidr	= 0x0000043b,
		.mask	= 0xff000fff,
		.init	= gic_enable_quirk_rk3399,
	},
	{
	}
};
1952
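/*
 * Pseudo-NMIs are built on top of interrupt priority masking, so there is
 * nothing to do unless priority masking is in use and NMI support hasn't
 * been forbidden. Report which ICC_PMR_EL1 synchronisation scheme applies
 * and advertise NMI support on the irqchip actually in use.
 */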
static void gic_enable_nmi_support(void)
{
	if (!gic_prio_masking_enabled() || nmi_support_forbidden)
		return;

	pr_info("Pseudo-NMIs enabled using %s ICC_PMR_EL1 synchronisation\n",
		gic_has_relaxed_pmr_sync() ? "relaxed" : "forced");

	static_branch_enable(&supports_pseudo_nmis);

	if (static_branch_likely(&supports_deactivate_key))
		gic_eoimode1_chip.flags |= IRQCHIP_SUPPORTS_NMI;
	else
		gic_chip.flags |= IRQCHIP_SUPPORTS_NMI;
}

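/*
 * Common initialisation for the DT and ACPI probe paths: stash the
 * distributor/redistributor bases, size the interrupt space from
 * GICD_TYPER, apply IIDR-keyed quirks, create the wired-IRQ domain and
 * finally bring up the distributor, the boot CPU interface, the SMP/PM
 * hooks and the downstream MSI/LPI layers (ITS, or GICv2m as fallback).
 */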
static int __init gic_init_bases(phys_addr_t dist_phys_base,
				 void __iomem *dist_base,
				 struct redist_region *rdist_regs,
				 u32 nr_redist_regions,
				 u64 redist_stride,
				 struct fwnode_handle *handle)
{
	u32 typer;
	int err;

	if (!is_hyp_mode_available())
		static_branch_disable(&supports_deactivate_key);

	if (static_branch_likely(&supports_deactivate_key))
		pr_info("GIC: Using split EOI/Deactivate mode\n");

	gic_data.fwnode = handle;
	gic_data.dist_phys_base = dist_phys_base;
	gic_data.dist_base = dist_base;
	gic_data.redist_regions = rdist_regs;
	gic_data.nr_redist_regions = nr_redist_regions;
	gic_data.redist_stride = redist_stride;

	/*
	 * Find out how many interrupts are supported.
	 */
	typer = readl_relaxed(gic_data.dist_base + GICD_TYPER);
	gic_data.rdists.gicd_typer = typer;

	gic_enable_quirks(readl_relaxed(gic_data.dist_base + GICD_IIDR),
			  gic_quirks, &gic_data);

	pr_info("%d SPIs implemented\n", GIC_LINE_NR - 32);
	pr_info("%d Extended SPIs implemented\n", GIC_ESPI_NR);

	/*
	 * ThunderX1 explodes on reading GICD_TYPER2, in violation of the
	 * architecture spec (which says that reserved registers are RES0).
	 */
	if (!(gic_data.flags & FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539))
		gic_data.rdists.gicd_typer2 = readl_relaxed(gic_data.dist_base + GICD_TYPER2);

	gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops,
						 &gic_data);
	gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
	if (!static_branch_unlikely(&gic_nvidia_t241_erratum)) {
		/* GICv4.x features must stay disabled with erratum T241-FABRIC-4 */
		gic_data.rdists.has_rvpeid = true;
		gic_data.rdists.has_vlpis = true;
		gic_data.rdists.has_direct_lpi = true;
		gic_data.rdists.has_vpend_valid_dirty = true;
	}

	if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
		err = -ENOMEM;
		goto out_free;
	}

	irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED);

	gic_data.has_rss = !!(typer & GICD_TYPER_RSS);

	if (typer & GICD_TYPER_MBIS) {
		err = mbi_init(handle, gic_data.domain);
		if (err)
			pr_err("Failed to initialize MBIs\n");
	}

	set_handle_irq(gic_handle_irq);

	gic_update_rdist_properties();

	gic_cpu_sys_reg_enable();
	gic_prio_init();
	gic_dist_init();
	gic_cpu_init();
	gic_enable_nmi_support();
	gic_smp_init();
	gic_cpu_pm_init();

	if (gic_dist_supports_lpis()) {
		its_init(handle, &gic_data.rdists, gic_data.domain, dist_prio_irq);
		its_cpu_init();
		its_lpi_memreserve_init();
	} else {
		if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
			gicv2m_init(handle, gic_data.domain);
	}

	return 0;

out_free:
	if (gic_data.domain)
		irq_domain_remove(gic_data.domain);
	free_percpu(gic_data.rdists.rdist);
	return err;
}

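/*
 * GICD_PIDR2 carries the architecture revision: anything other than
 * GICv3 or GICv4 is not something this driver can deal with.
 */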
static int __init gic_validate_dist_version(void __iomem *dist_base)
{
	u32 reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;

	if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4)
		return -ENODEV;

	return 0;
}

/* Create all possible partitions at boot time */
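/*
 * The expected DT layout is a "ppi-partitions" child of the GIC node
 * whose children each carry an "affinity" property listing CPU node
 * phandles, along the lines of (labels illustrative):
 *
 *	ppi-partitions {
 *		part0: interrupt-partition-0 {
 *			affinity = <&cpu0 &cpu2>;
 *		};
 *		part1: interrupt-partition-1 {
 *			affinity = <&cpu1 &cpu3>;
 *		};
 *	};
 */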
static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
{
	struct device_node *parts_node, *child_part;
	int part_idx = 0, i;
	int nr_parts;
	struct partition_affinity *parts;

	parts_node = of_get_child_by_name(gic_node, "ppi-partitions");
	if (!parts_node)
		return;

	nr_parts = of_get_child_count(parts_node);
	if (!nr_parts)
		goto out_put_node;

	parts = kcalloc(nr_parts, sizeof(*parts), GFP_KERNEL);
	if (WARN_ON(!parts))
		goto out_put_node;

	for_each_child_of_node(parts_node, child_part) {
		struct partition_affinity *part;
		int n;

		part = &parts[part_idx];

		part->partition_id = of_fwnode_handle(child_part);

		pr_info("GIC: PPI partition %pOFn[%d] { ",
			child_part, part_idx);

		n = of_property_count_elems_of_size(child_part, "affinity",
						    sizeof(u32));
		WARN_ON(n <= 0);

		for (i = 0; i < n; i++) {
			int err, cpu;
			u32 cpu_phandle;
			struct device_node *cpu_node;

			err = of_property_read_u32_index(child_part, "affinity",
							 i, &cpu_phandle);
			if (WARN_ON(err))
				continue;

			cpu_node = of_find_node_by_phandle(cpu_phandle);
			if (WARN_ON(!cpu_node))
				continue;

			cpu = of_cpu_node_to_id(cpu_node);
			if (WARN_ON(cpu < 0)) {
				of_node_put(cpu_node);
				continue;
			}

			pr_cont("%pOF[%d] ", cpu_node, cpu);

			cpumask_set_cpu(cpu, &part->mask);
			of_node_put(cpu_node);
		}

		pr_cont("}\n");
		part_idx++;
	}

	gic_data.parts = parts;
	gic_data.nr_parts = nr_parts;

out_put_node:
	of_node_put(parts_node);
}

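/*
 * Collect what KVM's VGIC needs from DT: the maintenance interrupt
 * (the GIC node's first interrupt specifier) and the GICV region.
 */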
static void __init gic_of_setup_kvm_info(struct device_node *node, u32 nr_redist_regions)
{
	int ret;
	struct resource r;

	gic_v3_kvm_info.type = GIC_V3;

	gic_v3_kvm_info.maint_irq = irq_of_parse_and_map(node, 0);
	if (!gic_v3_kvm_info.maint_irq)
		return;

	/* The GICV region follows GICD, the GICR regions, GICC and GICH */
	ret = of_address_to_resource(node, nr_redist_regions + 3, &r);
	if (!ret)
		gic_v3_kvm_info.vcpu = r;

	gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
	gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
	vgic_set_kvm_info(&gic_v3_kvm_info);
}

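/*
 * Claim the MMIO region so it shows up in /proc/iomem. An overlap is a
 * firmware bug worth a one-off warning, but not a reason to give up.
 */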
static void gic_request_region(resource_size_t base, resource_size_t size,
			       const char *name)
{
	if (!request_mem_region(base, size, name))
		pr_warn_once(FW_BUG "%s region %pa has overlapping address\n",
			     name, &base);
}

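/*
 * Map the idx-th "reg" entry of the node, claiming the region on the way.
 * Returns an ERR_PTR on failure so that callers can distinguish a missing
 * entry from an unmappable one.
 */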
static void __iomem *gic_of_iomap(struct device_node *node, int idx,
				  const char *name, struct resource *res)
{
	void __iomem *base;
	int ret;

	ret = of_address_to_resource(node, idx, res);
	if (ret)
		return IOMEM_ERR_PTR(ret);

	gic_request_region(res->start, resource_size(res), name);
	base = of_iomap(node, idx);

	return base ?: IOMEM_ERR_PTR(-ENOMEM);
}

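/*
 * DT probe entry point. The node's "reg" property carries the GICD region
 * followed by #redistributor-regions GICR regions (default 1), along the
 * lines of (addresses and sizes illustrative):
 *
 *	interrupt-controller@2f000000 {
 *		compatible = "arm,gic-v3";
 *		#interrupt-cells = <3>;
 *		interrupt-controller;
 *		#redistributor-regions = <2>;
 *		reg = <0x2f000000 0x10000>,	// GICD
 *		      <0x2f100000 0x200000>,	// GICR region 0
 *		      <0x4f100000 0x200000>;	// GICR region 1
 *	};
 */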
static int __init gic_of_init(struct device_node *node, struct device_node *parent)
{
	phys_addr_t dist_phys_base;
	void __iomem *dist_base;
	struct redist_region *rdist_regs;
	struct resource res;
	u64 redist_stride;
	u32 nr_redist_regions;
	int err, i;

	dist_base = gic_of_iomap(node, 0, "GICD", &res);
	if (IS_ERR(dist_base)) {
		pr_err("%pOF: unable to map gic dist registers\n", node);
		return PTR_ERR(dist_base);
	}

	dist_phys_base = res.start;

	err = gic_validate_dist_version(dist_base);
	if (err) {
		pr_err("%pOF: no distributor detected, giving up\n", node);
		goto out_unmap_dist;
	}

	if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions))
		nr_redist_regions = 1;

	rdist_regs = kcalloc(nr_redist_regions, sizeof(*rdist_regs), GFP_KERNEL);
	if (!rdist_regs) {
		err = -ENOMEM;
		goto out_unmap_dist;
	}

	for (i = 0; i < nr_redist_regions; i++) {
		rdist_regs[i].redist_base = gic_of_iomap(node, 1 + i, "GICR", &res);
		if (IS_ERR(rdist_regs[i].redist_base)) {
			pr_err("%pOF: couldn't map region %d\n", node, i);
			err = -ENODEV;
			goto out_unmap_rdist;
		}
		rdist_regs[i].phys_base = res.start;
	}

	if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
		redist_stride = 0;

	gic_enable_of_quirks(node, gic_quirks, &gic_data);

	err = gic_init_bases(dist_phys_base, dist_base, rdist_regs,
			     nr_redist_regions, redist_stride, &node->fwnode);
	if (err)
		goto out_unmap_rdist;

	gic_populate_ppi_partitions(node);

	if (static_branch_likely(&supports_deactivate_key))
		gic_of_setup_kvm_info(node, nr_redist_regions);
	return 0;

out_unmap_rdist:
	for (i = 0; i < nr_redist_regions; i++)
		if (rdist_regs[i].redist_base && !IS_ERR(rdist_regs[i].redist_base))
			iounmap(rdist_regs[i].redist_base);
	kfree(rdist_regs);
out_unmap_dist:
	iounmap(dist_base);
	return err;
}

IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);

#ifdef CONFIG_ACPI
static struct
{
	void __iomem *dist_base;
	struct redist_region *redist_regs;
	u32 nr_redist_regions;
	bool single_redist;
	int enabled_rdists;
	u32 maint_irq;
	int maint_irq_mode;
	phys_addr_t vcpu_base;
} acpi_data __initdata;

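/*
 * Stash one redistributor region. The static counter is safe here: MADT
 * parsing runs sequentially on the boot CPU during early init.
 */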
static void __init
gic_acpi_register_redist(phys_addr_t phys_base, void __iomem *redist_base)
{
	static int count = 0;

	acpi_data.redist_regs[count].phys_base = phys_base;
	acpi_data.redist_regs[count].redist_base = redist_base;
	acpi_data.redist_regs[count].single_redist = acpi_data.single_redist;
	count++;
}

static int __init
gic_acpi_parse_madt_redist(union acpi_subtable_headers *header,
			   const unsigned long end)
{
	struct acpi_madt_generic_redistributor *redist =
		(struct acpi_madt_generic_redistributor *)header;
	void __iomem *redist_base;

	redist_base = ioremap(redist->base_address, redist->length);
	if (!redist_base) {
		pr_err("Couldn't map GICR region @%llx\n", redist->base_address);
		return -ENOMEM;
	}

	if (acpi_get_madt_revision() >= 7 &&
	    (redist->flags & ACPI_MADT_GICR_NON_COHERENT))
		gic_data.rdists.flags |= RDIST_FLAGS_FORCE_NON_SHAREABLE;

	gic_request_region(redist->base_address, redist->length, "GICR");

	gic_acpi_register_redist(redist->base_address, redist_base);
	return 0;
}

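/*
 * When the redistributors are described via GICC entries rather than GICR
 * subtables, the per-CPU frame size must be derived from the GIC revision:
 * a GICv3 redistributor is two 64K frames (RD_base, SGI_base), a GICv4 one
 * is four (adding VLPI_base and a reserved frame).
 */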
static int __init
gic_acpi_parse_madt_gicc(union acpi_subtable_headers *header,
			 const unsigned long end)
{
	struct acpi_madt_generic_interrupt *gicc =
		(struct acpi_madt_generic_interrupt *)header;
	u32 reg = readl_relaxed(acpi_data.dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
	u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2;
	void __iomem *redist_base;

	/* Neither enabled nor online capable means it doesn't exist, skip it */
	if (!(gicc->flags & (ACPI_MADT_ENABLED | ACPI_MADT_GICC_ONLINE_CAPABLE)))
		return 0;

	/*
	 * Capable but disabled CPUs can be brought online later. What about
	 * the redistributor? ACPI doesn't want to say!
	 * Virtual hotplug systems can use the MADT's "always-on" GICR entries.
	 * Otherwise, prevent such CPUs from being brought online.
	 */
	if (!(gicc->flags & ACPI_MADT_ENABLED)) {
		int cpu = get_cpu_for_acpi_id(gicc->uid);

		pr_warn("CPU %u's redistributor is inaccessible: this CPU can't be brought online\n", cpu);
		if (cpu >= 0)
			cpumask_set_cpu(cpu, &broken_rdists);
		return 0;
	}

	redist_base = ioremap(gicc->gicr_base_address, size);
	if (!redist_base)
		return -ENOMEM;
	gic_request_region(gicc->gicr_base_address, size, "GICR");

	if (acpi_get_madt_revision() >= 7 &&
	    (gicc->flags & ACPI_MADT_GICC_NON_COHERENT))
		gic_data.rdists.flags |= RDIST_FLAGS_FORCE_NON_SHAREABLE;

	gic_acpi_register_redist(gicc->gicr_base_address, redist_base);
	return 0;
}

static int __init gic_acpi_collect_gicr_base(void)
{
	acpi_tbl_entry_handler redist_parser;
	enum acpi_madt_type type;

	if (acpi_data.single_redist) {
		type = ACPI_MADT_TYPE_GENERIC_INTERRUPT;
		redist_parser = gic_acpi_parse_madt_gicc;
	} else {
		type = ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR;
		redist_parser = gic_acpi_parse_madt_redist;
	}

	/* Collect redistributor base addresses in GICR entries */
	if (acpi_table_parse_madt(type, redist_parser, 0) > 0)
		return 0;

	pr_info("No valid GICR entries exist\n");
	return -ENODEV;
}

static int __init gic_acpi_match_gicr(union acpi_subtable_headers *header,
				      const unsigned long end)
{
	/* Subtable presence means that redist exists, that's it */
	return 0;
}

static int __init gic_acpi_match_gicc(union acpi_subtable_headers *header,
				      const unsigned long end)
{
	struct acpi_madt_generic_interrupt *gicc =
		(struct acpi_madt_generic_interrupt *)header;

	/*
	 * If the GICC is enabled and has a valid GICR base address, then the
	 * GICR base is presented via GICC. The redistributor is only known to
	 * be accessible if the GICC is marked as enabled. If this bit is not
	 * set, we'd need to add the redistributor at runtime, which isn't
	 * supported.
	 */
	if (gicc->flags & ACPI_MADT_ENABLED && gicc->gicr_base_address)
		acpi_data.enabled_rdists++;

	return 0;
}

static int __init gic_acpi_count_gicr_regions(void)
{
	int count;

	/*
	 * Count how many redistributor regions we have. Mixing redistributor
	 * descriptions is not allowed: GICR and GICC subtables are mutually
	 * exclusive.
	 */
	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR,
				      gic_acpi_match_gicr, 0);
	if (count > 0) {
		acpi_data.single_redist = false;
		return count;
	}

	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
				      gic_acpi_match_gicc, 0);
	if (count > 0) {
		acpi_data.single_redist = true;
		count = acpi_data.enabled_rdists;
	}

	return count;
}

static bool __init acpi_validate_gic_table(struct acpi_subtable_header *header,
					   struct acpi_probe_entry *ape)
{
	struct acpi_madt_generic_distributor *dist;
	int count;

	dist = (struct acpi_madt_generic_distributor *)header;
	if (dist->version != ape->driver_data)
		return false;

	/* We need to do that exercise anyway, the sooner the better */
	count = gic_acpi_count_gicr_regions();
	if (count <= 0)
		return false;

	acpi_data.nr_redist_regions = count;
	return true;
}

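/*
 * The first usable GICC entry seeds the maintenance interrupt, its trigger
 * mode and the GICV base; every later entry must agree with it, since KVM
 * only supports a single VGIC configuration.
 */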
static int __init gic_acpi_parse_virt_madt_gicc(union acpi_subtable_headers *header,
						const unsigned long end)
{
	struct acpi_madt_generic_interrupt *gicc =
		(struct acpi_madt_generic_interrupt *)header;
	int maint_irq_mode;
	static bool first_madt = true;

	if (!(gicc->flags &
	      (ACPI_MADT_ENABLED | ACPI_MADT_GICC_ONLINE_CAPABLE)))
		return 0;

	maint_irq_mode = (gicc->flags & ACPI_MADT_VGIC_IRQ_MODE) ?
			 ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE;

	if (first_madt) {
		first_madt = false;

		acpi_data.maint_irq = gicc->vgic_interrupt;
		acpi_data.maint_irq_mode = maint_irq_mode;
		acpi_data.vcpu_base = gicc->gicv_base_address;

		return 0;
	}

	/*
	 * The maintenance interrupt and GICV should be the same for every CPU
	 */
	if ((acpi_data.maint_irq != gicc->vgic_interrupt) ||
	    (acpi_data.maint_irq_mode != maint_irq_mode) ||
	    (acpi_data.vcpu_base != gicc->gicv_base_address))
		return -EINVAL;

	return 0;
}

static bool __init gic_acpi_collect_virt_info(void)
{
	int count;

	count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
				      gic_acpi_parse_virt_madt_gicc, 0);

	return (count > 0);
}

#define ACPI_GICV3_DIST_MEM_SIZE	(SZ_64K)
#define ACPI_GICV2_VCTRL_MEM_SIZE	(SZ_4K)
#define ACPI_GICV2_VCPU_MEM_SIZE	(SZ_8K)

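/*
 * ACPI counterpart of gic_of_setup_kvm_info(): register the maintenance
 * interrupt as a GSI and describe the GICV frame gathered from the MADT.
 */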
static void __init gic_acpi_setup_kvm_info(void)
{
	int irq;

	if (!gic_acpi_collect_virt_info()) {
		pr_warn("Unable to get hardware information used for virtualization\n");
		return;
	}

	gic_v3_kvm_info.type = GIC_V3;

	irq = acpi_register_gsi(NULL, acpi_data.maint_irq,
				acpi_data.maint_irq_mode,
				ACPI_ACTIVE_HIGH);
	if (irq <= 0)
		return;

	gic_v3_kvm_info.maint_irq = irq;

	if (acpi_data.vcpu_base) {
		struct resource *vcpu = &gic_v3_kvm_info.vcpu;

		vcpu->flags = IORESOURCE_MEM;
		vcpu->start = acpi_data.vcpu_base;
		vcpu->end = vcpu->start + ACPI_GICV2_VCPU_MEM_SIZE - 1;
	}

	gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
	gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
	vgic_set_kvm_info(&gic_v3_kvm_info);
}

static struct fwnode_handle *gsi_domain_handle;

static struct fwnode_handle *gic_v3_get_gsi_domain_id(u32 gsi)
{
	return gsi_domain_handle;
}

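/*
 * ACPI probe entry point: map and validate the GICD, collect the
 * redistributor regions counted earlier, allocate a synthetic fwnode
 * (ACPI has no natural one) and hand over to gic_init_bases().
 */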
static int __init
gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end)
{
	struct acpi_madt_generic_distributor *dist;
	size_t size;
	int i, err;

	/* Get distributor base address */
	dist = (struct acpi_madt_generic_distributor *)header;
	acpi_data.dist_base = ioremap(dist->base_address,
				      ACPI_GICV3_DIST_MEM_SIZE);
	if (!acpi_data.dist_base) {
		pr_err("Unable to map GICD registers\n");
		return -ENOMEM;
	}
	gic_request_region(dist->base_address, ACPI_GICV3_DIST_MEM_SIZE, "GICD");

	err = gic_validate_dist_version(acpi_data.dist_base);
	if (err) {
		pr_err("No distributor detected at @%p, giving up\n",
		       acpi_data.dist_base);
		goto out_dist_unmap;
	}

	size = sizeof(*acpi_data.redist_regs) * acpi_data.nr_redist_regions;
	acpi_data.redist_regs = kzalloc(size, GFP_KERNEL);
	if (!acpi_data.redist_regs) {
		err = -ENOMEM;
		goto out_dist_unmap;
	}

	err = gic_acpi_collect_gicr_base();
	if (err)
		goto out_redist_unmap;

	gsi_domain_handle = irq_domain_alloc_fwnode(&dist->base_address);
	if (!gsi_domain_handle) {
		err = -ENOMEM;
		goto out_redist_unmap;
	}

	err = gic_init_bases(dist->base_address, acpi_data.dist_base,
			     acpi_data.redist_regs, acpi_data.nr_redist_regions,
			     0, gsi_domain_handle);
	if (err)
		goto out_fwhandle_free;

	acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, gic_v3_get_gsi_domain_id);

	if (static_branch_likely(&supports_deactivate_key))
		gic_acpi_setup_kvm_info();

	return 0;

out_fwhandle_free:
	irq_domain_free_fwnode(gsi_domain_handle);
out_redist_unmap:
	for (i = 0; i < acpi_data.nr_redist_regions; i++)
		if (acpi_data.redist_regs[i].redist_base)
			iounmap(acpi_data.redist_regs[i].redist_base);
	kfree(acpi_data.redist_regs);
out_dist_unmap:
	iounmap(acpi_data.dist_base);
	return err;
}
IRQCHIP_ACPI_DECLARE(gic_v3, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V3,
		     gic_acpi_init);
IRQCHIP_ACPI_DECLARE(gic_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V4,
		     gic_acpi_init);
IRQCHIP_ACPI_DECLARE(gic_v3_or_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
		     acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_NONE,
		     gic_acpi_init);
#endif
