// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#define pr_fmt(fmt)	"GICv3: " fmt

#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/cpu_pm.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/kstrtox.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/percpu.h>
#include <linux/refcount.h>
#include <linux/slab.h>
#include <linux/iopoll.h>

#include <linux/irqchip.h>
#include <linux/irqchip/arm-gic-common.h>
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/irqchip/arm-gic-v3-prio.h>
#include <linux/irqchip/irq-partition-percpu.h>
#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/arm-smccc.h>

#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/smp_plat.h>
#include <asm/virt.h>

#include "irq-gic-common.h"

static u8 dist_prio_irq __ro_after_init = GICV3_PRIO_IRQ;
static u8 dist_prio_nmi __ro_after_init = GICV3_PRIO_NMI;

#define FLAGS_WORKAROUND_GICR_WAKER_MSM8996	(1ULL << 0)
#define FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539	(1ULL << 1)
#define FLAGS_WORKAROUND_ASR_ERRATUM_8601001	(1ULL << 2)

#define GIC_IRQ_TYPE_PARTITION	(GIC_IRQ_TYPE_LPI + 1)
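/*
 * GIC_IRQ_TYPE_PARTITION is a purely software fwspec type used to describe
 * partitioned PPIs (see gic_irq_domain_translate()); it does not name a
 * hardware INTID class.
 */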

static struct cpumask broken_rdists __read_mostly __maybe_unused;

struct redist_region {
        void __iomem            *redist_base;
        phys_addr_t             phys_base;
        bool                    single_redist;
};

struct gic_chip_data {
        struct fwnode_handle    *fwnode;
        phys_addr_t             dist_phys_base;
        void __iomem            *dist_base;
        struct redist_region    *redist_regions;
        struct rdists           rdists;
        struct irq_domain       *domain;
        u64                     redist_stride;
        u32                     nr_redist_regions;
        u64                     flags;
        bool                    has_rss;
        unsigned int            ppi_nr;
        struct partition_desc   **ppi_descs;
};

#define T241_CHIPS_MAX		4
static void __iomem *t241_dist_base_alias[T241_CHIPS_MAX] __read_mostly;
static DEFINE_STATIC_KEY_FALSE(gic_nvidia_t241_erratum);

static DEFINE_STATIC_KEY_FALSE(gic_arm64_2941627_erratum);

static struct gic_chip_data gic_data __read_mostly;
static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key);

#define GIC_ID_NR	(1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer))
#define GIC_LINE_NR	min(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U)
#define GIC_ESPI_NR	GICD_TYPER_ESPIS(gic_data.rdists.gicd_typer)
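/* SPIs end at INTID 1019; INTIDs 1020-1023 are special, hence the clamp in GIC_LINE_NR */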

/*
 * There are 16 SGIs, though we only actually use 8 in Linux. The other 8 SGIs
 * are potentially stolen by the secure side. Some code, especially code dealing
 * with hwirq IDs, is simplified by accounting for all 16.
 */
#define SGI_NR		16

/*
 * The behaviours of RPR and PMR registers differ depending on the value of
 * SCR_EL3.FIQ, and the behaviour of non-secure priority registers of the
 * distributor and redistributors depends on whether security is enabled in the
 * GIC.
 *
 * When security is enabled, non-secure priority values from the (re)distributor
 * are presented to the GIC CPUIF as follows:
 *     (GIC_(R)DIST_PRI[irq] >> 1) | 0x80;
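 * For example, a (re)distributor priority of 0x70 is presented to the CPU
 * interface as (0x70 >> 1) | 0x80 == 0xb8.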
 *
 * If SCR_EL3.FIQ == 1, the values written to/read from PMR and RPR at non-secure
 * EL1 are subject to a similar operation thus matching the priorities presented
 * from the (re)distributor when security is enabled. When SCR_EL3.FIQ == 0,
 * these values are unchanged by the GIC.
 *
 * see GICv3/GICv4 Architecture Specification (IHI0069D):
 * - section 4.8.1 Non-secure accesses to register fields for Secure interrupt
 *   priorities.
 * - Figure 4-7 Secure read of the priority field for a Non-secure Group 1
 *   interrupt.
 */
static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis);

static u32 gic_get_pribits(void)
{
        u32 pribits;

        pribits = gic_read_ctlr();
        pribits &= ICC_CTLR_EL1_PRI_BITS_MASK;
        pribits >>= ICC_CTLR_EL1_PRI_BITS_SHIFT;
        pribits++;

        return pribits;
}

static bool gic_has_group0(void)
{
        u32 val;
        u32 old_pmr;

        old_pmr = gic_read_pmr();

        /*
         * Let's find out if Group0 is under control of EL3 or not by
         * setting the highest possible, non-zero priority in PMR.
         *
         * If SCR_EL3.FIQ is set, the priority gets shifted down in
         * order for the CPU interface to set bit 7, and keep the
         * actual priority in the non-secure range. In the process, it
         * loses the least significant bit and the actual priority
         * becomes 0x80. Reading it back returns 0, indicating that
         * we don't have access to Group0.
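         * For example, with 8 priority bits we write BIT(0) == 0x01; the
         * shifted value stored by the GIC is (0x01 >> 1) | 0x80 == 0x80,
         * which a subsequent non-secure read returns as 0.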
         */
        gic_write_pmr(BIT(8 - gic_get_pribits()));
        val = gic_read_pmr();

        gic_write_pmr(old_pmr);

        return val != 0;
}

static inline bool gic_dist_security_disabled(void)
{
        return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS;
}

static bool cpus_have_security_disabled __ro_after_init;
static bool cpus_have_group0 __ro_after_init;

static void __init gic_prio_init(void)
{
        cpus_have_security_disabled = gic_dist_security_disabled();
        cpus_have_group0 = gic_has_group0();

        /*
         * How priority values are used by the GIC depends on two things:
         * the security state of the GIC (controlled by the GICD_CTLR.DS bit)
         * and if Group 0 interrupts can be delivered to Linux in the non-secure
         * world as FIQs (controlled by the SCR_EL3.FIQ bit). These affect the
         * way priorities are presented in ICC_PMR_EL1 and in the distributor:
         *
         * GICD_CTLR.DS | SCR_EL3.FIQ | ICC_PMR_EL1 | Distributor
         * -------------------------------------------------------
         *      1       |      -      |  unchanged  | unchanged
         * -------------------------------------------------------
         *      0       |      1      |  non-secure | non-secure
         * -------------------------------------------------------
         *      0       |      0      |  unchanged  | non-secure
         *
         * In the non-secure view reads and writes are modified:
         *
         * - A value written is right-shifted by one and the MSB is set,
         *   forcing the priority into the non-secure range.
         *
         * - A value read is left-shifted by one.
         *
         * In the first two cases, where ICC_PMR_EL1 and the interrupt priority
         * are both either modified or unchanged, we can use the same set of
         * priorities.
         *
         * In the last case, where only the interrupt priorities are modified to
         * be in the non-secure range, we program the non-secure values into
         * the distributor to match the PMR values we want.
         */
        if (cpus_have_group0 && !cpus_have_security_disabled) {
                dist_prio_irq = __gicv3_prio_to_ns(dist_prio_irq);
                dist_prio_nmi = __gicv3_prio_to_ns(dist_prio_nmi);
        }

        pr_info("GICD_CTLR.DS=%d, SCR_EL3.FIQ=%d\n",
                cpus_have_security_disabled,
                !cpus_have_group0);
}

/* rdist_nmi_refs[n] == number of cpus having the rdist interrupt n set as NMI */
static refcount_t *rdist_nmi_refs;

static struct gic_kvm_info gic_v3_kvm_info __initdata;
static DEFINE_PER_CPU(bool, has_rss);

#define MPIDR_RS(mpidr)			(((mpidr) & 0xF0UL) >> 4)
#define gic_data_rdist()		(this_cpu_ptr(gic_data.rdists.rdist))
#define gic_data_rdist_rd_base()	(gic_data_rdist()->rd_base)
#define gic_data_rdist_sgi_base()	(gic_data_rdist_rd_base() + SZ_64K)
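/*
 * Each redistributor is a pair of 64K frames: the RD_base control frame
 * immediately followed by the SGI_base frame holding the SGI/PPI registers,
 * hence the SZ_64K offset above.
 */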

/* Our default, arbitrary priority value. Linux only uses one anyway. */
#define DEFAULT_PMR_VALUE	0xf0

enum gic_intid_range {
        SGI_RANGE,
        PPI_RANGE,
        SPI_RANGE,
        EPPI_RANGE,
        ESPI_RANGE,
        LPI_RANGE,
        __INVALID_RANGE__
};

static enum gic_intid_range __get_intid_range(irq_hw_number_t hwirq)
{
        switch (hwirq) {
        case 0 ... 15:
                return SGI_RANGE;
        case 16 ... 31:
                return PPI_RANGE;
        case 32 ... 1019:
                return SPI_RANGE;
        case EPPI_BASE_INTID ... (EPPI_BASE_INTID + 63):
                return EPPI_RANGE;
        case ESPI_BASE_INTID ... (ESPI_BASE_INTID + 1023):
                return ESPI_RANGE;
        case 8192 ... GENMASK(23, 0):
                return LPI_RANGE;
        default:
                return __INVALID_RANGE__;
        }
}

static enum gic_intid_range get_intid_range(struct irq_data *d)
{
        return __get_intid_range(d->hwirq);
}

static inline bool gic_irq_in_rdist(struct irq_data *d)
{
        switch (get_intid_range(d)) {
        case SGI_RANGE:
        case PPI_RANGE:
        case EPPI_RANGE:
                return true;
        default:
                return false;
        }
}

static inline void __iomem *gic_dist_base_alias(struct irq_data *d)
{
        if (static_branch_unlikely(&gic_nvidia_t241_erratum)) {
                irq_hw_number_t hwirq = irqd_to_hwirq(d);
                u32 chip;

                /*
                 * For the erratum T241-FABRIC-4, read accesses to GICD_In{E}
                 * registers are directed to the chip that owns the SPI. The
                 * alias region can also be used for writes to the
                 * GICD_In{E} except GICD_ICENABLERn. Each chip has support
                 * for 320 {E}SPIs. Mappings for all 4 chips:
                 *    Chip0 = 32-351
                 *    Chip1 = 352-671
                 *    Chip2 = 672-991
                 *    Chip3 = 4096-4415
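                 * e.g. SPI INTID 400 is owned by chip (400 - 32) / 320 == 1.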
                 */
                switch (__get_intid_range(hwirq)) {
                case SPI_RANGE:
                        chip = (hwirq - 32) / 320;
                        break;
                case ESPI_RANGE:
                        chip = 3;
                        break;
                default:
                        unreachable();
                }
                return t241_dist_base_alias[chip];
        }

        return gic_data.dist_base;
}

static inline void __iomem *gic_dist_base(struct irq_data *d)
{
        switch (get_intid_range(d)) {
        case SGI_RANGE:
        case PPI_RANGE:
        case EPPI_RANGE:
                /* SGI+PPI -> SGI_base for this CPU */
                return gic_data_rdist_sgi_base();

        case SPI_RANGE:
        case ESPI_RANGE:
                /* SPI -> dist_base */
                return gic_data.dist_base;

        default:
                return NULL;
        }
}

static void gic_do_wait_for_rwp(void __iomem *base, u32 bit)
{
        u32 val;
        int ret;

        ret = readl_relaxed_poll_timeout_atomic(base + GICD_CTLR, val, !(val & bit),
                                                1, USEC_PER_SEC);
        if (ret == -ETIMEDOUT)
                pr_err_ratelimited("RWP timeout, gone fishing\n");
}

/* Wait for completion of a distributor change */
static void gic_dist_wait_for_rwp(void)
{
        gic_do_wait_for_rwp(gic_data.dist_base, GICD_CTLR_RWP);
}

/* Wait for completion of a redistributor change */
static void gic_redist_wait_for_rwp(void)
{
        gic_do_wait_for_rwp(gic_data_rdist_rd_base(), GICR_CTLR_RWP);
}

static void gic_enable_redist(bool enable)
{
        void __iomem *rbase;
        u32 val;
        int ret;

        if (gic_data.flags & FLAGS_WORKAROUND_GICR_WAKER_MSM8996)
                return;

        rbase = gic_data_rdist_rd_base();

        val = readl_relaxed(rbase + GICR_WAKER);
        if (enable)
                /* Wake up this CPU redistributor */
                val &= ~GICR_WAKER_ProcessorSleep;
        else
                val |= GICR_WAKER_ProcessorSleep;
        writel_relaxed(val, rbase + GICR_WAKER);

        if (!enable) {          /* Check that GICR_WAKER is writeable */
                val = readl_relaxed(rbase + GICR_WAKER);
                if (!(val & GICR_WAKER_ProcessorSleep))
                        return; /* No PM support in this redistributor */
        }

        ret = readl_relaxed_poll_timeout_atomic(rbase + GICR_WAKER, val,
                                                enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep),
                                                1, USEC_PER_SEC);
        if (ret == -ETIMEDOUT) {
                pr_err_ratelimited("redistributor failed to %s...\n",
                                   enable ? "wakeup" : "sleep");
        }
}

/*
 * Routines to disable, enable, EOI and route interrupts
 */
static u32 convert_offset_index(struct irq_data *d, u32 offset, u32 *index)
{
        switch (get_intid_range(d)) {
        case SGI_RANGE:
        case PPI_RANGE:
        case SPI_RANGE:
                *index = d->hwirq;
                return offset;
        case EPPI_RANGE:
                /*
                 * Contrary to the ESPI range, the EPPI range is contiguous
                 * to the PPI range in the registers, so let's adjust the
                 * displacement accordingly. Consistency is overrated.
                 */
                *index = d->hwirq - EPPI_BASE_INTID + 32;
                return offset;
        case ESPI_RANGE:
                *index = d->hwirq - ESPI_BASE_INTID;
                switch (offset) {
                case GICD_ISENABLER:
                        return GICD_ISENABLERnE;
                case GICD_ICENABLER:
                        return GICD_ICENABLERnE;
                case GICD_ISPENDR:
                        return GICD_ISPENDRnE;
                case GICD_ICPENDR:
                        return GICD_ICPENDRnE;
                case GICD_ISACTIVER:
                        return GICD_ISACTIVERnE;
                case GICD_ICACTIVER:
                        return GICD_ICACTIVERnE;
                case GICD_IPRIORITYR:
                        return GICD_IPRIORITYRnE;
                case GICD_ICFGR:
                        return GICD_ICFGRnE;
                case GICD_IROUTER:
                        return GICD_IROUTERnE;
                default:
                        break;
                }
                break;
        default:
                break;
        }

        WARN_ON(1);
        *index = d->hwirq;
        return offset;
}

static int gic_peek_irq(struct irq_data *d, u32 offset)
{
        void __iomem *base;
        u32 index, mask;

        offset = convert_offset_index(d, offset, &index);
        mask = 1 << (index % 32);
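        /* One bit per interrupt, 32 interrupts per 32-bit register */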

        if (gic_irq_in_rdist(d))
                base = gic_data_rdist_sgi_base();
        else
                base = gic_dist_base_alias(d);

        return !!(readl_relaxed(base + offset + (index / 32) * 4) & mask);
}

static void gic_poke_irq(struct irq_data *d, u32 offset)
{
        void __iomem *base;
        u32 index, mask;

        offset = convert_offset_index(d, offset, &index);
        mask = 1 << (index % 32);

        if (gic_irq_in_rdist(d))
                base = gic_data_rdist_sgi_base();
        else
                base = gic_data.dist_base;

        writel_relaxed(mask, base + offset + (index / 32) * 4);
}

static void gic_mask_irq(struct irq_data *d)
{
        gic_poke_irq(d, GICD_ICENABLER);
        if (gic_irq_in_rdist(d))
                gic_redist_wait_for_rwp();
        else
                gic_dist_wait_for_rwp();
}

static void gic_eoimode1_mask_irq(struct irq_data *d)
{
        gic_mask_irq(d);
        /*
         * When masking a forwarded interrupt, make sure it is
         * deactivated as well.
         *
         * This ensures that an interrupt that is getting
         * disabled/masked will not get "stuck", because there is
         * no one to deactivate it (guest is being terminated).
         */
        if (irqd_is_forwarded_to_vcpu(d))
                gic_poke_irq(d, GICD_ICACTIVER);
}

static void gic_unmask_irq(struct irq_data *d)
{
        gic_poke_irq(d, GICD_ISENABLER);
}

static inline bool gic_supports_nmi(void)
{
        return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) &&
               static_branch_likely(&supports_pseudo_nmis);
}

static int gic_irq_set_irqchip_state(struct irq_data *d,
                                     enum irqchip_irq_state which, bool val)
{
        u32 reg;

        if (d->hwirq >= 8192) /* SGI/PPI/SPI only */
                return -EINVAL;

        switch (which) {
        case IRQCHIP_STATE_PENDING:
                reg = val ? GICD_ISPENDR : GICD_ICPENDR;
                break;

        case IRQCHIP_STATE_ACTIVE:
                reg = val ? GICD_ISACTIVER : GICD_ICACTIVER;
                break;

        case IRQCHIP_STATE_MASKED:
                if (val) {
                        gic_mask_irq(d);
                        return 0;
                }
                reg = GICD_ISENABLER;
                break;

        default:
                return -EINVAL;
        }

        gic_poke_irq(d, reg);
        return 0;
}

static int gic_irq_get_irqchip_state(struct irq_data *d,
                                     enum irqchip_irq_state which, bool *val)
{
        if (d->hwirq >= 8192) /* SGI/PPI/SPI only */
                return -EINVAL;

        switch (which) {
        case IRQCHIP_STATE_PENDING:
                *val = gic_peek_irq(d, GICD_ISPENDR);
                break;

        case IRQCHIP_STATE_ACTIVE:
                *val = gic_peek_irq(d, GICD_ISACTIVER);
                break;

        case IRQCHIP_STATE_MASKED:
                *val = !gic_peek_irq(d, GICD_ISENABLER);
                break;

        default:
                return -EINVAL;
        }

        return 0;
}

static void gic_irq_set_prio(struct irq_data *d, u8 prio)
{
        void __iomem *base = gic_dist_base(d);
        u32 offset, index;

        offset = convert_offset_index(d, GICD_IPRIORITYR, &index);

        writeb_relaxed(prio, base + offset + index);
}

static u32 __gic_get_ppi_index(irq_hw_number_t hwirq)
{
        switch (__get_intid_range(hwirq)) {
        case PPI_RANGE:
                return hwirq - 16;
        case EPPI_RANGE:
                return hwirq - EPPI_BASE_INTID + 16;
        default:
                unreachable();
        }
}

static u32 __gic_get_rdist_index(irq_hw_number_t hwirq)
{
        switch (__get_intid_range(hwirq)) {
        case SGI_RANGE:
        case PPI_RANGE:
                return hwirq;
        case EPPI_RANGE:
                return hwirq - EPPI_BASE_INTID + 32;
        default:
                unreachable();
        }
}

static u32 gic_get_rdist_index(struct irq_data *d)
{
        return __gic_get_rdist_index(d->hwirq);
}

static int gic_irq_nmi_setup(struct irq_data *d)
{
        struct irq_desc *desc = irq_to_desc(d->irq);

        if (!gic_supports_nmi())
                return -EINVAL;

        if (gic_peek_irq(d, GICD_ISENABLER)) {
                pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq);
                return -EINVAL;
        }

        /*
         * A secondary irq_chip should be in charge of LPI requests;
         * it should not be possible to get here.
         */
        if (WARN_ON(irqd_to_hwirq(d) >= 8192))
                return -EINVAL;

        /* desc lock should already be held */
        if (gic_irq_in_rdist(d)) {
                u32 idx = gic_get_rdist_index(d);

                /*
                 * When setting up a percpu interrupt as NMI, only switch
                 * the handler for the first NMI.
                 */
                if (!refcount_inc_not_zero(&rdist_nmi_refs[idx])) {
                        refcount_set(&rdist_nmi_refs[idx], 1);
                        desc->handle_irq = handle_percpu_devid_fasteoi_nmi;
                }
        } else {
                desc->handle_irq = handle_fasteoi_nmi;
        }

        gic_irq_set_prio(d, dist_prio_nmi);

        return 0;
}

static void gic_irq_nmi_teardown(struct irq_data *d)
{
        struct irq_desc *desc = irq_to_desc(d->irq);

        if (WARN_ON(!gic_supports_nmi()))
                return;

        if (gic_peek_irq(d, GICD_ISENABLER)) {
                pr_err("Cannot clear NMI property of enabled IRQ %u\n", d->irq);
                return;
        }

        /*
         * A secondary irq_chip should be in charge of LPI requests;
         * it should not be possible to get here.
         */
        if (WARN_ON(irqd_to_hwirq(d) >= 8192))
                return;

        /* desc lock should already be held */
        if (gic_irq_in_rdist(d)) {
                u32 idx = gic_get_rdist_index(d);

                /* Tearing down NMI, only switch handler for last NMI */
                if (refcount_dec_and_test(&rdist_nmi_refs[idx]))
                        desc->handle_irq = handle_percpu_devid_irq;
        } else {
                desc->handle_irq = handle_fasteoi_irq;
        }

        gic_irq_set_prio(d, dist_prio_irq);
}

static bool gic_arm64_erratum_2941627_needed(struct irq_data *d)
{
        enum gic_intid_range range;

        if (!static_branch_unlikely(&gic_arm64_2941627_erratum))
                return false;

        range = get_intid_range(d);

        /*
         * The workaround is needed if the IRQ is an SPI and
         * the target cpu is different from the one we are
         * executing on.
         */
        return (range == SPI_RANGE || range == ESPI_RANGE) &&
               !cpumask_test_cpu(raw_smp_processor_id(),
                                 irq_data_get_effective_affinity_mask(d));
}

static void gic_eoi_irq(struct irq_data *d)
{
        write_gicreg(irqd_to_hwirq(d), ICC_EOIR1_EL1);
        isb();

        if (gic_arm64_erratum_2941627_needed(d)) {
                /*
                 * Make sure the GIC stream deactivate packet
                 * issued by ICC_EOIR1_EL1 has completed before
                 * deactivating through GICD_IACTIVER.
                 */
                dsb(sy);
                gic_poke_irq(d, GICD_ICACTIVER);
        }
}

static void gic_eoimode1_eoi_irq(struct irq_data *d)
{
        /*
         * No need to deactivate an LPI, or an interrupt that
         * is getting forwarded to a vcpu.
         */
        if (irqd_to_hwirq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d))
                return;

        if (!gic_arm64_erratum_2941627_needed(d))
                gic_write_dir(irqd_to_hwirq(d));
        else
                gic_poke_irq(d, GICD_ICACTIVER);
}

static int gic_set_type(struct irq_data *d, unsigned int type)
{
        irq_hw_number_t irq = irqd_to_hwirq(d);
        enum gic_intid_range range;
        void __iomem *base;
        u32 offset, index;
        int ret;

        range = get_intid_range(d);

        /* Interrupt configuration for SGIs can't be changed */
        if (range == SGI_RANGE)
                return type != IRQ_TYPE_EDGE_RISING ? -EINVAL : 0;

        /* SPIs have restrictions on the supported types */
        if ((range == SPI_RANGE || range == ESPI_RANGE) &&
            type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING)
                return -EINVAL;

        if (gic_irq_in_rdist(d))
                base = gic_data_rdist_sgi_base();
        else
                base = gic_dist_base_alias(d);

        offset = convert_offset_index(d, GICD_ICFGR, &index);

        ret = gic_configure_irq(index, type, base + offset);
        if (ret && (range == PPI_RANGE || range == EPPI_RANGE)) {
                /* Misconfigured PPIs are usually not fatal */
                pr_warn("GIC: PPI INTID%ld is secure or misconfigured\n", irq);
                ret = 0;
        }

        return ret;
}

static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu)
{
        if (get_intid_range(d) == SGI_RANGE)
                return -EINVAL;

        if (vcpu)
                irqd_set_forwarded_to_vcpu(d);
        else
                irqd_clr_forwarded_to_vcpu(d);
        return 0;
}

static u64 gic_cpu_to_affinity(int cpu)
{
        u64 mpidr = cpu_logical_map(cpu);
        u64 aff;

        /* ASR8601 needs to have its affinities shifted down... */
        if (unlikely(gic_data.flags & FLAGS_WORKAROUND_ASR_ERRATUM_8601001))
                mpidr = (MPIDR_AFFINITY_LEVEL(mpidr, 1) |
                         (MPIDR_AFFINITY_LEVEL(mpidr, 2) << 8));

        aff = ((u64)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 |
               MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
               MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8  |
               MPIDR_AFFINITY_LEVEL(mpidr, 0));

        return aff;
}

static void gic_deactivate_unhandled(u32 irqnr)
{
        if (static_branch_likely(&supports_deactivate_key)) {
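                /*
                 * EOImode 1: gic_complete_ack() already dropped the priority,
                 * so only deactivate here; LPIs have no active state and
                 * need no DIR write.
                 */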
                if (irqnr < 8192)
                        gic_write_dir(irqnr);
        } else {
                write_gicreg(irqnr, ICC_EOIR1_EL1);
                isb();
        }
}

/*
 * Follow a read of the IAR with any HW maintenance that needs to happen prior
 * to invoking the relevant IRQ handler. We must do two things:
 *
 * (1) Ensure instruction ordering between a read of IAR and subsequent
 *     instructions in the IRQ handler using an ISB.
 *
 *     It is possible for the IAR to report an IRQ which was signalled *after*
 *     the CPU took an IRQ exception as multiple interrupts can race to be
 *     recognized by the GIC, earlier interrupts could be withdrawn, and/or
 *     later interrupts could be prioritized by the GIC.
 *
 *     For devices which are tightly coupled to the CPU, such as PMUs, a
 *     context synchronization event is necessary to ensure that system
 *     register state is not stale, as these may have been indirectly written
 *     *after* exception entry.
 *
 * (2) Deactivate the interrupt when EOI mode 1 is in use.
 */
static inline void gic_complete_ack(u32 irqnr)
{
        if (static_branch_likely(&supports_deactivate_key))
                write_gicreg(irqnr, ICC_EOIR1_EL1);

        isb();
}

static bool gic_rpr_is_nmi_prio(void)
{
        if (!gic_supports_nmi())
                return false;

        return unlikely(gic_read_rpr() == GICV3_PRIO_NMI);
}

static bool gic_irqnr_is_special(u32 irqnr)
{
        return irqnr >= 1020 && irqnr <= 1023;
}

static void __gic_handle_irq(u32 irqnr, struct pt_regs *regs)
{
        if (gic_irqnr_is_special(irqnr))
                return;

        gic_complete_ack(irqnr);

        if (generic_handle_domain_irq(gic_data.domain, irqnr)) {
                WARN_ONCE(true, "Unexpected interrupt (irqnr %u)\n", irqnr);
                gic_deactivate_unhandled(irqnr);
        }
}

static void __gic_handle_nmi(u32 irqnr, struct pt_regs *regs)
{
        if (gic_irqnr_is_special(irqnr))
                return;

        gic_complete_ack(irqnr);

        if (generic_handle_domain_nmi(gic_data.domain, irqnr)) {
                WARN_ONCE(true, "Unexpected pseudo-NMI (irqnr %u)\n", irqnr);
                gic_deactivate_unhandled(irqnr);
        }
}

/*
 * An exception has been taken from a context with IRQs enabled, and this could
 * be an IRQ or an NMI.
 *
 * The entry code called us with DAIF.IF set to keep NMIs masked. We must clear
 * DAIF.IF (and update ICC_PMR_EL1 to mask regular IRQs) prior to returning,
 * after handling any NMI but before handling any IRQ.
 *
 * The entry code has performed IRQ entry, and if an NMI is detected we must
 * perform NMI entry/exit around invoking the handler.
 */
static void __gic_handle_irq_from_irqson(struct pt_regs *regs)
{
        bool is_nmi;
        u32 irqnr;

        irqnr = gic_read_iar();

        is_nmi = gic_rpr_is_nmi_prio();

        if (is_nmi) {
                nmi_enter();
                __gic_handle_nmi(irqnr, regs);
                nmi_exit();
        }

        if (gic_prio_masking_enabled()) {
                gic_pmr_mask_irqs();
                gic_arch_enable_irqs();
        }

        if (!is_nmi)
                __gic_handle_irq(irqnr, regs);
}

/*
 * An exception has been taken from a context with IRQs disabled, which can only
 * be an NMI.
 *
 * The entry code called us with DAIF.IF set to keep NMIs masked. We must leave
 * DAIF.IF (and ICC_PMR_EL1) unchanged.
 *
 * The entry code has performed NMI entry.
 */
static void __gic_handle_irq_from_irqsoff(struct pt_regs *regs)
{
        u64 pmr;
        u32 irqnr;

        /*
         * We were in a context with IRQs disabled. However, the
         * entry code has set PMR to a value that allows any
         * interrupt to be acknowledged, and not just NMIs. This can
         * lead to surprising effects if the NMI has been retired in
         * the meantime, and an IRQ is pending. The IRQ would then be
         * taken in NMI context, something that nobody wants to debug
         * twice.
         *
         * Until we sort this, drop PMR again to a level that will
         * actually only allow NMIs before reading IAR, and then
         * restore it to what it was.
         */
        pmr = gic_read_pmr();
        gic_pmr_mask_irqs();
        isb();
        irqnr = gic_read_iar();
        gic_write_pmr(pmr);

        __gic_handle_nmi(irqnr, regs);
}

static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
{
        if (unlikely(gic_supports_nmi() && !interrupts_enabled(regs)))
                __gic_handle_irq_from_irqsoff(regs);
        else
                __gic_handle_irq_from_irqson(regs);
}

static void __init gic_dist_init(void)
{
        unsigned int i;
        u64 affinity;
        void __iomem *base = gic_data.dist_base;
        u32 val;

        /* Disable the distributor */
        writel_relaxed(0, base + GICD_CTLR);
        gic_dist_wait_for_rwp();

        /*
         * Configure SPIs as non-secure Group-1. This will only matter
         * if the GIC only has a single security state. This will not
         * do the right thing if the kernel is running in secure mode,
         * but that's not the intended use case anyway.
         */
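        /* One group bit per SPI: each 32-bit GICD_IGROUPR covers 32 INTIDs, i.e. i / 8 bytes */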
        for (i = 32; i < GIC_LINE_NR; i += 32)
                writel_relaxed(~0, base + GICD_IGROUPR + i / 8);

        /* Extended SPI range, not handled by the GICv2/GICv3 common code */
        for (i = 0; i < GIC_ESPI_NR; i += 32) {
                writel_relaxed(~0U, base + GICD_ICENABLERnE + i / 8);
                writel_relaxed(~0U, base + GICD_ICACTIVERnE + i / 8);
        }

        for (i = 0; i < GIC_ESPI_NR; i += 32)
                writel_relaxed(~0U, base + GICD_IGROUPRnE + i / 8);

        for (i = 0; i < GIC_ESPI_NR; i += 16)
                writel_relaxed(0, base + GICD_ICFGRnE + i / 4);

        for (i = 0; i < GIC_ESPI_NR; i += 4)
                writel_relaxed(REPEAT_BYTE_U32(dist_prio_irq),
                               base + GICD_IPRIORITYRnE + i);

        /* Now do the common stuff */
        gic_dist_config(base, GIC_LINE_NR, dist_prio_irq);

        val = GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1;
        if (gic_data.rdists.gicd_typer2 & GICD_TYPER2_nASSGIcap) {
                pr_info("Enabling SGIs without active state\n");
                val |= GICD_CTLR_nASSGIreq;
        }

        /* Enable distributor with ARE, Group1, and wait for it to drain */
        writel_relaxed(val, base + GICD_CTLR);
        gic_dist_wait_for_rwp();

        /*
         * Set all global interrupts to the boot CPU only. ARE must be
         * enabled.
         */
        affinity = gic_cpu_to_affinity(smp_processor_id());
        for (i = 32; i < GIC_LINE_NR; i++)
                gic_write_irouter(affinity, base + GICD_IROUTER + i * 8);

        for (i = 0; i < GIC_ESPI_NR; i++)
                gic_write_irouter(affinity, base + GICD_IROUTERnE + i * 8);
}

static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *))
{
        int ret = -ENODEV;
        int i;

        for (i = 0; i < gic_data.nr_redist_regions; i++) {
                void __iomem *ptr = gic_data.redist_regions[i].redist_base;
                u64 typer;
                u32 reg;

                reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK;
                if (reg != GIC_PIDR2_ARCH_GICv3 &&
                    reg != GIC_PIDR2_ARCH_GICv4) { /* We're in trouble... */
                        pr_warn("No redistributor present @%p\n", ptr);
                        break;
                }

                do {
                        typer = gic_read_typer(ptr + GICR_TYPER);
                        ret = fn(gic_data.redist_regions + i, ptr);
                        if (!ret)
                                return 0;

                        if (gic_data.redist_regions[i].single_redist)
                                break;

                        if (gic_data.redist_stride) {
                                ptr += gic_data.redist_stride;
                        } else {
                                ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */
                                if (typer & GICR_TYPER_VLPIS)
                                        ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */
                        }
                } while (!(typer & GICR_TYPER_LAST));
        }

        return ret ? -ENODEV : 0;
}

static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr)
{
        unsigned long mpidr;
        u64 typer;
        u32 aff;

        /*
         * Convert affinity to a 32bit value that can be matched to
         * GICR_TYPER bits [63:32].
         */
        mpidr = gic_cpu_to_affinity(smp_processor_id());

        aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 |
               MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 |
               MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 |
               MPIDR_AFFINITY_LEVEL(mpidr, 0));

        typer = gic_read_typer(ptr + GICR_TYPER);
        if ((typer >> 32) == aff) {
                u64 offset = ptr - region->redist_base;

                raw_spin_lock_init(&gic_data_rdist()->rd_lock);
                gic_data_rdist_rd_base() = ptr;
                gic_data_rdist()->phys_base = region->phys_base + offset;

                pr_info("CPU%d: found redistributor %lx region %d:%pa\n",
                        smp_processor_id(), mpidr,
                        (int)(region - gic_data.redist_regions),
                        &gic_data_rdist()->phys_base);
                return 0;
        }

        /* Try next one */
        return 1;
}

static int gic_populate_rdist(void)
{
        if (gic_iterate_rdists(__gic_populate_rdist) == 0)
                return 0;

        /* We couldn't even deal with ourselves... */
        WARN(true, "CPU%d: mpidr %lx has no re-distributor!\n",
             smp_processor_id(),
             (unsigned long)cpu_logical_map(smp_processor_id()));
        return -ENODEV;
}

static int __gic_update_rdist_properties(struct redist_region *region,
                                         void __iomem *ptr)
{
        u64 typer = gic_read_typer(ptr + GICR_TYPER);
        u32 ctlr = readl_relaxed(ptr + GICR_CTLR);

        /* Boot-time cleanup */
        if ((typer & GICR_TYPER_VLPIS) && (typer & GICR_TYPER_RVPEID)) {
                u64 val;

                /* Deactivate any present vPE */
                val = gicr_read_vpendbaser(ptr + SZ_128K + GICR_VPENDBASER);
                if (val & GICR_VPENDBASER_Valid)
                        gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast,
                                              ptr + SZ_128K + GICR_VPENDBASER);

                /* Mark the VPE table as invalid */
                val = gicr_read_vpropbaser(ptr + SZ_128K + GICR_VPROPBASER);
                val &= ~GICR_VPROPBASER_4_1_VALID;
                gicr_write_vpropbaser(val, ptr + SZ_128K + GICR_VPROPBASER);
        }

        gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS);

        /*
         * TYPER.RVPEID implies some form of DirectLPI, no matter what the
         * doc says... :-/ And CTLR.IR implies another subset of DirectLPI
         * that the ITS driver can make use of for LPIs (and not VLPIs).
         *
         * These are 3 different ways to express the same thing, depending
         * on the revision of the architecture and its relaxations over
         * time. Just group them under the 'direct_lpi' banner.
         */
        gic_data.rdists.has_rvpeid &= !!(typer & GICR_TYPER_RVPEID);
        gic_data.rdists.has_direct_lpi &= (!!(typer & GICR_TYPER_DirectLPIS) |
                                           !!(ctlr & GICR_CTLR_IR) |
                                           gic_data.rdists.has_rvpeid);
        gic_data.rdists.has_vpend_valid_dirty &= !!(typer & GICR_TYPER_DIRTY);

        /* Detect non-sensical configurations */
        if (WARN_ON_ONCE(gic_data.rdists.has_rvpeid && !gic_data.rdists.has_vlpis)) {
                gic_data.rdists.has_direct_lpi = false;
                gic_data.rdists.has_vlpis = false;
                gic_data.rdists.has_rvpeid = false;
        }

        gic_data.ppi_nr = min(GICR_TYPER_NR_PPIS(typer), gic_data.ppi_nr);

        return 1;
}

static void gic_update_rdist_properties(void)
{
        gic_data.ppi_nr = UINT_MAX;
        gic_iterate_rdists(__gic_update_rdist_properties);
        if (WARN_ON(gic_data.ppi_nr == UINT_MAX))
                gic_data.ppi_nr = 0;
        pr_info("GICv3 features: %d PPIs%s%s\n",
                gic_data.ppi_nr,
                gic_data.has_rss ? ", RSS" : "",
                gic_data.rdists.has_direct_lpi ? ", DirectLPI" : "");

        if (gic_data.rdists.has_vlpis)
                pr_info("GICv4 features: %s%s%s\n",
                        gic_data.rdists.has_direct_lpi ? "DirectLPI " : "",
                        gic_data.rdists.has_rvpeid ? "RVPEID " : "",
                        gic_data.rdists.has_vpend_valid_dirty ? "Valid+Dirty " : "");
}

static void gic_cpu_sys_reg_enable(void)
{
        /*
         * Need to check that the SRE bit has actually been set. If
         * not, it means that SRE is disabled at EL2. We're going to
         * die painfully, and there is nothing we can do about it.
         *
         * Kindly inform the luser.
         */
        if (!gic_enable_sre())
                pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n");
}

static void gic_cpu_sys_reg_init(void)
{
        int i, cpu = smp_processor_id();
        u64 mpidr = gic_cpu_to_affinity(cpu);
        u64 need_rss = MPIDR_RS(mpidr);
        bool group0;
        u32 pribits;

        pribits = gic_get_pribits();

        group0 = gic_has_group0();

        /* Set priority mask register */
        if (!gic_prio_masking_enabled()) {
                write_gicreg(DEFAULT_PMR_VALUE, ICC_PMR_EL1);
        } else if (gic_supports_nmi()) {
                /*
                 * Check that all CPUs use the same priority space.
                 *
                 * If there's a mismatch with the boot CPU, the system is
                 * likely to die as interrupt masking will not work properly on
                 * all CPUs.
                 */
                WARN_ON(group0 != cpus_have_group0);
                WARN_ON(gic_dist_security_disabled() != cpus_have_security_disabled);
        }

        /*
         * Some firmwares hand over to the kernel with the BPR changed from
         * its reset value (and with a value large enough to prevent
         * any pre-emptive interrupts from working at all). Writing a zero
         * to BPR restores its reset value.
         */
        gic_write_bpr1(0);

        if (static_branch_likely(&supports_deactivate_key)) {
                /* EOI drops priority only (mode 1) */
                gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop);
        } else {
                /* EOI deactivates interrupt too (mode 0) */
                gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir);
        }

        /* Always whack Group0 before Group1 */
        if (group0) {
                switch (pribits) {
                case 8:
                case 7:
                        write_gicreg(0, ICC_AP0R3_EL1);
                        write_gicreg(0, ICC_AP0R2_EL1);
                        fallthrough;
                case 6:
                        write_gicreg(0, ICC_AP0R1_EL1);
                        fallthrough;
                case 5:
                case 4:
                        write_gicreg(0, ICC_AP0R0_EL1);
                }

                isb();
        }

        switch (pribits) {
        case 8:
        case 7:
                write_gicreg(0, ICC_AP1R3_EL1);
                write_gicreg(0, ICC_AP1R2_EL1);
                fallthrough;
        case 6:
                write_gicreg(0, ICC_AP1R1_EL1);
                fallthrough;
        case 5:
        case 4:
                write_gicreg(0, ICC_AP1R0_EL1);
        }

        isb();

        /* ... and let's hit the road... */
        gic_write_grpen1(1);

        /* Keep the RSS capability status in per_cpu variable */
        per_cpu(has_rss, cpu) = !!(gic_read_ctlr() & ICC_CTLR_EL1_RSS);

        /* Check that all CPUs are capable of sending SGIs to other CPUs */
        for_each_online_cpu(i) {
                bool have_rss = per_cpu(has_rss, i) && per_cpu(has_rss, cpu);

                need_rss |= MPIDR_RS(gic_cpu_to_affinity(i));
                if (need_rss && (!have_rss))
                        pr_crit("CPU%d (%lx) can't SGI CPU%d (%lx), no RSS\n",
                                cpu, (unsigned long)mpidr,
                                i, (unsigned long)gic_cpu_to_affinity(i));
        }

        /*
         * GIC spec says, when ICC_CTLR_EL1.RSS==1 and GICD_TYPER.RSS==0,
         * writing ICC_ASGI1R_EL1 register with RS != 0 is a CONSTRAINED
         * UNPREDICTABLE choice of:
         * - The write is ignored.
         * - The RS field is treated as 0.
         */
        if (need_rss && (!gic_data.has_rss))
                pr_crit_once("RSS is required but GICD doesn't support it\n");
}

static bool gicv3_nolpi;

static int __init gicv3_nolpi_cfg(char *buf)
{
        return kstrtobool(buf, &gicv3_nolpi);
}
early_param("irqchip.gicv3_nolpi", gicv3_nolpi_cfg);

static int gic_dist_supports_lpis(void)
{
        return (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) &&
                !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS) &&
                !gicv3_nolpi);
}

static void gic_cpu_init(void)
{
        void __iomem *rbase;
        int i;

        /* Register ourselves with the rest of the world */
        if (gic_populate_rdist())
                return;

        gic_enable_redist(true);

        WARN((gic_data.ppi_nr > 16 || GIC_ESPI_NR != 0) &&
             !(gic_read_ctlr() & ICC_CTLR_EL1_ExtRange),
             "Distributor has extended ranges, but CPU%d doesn't\n",
             smp_processor_id());

        rbase = gic_data_rdist_sgi_base();

        /* Configure SGIs/PPIs as non-secure Group-1 */
        for (i = 0; i < gic_data.ppi_nr + SGI_NR; i += 32)
                writel_relaxed(~0, rbase + GICR_IGROUPR0 + i / 8);

        gic_cpu_config(rbase, gic_data.ppi_nr + SGI_NR, dist_prio_irq);
        gic_redist_wait_for_rwp();

        /* initialise system registers */
        gic_cpu_sys_reg_init();
}

#ifdef CONFIG_SMP

#define MPIDR_TO_SGI_RS(mpidr)		(MPIDR_RS(mpidr) << ICC_SGI1R_RS_SHIFT)
#define MPIDR_TO_SGI_CLUSTER_ID(mpidr)	((mpidr) & ~0xFUL)
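/*
 * A single SGI target list addresses at most 16 CPUs (Aff0 values 0-15)
 * within one cluster; with RSS, the RS field selects which group of 16
 * Aff0 values the target list applies to.
 */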

/*
 * gic_starting_cpu() is called after the last point where cpuhp is allowed
 * to fail, so pre-check for problems earlier.
 */
static int gic_check_rdist(unsigned int cpu)
{
        if (cpumask_test_cpu(cpu, &broken_rdists))
                return -EINVAL;

        return 0;
}

static int gic_starting_cpu(unsigned int cpu)
{
        gic_cpu_sys_reg_enable();
        gic_cpu_init();

        if (gic_dist_supports_lpis())
                its_cpu_init();

        return 0;
}

static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask,
                                   unsigned long cluster_id)
{
        int next_cpu, cpu = *base_cpu;
        unsigned long mpidr;
        u16 tlist = 0;

        mpidr = gic_cpu_to_affinity(cpu);

        while (cpu < nr_cpu_ids) {
                tlist |= 1 << (mpidr & 0xf);

                next_cpu = cpumask_next(cpu, mask);
                if (next_cpu >= nr_cpu_ids)
                        goto out;
                cpu = next_cpu;

                mpidr = gic_cpu_to_affinity(cpu);

                if (cluster_id != MPIDR_TO_SGI_CLUSTER_ID(mpidr)) {
                        cpu--;
                        goto out;
                }
        }
out:
        *base_cpu = cpu;
        return tlist;
}

#define MPIDR_TO_SGI_AFFINITY(cluster_id, level) \
        (MPIDR_AFFINITY_LEVEL(cluster_id, level) \
                << ICC_SGI1R_AFFINITY_## level ##_SHIFT)

static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq)
{
        u64 val;

        val = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3)     |
               MPIDR_TO_SGI_AFFINITY(cluster_id, 2)     |
               irq << ICC_SGI1R_SGI_ID_SHIFT            |
               MPIDR_TO_SGI_AFFINITY(cluster_id, 1)     |
               MPIDR_TO_SGI_RS(cluster_id)              |
               tlist << ICC_SGI1R_TARGET_LIST_SHIFT);

        pr_devel("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val);
        gic_write_sgi1r(val);
}

static void gic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
{
        int cpu;

        if (WARN_ON(d->hwirq >= 16))
                return;

        /*
         * Ensure that stores to Normal memory are visible to the
         * other CPUs before issuing the IPI.
         */
        dsb(ishst);

        for_each_cpu(cpu, mask) {
                u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(gic_cpu_to_affinity(cpu));
                u16 tlist;

                tlist = gic_compute_target_list(&cpu, mask, cluster_id);
                gic_send_sgi(cluster_id, tlist, d->hwirq);
        }

        /* Force the above writes to ICC_SGI1R_EL1 to be executed */
        isb();
}

static void __init gic_smp_init(void)
{
        struct irq_fwspec sgi_fwspec = {
                .fwnode         = gic_data.fwnode,
                .param_count    = 1,
        };
        int base_sgi;

        cpuhp_setup_state_nocalls(CPUHP_BP_PREPARE_DYN,
                                  "irqchip/arm/gicv3:checkrdist",
                                  gic_check_rdist, NULL);

        cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING,
                                  "irqchip/arm/gicv3:starting",
                                  gic_starting_cpu, NULL);

        /* Register all 8 non-secure SGIs */
        base_sgi = irq_domain_alloc_irqs(gic_data.domain, 8, NUMA_NO_NODE, &sgi_fwspec);
        if (WARN_ON(base_sgi <= 0))
                return;

        set_smp_ipi_range(base_sgi, 8);
}

static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
                            bool force)
{
        unsigned int cpu;
        u32 offset, index;
        void __iomem *reg;
        int enabled;
        u64 val;

        if (force)
                cpu = cpumask_first(mask_val);
        else
                cpu = cpumask_any_and(mask_val, cpu_online_mask);

        if (cpu >= nr_cpu_ids)
                return -EINVAL;

        if (gic_irq_in_rdist(d))
                return -EINVAL;

        /* If interrupt was enabled, disable it first */
        enabled = gic_peek_irq(d, GICD_ISENABLER);
        if (enabled)
                gic_mask_irq(d);

        offset = convert_offset_index(d, GICD_IROUTER, &index);
        reg = gic_dist_base(d) + offset + (index * 8);
        val = gic_cpu_to_affinity(cpu);

        gic_write_irouter(val, reg);

        /*
         * If the interrupt was enabled, enable it again. Otherwise,
         * just wait for the distributor to have digested our changes.
         */
        if (enabled)
                gic_unmask_irq(d);

        irq_data_update_effective_affinity(d, cpumask_of(cpu));

        return IRQ_SET_MASK_OK_DONE;
}
#else
#define gic_set_affinity	NULL
#define gic_ipi_send_mask	NULL
#define gic_smp_init()		do { } while(0)
#endif

static int gic_retrigger(struct irq_data *data)
{
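        /* Retrigger by making the interrupt pending again from software */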
1496 return !gic_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING, true);
1497 }
1498
1499 #ifdef CONFIG_CPU_PM
gic_cpu_pm_notifier(struct notifier_block * self,unsigned long cmd,void * v)1500 static int gic_cpu_pm_notifier(struct notifier_block *self,
1501 unsigned long cmd, void *v)
1502 {
1503 if (cmd == CPU_PM_EXIT) {
1504 if (gic_dist_security_disabled())
1505 gic_enable_redist(true);
1506 gic_cpu_sys_reg_enable();
1507 gic_cpu_sys_reg_init();
1508 } else if (cmd == CPU_PM_ENTER && gic_dist_security_disabled()) {
1509 gic_write_grpen1(0);
1510 gic_enable_redist(false);
1511 }
1512 return NOTIFY_OK;
1513 }
1514
1515 static struct notifier_block gic_cpu_pm_notifier_block = {
1516 .notifier_call = gic_cpu_pm_notifier,
1517 };
1518
gic_cpu_pm_init(void)1519 static void gic_cpu_pm_init(void)
1520 {
1521 cpu_pm_register_notifier(&gic_cpu_pm_notifier_block);
1522 }
1523
1524 #else
gic_cpu_pm_init(void)1525 static inline void gic_cpu_pm_init(void) { }
1526 #endif /* CONFIG_CPU_PM */
1527
1528 static struct irq_chip gic_chip = {
1529 .name = "GICv3",
1530 .irq_mask = gic_mask_irq,
1531 .irq_unmask = gic_unmask_irq,
1532 .irq_eoi = gic_eoi_irq,
1533 .irq_set_type = gic_set_type,
1534 .irq_set_affinity = gic_set_affinity,
1535 .irq_retrigger = gic_retrigger,
1536 .irq_get_irqchip_state = gic_irq_get_irqchip_state,
1537 .irq_set_irqchip_state = gic_irq_set_irqchip_state,
1538 .irq_nmi_setup = gic_irq_nmi_setup,
1539 .irq_nmi_teardown = gic_irq_nmi_teardown,
1540 .ipi_send_mask = gic_ipi_send_mask,
1541 .flags = IRQCHIP_SET_TYPE_MASKED |
1542 IRQCHIP_SKIP_SET_WAKE |
1543 IRQCHIP_MASK_ON_SUSPEND,
1544 };
1545
1546 static struct irq_chip gic_eoimode1_chip = {
1547 .name = "GICv3",
1548 .irq_mask = gic_eoimode1_mask_irq,
1549 .irq_unmask = gic_unmask_irq,
1550 .irq_eoi = gic_eoimode1_eoi_irq,
1551 .irq_set_type = gic_set_type,
1552 .irq_set_affinity = gic_set_affinity,
1553 .irq_retrigger = gic_retrigger,
1554 .irq_get_irqchip_state = gic_irq_get_irqchip_state,
1555 .irq_set_irqchip_state = gic_irq_set_irqchip_state,
1556 .irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity,
1557 .irq_nmi_setup = gic_irq_nmi_setup,
1558 .irq_nmi_teardown = gic_irq_nmi_teardown,
1559 .ipi_send_mask = gic_ipi_send_mask,
1560 .flags = IRQCHIP_SET_TYPE_MASKED |
1561 IRQCHIP_SKIP_SET_WAKE |
1562 IRQCHIP_MASK_ON_SUSPEND,
1563 };
1564
gic_irq_domain_map(struct irq_domain * d,unsigned int irq,irq_hw_number_t hw)1565 static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
1566 irq_hw_number_t hw)
1567 {
1568 struct irq_chip *chip = &gic_chip;
1569 struct irq_data *irqd = irq_desc_get_irq_data(irq_to_desc(irq));
1570
1571 if (static_branch_likely(&supports_deactivate_key))
1572 chip = &gic_eoimode1_chip;
1573
1574 switch (__get_intid_range(hw)) {
1575 case SGI_RANGE:
1576 case PPI_RANGE:
1577 case EPPI_RANGE:
1578 irq_set_percpu_devid(irq);
1579 irq_domain_set_info(d, irq, hw, chip, d->host_data,
1580 handle_percpu_devid_irq, NULL, NULL);
1581 break;
1582
1583 case SPI_RANGE:
1584 case ESPI_RANGE:
1585 irq_domain_set_info(d, irq, hw, chip, d->host_data,
1586 handle_fasteoi_irq, NULL, NULL);
1587 irq_set_probe(irq);
1588 irqd_set_single_target(irqd);
1589 break;
1590
1591 case LPI_RANGE:
1592 if (!gic_dist_supports_lpis())
1593 return -EPERM;
1594 irq_domain_set_info(d, irq, hw, chip, d->host_data,
1595 handle_fasteoi_irq, NULL, NULL);
1596 break;
1597
1598 default:
1599 return -EPERM;
1600 }
1601
1602 /* Prevents SW retriggers which mess up the ACK/EOI ordering */
1603 irqd_set_handle_enforce_irqctx(irqd);
1604 return 0;
1605 }
1606
gic_irq_domain_translate(struct irq_domain * d,struct irq_fwspec * fwspec,unsigned long * hwirq,unsigned int * type)1607 static int gic_irq_domain_translate(struct irq_domain *d,
1608 struct irq_fwspec *fwspec,
1609 unsigned long *hwirq,
1610 unsigned int *type)
1611 {
1612 if (fwspec->param_count == 1 && fwspec->param[0] < 16) {
1613 *hwirq = fwspec->param[0];
1614 *type = IRQ_TYPE_EDGE_RISING;
1615 return 0;
1616 }
1617
1618 if (is_of_node(fwspec->fwnode)) {
1619 if (fwspec->param_count < 3)
1620 return -EINVAL;
1621
1622 switch (fwspec->param[0]) {
1623 case 0: /* SPI */
1624 *hwirq = fwspec->param[1] + 32;
1625 break;
1626 case 1: /* PPI */
1627 *hwirq = fwspec->param[1] + 16;
1628 break;
1629 case 2: /* ESPI */
1630 *hwirq = fwspec->param[1] + ESPI_BASE_INTID;
1631 break;
1632 case 3: /* EPPI */
1633 *hwirq = fwspec->param[1] + EPPI_BASE_INTID;
1634 break;
1635 case GIC_IRQ_TYPE_LPI: /* LPI */
1636 *hwirq = fwspec->param[1];
1637 break;
1638 case GIC_IRQ_TYPE_PARTITION:
1639 *hwirq = fwspec->param[1];
1640 if (fwspec->param[1] >= 16)
1641 *hwirq += EPPI_BASE_INTID - 16;
1642 else
1643 *hwirq += 16;
1644 break;
1645 default:
1646 return -EINVAL;
1647 }
1648
1649 *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
1650
1651 /*
1652 * Make it clear that broken DTs are... broken.
1653 * Partitioned PPIs are an unfortunate exception.
1654 */
1655 WARN_ON(*type == IRQ_TYPE_NONE &&
1656 fwspec->param[0] != GIC_IRQ_TYPE_PARTITION);
1657 return 0;
1658 }
1659
1660 if (is_fwnode_irqchip(fwspec->fwnode)) {
1661 if(fwspec->param_count != 2)
1662 return -EINVAL;
1663
1664 if (fwspec->param[0] < 16) {
1665 pr_err(FW_BUG "Illegal GSI%d translation request\n",
1666 fwspec->param[0]);
1667 return -EINVAL;
1668 }
1669
1670 *hwirq = fwspec->param[0];
1671 *type = fwspec->param[1];
1672
1673 WARN_ON(*type == IRQ_TYPE_NONE);
1674 return 0;
1675 }
1676
1677 return -EINVAL;
1678 }
1679
gic_irq_domain_alloc(struct irq_domain * domain,unsigned int virq,unsigned int nr_irqs,void * arg)1680 static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
1681 unsigned int nr_irqs, void *arg)
1682 {
1683 int i, ret;
1684 irq_hw_number_t hwirq;
1685 unsigned int type = IRQ_TYPE_NONE;
1686 struct irq_fwspec *fwspec = arg;
1687
1688 ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type);
1689 if (ret)
1690 return ret;
1691
1692 for (i = 0; i < nr_irqs; i++) {
1693 ret = gic_irq_domain_map(domain, virq + i, hwirq + i);
1694 if (ret)
1695 return ret;
1696 }
1697
1698 return 0;
1699 }
1700
gic_irq_domain_free(struct irq_domain * domain,unsigned int virq,unsigned int nr_irqs)1701 static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
1702 unsigned int nr_irqs)
1703 {
1704 int i;
1705
1706 for (i = 0; i < nr_irqs; i++) {
1707 struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
1708 irq_set_handler(virq + i, NULL);
1709 irq_domain_reset_irq_data(d);
1710 }
1711 }
1712
fwspec_is_partitioned_ppi(struct irq_fwspec * fwspec,irq_hw_number_t hwirq)1713 static bool fwspec_is_partitioned_ppi(struct irq_fwspec *fwspec,
1714 irq_hw_number_t hwirq)
1715 {
1716 enum gic_intid_range range;
1717
1718 if (!gic_data.ppi_descs)
1719 return false;
1720
1721 if (!is_of_node(fwspec->fwnode))
1722 return false;
1723
1724 if (fwspec->param_count < 4 || !fwspec->param[3])
1725 return false;
1726
1727 range = __get_intid_range(hwirq);
1728 if (range != PPI_RANGE && range != EPPI_RANGE)
1729 return false;
1730
1731 return true;
1732 }
1733
gic_irq_domain_select(struct irq_domain * d,struct irq_fwspec * fwspec,enum irq_domain_bus_token bus_token)1734 static int gic_irq_domain_select(struct irq_domain *d,
1735 struct irq_fwspec *fwspec,
1736 enum irq_domain_bus_token bus_token)
1737 {
1738 unsigned int type, ret, ppi_idx;
1739 irq_hw_number_t hwirq;
1740
1741 /* Not for us */
1742 if (fwspec->fwnode != d->fwnode)
1743 return 0;
1744
1745 /* Handle pure domain searches */
1746 if (!fwspec->param_count)
1747 return d->bus_token == bus_token;
1748
1749 /* If this is not DT, then we have a single domain */
1750 if (!is_of_node(fwspec->fwnode))
1751 return 1;
1752
1753 ret = gic_irq_domain_translate(d, fwspec, &hwirq, &type);
1754 if (WARN_ON_ONCE(ret))
1755 return 0;
1756
1757 if (!fwspec_is_partitioned_ppi(fwspec, hwirq))
1758 return d == gic_data.domain;
1759
1760 /*
1761 * If this is a PPI and we have a 4th (non-null) parameter,
1762 * then we need to match the partition domain.
1763 */
1764 ppi_idx = __gic_get_ppi_index(hwirq);
1765 return d == partition_get_domain(gic_data.ppi_descs[ppi_idx]);
1766 }
1767
1768 static const struct irq_domain_ops gic_irq_domain_ops = {
1769 .translate = gic_irq_domain_translate,
1770 .alloc = gic_irq_domain_alloc,
1771 .free = gic_irq_domain_free,
1772 .select = gic_irq_domain_select,
1773 };
1774
partition_domain_translate(struct irq_domain * d,struct irq_fwspec * fwspec,unsigned long * hwirq,unsigned int * type)1775 static int partition_domain_translate(struct irq_domain *d,
1776 struct irq_fwspec *fwspec,
1777 unsigned long *hwirq,
1778 unsigned int *type)
1779 {
1780 unsigned long ppi_intid;
1781 struct device_node *np;
1782 unsigned int ppi_idx;
1783 int ret;
1784
1785 if (!gic_data.ppi_descs)
1786 return -ENOMEM;
1787
1788 np = of_find_node_by_phandle(fwspec->param[3]);
1789 if (WARN_ON(!np))
1790 return -EINVAL;
1791
1792 ret = gic_irq_domain_translate(d, fwspec, &ppi_intid, type);
1793 if (WARN_ON_ONCE(ret))
1794 return 0;
1795
1796 ppi_idx = __gic_get_ppi_index(ppi_intid);
1797 ret = partition_translate_id(gic_data.ppi_descs[ppi_idx],
1798 of_node_to_fwnode(np));
1799 if (ret < 0)
1800 return ret;
1801
1802 *hwirq = ret;
1803 *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK;
1804
1805 return 0;
1806 }
1807
1808 static const struct irq_domain_ops partition_domain_ops = {
1809 .translate = partition_domain_translate,
1810 .select = gic_irq_domain_select,
1811 };
1812
gic_enable_quirk_msm8996(void * data)1813 static bool gic_enable_quirk_msm8996(void *data)
1814 {
1815 struct gic_chip_data *d = data;
1816
1817 d->flags |= FLAGS_WORKAROUND_GICR_WAKER_MSM8996;
1818
1819 return true;
1820 }
1821
gic_enable_quirk_cavium_38539(void * data)1822 static bool gic_enable_quirk_cavium_38539(void *data)
1823 {
1824 struct gic_chip_data *d = data;
1825
1826 d->flags |= FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539;
1827
1828 return true;
1829 }
1830
gic_enable_quirk_hip06_07(void * data)1831 static bool gic_enable_quirk_hip06_07(void *data)
1832 {
1833 struct gic_chip_data *d = data;
1834
1835 /*
1836 * HIP06 GICD_IIDR clashes with GIC-600 product number (despite
1837 * not being an actual ARM implementation). The saving grace is
1838 * that GIC-600 doesn't have ESPI, so nothing to do in that case.
1839 * HIP07 doesn't even have a proper IIDR, and still pretends to
1840 * have ESPI. In both cases, put them right.
1841 */
1842 if (d->rdists.gicd_typer & GICD_TYPER_ESPI) {
1843 /* Zero both ESPI and the RES0 field next to it... */
1844 d->rdists.gicd_typer &= ~GENMASK(9, 8);
1845 return true;
1846 }
1847
1848 return false;
1849 }
1850
1851 #define T241_CHIPN_MASK GENMASK_ULL(45, 44)
1852 #define T241_CHIP_GICDA_OFFSET 0x1580000
1853 #define SMCCC_SOC_ID_T241 0x036b0241
1854
gic_enable_quirk_nvidia_t241(void * data)1855 static bool gic_enable_quirk_nvidia_t241(void *data)
1856 {
1857 s32 soc_id = arm_smccc_get_soc_id_version();
1858 unsigned long chip_bmask = 0;
1859 phys_addr_t phys;
1860 u32 i;
1861
1862 /* Check JEP106 code for NVIDIA T241 chip (036b:0241) */
1863 if ((soc_id < 0) || (soc_id != SMCCC_SOC_ID_T241))
1864 return false;
1865
1866 /* Find the chips based on GICR regions PHYS addr */
1867 for (i = 0; i < gic_data.nr_redist_regions; i++) {
1868 chip_bmask |= BIT(FIELD_GET(T241_CHIPN_MASK,
1869 (u64)gic_data.redist_regions[i].phys_base));
1870 }
1871
1872 if (hweight32(chip_bmask) < 3)
1873 return false;
1874
1875 /* Setup GICD alias regions */
1876 for (i = 0; i < ARRAY_SIZE(t241_dist_base_alias); i++) {
1877 if (chip_bmask & BIT(i)) {
1878 phys = gic_data.dist_phys_base + T241_CHIP_GICDA_OFFSET;
1879 phys |= FIELD_PREP(T241_CHIPN_MASK, i);
1880 t241_dist_base_alias[i] = ioremap(phys, SZ_64K);
1881 WARN_ON_ONCE(!t241_dist_base_alias[i]);
1882 }
1883 }
1884 static_branch_enable(&gic_nvidia_t241_erratum);
1885 return true;
1886 }
1887
1888 static bool gic_enable_quirk_asr8601(void *data)
1889 {
1890 struct gic_chip_data *d = data;
1891
1892 d->flags |= FLAGS_WORKAROUND_ASR_ERRATUM_8601001;
1893
1894 return true;
1895 }
1896
1897 static bool gic_enable_quirk_arm64_2941627(void *data)
1898 {
1899 static_branch_enable(&gic_arm64_2941627_erratum);
1900 return true;
1901 }
1902
1903 static bool rd_set_non_coherent(void *data)
1904 {
1905 struct gic_chip_data *d = data;
1906
1907 d->rdists.flags |= RDIST_FLAGS_FORCE_NON_SHAREABLE;
1908 return true;
1909 }
1910
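/*
 * Quirks are matched either on a DT compatible string, on a DT property,
 * or on the GICD_IIDR value (under a mask); the init() callback returns
 * true once the workaround has been applied.
 */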
1911 static const struct gic_quirk gic_quirks[] = {
1912 {
1913 .desc = "GICv3: Qualcomm MSM8996 broken firmware",
1914 .compatible = "qcom,msm8996-gic-v3",
1915 .init = gic_enable_quirk_msm8996,
1916 },
1917 {
1918 .desc = "GICv3: ASR erratum 8601001",
1919 .compatible = "asr,asr8601-gic-v3",
1920 .init = gic_enable_quirk_asr8601,
1921 },
1922 {
1923 .desc = "GICv3: HIP06 erratum 161010803",
1924 .iidr = 0x0204043b,
1925 .mask = 0xffffffff,
1926 .init = gic_enable_quirk_hip06_07,
1927 },
1928 {
1929 .desc = "GICv3: HIP07 erratum 161010803",
1930 .iidr = 0x00000000,
1931 .mask = 0xffffffff,
1932 .init = gic_enable_quirk_hip06_07,
1933 },
1934 {
1935 /*
1936 * Reserved register accesses generate a Synchronous
1937 * External Abort. This erratum applies to:
1938 * - ThunderX: CN88xx
1939 * - OCTEON TX: CN83xx, CN81xx
1940 * - OCTEON TX2: CN93xx, CN96xx, CN98xx, CNF95xx*
1941 */
1942 .desc = "GICv3: Cavium erratum 38539",
1943 .iidr = 0xa000034c,
1944 .mask = 0xe8f00fff,
1945 .init = gic_enable_quirk_cavium_38539,
1946 },
1947 {
1948 .desc = "GICv3: NVIDIA erratum T241-FABRIC-4",
1949 .iidr = 0x0402043b,
1950 .mask = 0xffffffff,
1951 .init = gic_enable_quirk_nvidia_t241,
1952 },
1953 {
1954 /*
1955 * GIC-700: 2941627 workaround - IP variant [0,1]
1956 */
1958 .desc = "GICv3: ARM64 erratum 2941627",
1959 .iidr = 0x0400043b,
1960 .mask = 0xff0e0fff,
1961 .init = gic_enable_quirk_arm64_2941627,
1962 },
1963 {
1964 /*
1965 * GIC-700: 2941627 workaround - IP variant [2]
1966 */
1967 .desc = "GICv3: ARM64 erratum 2941627",
1968 .iidr = 0x0402043b,
1969 .mask = 0xff0f0fff,
1970 .init = gic_enable_quirk_arm64_2941627,
1971 },
1972 {
1973 .desc = "GICv3: non-coherent attribute",
1974 .property = "dma-noncoherent",
1975 .init = rd_set_non_coherent,
1976 },
1977 {
1978 }
1979 };
1980
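/*
 * Pseudo-NMI setup: only possible when interrupt priority masking is in
 * use. One refcount per PPI/SGI tracks per-CPU interrupts requested as
 * NMIs; on success the pseudo-NMI static key is flipped and the active
 * irqchip advertises IRQCHIP_SUPPORTS_NMI.
 */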
1981 static void gic_enable_nmi_support(void)
1982 {
1983 int i;
1984
1985 if (!gic_prio_masking_enabled())
1986 return;
1987
1988 rdist_nmi_refs = kcalloc(gic_data.ppi_nr + SGI_NR,
1989 sizeof(*rdist_nmi_refs), GFP_KERNEL);
1990 if (!rdist_nmi_refs)
1991 return;
1992
1993 for (i = 0; i < gic_data.ppi_nr + SGI_NR; i++)
1994 refcount_set(&rdist_nmi_refs[i], 0);
1995
1996 pr_info("Pseudo-NMIs enabled using %s ICC_PMR_EL1 synchronisation\n",
1997 gic_has_relaxed_pmr_sync() ? "relaxed" : "forced");
1998
1999 static_branch_enable(&supports_pseudo_nmis);
2000
2001 if (static_branch_likely(&supports_deactivate_key))
2002 gic_eoimode1_chip.flags |= IRQCHIP_SUPPORTS_NMI;
2003 else
2004 gic_chip.flags |= IRQCHIP_SUPPORTS_NMI;
2005 }
2006
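/*
 * Common bring-up path shared by the DT and ACPI probes: read the
 * distributor capabilities (GICD_TYPER/GICD_TYPER2), apply IIDR-based
 * quirks, create the IRQ domain, then initialise the distributor, the
 * boot CPU interface, pseudo-NMI support, IPIs, CPU PM hooks and,
 * depending on LPI support, either the ITS or the GICv2m MSI frames.
 */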
2007 static int __init gic_init_bases(phys_addr_t dist_phys_base,
2008 void __iomem *dist_base,
2009 struct redist_region *rdist_regs,
2010 u32 nr_redist_regions,
2011 u64 redist_stride,
2012 struct fwnode_handle *handle)
2013 {
2014 u32 typer;
2015 int err;
2016
2017 if (!is_hyp_mode_available())
2018 static_branch_disable(&supports_deactivate_key);
2019
2020 if (static_branch_likely(&supports_deactivate_key))
2021 pr_info("GIC: Using split EOI/Deactivate mode\n");
2022
2023 gic_data.fwnode = handle;
2024 gic_data.dist_phys_base = dist_phys_base;
2025 gic_data.dist_base = dist_base;
2026 gic_data.redist_regions = rdist_regs;
2027 gic_data.nr_redist_regions = nr_redist_regions;
2028 gic_data.redist_stride = redist_stride;
2029
2030 /*
2031 * Find out how many interrupts are supported.
2032 */
2033 typer = readl_relaxed(gic_data.dist_base + GICD_TYPER);
2034 gic_data.rdists.gicd_typer = typer;
2035
2036 gic_enable_quirks(readl_relaxed(gic_data.dist_base + GICD_IIDR),
2037 gic_quirks, &gic_data);
2038
2039 pr_info("%d SPIs implemented\n", GIC_LINE_NR - 32);
2040 pr_info("%d Extended SPIs implemented\n", GIC_ESPI_NR);
2041
2042 /*
2043 * ThunderX1 explodes on reading GICD_TYPER2, in violation of the
2044 * architecture spec (which says that reserved registers are RES0).
2045 */
2046 if (!(gic_data.flags & FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539))
2047 gic_data.rdists.gicd_typer2 = readl_relaxed(gic_data.dist_base + GICD_TYPER2);
2048
2049 gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops,
2050 &gic_data);
2051 gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
2052 if (!static_branch_unlikely(&gic_nvidia_t241_erratum)) {
2053 /* GICv4.x features stay disabled when the T241-FABRIC-4 erratum is present */
2054 gic_data.rdists.has_rvpeid = true;
2055 gic_data.rdists.has_vlpis = true;
2056 gic_data.rdists.has_direct_lpi = true;
2057 gic_data.rdists.has_vpend_valid_dirty = true;
2058 }
2059
2060 if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
2061 err = -ENOMEM;
2062 goto out_free;
2063 }
2064
2065 irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED);
2066
2067 gic_data.has_rss = !!(typer & GICD_TYPER_RSS);
2068
2069 if (typer & GICD_TYPER_MBIS) {
2070 err = mbi_init(handle, gic_data.domain);
2071 if (err)
2072 pr_err("Failed to initialize MBIs\n");
2073 }
2074
2075 set_handle_irq(gic_handle_irq);
2076
2077 gic_update_rdist_properties();
2078
2079 gic_cpu_sys_reg_enable();
2080 gic_prio_init();
2081 gic_dist_init();
2082 gic_cpu_init();
2083 gic_enable_nmi_support();
2084 gic_smp_init();
2085 gic_cpu_pm_init();
2086
2087 if (gic_dist_supports_lpis()) {
2088 its_init(handle, &gic_data.rdists, gic_data.domain, dist_prio_irq);
2089 its_cpu_init();
2090 its_lpi_memreserve_init();
2091 } else {
2092 if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
2093 gicv2m_init(handle, gic_data.domain);
2094 }
2095
2096 return 0;
2097
2098 out_free:
2099 if (gic_data.domain)
2100 irq_domain_remove(gic_data.domain);
2101 free_percpu(gic_data.rdists.rdist);
2102 return err;
2103 }
2104
2105 static int __init gic_validate_dist_version(void __iomem *dist_base)
2106 {
2107 u32 reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
2108
2109 if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4)
2110 return -ENODEV;
2111
2112 return 0;
2113 }
2114
2115 /* Create all possible partitions at boot time */
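/*
 * An illustrative (hypothetical) devicetree fragment, matching what this
 * parser expects: a "ppi-partitions" child node whose sub-nodes carry an
 * "affinity" list of CPU phandles:
 *
 *	ppi-partitions {
 *		part0: interrupt-partition-0 {
 *			affinity = <&cpu0 &cpu1>;
 *		};
 *		part1: interrupt-partition-1 {
 *			affinity = <&cpu2 &cpu3>;
 *		};
 *	};
 */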
2116 static void __init gic_populate_ppi_partitions(struct device_node *gic_node)
2117 {
2118 struct device_node *parts_node, *child_part;
2119 int part_idx = 0, i;
2120 int nr_parts;
2121 struct partition_affinity *parts;
2122
2123 parts_node = of_get_child_by_name(gic_node, "ppi-partitions");
2124 if (!parts_node)
2125 return;
2126
2127 gic_data.ppi_descs = kcalloc(gic_data.ppi_nr, sizeof(*gic_data.ppi_descs), GFP_KERNEL);
2128 if (!gic_data.ppi_descs)
2129 goto out_put_node;
2130
2131 nr_parts = of_get_child_count(parts_node);
2132
2133 if (!nr_parts)
2134 goto out_put_node;
2135
2136 parts = kcalloc(nr_parts, sizeof(*parts), GFP_KERNEL);
2137 if (WARN_ON(!parts))
2138 goto out_put_node;
2139
2140 for_each_child_of_node(parts_node, child_part) {
2141 struct partition_affinity *part;
2142 int n;
2143
2144 part = &parts[part_idx];
2145
2146 part->partition_id = of_node_to_fwnode(child_part);
2147
2148 pr_info("GIC: PPI partition %pOFn[%d] { ",
2149 child_part, part_idx);
2150
2151 n = of_property_count_elems_of_size(child_part, "affinity",
2152 sizeof(u32));
2153 WARN_ON(n <= 0);
2154
2155 for (i = 0; i < n; i++) {
2156 int err, cpu;
2157 u32 cpu_phandle;
2158 struct device_node *cpu_node;
2159
2160 err = of_property_read_u32_index(child_part, "affinity",
2161 i, &cpu_phandle);
2162 if (WARN_ON(err))
2163 continue;
2164
2165 cpu_node = of_find_node_by_phandle(cpu_phandle);
2166 if (WARN_ON(!cpu_node))
2167 continue;
2168
2169 cpu = of_cpu_node_to_id(cpu_node);
2170 if (WARN_ON(cpu < 0)) {
2171 of_node_put(cpu_node);
2172 continue;
2173 }
2174
2175 pr_cont("%pOF[%d] ", cpu_node, cpu);
2176
2177 cpumask_set_cpu(cpu, &part->mask);
2178 of_node_put(cpu_node);
2179 }
2180
2181 pr_cont("}\n");
2182 part_idx++;
2183 }
2184
2185 for (i = 0; i < gic_data.ppi_nr; i++) {
2186 unsigned int irq;
2187 struct partition_desc *desc;
2188 struct irq_fwspec ppi_fwspec = {
2189 .fwnode = gic_data.fwnode,
2190 .param_count = 3,
2191 .param = {
2192 [0] = GIC_IRQ_TYPE_PARTITION,
2193 [1] = i,
2194 [2] = IRQ_TYPE_NONE,
2195 },
2196 };
2197
2198 irq = irq_create_fwspec_mapping(&ppi_fwspec);
2199 if (WARN_ON(!irq))
2200 continue;
2201 desc = partition_create_desc(gic_data.fwnode, parts, nr_parts,
2202 irq, &partition_domain_ops);
2203 if (WARN_ON(!desc))
2204 continue;
2205
2206 gic_data.ppi_descs[i] = desc;
2207 }
2208
2209 out_put_node:
2210 of_node_put(parts_node);
2211 }
2212
2213 static void __init gic_of_setup_kvm_info(struct device_node *node, u32 nr_redist_regions)
2214 {
2215 int ret;
2216 struct resource r;
2217
2218 gic_v3_kvm_info.type = GIC_V3;
2219
2220 gic_v3_kvm_info.maint_irq = irq_of_parse_and_map(node, 0);
2221 if (!gic_v3_kvm_info.maint_irq)
2222 return;
2223
2224 /* GICV comes after GICD, the GICR regions, GICC and GICH in "reg" */
2225 ret = of_address_to_resource(node, nr_redist_regions + 3, &r);
2226 if (!ret)
2227 gic_v3_kvm_info.vcpu = r;
2228
2229 gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
2230 gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
2231 vgic_set_kvm_info(&gic_v3_kvm_info);
2232 }
2233
2234 static void gic_request_region(resource_size_t base, resource_size_t size,
2235 const char *name)
2236 {
2237 if (!request_mem_region(base, size, name))
2238 pr_warn_once(FW_BUG "%s region %pa has overlapping address\n",
2239 name, &base);
2240 }
2241
2242 static void __iomem *gic_of_iomap(struct device_node *node, int idx,
2243 const char *name, struct resource *res)
2244 {
2245 void __iomem *base;
2246 int ret;
2247
2248 ret = of_address_to_resource(node, idx, res);
2249 if (ret)
2250 return IOMEM_ERR_PTR(ret);
2251
2252 gic_request_region(res->start, resource_size(res), name);
2253 base = of_iomap(node, idx);
2254
2255 return base ?: IOMEM_ERR_PTR(-ENOMEM);
2256 }
2257
2258 static int __init gic_of_init(struct device_node *node, struct device_node *parent)
2259 {
2260 phys_addr_t dist_phys_base;
2261 void __iomem *dist_base;
2262 struct redist_region *rdist_regs;
2263 struct resource res;
2264 u64 redist_stride;
2265 u32 nr_redist_regions;
2266 int err, i;
2267
2268 dist_base = gic_of_iomap(node, 0, "GICD", &res);
2269 if (IS_ERR(dist_base)) {
2270 pr_err("%pOF: unable to map gic dist registers\n", node);
2271 return PTR_ERR(dist_base);
2272 }
2273
2274 dist_phys_base = res.start;
2275
2276 err = gic_validate_dist_version(dist_base);
2277 if (err) {
2278 pr_err("%pOF: no distributor detected, giving up\n", node);
2279 goto out_unmap_dist;
2280 }
2281
2282 if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions))
2283 nr_redist_regions = 1;
2284
2285 rdist_regs = kcalloc(nr_redist_regions, sizeof(*rdist_regs),
2286 GFP_KERNEL);
2287 if (!rdist_regs) {
2288 err = -ENOMEM;
2289 goto out_unmap_dist;
2290 }
2291
2292 for (i = 0; i < nr_redist_regions; i++) {
2293 rdist_regs[i].redist_base = gic_of_iomap(node, 1 + i, "GICR", &res);
2294 if (IS_ERR(rdist_regs[i].redist_base)) {
2295 pr_err("%pOF: couldn't map region %d\n", node, i);
2296 err = -ENODEV;
2297 goto out_unmap_rdist;
2298 }
2299 rdist_regs[i].phys_base = res.start;
2300 }
2301
2302 if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
2303 redist_stride = 0;
2304
2305 gic_enable_of_quirks(node, gic_quirks, &gic_data);
2306
2307 err = gic_init_bases(dist_phys_base, dist_base, rdist_regs,
2308 nr_redist_regions, redist_stride, &node->fwnode);
2309 if (err)
2310 goto out_unmap_rdist;
2311
2312 gic_populate_ppi_partitions(node);
2313
2314 if (static_branch_likely(&supports_deactivate_key))
2315 gic_of_setup_kvm_info(node, nr_redist_regions);
2316 return 0;
2317
2318 out_unmap_rdist:
2319 for (i = 0; i < nr_redist_regions; i++)
2320 if (rdist_regs[i].redist_base && !IS_ERR(rdist_regs[i].redist_base))
2321 iounmap(rdist_regs[i].redist_base);
2322 kfree(rdist_regs);
2323 out_unmap_dist:
2324 iounmap(dist_base);
2325 return err;
2326 }
2327
2328 IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init);
2329
2330 #ifdef CONFIG_ACPI
2331 static struct
2332 {
2333 void __iomem *dist_base;
2334 struct redist_region *redist_regs;
2335 u32 nr_redist_regions;
2336 bool single_redist;
2337 int enabled_rdists;
2338 u32 maint_irq;
2339 int maint_irq_mode;
2340 phys_addr_t vcpu_base;
2341 } acpi_data __initdata;
2342
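/*
 * The MADT can describe redistributors in two mutually exclusive ways:
 * dedicated GICR subtables (one per contiguous region), or a per-CPU
 * GICR base address embedded in each GICC subtable; single_redist
 * records which layout is in use. Discovered regions are appended to
 * acpi_data.redist_regs by the helper below.
 */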
2343 static void __init
2344 gic_acpi_register_redist(phys_addr_t phys_base, void __iomem *redist_base)
2345 {
2346 static int count = 0;
2347
2348 acpi_data.redist_regs[count].phys_base = phys_base;
2349 acpi_data.redist_regs[count].redist_base = redist_base;
2350 acpi_data.redist_regs[count].single_redist = acpi_data.single_redist;
2351 count++;
2352 }
2353
2354 static int __init
2355 gic_acpi_parse_madt_redist(union acpi_subtable_headers *header,
2356 const unsigned long end)
2357 {
2358 struct acpi_madt_generic_redistributor *redist =
2359 (struct acpi_madt_generic_redistributor *)header;
2360 void __iomem *redist_base;
2361
2362 redist_base = ioremap(redist->base_address, redist->length);
2363 if (!redist_base) {
2364 pr_err("Couldn't map GICR region @%llx\n", redist->base_address);
2365 return -ENOMEM;
2366 }
2367
2368 if (acpi_get_madt_revision() >= 7 &&
2369 (redist->flags & ACPI_MADT_GICR_NON_COHERENT))
2370 gic_data.rdists.flags |= RDIST_FLAGS_FORCE_NON_SHAREABLE;
2371
2372 gic_request_region(redist->base_address, redist->length, "GICR");
2373
2374 gic_acpi_register_redist(redist->base_address, redist_base);
2375 return 0;
2376 }
2377
2378 static int __init
2379 gic_acpi_parse_madt_gicc(union acpi_subtable_headers *header,
2380 const unsigned long end)
2381 {
2382 struct acpi_madt_generic_interrupt *gicc =
2383 (struct acpi_madt_generic_interrupt *)header;
2384 u32 reg = readl_relaxed(acpi_data.dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK;
2385 u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2;
2386 void __iomem *redist_base;
2387
2388 /* Neither enabled nor online capable means it doesn't exist, skip it */
2389 if (!(gicc->flags & (ACPI_MADT_ENABLED | ACPI_MADT_GICC_ONLINE_CAPABLE)))
2390 return 0;
2391
2392 /*
2393 * Capable but disabled CPUs can be brought online later. What about
2394 * the redistributor? ACPI doesn't want to say!
2395 * Virtual hotplug systems can use the MADT's "always-on" GICR entries.
2396 * Otherwise, prevent such CPUs from being brought online.
2397 */
2398 if (!(gicc->flags & ACPI_MADT_ENABLED)) {
2399 int cpu = get_cpu_for_acpi_id(gicc->uid);
2400
2401 pr_warn("CPU %u's redistributor is inaccessible: this CPU can't be brought online\n", cpu);
2402 if (cpu >= 0)
2403 cpumask_set_cpu(cpu, &broken_rdists);
2404 return 0;
2405 }
2406
2407 redist_base = ioremap(gicc->gicr_base_address, size);
2408 if (!redist_base)
2409 return -ENOMEM;
2410 gic_request_region(gicc->gicr_base_address, size, "GICR");
2411
2412 if (acpi_get_madt_revision() >= 7 &&
2413 (gicc->flags & ACPI_MADT_GICC_NON_COHERENT))
2414 gic_data.rdists.flags |= RDIST_FLAGS_FORCE_NON_SHAREABLE;
2415
2416 gic_acpi_register_redist(gicc->gicr_base_address, redist_base);
2417 return 0;
2418 }
2419
2420 static int __init gic_acpi_collect_gicr_base(void)
2421 {
2422 acpi_tbl_entry_handler redist_parser;
2423 enum acpi_madt_type type;
2424
2425 if (acpi_data.single_redist) {
2426 type = ACPI_MADT_TYPE_GENERIC_INTERRUPT;
2427 redist_parser = gic_acpi_parse_madt_gicc;
2428 } else {
2429 type = ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR;
2430 redist_parser = gic_acpi_parse_madt_redist;
2431 }
2432
2433 /* Collect redistributor base addresses in GICR entries */
2434 if (acpi_table_parse_madt(type, redist_parser, 0) > 0)
2435 return 0;
2436
2437 pr_info("No valid GICR entries exist\n");
2438 return -ENODEV;
2439 }
2440
2441 static int __init gic_acpi_match_gicr(union acpi_subtable_headers *header,
2442 const unsigned long end)
2443 {
2444 /* Subtable presence means that redist exists, that's it */
2445 return 0;
2446 }
2447
2448 static int __init gic_acpi_match_gicc(union acpi_subtable_headers *header,
2449 const unsigned long end)
2450 {
2451 struct acpi_madt_generic_interrupt *gicc =
2452 (struct acpi_madt_generic_interrupt *)header;
2453
2454 /*
2455 * If a GICC entry is enabled and has a valid GICR base address, the
2456 * GICR base is presented via GICC. The redistributor is only known to
2457 * be accessible if the GICC is marked as enabled. If this bit is not
2458 * set, we'd need to add the redistributor at runtime, which isn't
2459 * supported.
2460 */
2461 if (gicc->flags & ACPI_MADT_ENABLED && gicc->gicr_base_address)
2462 acpi_data.enabled_rdists++;
2463
2464 return 0;
2465 }
2466
2467 static int __init gic_acpi_count_gicr_regions(void)
2468 {
2469 int count;
2470
2471 /*
2472 * Count how many redistributor regions we have. Mixing redistributor
2473 * descriptions is not allowed: GICR and GICC subtables have to be
2474 * mutually exclusive.
2475 */
2476 count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR,
2477 gic_acpi_match_gicr, 0);
2478 if (count > 0) {
2479 acpi_data.single_redist = false;
2480 return count;
2481 }
2482
2483 count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
2484 gic_acpi_match_gicc, 0);
2485 if (count > 0) {
2486 acpi_data.single_redist = true;
2487 count = acpi_data.enabled_rdists;
2488 }
2489
2490 return count;
2491 }
2492
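/*
 * Probe-time match callback: checks that the MADT distributor entry is
 * of the requested GIC version and that at least one usable
 * redistributor region is described, caching the count for
 * gic_acpi_init().
 */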
2493 static bool __init acpi_validate_gic_table(struct acpi_subtable_header *header,
2494 struct acpi_probe_entry *ape)
2495 {
2496 struct acpi_madt_generic_distributor *dist;
2497 int count;
2498
2499 dist = (struct acpi_madt_generic_distributor *)header;
2500 if (dist->version != ape->driver_data)
2501 return false;
2502
2503 /* We need to do that exercise anyway, the sooner the better */
2504 count = gic_acpi_count_gicr_regions();
2505 if (count <= 0)
2506 return false;
2507
2508 acpi_data.nr_redist_regions = count;
2509 return true;
2510 }
2511
2512 static int __init gic_acpi_parse_virt_madt_gicc(union acpi_subtable_headers *header,
2513 const unsigned long end)
2514 {
2515 struct acpi_madt_generic_interrupt *gicc =
2516 (struct acpi_madt_generic_interrupt *)header;
2517 int maint_irq_mode;
2518 static int first_madt = true;
2519
2520 if (!(gicc->flags &
2521 (ACPI_MADT_ENABLED | ACPI_MADT_GICC_ONLINE_CAPABLE)))
2522 return 0;
2523
2524 maint_irq_mode = (gicc->flags & ACPI_MADT_VGIC_IRQ_MODE) ?
2525 ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE;
2526
2527 if (first_madt) {
2528 first_madt = false;
2529
2530 acpi_data.maint_irq = gicc->vgic_interrupt;
2531 acpi_data.maint_irq_mode = maint_irq_mode;
2532 acpi_data.vcpu_base = gicc->gicv_base_address;
2533
2534 return 0;
2535 }
2536
2537 /*
2538 * The maintenance interrupt and GICV should be the same for every CPU
2539 */
2540 if ((acpi_data.maint_irq != gicc->vgic_interrupt) ||
2541 (acpi_data.maint_irq_mode != maint_irq_mode) ||
2542 (acpi_data.vcpu_base != gicc->gicv_base_address))
2543 return -EINVAL;
2544
2545 return 0;
2546 }
2547
2548 static bool __init gic_acpi_collect_virt_info(void)
2549 {
2550 int count;
2551
2552 count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT,
2553 gic_acpi_parse_virt_madt_gicc, 0);
2554
2555 return (count > 0);
2556 }
2557
2558 #define ACPI_GICV3_DIST_MEM_SIZE (SZ_64K)
2559 #define ACPI_GICV2_VCTRL_MEM_SIZE (SZ_4K)
2560 #define ACPI_GICV2_VCPU_MEM_SIZE (SZ_8K)
2561
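/*
 * Gather the virtualization-related pieces (GICV base and maintenance
 * interrupt) from the GICC entries and hand them to KVM. The GICV
 * window is sized like a GICv2 vCPU interface, as it is only used for
 * GICv2 compatibility.
 */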
2562 static void __init gic_acpi_setup_kvm_info(void)
2563 {
2564 int irq;
2565
2566 if (!gic_acpi_collect_virt_info()) {
2567 pr_warn("Unable to get hardware information used for virtualization\n");
2568 return;
2569 }
2570
2571 gic_v3_kvm_info.type = GIC_V3;
2572
2573 irq = acpi_register_gsi(NULL, acpi_data.maint_irq,
2574 acpi_data.maint_irq_mode,
2575 ACPI_ACTIVE_HIGH);
2576 if (irq <= 0)
2577 return;
2578
2579 gic_v3_kvm_info.maint_irq = irq;
2580
2581 if (acpi_data.vcpu_base) {
2582 struct resource *vcpu = &gic_v3_kvm_info.vcpu;
2583
2584 vcpu->flags = IORESOURCE_MEM;
2585 vcpu->start = acpi_data.vcpu_base;
2586 vcpu->end = vcpu->start + ACPI_GICV2_VCPU_MEM_SIZE - 1;
2587 }
2588
2589 gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis;
2590 gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid;
2591 vgic_set_kvm_info(&gic_v3_kvm_info);
2592 }
2593
2594 static struct fwnode_handle *gsi_domain_handle;
2595
2596 static struct fwnode_handle *gic_v3_get_gsi_domain_id(u32 gsi)
2597 {
2598 return gsi_domain_handle;
2599 }
2600
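/*
 * ACPI entry point, called for the matching MADT distributor entry: map
 * the GICD, collect the GICR bases discovered earlier, allocate a
 * fwnode for the GSI domain and hand everything to gic_init_bases().
 */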
2601 static int __init
2602 gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end)
2603 {
2604 struct acpi_madt_generic_distributor *dist;
2605 size_t size;
2606 int i, err;
2607
2608 /* Get distributor base address */
2609 dist = (struct acpi_madt_generic_distributor *)header;
2610 acpi_data.dist_base = ioremap(dist->base_address,
2611 ACPI_GICV3_DIST_MEM_SIZE);
2612 if (!acpi_data.dist_base) {
2613 pr_err("Unable to map GICD registers\n");
2614 return -ENOMEM;
2615 }
2616 gic_request_region(dist->base_address, ACPI_GICV3_DIST_MEM_SIZE, "GICD");
2617
2618 err = gic_validate_dist_version(acpi_data.dist_base);
2619 if (err) {
2620 pr_err("No distributor detected at @%p, giving up\n",
2621 acpi_data.dist_base);
2622 goto out_dist_unmap;
2623 }
2624
2625 size = sizeof(*acpi_data.redist_regs) * acpi_data.nr_redist_regions;
2626 acpi_data.redist_regs = kzalloc(size, GFP_KERNEL);
2627 if (!acpi_data.redist_regs) {
2628 err = -ENOMEM;
2629 goto out_dist_unmap;
2630 }
2631
2632 err = gic_acpi_collect_gicr_base();
2633 if (err)
2634 goto out_redist_unmap;
2635
2636 gsi_domain_handle = irq_domain_alloc_fwnode(&dist->base_address);
2637 if (!gsi_domain_handle) {
2638 err = -ENOMEM;
2639 goto out_redist_unmap;
2640 }
2641
2642 err = gic_init_bases(dist->base_address, acpi_data.dist_base,
2643 acpi_data.redist_regs, acpi_data.nr_redist_regions,
2644 0, gsi_domain_handle);
2645 if (err)
2646 goto out_fwhandle_free;
2647
2648 acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, gic_v3_get_gsi_domain_id);
2649
2650 if (static_branch_likely(&supports_deactivate_key))
2651 gic_acpi_setup_kvm_info();
2652
2653 return 0;
2654
2655 out_fwhandle_free:
2656 irq_domain_free_fwnode(gsi_domain_handle);
2657 out_redist_unmap:
2658 for (i = 0; i < acpi_data.nr_redist_regions; i++)
2659 if (acpi_data.redist_regs[i].redist_base)
2660 iounmap(acpi_data.redist_regs[i].redist_base);
2661 kfree(acpi_data.redist_regs);
2662 out_dist_unmap:
2663 iounmap(acpi_data.dist_base);
2664 return err;
2665 }
2666 IRQCHIP_ACPI_DECLARE(gic_v3, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
2667 acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V3,
2668 gic_acpi_init);
2669 IRQCHIP_ACPI_DECLARE(gic_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
2670 acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V4,
2671 gic_acpi_init);
2672 IRQCHIP_ACPI_DECLARE(gic_v3_or_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR,
2673 acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_NONE,
2674 gic_acpi_init);
2675 #endif
2676