xref: /linux/drivers/irqchip/irq-apple-aic.c (revision b1ac803f47cb1615468f35cf1ccb553c52087301)
176cde263SHector Martin // SPDX-License-Identifier: GPL-2.0-or-later
276cde263SHector Martin /*
376cde263SHector Martin  * Copyright The Asahi Linux Contributors
476cde263SHector Martin  *
576cde263SHector Martin  * Based on irq-lpc32xx:
676cde263SHector Martin  *   Copyright 2015-2016 Vladimir Zapolskiy <vz@mleia.com>
776cde263SHector Martin  * Based on irq-bcm2836:
876cde263SHector Martin  *   Copyright 2015 Broadcom
976cde263SHector Martin  */
1076cde263SHector Martin 
1176cde263SHector Martin /*
1276cde263SHector Martin  * AIC is a fairly simple interrupt controller with the following features:
1376cde263SHector Martin  *
1476cde263SHector Martin  * - 896 level-triggered hardware IRQs
1576cde263SHector Martin  *   - Single mask bit per IRQ
1676cde263SHector Martin  *   - Per-IRQ affinity setting
1776cde263SHector Martin  *   - Automatic masking on event delivery (auto-ack)
1876cde263SHector Martin  *   - Software triggering (ORed with hw line)
1976cde263SHector Martin  * - 2 per-CPU IPIs (meant as "self" and "other", but they are
2076cde263SHector Martin  *   interchangeable if not symmetric)
2176cde263SHector Martin  * - Automatic prioritization (single event/ack register per CPU, lower IRQs =
2276cde263SHector Martin  *   higher priority)
2376cde263SHector Martin  * - Automatic masking on ack
2476cde263SHector Martin  * - Default "this CPU" register view and explicit per-CPU views
2576cde263SHector Martin  *
2676cde263SHector Martin  * In addition, this driver also handles FIQs, as these are routed to the same
272cf68211SHector Martin  * IRQ vector. These are used for Fast IPIs, the ARMv8 timer IRQs, and
2876cde263SHector Martin  * performance counters (TODO).
2976cde263SHector Martin  *
3076cde263SHector Martin  * Implementation notes:
3176cde263SHector Martin  *
3276cde263SHector Martin  * - This driver creates two IRQ domains, one for HW IRQs and internal FIQs,
3376cde263SHector Martin  *   and one for IPIs.
3476cde263SHector Martin  * - Since Linux needs more than 2 IPIs, we implement a software IRQ controller
3576cde263SHector Martin  *   and funnel all IPIs into one per-CPU IPI (the second "self" IPI is unused).
3676cde263SHector Martin  * - FIQ hwirq numbers are assigned after true hwirqs, and are per-cpu.
3776cde263SHector Martin  * - DT bindings use 3-cell form (like GIC):
3876cde263SHector Martin  *   - <0 nr flags> - hwirq #nr
3976cde263SHector Martin  *   - <1 nr flags> - FIQ #nr
4076cde263SHector Martin  *     - nr=0  Physical HV timer
4176cde263SHector Martin  *     - nr=1  Virtual HV timer
4276cde263SHector Martin  *     - nr=2  Physical guest timer
4376cde263SHector Martin  *     - nr=3  Virtual guest timer
4476cde263SHector Martin  */
4576cde263SHector Martin 
4676cde263SHector Martin #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
4776cde263SHector Martin 
4876cde263SHector Martin #include <linux/bits.h>
4976cde263SHector Martin #include <linux/bitfield.h>
5076cde263SHector Martin #include <linux/cpuhotplug.h>
5176cde263SHector Martin #include <linux/io.h>
5276cde263SHector Martin #include <linux/irqchip.h>
53b6ca556cSMarc Zyngier #include <linux/irqchip/arm-vgic-info.h>
5476cde263SHector Martin #include <linux/irqdomain.h>
552cf68211SHector Martin #include <linux/jump_label.h>
5676cde263SHector Martin #include <linux/limits.h>
5776cde263SHector Martin #include <linux/of_address.h>
5876cde263SHector Martin #include <linux/slab.h>
5911db7410SMarc Zyngier #include <asm/apple_m1_pmu.h>
602cf68211SHector Martin #include <asm/cputype.h>
6176cde263SHector Martin #include <asm/exception.h>
6276cde263SHector Martin #include <asm/sysreg.h>
6376cde263SHector Martin #include <asm/virt.h>
6476cde263SHector Martin 
6576cde263SHector Martin #include <dt-bindings/interrupt-controller/apple-aic.h>
6676cde263SHector Martin 
6776cde263SHector Martin /*
68dc97fd6fSHector Martin  * AIC v1 registers (MMIO)
6976cde263SHector Martin  */
7076cde263SHector Martin 
7176cde263SHector Martin #define AIC_INFO		0x0004
727c841f5fSHector Martin #define AIC_INFO_NR_IRQ		GENMASK(15, 0)
7376cde263SHector Martin 
7476cde263SHector Martin #define AIC_CONFIG		0x0010
7576cde263SHector Martin 
7676cde263SHector Martin #define AIC_WHOAMI		0x2000
7776cde263SHector Martin #define AIC_EVENT		0x2004
78a801f0eeSHector Martin #define AIC_EVENT_DIE		GENMASK(31, 24)
79a801f0eeSHector Martin #define AIC_EVENT_TYPE		GENMASK(23, 16)
8076cde263SHector Martin #define AIC_EVENT_NUM		GENMASK(15, 0)
8176cde263SHector Martin 
827c841f5fSHector Martin #define AIC_EVENT_TYPE_FIQ	0 /* Software use */
837c841f5fSHector Martin #define AIC_EVENT_TYPE_IRQ	1
8476cde263SHector Martin #define AIC_EVENT_TYPE_IPI	4
8576cde263SHector Martin #define AIC_EVENT_IPI_OTHER	1
8676cde263SHector Martin #define AIC_EVENT_IPI_SELF	2
8776cde263SHector Martin 
8876cde263SHector Martin #define AIC_IPI_SEND		0x2008
8976cde263SHector Martin #define AIC_IPI_ACK		0x200c
9076cde263SHector Martin #define AIC_IPI_MASK_SET	0x2024
9176cde263SHector Martin #define AIC_IPI_MASK_CLR	0x2028
9276cde263SHector Martin 
9376cde263SHector Martin #define AIC_IPI_SEND_CPU(cpu)	BIT(cpu)
9476cde263SHector Martin 
9576cde263SHector Martin #define AIC_IPI_OTHER		BIT(0)
9676cde263SHector Martin #define AIC_IPI_SELF		BIT(31)
9776cde263SHector Martin 
9876cde263SHector Martin #define AIC_TARGET_CPU		0x3000
9976cde263SHector Martin 
10076cde263SHector Martin #define AIC_CPU_IPI_SET(cpu)	(0x5008 + ((cpu) << 7))
10176cde263SHector Martin #define AIC_CPU_IPI_CLR(cpu)	(0x500c + ((cpu) << 7))
10276cde263SHector Martin #define AIC_CPU_IPI_MASK_SET(cpu) (0x5024 + ((cpu) << 7))
10376cde263SHector Martin #define AIC_CPU_IPI_MASK_CLR(cpu) (0x5028 + ((cpu) << 7))
10476cde263SHector Martin 
105dc97fd6fSHector Martin #define AIC_MAX_IRQ		0x400
106dc97fd6fSHector Martin 
107768d4435SHector Martin /*
108768d4435SHector Martin  * AIC v2 registers (MMIO)
109768d4435SHector Martin  */
110768d4435SHector Martin 
111768d4435SHector Martin #define AIC2_VERSION		0x0000
112768d4435SHector Martin #define AIC2_VERSION_VER	GENMASK(7, 0)
113768d4435SHector Martin 
114768d4435SHector Martin #define AIC2_INFO1		0x0004
115768d4435SHector Martin #define AIC2_INFO1_NR_IRQ	GENMASK(15, 0)
116768d4435SHector Martin #define AIC2_INFO1_LAST_DIE	GENMASK(27, 24)
117768d4435SHector Martin 
118768d4435SHector Martin #define AIC2_INFO2		0x0008
119768d4435SHector Martin 
120768d4435SHector Martin #define AIC2_INFO3		0x000c
121768d4435SHector Martin #define AIC2_INFO3_MAX_IRQ	GENMASK(15, 0)
122768d4435SHector Martin #define AIC2_INFO3_MAX_DIE	GENMASK(27, 24)
123768d4435SHector Martin 
124768d4435SHector Martin #define AIC2_RESET		0x0010
125768d4435SHector Martin #define AIC2_RESET_RESET	BIT(0)
126768d4435SHector Martin 
127768d4435SHector Martin #define AIC2_CONFIG		0x0014
128768d4435SHector Martin #define AIC2_CONFIG_ENABLE	BIT(0)
129768d4435SHector Martin #define AIC2_CONFIG_PREFER_PCPU	BIT(28)
130768d4435SHector Martin 
131768d4435SHector Martin #define AIC2_TIMEOUT		0x0028
132768d4435SHector Martin #define AIC2_CLUSTER_PRIO	0x0030
133768d4435SHector Martin #define AIC2_DELAY_GROUPS	0x0100
134768d4435SHector Martin 
135768d4435SHector Martin #define AIC2_IRQ_CFG		0x2000
136768d4435SHector Martin 
137768d4435SHector Martin /*
138768d4435SHector Martin  * AIC2 registers are laid out like this, starting at AIC2_IRQ_CFG:
139768d4435SHector Martin  *
140768d4435SHector Martin  * Repeat for each die:
141768d4435SHector Martin  *   IRQ_CFG: u32 * MAX_IRQS
142768d4435SHector Martin  *   SW_SET: u32 * (MAX_IRQS / 32)
143768d4435SHector Martin  *   SW_CLR: u32 * (MAX_IRQS / 32)
144768d4435SHector Martin  *   MASK_SET: u32 * (MAX_IRQS / 32)
145768d4435SHector Martin  *   MASK_CLR: u32 * (MAX_IRQS / 32)
146768d4435SHector Martin  *   HW_STATE: u32 * (MAX_IRQS / 32)
147768d4435SHector Martin  *
148768d4435SHector Martin  * This is followed by a set of event registers, each 16K page aligned.
149768d4435SHector Martin  * The first one is the AP event register we will use. Unfortunately,
150768d4435SHector Martin  * the actual implemented die count is not specified anywhere in the
151768d4435SHector Martin  * capability registers, so we have to explicitly specify the event
152768d4435SHector Martin  * register as a second reg entry in the device tree to remain
153768d4435SHector Martin  * forward-compatible.
154768d4435SHector Martin  */
155768d4435SHector Martin 
156768d4435SHector Martin #define AIC2_IRQ_CFG_TARGET	GENMASK(3, 0)
157768d4435SHector Martin #define AIC2_IRQ_CFG_DELAY_IDX	GENMASK(7, 5)
158768d4435SHector Martin 
15976cde263SHector Martin #define MASK_REG(x)		(4 * ((x) >> 5))
16076cde263SHector Martin #define MASK_BIT(x)		BIT((x) & GENMASK(4, 0))
16176cde263SHector Martin 
16276cde263SHector Martin /*
16376cde263SHector Martin  * IMP-DEF sysregs that control FIQ sources
16476cde263SHector Martin  */
16576cde263SHector Martin 
16676cde263SHector Martin /* IPI request registers */
16776cde263SHector Martin #define SYS_IMP_APL_IPI_RR_LOCAL_EL1	sys_reg(3, 5, 15, 0, 0)
16876cde263SHector Martin #define SYS_IMP_APL_IPI_RR_GLOBAL_EL1	sys_reg(3, 5, 15, 0, 1)
16976cde263SHector Martin #define IPI_RR_CPU			GENMASK(7, 0)
17076cde263SHector Martin /* Cluster only used for the GLOBAL register */
17176cde263SHector Martin #define IPI_RR_CLUSTER			GENMASK(23, 16)
17276cde263SHector Martin #define IPI_RR_TYPE			GENMASK(29, 28)
17376cde263SHector Martin #define IPI_RR_IMMEDIATE		0
17476cde263SHector Martin #define IPI_RR_RETRACT			1
17576cde263SHector Martin #define IPI_RR_DEFERRED			2
17676cde263SHector Martin #define IPI_RR_NOWAKE			3
17776cde263SHector Martin 
17876cde263SHector Martin /* IPI status register */
17976cde263SHector Martin #define SYS_IMP_APL_IPI_SR_EL1		sys_reg(3, 5, 15, 1, 1)
18076cde263SHector Martin #define IPI_SR_PENDING			BIT(0)
18176cde263SHector Martin 
18276cde263SHector Martin /* Guest timer FIQ enable register */
18376cde263SHector Martin #define SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2	sys_reg(3, 5, 15, 1, 3)
18476cde263SHector Martin #define VM_TMR_FIQ_ENABLE_V		BIT(0)
18576cde263SHector Martin #define VM_TMR_FIQ_ENABLE_P		BIT(1)
18676cde263SHector Martin 
18776cde263SHector Martin /* Deferred IPI countdown register */
18876cde263SHector Martin #define SYS_IMP_APL_IPI_CR_EL1		sys_reg(3, 5, 15, 3, 1)
18976cde263SHector Martin 
19076cde263SHector Martin /* Uncore PMC control register */
19176cde263SHector Martin #define SYS_IMP_APL_UPMCR0_EL1		sys_reg(3, 7, 15, 0, 4)
19276cde263SHector Martin #define UPMCR0_IMODE			GENMASK(18, 16)
19376cde263SHector Martin #define UPMCR0_IMODE_OFF		0
19476cde263SHector Martin #define UPMCR0_IMODE_AIC		2
19576cde263SHector Martin #define UPMCR0_IMODE_HALT		3
19676cde263SHector Martin #define UPMCR0_IMODE_FIQ		4
19776cde263SHector Martin 
19876cde263SHector Martin /* Uncore PMC status register */
19976cde263SHector Martin #define SYS_IMP_APL_UPMSR_EL1		sys_reg(3, 7, 15, 6, 4)
20076cde263SHector Martin #define UPMSR_IACT			BIT(0)
20176cde263SHector Martin 
2022cf68211SHector Martin /* MPIDR fields */
2032cf68211SHector Martin #define MPIDR_CPU(x)			MPIDR_AFFINITY_LEVEL(x, 0)
2042cf68211SHector Martin #define MPIDR_CLUSTER(x)		MPIDR_AFFINITY_LEVEL(x, 1)
2052cf68211SHector Martin 
206a801f0eeSHector Martin #define AIC_IRQ_HWIRQ(die, irq)	(FIELD_PREP(AIC_EVENT_DIE, die) | \
207a801f0eeSHector Martin 				 FIELD_PREP(AIC_EVENT_TYPE, AIC_EVENT_TYPE_IRQ) | \
208a801f0eeSHector Martin 				 FIELD_PREP(AIC_EVENT_NUM, irq))
2097c841f5fSHector Martin #define AIC_FIQ_HWIRQ(x)	(FIELD_PREP(AIC_EVENT_TYPE, AIC_EVENT_TYPE_FIQ) | \
2107c841f5fSHector Martin 				 FIELD_PREP(AIC_EVENT_NUM, x))
2117c841f5fSHector Martin #define AIC_HWIRQ_IRQ(x)	FIELD_GET(AIC_EVENT_NUM, x)
212a801f0eeSHector Martin #define AIC_HWIRQ_DIE(x)	FIELD_GET(AIC_EVENT_DIE, x)
213c7708816SMarc Zyngier #define AIC_NR_FIQ		6
21476cde263SHector Martin #define AIC_NR_SWIPI		32
21576cde263SHector Martin 
21676cde263SHector Martin /*
21776cde263SHector Martin  * FIQ hwirq index definitions: FIQ sources use the DT binding defines
21876cde263SHector Martin  * directly, except that timers are special. At the irqchip level, the
21976cde263SHector Martin  * two timer types are represented by their access method: _EL0 registers
22076cde263SHector Martin  * or _EL02 registers. In the DT binding, the timers are represented
22176cde263SHector Martin  * by their purpose (HV or guest). This mapping is for when the kernel is
22276cde263SHector Martin  * running at EL2 (with VHE). When the kernel is running at EL1, the
22376cde263SHector Martin  * mapping differs and aic_irq_domain_translate() performs the remapping.
22476cde263SHector Martin  */
22576cde263SHector Martin 
22676cde263SHector Martin #define AIC_TMR_EL0_PHYS	AIC_TMR_HV_PHYS
22776cde263SHector Martin #define AIC_TMR_EL0_VIRT	AIC_TMR_HV_VIRT
22876cde263SHector Martin #define AIC_TMR_EL02_PHYS	AIC_TMR_GUEST_PHYS
22976cde263SHector Martin #define AIC_TMR_EL02_VIRT	AIC_TMR_GUEST_VIRT
23076cde263SHector Martin 
2312cf68211SHector Martin DEFINE_STATIC_KEY_TRUE(use_fast_ipi);
2322cf68211SHector Martin 
/*
 * Static description of one AIC hardware variant: register offsets and
 * feature flags. An instance is selected via the OF match table and
 * copied into struct aic_irq_chip at init.
 */
struct aic_info {
	int version;		/* AIC version: 1 or 2 */

	/* Register offsets */
	u32 event;		/* Event/ack register (relative to the event mapping) */
	u32 target_cpu;		/* Per-IRQ affinity registers (AIC1 only; 0 on AIC2) */
	u32 irq_cfg;		/* Per-IRQ config registers (AIC2 only) */
	u32 sw_set;		/* Software-trigger set bits */
	u32 sw_clr;		/* Software-trigger clear bits */
	u32 mask_set;		/* IRQ mask set bits */
	u32 mask_clr;		/* IRQ mask clear bits */

	/* Stride between per-die register banks (see AIC2 layout comment above) */
	u32 die_stride;

	/* Features */
	bool fast_ipi;		/* Use the IMP-DEF fast IPI sysregs instead of MMIO IPIs */
};
2502cf68211SHector Martin 
/* Generic AIC1, without the fast IPI sysregs */
static const struct aic_info aic1_info = {
	.version	= 1,

	.event		= AIC_EVENT,
	.target_cpu	= AIC_TARGET_CPU,
};

/* AIC1 with fast IPI support (apple,t8103-aic) */
static const struct aic_info aic1_fipi_info = {
	.version	= 1,

	.event		= AIC_EVENT,
	.target_cpu	= AIC_TARGET_CPU,

	.fast_ipi	= true,
};

/*
 * AIC2: no target_cpu register (the hardware has no software-visible
 * per-IRQ affinity), event register comes from a separate reg entry.
 */
static const struct aic_info aic2_info = {
	.version	= 2,

	.irq_cfg	= AIC2_IRQ_CFG,

	.fast_ipi	= true,
};

/* More specific compatibles must precede the generic "apple,aic" entry */
static const struct of_device_id aic_info_match[] = {
	{
		.compatible = "apple,t8103-aic",
		.data = &aic1_fipi_info,
	},
	{
		.compatible = "apple,aic",
		.data = &aic1_info,
	},
	{
		.compatible = "apple,aic2",
		.data = &aic2_info,
	},
	{}
};
2902cf68211SHector Martin 
/* Runtime state for the (single) AIC instance */
struct aic_irq_chip {
	void __iomem *base;		/* Main register block */
	void __iomem *event;		/* Event register mapping (separate DT reg entry on AIC2) */
	struct irq_domain *hw_domain;	/* Domain for HW IRQs and FIQs */
	struct irq_domain *ipi_domain;	/* Domain for the software IPIs */
	struct {
		cpumask_t aff;
	} *fiq_aff[AIC_NR_FIQ];		/* Per-FIQ affinity (used for P/E-core PMU FIQs) */

	int nr_irq;			/* Implemented hw IRQs */
	int max_irq;			/* Max hw IRQs the register layout supports */
	int nr_die;			/* Implemented dies */
	int max_die;			/* Max dies the register layout supports */

	struct aic_info info;		/* Variant info copied from the match table */
};
30776cde263SHector Martin 
/* Per-CPU bitmap of FIQ sources that are logically unmasked (bit = FIQ index) */
static DEFINE_PER_CPU(uint32_t, aic_fiq_unmasked);

/*
 * Software-IPI state. NOTE(review): presumably bitmasks of the AIC_NR_SWIPI
 * virtual IPI types (pending / enabled) — confirm against aic_handle_ipi,
 * which is outside this chunk.
 */
static DEFINE_PER_CPU(atomic_t, aic_vipi_flag);
static DEFINE_PER_CPU(atomic_t, aic_vipi_enable);

/* Single global instance; the exception entry points have no context argument */
static struct aic_irq_chip *aic_irqc;

static void aic_handle_ipi(struct pt_regs *regs);
31676cde263SHector Martin 
31776cde263SHector Martin static u32 aic_ic_read(struct aic_irq_chip *ic, u32 reg)
31876cde263SHector Martin {
31976cde263SHector Martin 	return readl_relaxed(ic->base + reg);
32076cde263SHector Martin }
32176cde263SHector Martin 
32276cde263SHector Martin static void aic_ic_write(struct aic_irq_chip *ic, u32 reg, u32 val)
32376cde263SHector Martin {
32476cde263SHector Martin 	writel_relaxed(val, ic->base + reg);
32576cde263SHector Martin }
32676cde263SHector Martin 
32776cde263SHector Martin /*
32876cde263SHector Martin  * IRQ irqchip
32976cde263SHector Martin  */
33076cde263SHector Martin 
33176cde263SHector Martin static void aic_irq_mask(struct irq_data *d)
33276cde263SHector Martin {
3337c841f5fSHector Martin 	irq_hw_number_t hwirq = irqd_to_hwirq(d);
33476cde263SHector Martin 	struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);
33576cde263SHector Martin 
336a801f0eeSHector Martin 	u32 off = AIC_HWIRQ_DIE(hwirq) * ic->info.die_stride;
3377c841f5fSHector Martin 	u32 irq = AIC_HWIRQ_IRQ(hwirq);
3387c841f5fSHector Martin 
339a801f0eeSHector Martin 	aic_ic_write(ic, ic->info.mask_set + off + MASK_REG(irq), MASK_BIT(irq));
34076cde263SHector Martin }
34176cde263SHector Martin 
34276cde263SHector Martin static void aic_irq_unmask(struct irq_data *d)
34376cde263SHector Martin {
3447c841f5fSHector Martin 	irq_hw_number_t hwirq = irqd_to_hwirq(d);
34576cde263SHector Martin 	struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);
34676cde263SHector Martin 
347a801f0eeSHector Martin 	u32 off = AIC_HWIRQ_DIE(hwirq) * ic->info.die_stride;
3487c841f5fSHector Martin 	u32 irq = AIC_HWIRQ_IRQ(hwirq);
3497c841f5fSHector Martin 
350a801f0eeSHector Martin 	aic_ic_write(ic, ic->info.mask_clr + off + MASK_REG(irq), MASK_BIT(irq));
35176cde263SHector Martin }
35276cde263SHector Martin 
static void aic_irq_eoi(struct irq_data *d)
{
	/*
	 * Reading the event register auto-acks and masks the IRQ, so at
	 * EOI time we only have to drop the mask again — unless the IRQ
	 * is supposed to stay masked.
	 */
	if (irqd_irq_masked(d))
		return;

	aic_irq_unmask(d);
}
36276cde263SHector Martin 
/*
 * Top-level IRQ entry: drain the auto-ack event register, dispatching
 * each event until it reads zero, then check for a stuck vGIC
 * maintenance interrupt.
 */
static void __exception_irq_entry aic_handle_irq(struct pt_regs *regs)
{
	struct aic_irq_chip *ic = aic_irqc;
	u32 event, type, irq;

	do {
		/*
		 * We cannot use a relaxed read here, as reads from DMA buffers
		 * need to be ordered after the IRQ fires.
		 */
		event = readl(ic->event + ic->info.event);
		type = FIELD_GET(AIC_EVENT_TYPE, event);
		irq = FIELD_GET(AIC_EVENT_NUM, event);

		/* HW IRQs use the whole event word (die|type|num) as the hwirq */
		if (type == AIC_EVENT_TYPE_IRQ)
			generic_handle_domain_irq(aic_irqc->hw_domain, event);
		else if (type == AIC_EVENT_TYPE_IPI && irq == 1) /* AIC_EVENT_IPI_OTHER */
			aic_handle_ipi(regs);
		else if (event != 0)
			pr_err_ratelimited("Unknown IRQ event %d, %d\n", type, irq);
	} while (event);

	/*
	 * vGIC maintenance interrupts end up here too, so we need to check
	 * for them separately. This should never trigger if KVM is working
	 * properly, because it will have already taken care of clearing it
	 * on guest exit before this handler runs.
	 */
	if (is_kernel_in_hyp_mode() && (read_sysreg_s(SYS_ICH_HCR_EL2) & ICH_HCR_EN) &&
		read_sysreg_s(SYS_ICH_MISR_EL2) != 0) {
		pr_err_ratelimited("vGIC IRQ fired and not handled by KVM, disabling.\n");
		sysreg_clear_set_s(SYS_ICH_HCR_EL2, ICH_HCR_EN, 0);
	}
}
39776cde263SHector Martin 
39876cde263SHector Martin static int aic_irq_set_affinity(struct irq_data *d,
39976cde263SHector Martin 				const struct cpumask *mask_val, bool force)
40076cde263SHector Martin {
40176cde263SHector Martin 	irq_hw_number_t hwirq = irqd_to_hwirq(d);
40276cde263SHector Martin 	struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);
40376cde263SHector Martin 	int cpu;
40476cde263SHector Martin 
405dc97fd6fSHector Martin 	BUG_ON(!ic->info.target_cpu);
406dc97fd6fSHector Martin 
40776cde263SHector Martin 	if (force)
40876cde263SHector Martin 		cpu = cpumask_first(mask_val);
40976cde263SHector Martin 	else
41076cde263SHector Martin 		cpu = cpumask_any_and(mask_val, cpu_online_mask);
41176cde263SHector Martin 
412dc97fd6fSHector Martin 	aic_ic_write(ic, ic->info.target_cpu + AIC_HWIRQ_IRQ(hwirq) * 4, BIT(cpu));
41376cde263SHector Martin 	irq_data_update_effective_affinity(d, cpumask_of(cpu));
41476cde263SHector Martin 
41576cde263SHector Martin 	return IRQ_SET_MASK_OK;
41676cde263SHector Martin }
41776cde263SHector Martin 
41876cde263SHector Martin static int aic_irq_set_type(struct irq_data *d, unsigned int type)
41976cde263SHector Martin {
42076cde263SHector Martin 	/*
42176cde263SHector Martin 	 * Some IRQs (e.g. MSIs) implicitly have edge semantics, and we don't
42276cde263SHector Martin 	 * have a way to find out the type of any given IRQ, so just allow both.
42376cde263SHector Martin 	 */
42476cde263SHector Martin 	return (type == IRQ_TYPE_LEVEL_HIGH || type == IRQ_TYPE_EDGE_RISING) ? 0 : -EINVAL;
42576cde263SHector Martin }
42676cde263SHector Martin 
/* irqchip callbacks for AIC1 hw IRQs (per-IRQ affinity is supported) */
static struct irq_chip aic_chip = {
	.name = "AIC",
	.irq_mask = aic_irq_mask,
	.irq_unmask = aic_irq_unmask,
	.irq_eoi = aic_irq_eoi,
	.irq_set_affinity = aic_irq_set_affinity,
	.irq_set_type = aic_irq_set_type,
};
43576cde263SHector Martin 
/*
 * irqchip callbacks for AIC2 hw IRQs. No .irq_set_affinity: AIC2 has no
 * TARGET_CPU registers (see the BUG_ON in aic_irq_set_affinity).
 */
static struct irq_chip aic2_chip = {
	.name = "AIC2",
	.irq_mask = aic_irq_mask,
	.irq_unmask = aic_irq_unmask,
	.irq_eoi = aic_irq_eoi,
	.irq_set_type = aic_irq_set_type,
};
443768d4435SHector Martin 
44476cde263SHector Martin /*
44576cde263SHector Martin  * FIQ irqchip
44676cde263SHector Martin  */
44776cde263SHector Martin 
/* Extract the FIQ source index (AIC_TMR_*, AIC_CPU_PMU_*, ...) from a hwirq. */
static unsigned long aic_fiq_get_idx(struct irq_data *d)
{
	return AIC_HWIRQ_IRQ(irqd_to_hwirq(d));
}
45276cde263SHector Martin 
/*
 * Mask a FIQ source in hardware where possible. Only the guest (EL02)
 * timers have real mask bits, via VM_TMR_FIQ_ENA_EL2; other sources
 * are a no-op here and rely on the mask-to-ack scheme in fiq_chip.
 */
static void aic_fiq_set_mask(struct irq_data *d)
{
	/* Only the guest timers have real mask bits, unfortunately. */
	switch (aic_fiq_get_idx(d)) {
	case AIC_TMR_EL02_PHYS:
		sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2, VM_TMR_FIQ_ENABLE_P, 0);
		isb(); /* make the sysreg write take effect before returning */
		break;
	case AIC_TMR_EL02_VIRT:
		sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2, VM_TMR_FIQ_ENABLE_V, 0);
		isb();
		break;
	default:
		break;
	}
}
46976cde263SHector Martin 
/* Unmask a FIQ source in hardware where possible (guest timers only). */
static void aic_fiq_clear_mask(struct irq_data *d)
{
	switch (aic_fiq_get_idx(d)) {
	case AIC_TMR_EL02_PHYS:
		sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2, 0, VM_TMR_FIQ_ENABLE_P);
		isb(); /* make the sysreg write take effect before returning */
		break;
	case AIC_TMR_EL02_VIRT:
		sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2, 0, VM_TMR_FIQ_ENABLE_V);
		isb();
		break;
	default:
		break;
	}
}
48576cde263SHector Martin 
48676cde263SHector Martin static void aic_fiq_mask(struct irq_data *d)
48776cde263SHector Martin {
48876cde263SHector Martin 	aic_fiq_set_mask(d);
48976cde263SHector Martin 	__this_cpu_and(aic_fiq_unmasked, ~BIT(aic_fiq_get_idx(d)));
49076cde263SHector Martin }
49176cde263SHector Martin 
49276cde263SHector Martin static void aic_fiq_unmask(struct irq_data *d)
49376cde263SHector Martin {
49476cde263SHector Martin 	aic_fiq_clear_mask(d);
49576cde263SHector Martin 	__this_cpu_or(aic_fiq_unmasked, BIT(aic_fiq_get_idx(d)));
49676cde263SHector Martin }
49776cde263SHector Martin 
49876cde263SHector Martin static void aic_fiq_eoi(struct irq_data *d)
49976cde263SHector Martin {
50076cde263SHector Martin 	/* We mask to ack (where we can), so we need to unmask at EOI. */
50176cde263SHector Martin 	if (__this_cpu_read(aic_fiq_unmasked) & BIT(aic_fiq_get_idx(d)))
50276cde263SHector Martin 		aic_fiq_clear_mask(d);
50376cde263SHector Martin }
50476cde263SHector Martin 
/*
 * A timer is firing when it is enabled, its interrupt is not masked,
 * and its interrupt status bit is set.
 */
#define TIMER_FIRING(x)                                                        \
	(((x) & (ARCH_TIMER_CTRL_ENABLE | ARCH_TIMER_CTRL_IT_MASK |            \
		 ARCH_TIMER_CTRL_IT_STAT)) ==                                  \
	 (ARCH_TIMER_CTRL_ENABLE | ARCH_TIMER_CTRL_IT_STAT))
/* Top-level FIQ entry: poll every possible FIQ source and dispatch. */
static void __exception_irq_entry aic_handle_fiq(struct pt_regs *regs)
{
	/*
	 * It would be really nice if we had a system register that lets us get
	 * the FIQ source state without having to peek down into sources...
	 * but such a register does not seem to exist.
	 *
	 * So, we have these potential sources to test for:
	 *  - Fast IPIs (not yet used)
	 *  - The 4 timers (CNTP, CNTV for each of HV and guest)
	 *  - Per-core PMCs (not yet supported)
	 *  - Per-cluster uncore PMCs (not yet supported)
	 *
	 * Since not dealing with any of these results in a FIQ storm,
	 * we check for everything here, even things we don't support yet.
	 */

	if (read_sysreg_s(SYS_IMP_APL_IPI_SR_EL1) & IPI_SR_PENDING) {
		if (static_branch_likely(&use_fast_ipi)) {
			aic_handle_ipi(regs);
		} else {
			/* Fast IPIs not in use: ack anyway to stop the storm */
			pr_err_ratelimited("Fast IPI fired. Acking.\n");
			write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1);
		}
	}

	if (TIMER_FIRING(read_sysreg(cntp_ctl_el0)))
		generic_handle_domain_irq(aic_irqc->hw_domain,
					  AIC_FIQ_HWIRQ(AIC_TMR_EL0_PHYS));

	if (TIMER_FIRING(read_sysreg(cntv_ctl_el0)))
		generic_handle_domain_irq(aic_irqc->hw_domain,
					  AIC_FIQ_HWIRQ(AIC_TMR_EL0_VIRT));

	/* The EL02 (guest) timers are only reachable when running at EL2 */
	if (is_kernel_in_hyp_mode()) {
		uint64_t enabled = read_sysreg_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2);

		if ((enabled & VM_TMR_FIQ_ENABLE_P) &&
		    TIMER_FIRING(read_sysreg_s(SYS_CNTP_CTL_EL02)))
			generic_handle_domain_irq(aic_irqc->hw_domain,
						  AIC_FIQ_HWIRQ(AIC_TMR_EL02_PHYS));

		if ((enabled & VM_TMR_FIQ_ENABLE_V) &&
		    TIMER_FIRING(read_sysreg_s(SYS_CNTV_CTL_EL02)))
			generic_handle_domain_irq(aic_irqc->hw_domain,
						  AIC_FIQ_HWIRQ(AIC_TMR_EL02_VIRT));
	}

	if (read_sysreg_s(SYS_IMP_APL_PMCR0_EL1) & PMCR0_IACT) {
		int irq;
		/* Pick the P- or E-core PMU hwirq based on this CPU's affinity set */
		if (cpumask_test_cpu(smp_processor_id(),
				     &aic_irqc->fiq_aff[AIC_CPU_PMU_P]->aff))
			irq = AIC_CPU_PMU_P;
		else
			irq = AIC_CPU_PMU_E;
		generic_handle_domain_irq(aic_irqc->hw_domain,
					  AIC_FIQ_HWIRQ(irq));
	}

	if (FIELD_GET(UPMCR0_IMODE, read_sysreg_s(SYS_IMP_APL_UPMCR0_EL1)) == UPMCR0_IMODE_FIQ &&
			(read_sysreg_s(SYS_IMP_APL_UPMSR_EL1) & UPMSR_IACT)) {
		/* Same story with uncore PMCs */
		pr_err_ratelimited("Uncore PMC FIQ fired. Masking.\n");
		sysreg_clear_set_s(SYS_IMP_APL_UPMCR0_EL1, UPMCR0_IMODE,
				   FIELD_PREP(UPMCR0_IMODE, UPMCR0_IMODE_OFF));
	}
}
57776cde263SHector Martin 
57876cde263SHector Martin static int aic_fiq_set_type(struct irq_data *d, unsigned int type)
57976cde263SHector Martin {
58076cde263SHector Martin 	return (type == IRQ_TYPE_LEVEL_HIGH) ? 0 : -EINVAL;
58176cde263SHector Martin }
58276cde263SHector Martin 
/*
 * irqchip callbacks for the per-CPU FIQ sources. Masking is mostly
 * emulated: ack masks the source (where the hardware allows it) and
 * EOI unmasks it again unless it was deliberately left masked.
 */
static struct irq_chip fiq_chip = {
	.name = "AIC-FIQ",
	.irq_mask = aic_fiq_mask,
	.irq_unmask = aic_fiq_unmask,
	.irq_ack = aic_fiq_set_mask,
	.irq_eoi = aic_fiq_eoi,
	.irq_set_type = aic_fiq_set_type,
};
59176cde263SHector Martin 
59276cde263SHector Martin /*
59376cde263SHector Martin  * Main IRQ domain
59476cde263SHector Martin  */
59576cde263SHector Martin 
/*
 * Map a hwirq into the main domain: hardware IRQs get the fasteoi flow
 * with the version-appropriate AIC chip; FIQs get the per-CPU devid
 * flow with the FIQ chip.
 */
static int aic_irq_domain_map(struct irq_domain *id, unsigned int irq,
			      irq_hw_number_t hw)
{
	struct aic_irq_chip *ic = id->host_data;
	u32 type = FIELD_GET(AIC_EVENT_TYPE, hw);
	struct irq_chip *chip = &aic_chip;

	if (ic->info.version == 2)
		chip = &aic2_chip;

	if (type == AIC_EVENT_TYPE_IRQ) {
		irq_domain_set_info(id, irq, hw, chip, id->host_data,
				    handle_fasteoi_irq, NULL, NULL);
		irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
	} else {
		int fiq = FIELD_GET(AIC_EVENT_NUM, hw);

		switch (fiq) {
		case AIC_CPU_PMU_P:
		case AIC_CPU_PMU_E:
			/* PMU FIQs are partitioned by the fiq_aff CPU masks */
			irq_set_percpu_devid_partition(irq, &ic->fiq_aff[fiq]->aff);
			break;
		default:
			irq_set_percpu_devid(irq);
			break;
		}

		irq_domain_set_info(id, irq, hw, &fiq_chip, id->host_data,
				    handle_percpu_devid_irq, NULL, NULL);
	}

	return 0;
}
62976cde263SHector Martin 
/*
 * Translate a DT interrupt specifier into (hwirq, trigger type).
 *
 * The fwspec is either <type irq-nr flags> (3 cells) or
 * <type die-nr irq-nr flags> (4 cells, multi-die AICv2). 'args' is
 * advanced past the optional die cell so that args[0]/args[1] always
 * name the interrupt number and flags, whichever form was used.
 */
static int aic_irq_domain_translate(struct irq_domain *id,
				    struct irq_fwspec *fwspec,
				    unsigned long *hwirq,
				    unsigned int *type)
{
	struct aic_irq_chip *ic = id->host_data;
	u32 *args;
	u32 die = 0;

	if (fwspec->param_count < 3 || fwspec->param_count > 4 ||
	    !is_of_node(fwspec->fwnode))
		return -EINVAL;

	args = &fwspec->param[1];

	if (fwspec->param_count == 4) {
		die = args[0];
		args++;
	}

	switch (fwspec->param[0]) {
	case AIC_IRQ:
		if (die >= ic->nr_die)
			return -EINVAL;
		if (args[0] >= ic->nr_irq)
			return -EINVAL;
		*hwirq = AIC_IRQ_HWIRQ(die, args[0]);
		break;
	case AIC_FIQ:
		/* FIQs are CPU-local; only die 0 is valid here */
		if (die != 0)
			return -EINVAL;
		if (args[0] >= AIC_NR_FIQ)
			return -EINVAL;
		*hwirq = AIC_FIQ_HWIRQ(args[0]);

		/*
		 * In EL1 the non-redirected registers are the guest's,
		 * not EL2's, so remap the hwirqs to match.
		 */
		if (!is_kernel_in_hyp_mode()) {
			switch (args[0]) {
			case AIC_TMR_GUEST_PHYS:
				*hwirq = AIC_FIQ_HWIRQ(AIC_TMR_EL0_PHYS);
				break;
			case AIC_TMR_GUEST_VIRT:
				*hwirq = AIC_FIQ_HWIRQ(AIC_TMR_EL0_VIRT);
				break;
			case AIC_TMR_HV_PHYS:
			case AIC_TMR_HV_VIRT:
				/* EL2 timers do not exist when running in EL1 */
				return -ENOENT;
			default:
				break;
			}
		}
		break;
	default:
		return -EINVAL;
	}

	*type = args[1] & IRQ_TYPE_SENSE_MASK;

	return 0;
}
69376cde263SHector Martin 
69476cde263SHector Martin static int aic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
69576cde263SHector Martin 				unsigned int nr_irqs, void *arg)
69676cde263SHector Martin {
69776cde263SHector Martin 	unsigned int type = IRQ_TYPE_NONE;
69876cde263SHector Martin 	struct irq_fwspec *fwspec = arg;
69976cde263SHector Martin 	irq_hw_number_t hwirq;
70076cde263SHector Martin 	int i, ret;
70176cde263SHector Martin 
70276cde263SHector Martin 	ret = aic_irq_domain_translate(domain, fwspec, &hwirq, &type);
70376cde263SHector Martin 	if (ret)
70476cde263SHector Martin 		return ret;
70576cde263SHector Martin 
70676cde263SHector Martin 	for (i = 0; i < nr_irqs; i++) {
70776cde263SHector Martin 		ret = aic_irq_domain_map(domain, virq + i, hwirq + i);
70876cde263SHector Martin 		if (ret)
70976cde263SHector Martin 			return ret;
71076cde263SHector Martin 	}
71176cde263SHector Martin 
71276cde263SHector Martin 	return 0;
71376cde263SHector Martin }
71476cde263SHector Martin 
71576cde263SHector Martin static void aic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
71676cde263SHector Martin 				unsigned int nr_irqs)
71776cde263SHector Martin {
71876cde263SHector Martin 	int i;
71976cde263SHector Martin 
72076cde263SHector Martin 	for (i = 0; i < nr_irqs; i++) {
72176cde263SHector Martin 		struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
72276cde263SHector Martin 
72376cde263SHector Martin 		irq_set_handler(virq + i, NULL);
72476cde263SHector Martin 		irq_domain_reset_irq_data(d);
72576cde263SHector Martin 	}
72676cde263SHector Martin }
72776cde263SHector Martin 
/* Domain ops for the main domain (hardware IRQs + FIQs). */
static const struct irq_domain_ops aic_irq_domain_ops = {
	.translate	= aic_irq_domain_translate,
	.alloc		= aic_irq_domain_alloc,
	.free		= aic_irq_domain_free,
};
73376cde263SHector Martin 
73476cde263SHector Martin /*
73576cde263SHector Martin  * IPI irqchip
73676cde263SHector Martin  */
73776cde263SHector Martin 
/*
 * Fire a Fast IPI at the given logical CPU via the implementation-
 * defined IPI request sysregs. Targets in the sender's own cluster use
 * the _LOCAL register (CPU index only); other clusters need the
 * _GLOBAL register with the cluster number encoded as well.
 */
static void aic_ipi_send_fast(int cpu)
{
	u64 mpidr = cpu_logical_map(cpu);
	u64 my_mpidr = read_cpuid_mpidr();
	u64 cluster = MPIDR_CLUSTER(mpidr);
	u64 idx = MPIDR_CPU(mpidr);

	if (MPIDR_CLUSTER(my_mpidr) == cluster)
		write_sysreg_s(FIELD_PREP(IPI_RR_CPU, idx),
			       SYS_IMP_APL_IPI_RR_LOCAL_EL1);
	else
		write_sysreg_s(FIELD_PREP(IPI_RR_CPU, idx) | FIELD_PREP(IPI_RR_CLUSTER, cluster),
			       SYS_IMP_APL_IPI_RR_GLOBAL_EL1);
	/* Synchronize the sysreg write before returning */
	isb();
}
7532cf68211SHector Martin 
75476cde263SHector Martin static void aic_ipi_mask(struct irq_data *d)
75576cde263SHector Martin {
75676cde263SHector Martin 	u32 irq_bit = BIT(irqd_to_hwirq(d));
75776cde263SHector Martin 
75876cde263SHector Martin 	/* No specific ordering requirements needed here. */
75976cde263SHector Martin 	atomic_andnot(irq_bit, this_cpu_ptr(&aic_vipi_enable));
76076cde263SHector Martin }
76176cde263SHector Martin 
/*
 * Unmask a vIPI: set our enable bit, then check whether the vIPI was
 * flagged while it was masked — if so, resend the hardware IPI to
 * ourselves so it actually gets handled.
 */
static void aic_ipi_unmask(struct irq_data *d)
{
	struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);
	u32 irq_bit = BIT(irqd_to_hwirq(d));

	atomic_or(irq_bit, this_cpu_ptr(&aic_vipi_enable));

	/*
	 * The atomic_or() above must complete before the atomic_read()
	 * below to avoid racing aic_ipi_send_mask().
	 */
	smp_mb__after_atomic();

	/*
	 * If a pending vIPI was unmasked, raise a HW IPI to ourselves.
	 * No barriers needed here since this is a self-IPI.
	 */
	if (atomic_read(this_cpu_ptr(&aic_vipi_flag)) & irq_bit) {
		if (static_branch_likely(&use_fast_ipi))
			aic_ipi_send_fast(smp_processor_id());
		else
			aic_ic_write(ic, AIC_IPI_SEND, AIC_IPI_SEND_CPU(smp_processor_id()));
	}
}
78676cde263SHector Martin 
/*
 * Send a vIPI to every CPU in @mask: set the pending flag on each
 * target, then raise a hardware IPI only for targets that (a) did not
 * already have the flag set and (b) have the vIPI enabled. AICv1
 * targets are batched into a single AIC_IPI_SEND write at the end.
 */
static void aic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask)
{
	struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);
	u32 irq_bit = BIT(irqd_to_hwirq(d));
	u32 send = 0;
	int cpu;
	unsigned long pending;

	for_each_cpu(cpu, mask) {
		/*
		 * This sequence is the mirror of the one in aic_ipi_unmask();
		 * see the comment there. Additionally, release semantics
		 * ensure that the vIPI flag set is ordered after any shared
		 * memory accesses that precede it. This therefore also pairs
		 * with the atomic_fetch_andnot in aic_handle_ipi().
		 */
		pending = atomic_fetch_or_release(irq_bit, per_cpu_ptr(&aic_vipi_flag, cpu));

		/*
		 * The atomic_fetch_or_release() above must complete before the
		 * atomic_read() below to avoid racing aic_ipi_unmask().
		 */
		smp_mb__after_atomic();

		if (!(pending & irq_bit) &&
		    (atomic_read(per_cpu_ptr(&aic_vipi_enable, cpu)) & irq_bit)) {
			if (static_branch_likely(&use_fast_ipi))
				aic_ipi_send_fast(cpu);
			else
				send |= AIC_IPI_SEND_CPU(cpu);
		}
	}

	/*
	 * The flag writes must complete before the physical IPI is issued
	 * to another CPU. This is implied by the control dependency on
	 * the result of atomic_read_acquire() above, which is itself
	 * already ordered after the vIPI flag write.
	 *
	 * NOTE(review): the comment above refers to atomic_read_acquire(),
	 * but the loop actually uses atomic_read() ordered by the explicit
	 * smp_mb__after_atomic() — confirm which ordering argument applies.
	 */
	if (send)
		aic_ic_write(ic, AIC_IPI_SEND, send);
}
82976cde263SHector Martin 
/* irq_chip for the virtual IPIs multiplexed over the hardware IPI. */
static struct irq_chip ipi_chip = {
	.name = "AIC-IPI",
	.irq_mask = aic_ipi_mask,
	.irq_unmask = aic_ipi_unmask,
	.ipi_send_mask = aic_ipi_send_mask,
};
83676cde263SHector Martin 
83776cde263SHector Martin /*
83876cde263SHector Martin  * IPI IRQ domain
83976cde263SHector Martin  */
84076cde263SHector Martin 
/*
 * Hardware IPI handler: ack the hardware IPI (sysreg for Fast IPIs,
 * AIC MMIO otherwise), then demultiplex the pending-and-enabled vIPI
 * bits into the IPI domain.
 */
static void aic_handle_ipi(struct pt_regs *regs)
{
	int i;
	unsigned long enabled, firing;

	/*
	 * Ack the IPI. We need to order this after the AIC event read, but
	 * that is enforced by normal MMIO ordering guarantees.
	 *
	 * For the Fast IPI case, this needs to be ordered before the vIPI
	 * handling below, so we need to isb();
	 */
	if (static_branch_likely(&use_fast_ipi)) {
		write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1);
		isb();
	} else {
		aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_OTHER);
	}

	/*
	 * The mask read does not need to be ordered. Only we can change
	 * our own mask anyway, so no races are possible here, as long as
	 * we are properly in the interrupt handler (which is covered by
	 * the barrier that is part of the top-level AIC handler's readl()).
	 */
	enabled = atomic_read(this_cpu_ptr(&aic_vipi_enable));

	/*
	 * Clear the IPIs we are about to handle. This pairs with the
	 * atomic_fetch_or_release() in aic_ipi_send_mask(), and needs to be
	 * ordered after the aic_ic_write() above (to avoid dropping vIPIs) and
	 * before IPI handling code (to avoid races handling vIPIs before they
	 * are signaled). The former is taken care of by the release semantics
	 * of the write portion, while the latter is taken care of by the
	 * acquire semantics of the read portion.
	 */
	firing = atomic_fetch_andnot(enabled, this_cpu_ptr(&aic_vipi_flag)) & enabled;

	/* Dispatch each firing vIPI to its handler in the IPI domain */
	for_each_set_bit(i, &firing, AIC_NR_SWIPI)
		generic_handle_domain_irq(aic_irqc->ipi_domain, i);

	/*
	 * No ordering needed here; at worst this just changes the timing of
	 * when the next IPI will be delivered.
	 */
	if (!static_branch_likely(&use_fast_ipi))
		aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER);
}
88976cde263SHector Martin 
89076cde263SHector Martin static int aic_ipi_alloc(struct irq_domain *d, unsigned int virq,
89176cde263SHector Martin 			 unsigned int nr_irqs, void *args)
89276cde263SHector Martin {
89376cde263SHector Martin 	int i;
89476cde263SHector Martin 
89576cde263SHector Martin 	for (i = 0; i < nr_irqs; i++) {
89676cde263SHector Martin 		irq_set_percpu_devid(virq + i);
89776cde263SHector Martin 		irq_domain_set_info(d, virq + i, i, &ipi_chip, d->host_data,
89876cde263SHector Martin 				    handle_percpu_devid_irq, NULL, NULL);
89976cde263SHector Martin 	}
90076cde263SHector Martin 
90176cde263SHector Martin 	return 0;
90276cde263SHector Martin }
90376cde263SHector Martin 
static void aic_ipi_free(struct irq_domain *d, unsigned int virq, unsigned int nr_irqs)
{
	/* Not freeing IPIs: they are allocated once at boot and kept forever */
}
90876cde263SHector Martin 
/* Domain ops for the vIPI domain (no .translate: IPIs are not in DT). */
static const struct irq_domain_ops aic_ipi_domain_ops = {
	.alloc = aic_ipi_alloc,
	.free = aic_ipi_free,
};
91376cde263SHector Martin 
9143d9e575fSDonghyeok Kim static int __init aic_init_smp(struct aic_irq_chip *irqc, struct device_node *node)
91576cde263SHector Martin {
91676cde263SHector Martin 	struct irq_domain *ipi_domain;
91776cde263SHector Martin 	int base_ipi;
91876cde263SHector Martin 
91976cde263SHector Martin 	ipi_domain = irq_domain_create_linear(irqc->hw_domain->fwnode, AIC_NR_SWIPI,
92076cde263SHector Martin 					      &aic_ipi_domain_ops, irqc);
92176cde263SHector Martin 	if (WARN_ON(!ipi_domain))
92276cde263SHector Martin 		return -ENODEV;
92376cde263SHector Martin 
92476cde263SHector Martin 	ipi_domain->flags |= IRQ_DOMAIN_FLAG_IPI_SINGLE;
92576cde263SHector Martin 	irq_domain_update_bus_token(ipi_domain, DOMAIN_BUS_IPI);
92676cde263SHector Martin 
92776cde263SHector Martin 	base_ipi = __irq_domain_alloc_irqs(ipi_domain, -1, AIC_NR_SWIPI,
92876cde263SHector Martin 					   NUMA_NO_NODE, NULL, false, NULL);
92976cde263SHector Martin 
93076cde263SHector Martin 	if (WARN_ON(!base_ipi)) {
93176cde263SHector Martin 		irq_domain_remove(ipi_domain);
93276cde263SHector Martin 		return -ENODEV;
93376cde263SHector Martin 	}
93476cde263SHector Martin 
93576cde263SHector Martin 	set_smp_ipi_range(base_ipi, AIC_NR_SWIPI);
93676cde263SHector Martin 
93776cde263SHector Martin 	irqc->ipi_domain = ipi_domain;
93876cde263SHector Martin 
93976cde263SHector Martin 	return 0;
94076cde263SHector Martin }
94176cde263SHector Martin 
/*
 * cpuhp STARTING callback, run on each CPU as it is brought up: mask
 * every hard-wired per-CPU IRQ/FIQ source, then (AICv1 only) set the
 * hardware IPI mask policy. The 'cpu' argument is unused — the
 * callback always executes on the CPU being initialized.
 */
static int aic_init_cpu(unsigned int cpu)
{
	/* Mask all hard-wired per-CPU IRQ/FIQ sources */

	/* Pending Fast IPI FIQs */
	write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1);

	/* Timer FIQs */
	sysreg_clear_set(cntp_ctl_el0, 0, ARCH_TIMER_CTRL_IT_MASK);
	sysreg_clear_set(cntv_ctl_el0, 0, ARCH_TIMER_CTRL_IT_MASK);

	/* EL2-only (VHE mode) IRQ sources */
	if (is_kernel_in_hyp_mode()) {
		/* Guest timers */
		sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2,
				   VM_TMR_FIQ_ENABLE_V | VM_TMR_FIQ_ENABLE_P, 0);

		/* vGIC maintenance IRQ */
		sysreg_clear_set_s(SYS_ICH_HCR_EL2, ICH_HCR_EN, 0);
	}

	/* PMC FIQ */
	sysreg_clear_set_s(SYS_IMP_APL_PMCR0_EL1, PMCR0_IMODE | PMCR0_IACT,
			   FIELD_PREP(PMCR0_IMODE, PMCR0_IMODE_OFF));

	/* Uncore PMC FIQ */
	sysreg_clear_set_s(SYS_IMP_APL_UPMCR0_EL1, UPMCR0_IMODE,
			   FIELD_PREP(UPMCR0_IMODE, UPMCR0_IMODE_OFF));

	/* Commit all of the above */
	isb();

	if (aic_irqc->info.version == 1) {
		/*
		 * Make sure the kernel's idea of logical CPU order is the same as AIC's
		 * If we ever end up with a mismatch here, we will have to introduce
		 * a mapping table similar to what other irqchip drivers do.
		 */
		WARN_ON(aic_ic_read(aic_irqc, AIC_WHOAMI) != smp_processor_id());

		/*
		 * Always keep IPIs unmasked at the hardware level (except auto-masking
		 * by AIC during processing). We manage masks at the vIPI level.
		 * These registers only exist on AICv1, AICv2 always uses fast IPIs.
		 */
		aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_SELF | AIC_IPI_OTHER);
		if (static_branch_likely(&use_fast_ipi)) {
			aic_ic_write(aic_irqc, AIC_IPI_MASK_SET, AIC_IPI_SELF | AIC_IPI_OTHER);
		} else {
			aic_ic_write(aic_irqc, AIC_IPI_MASK_SET, AIC_IPI_SELF);
			aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER);
		}
	}

	/* Initialize the local mask state */
	__this_cpu_write(aic_fiq_unmasked, 0);

	return 0;
}
100176cde263SHector Martin 
/*
 * vGIC description registered with KVM at init (see aic_of_ic_init());
 * the flags advertise that maintenance-IRQ masking and HW deactivation
 * are not available on this platform.
 */
static struct gic_kvm_info vgic_info __initdata = {
	.type			= GIC_V3,
	.no_maint_irq_mask	= true,
	.no_hw_deactivation	= true,
};
1007b6ca556cSMarc Zyngier 
/*
 * Parse one child of the "affinities" DT node: read its
 * "apple,fiq-index" to select the fiq_aff[] slot and its "cpus"
 * phandle list to build the corresponding cpumask. Invalid entries
 * are skipped (WARNing where unexpected); an allocation failure
 * silently leaves the affinity unset.
 */
static void build_fiq_affinity(struct aic_irq_chip *ic, struct device_node *aff)
{
	int i, n;
	u32 fiq;

	/* Ignore nodes with no valid index, or indices already populated */
	if (of_property_read_u32(aff, "apple,fiq-index", &fiq) ||
	    WARN_ON(fiq >= AIC_NR_FIQ) || ic->fiq_aff[fiq])
		return;

	n = of_property_count_elems_of_size(aff, "cpus", sizeof(u32));
	if (WARN_ON(n < 0))
		return;

	ic->fiq_aff[fiq] = kzalloc(sizeof(*ic->fiq_aff[fiq]), GFP_KERNEL);
	if (!ic->fiq_aff[fiq])
		return;

	for (i = 0; i < n; i++) {
		struct device_node *cpu_node;
		u32 cpu_phandle;
		int cpu;

		if (of_property_read_u32_index(aff, "cpus", i, &cpu_phandle))
			continue;

		cpu_node = of_find_node_by_phandle(cpu_phandle);
		if (WARN_ON(!cpu_node))
			continue;

		cpu = of_cpu_node_to_id(cpu_node);
		/* Drop the reference taken by of_find_node_by_phandle() */
		of_node_put(cpu_node);
		if (WARN_ON(cpu < 0))
			continue;

		cpumask_set_cpu(cpu, &ic->fiq_aff[fiq]->aff);
	}
}
1045a5e88012SMarc Zyngier 
/*
 * Probe/init entry point (see the IRQCHIP_DECLARE()s below): map the
 * registers, detect the AIC version from the OF match data, compute the
 * per-die register layout, create the main and IPI domains, parse FIQ
 * affinities, install the top-level IRQ/FIQ handlers, mask and route
 * all hardware IRQs, and register the per-CPU cpuhp init callback.
 */
static int __init aic_of_ic_init(struct device_node *node, struct device_node *parent)
{
	int i, die;
	u32 off, start_off;
	void __iomem *regs;
	struct aic_irq_chip *irqc;
	struct device_node *affs;
	const struct of_device_id *match;

	regs = of_iomap(node, 0);
	if (WARN_ON(!regs))
		return -EIO;

	irqc = kzalloc(sizeof(*irqc), GFP_KERNEL);
	if (!irqc) {
		iounmap(regs);
		return -ENOMEM;
	}

	irqc->base = regs;

	match = of_match_node(aic_info_match, node);
	if (!match)
		goto err_unmap;

	irqc->info = *(struct aic_info *)match->data;

	aic_irqc = irqc;

	switch (irqc->info.version) {
	case 1: {
		u32 info;

		info = aic_ic_read(irqc, AIC_INFO);
		irqc->nr_irq = FIELD_GET(AIC_INFO_NR_IRQ, info);
		irqc->max_irq = AIC_MAX_IRQ;
		irqc->nr_die = irqc->max_die = 1;

		off = start_off = irqc->info.target_cpu;
		off += sizeof(u32) * irqc->max_irq; /* TARGET_CPU */

		/* AICv1: events are read from the main register block */
		irqc->event = irqc->base;

		break;
	}
	case 2: {
		u32 info1, info3;

		info1 = aic_ic_read(irqc, AIC2_INFO1);
		info3 = aic_ic_read(irqc, AIC2_INFO3);

		irqc->nr_irq = FIELD_GET(AIC2_INFO1_NR_IRQ, info1);
		irqc->max_irq = FIELD_GET(AIC2_INFO3_MAX_IRQ, info3);
		irqc->nr_die = FIELD_GET(AIC2_INFO1_LAST_DIE, info1) + 1;
		irqc->max_die = FIELD_GET(AIC2_INFO3_MAX_DIE, info3);

		off = start_off = irqc->info.irq_cfg;
		off += sizeof(u32) * irqc->max_irq; /* IRQ_CFG */

		/* AICv2: the event register lives in a second MMIO region */
		irqc->event = of_iomap(node, 1);
		if (WARN_ON(!irqc->event))
			goto err_unmap;

		break;
	}
	}

	/*
	 * NOTE(review): the switch above has no default case. Match data
	 * only carries version 1 or 2 today, but an unexpected version
	 * would leave 'off'/'start_off' uninitialized here — confirm.
	 */

	/* Lay out the per-die register banks, one after the other */
	irqc->info.sw_set = off;
	off += sizeof(u32) * (irqc->max_irq >> 5); /* SW_SET */
	irqc->info.sw_clr = off;
	off += sizeof(u32) * (irqc->max_irq >> 5); /* SW_CLR */
	irqc->info.mask_set = off;
	off += sizeof(u32) * (irqc->max_irq >> 5); /* MASK_SET */
	irqc->info.mask_clr = off;
	off += sizeof(u32) * (irqc->max_irq >> 5); /* MASK_CLR */
	off += sizeof(u32) * (irqc->max_irq >> 5); /* HW_STATE */

	if (irqc->info.fast_ipi)
		static_branch_enable(&use_fast_ipi);
	else
		static_branch_disable(&use_fast_ipi);

	irqc->info.die_stride = off - start_off;

	irqc->hw_domain = irq_domain_create_tree(of_node_to_fwnode(node),
						 &aic_irq_domain_ops, irqc);
	if (WARN_ON(!irqc->hw_domain))
		goto err_unmap;

	irq_domain_update_bus_token(irqc->hw_domain, DOMAIN_BUS_WIRED);

	if (aic_init_smp(irqc, node))
		goto err_remove_domain;

	/* Optional per-FIQ CPU affinity information from the DT */
	affs = of_get_child_by_name(node, "affinities");
	if (affs) {
		struct device_node *chld;

		for_each_child_of_node(affs, chld)
			build_fiq_affinity(irqc, chld);
	}

	set_handle_irq(aic_handle_irq);
	set_handle_fiq(aic_handle_fiq);

	/* Mask all IRQs, clear soft triggers, route everything to CPU 0 */
	off = 0;
	for (die = 0; die < irqc->nr_die; die++) {
		for (i = 0; i < BITS_TO_U32(irqc->nr_irq); i++)
			aic_ic_write(irqc, irqc->info.mask_set + off + i * 4, U32_MAX);
		for (i = 0; i < BITS_TO_U32(irqc->nr_irq); i++)
			aic_ic_write(irqc, irqc->info.sw_clr + off + i * 4, U32_MAX);
		if (irqc->info.target_cpu)
			for (i = 0; i < irqc->nr_irq; i++)
				aic_ic_write(irqc, irqc->info.target_cpu + off + i * 4, 1);
		off += irqc->info.die_stride;
	}

	/* AICv2 needs an explicit global enable */
	if (irqc->info.version == 2) {
		u32 config = aic_ic_read(irqc, AIC2_CONFIG);

		config |= AIC2_CONFIG_ENABLE;
		aic_ic_write(irqc, AIC2_CONFIG, config);
	}

	if (!is_kernel_in_hyp_mode())
		pr_info("Kernel running in EL1, mapping interrupts");

	if (static_branch_likely(&use_fast_ipi))
		pr_info("Using Fast IPIs");

	cpuhp_setup_state(CPUHP_AP_IRQ_APPLE_AIC_STARTING,
			  "irqchip/apple-aic/ipi:starting",
			  aic_init_cpu, NULL);

	vgic_set_kvm_info(&vgic_info);

	pr_info("Initialized with %d/%d IRQs * %d/%d die(s), %d FIQs, %d vIPIs",
		irqc->nr_irq, irqc->max_irq, irqc->nr_die, irqc->max_die, AIC_NR_FIQ, AIC_NR_SWIPI);

	return 0;

err_remove_domain:
	irq_domain_remove(irqc->hw_domain);
err_unmap:
	/* irqc->event may alias irqc->base (AICv1); only unmap it once */
	if (irqc->event && irqc->event != irqc->base)
		iounmap(irqc->event);
	iounmap(irqc->base);
	kfree(irqc);
	return -ENODEV;
}
119676cde263SHector Martin 
/* Both AICv1 and AICv2 nodes probe through the same init path. */
IRQCHIP_DECLARE(apple_aic, "apple,aic", aic_of_ic_init);
IRQCHIP_DECLARE(apple_aic2, "apple,aic2", aic_of_ic_init);
1199