xref: /linux/drivers/irqchip/irq-riscv-aplic-direct.c (revision c532de5a67a70f8533d495f8f2aaa9a0491c3ad0)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 * Copyright (C) 2022 Ventana Micro Systems Inc.
 */

#include <linux/acpi.h>
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqchip/riscv-aplic.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/printk.h>
#include <linux/smp.h>

#include "irq-riscv-aplic-main.h"

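/*
 * Per the RISC-V AIA specification, writing 1 to idelivery enables interrupt
 * delivery from an IDC, and an ithreshold of 0 permits all priorities; since
 * 1 is the numerically lowest (highest) priority, an ithreshold of 1 masks
 * every interrupt.
 */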
#define APLIC_DISABLE_IDELIVERY		0
#define APLIC_ENABLE_IDELIVERY		1
#define APLIC_DISABLE_ITHRESHOLD	1
#define APLIC_ENABLE_ITHRESHOLD		0

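/* Per-APLIC state for direct delivery mode */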
struct aplic_direct {
	struct aplic_priv	priv;
	struct irq_domain	*irqdomain;
	struct cpumask		lmask;
};

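/* Per-CPU interrupt delivery control (IDC) state */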
struct aplic_idc {
	unsigned int		hart_index;
	void __iomem		*regs;
	struct aplic_direct	*direct;
};

static unsigned int aplic_direct_parent_irq;
static DEFINE_PER_CPU(struct aplic_idc, aplic_idcs);

static void aplic_direct_irq_eoi(struct irq_data *d)
{
	/*
	 * The fasteoi_handler requires an irq_eoi() callback, hence
	 * provide a dummy handler.
	 */
}

#ifdef CONFIG_SMP
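/*
 * Steer an interrupt source to one CPU from the requested mask by writing
 * that CPU's APLIC hart index into the source's target register.
 */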
static int aplic_direct_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
				     bool force)
{
	struct aplic_priv *priv = irq_data_get_irq_chip_data(d);
	struct aplic_direct *direct = container_of(priv, struct aplic_direct, priv);
	struct aplic_idc *idc;
	unsigned int cpu, val;
	void __iomem *target;

	if (force)
		cpu = cpumask_first_and(&direct->lmask, mask_val);
	else
		cpu = cpumask_first_and_and(&direct->lmask, mask_val, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	idc = per_cpu_ptr(&aplic_idcs, cpu);
	target = priv->regs + APLIC_TARGET_BASE + (d->hwirq - 1) * sizeof(u32);
	val = FIELD_PREP(APLIC_TARGET_HART_IDX, idc->hart_index);
	val |= FIELD_PREP(APLIC_TARGET_IPRIO, APLIC_DEFAULT_PRIORITY);
	writel(val, target);

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return IRQ_SET_MASK_OK_DONE;
}
#endif

static struct irq_chip aplic_direct_chip = {
	.name		= "APLIC-DIRECT",
	.irq_mask	= aplic_irq_mask,
	.irq_unmask	= aplic_irq_unmask,
	.irq_set_type	= aplic_irq_set_type,
	.irq_eoi	= aplic_direct_irq_eoi,
#ifdef CONFIG_SMP
	.irq_set_affinity = aplic_direct_set_affinity,
#endif
	.flags		= IRQCHIP_SET_TYPE_MASKED |
			  IRQCHIP_SKIP_SET_WAKE |
			  IRQCHIP_MASK_ON_SUSPEND,
};

static int aplic_direct_irqdomain_translate(struct irq_domain *d, struct irq_fwspec *fwspec,
					    unsigned long *hwirq, unsigned int *type)
{
	struct aplic_priv *priv = d->host_data;

	return aplic_irqdomain_translate(fwspec, priv->gsi_base, hwirq, type);
}

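/*
 * Allocate Linux interrupts for the translated hwirqs and default their
 * affinity to the CPUs that have an IDC on this APLIC.
 */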
static int aplic_direct_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
					unsigned int nr_irqs, void *arg)
{
	struct aplic_priv *priv = domain->host_data;
	struct aplic_direct *direct = container_of(priv, struct aplic_direct, priv);
	struct irq_fwspec *fwspec = arg;
	irq_hw_number_t hwirq;
	unsigned int type;
	int i, ret;

	ret = aplic_irqdomain_translate(fwspec, priv->gsi_base, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		irq_domain_set_info(domain, virq + i, hwirq + i, &aplic_direct_chip,
				    priv, handle_fasteoi_irq, NULL, NULL);
		irq_set_affinity(virq + i, &direct->lmask);
	}

	return 0;
}

static const struct irq_domain_ops aplic_direct_irqdomain_ops = {
	.translate	= aplic_direct_irqdomain_translate,
	.alloc		= aplic_direct_irqdomain_alloc,
	.free		= irq_domain_free_irqs_top,
};

/*
 * To handle APLIC direct interrupts, we just read the CLAIMI register,
 * which returns the highest-priority pending interrupt and clears its
 * pending bit. This process is repeated until the CLAIMI register
 * returns zero.
 */
static void aplic_direct_handle_irq(struct irq_desc *desc)
{
	struct aplic_idc *idc = this_cpu_ptr(&aplic_idcs);
	struct irq_domain *irqdomain = idc->direct->irqdomain;
	struct irq_chip *chip = irq_desc_get_chip(desc);
	irq_hw_number_t hw_irq;
	int irq;

	chained_irq_enter(chip, desc);

	while ((hw_irq = readl(idc->regs + APLIC_IDC_CLAIMI))) {
		hw_irq = hw_irq >> APLIC_IDC_TOPI_ID_SHIFT;
		irq = irq_find_mapping(irqdomain, hw_irq);

		if (unlikely(irq <= 0)) {
			dev_warn_ratelimited(idc->direct->priv.dev,
					     "hw_irq %lu mapping not found\n", hw_irq);
		} else {
			generic_handle_irq(irq);
		}
	}

	chained_irq_exit(chip, desc);
}

static void aplic_idc_set_delivery(struct aplic_idc *idc, bool en)
{
	u32 de = (en) ? APLIC_ENABLE_IDELIVERY : APLIC_DISABLE_IDELIVERY;
	u32 th = (en) ? APLIC_ENABLE_ITHRESHOLD : APLIC_DISABLE_ITHRESHOLD;

	/* Priority must be less than threshold for interrupt triggering */
	writel(th, idc->regs + APLIC_IDC_ITHRESHOLD);

	/* Delivery must be set to 1 for interrupt triggering */
	writel(de, idc->regs + APLIC_IDC_IDELIVERY);
}

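/* CPU hotplug callbacks: disable/enable the chained parent interrupt per CPU */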
static int aplic_direct_dying_cpu(unsigned int cpu)
{
	if (aplic_direct_parent_irq)
		disable_percpu_irq(aplic_direct_parent_irq);

	return 0;
}

static int aplic_direct_starting_cpu(unsigned int cpu)
{
	if (aplic_direct_parent_irq) {
		enable_percpu_irq(aplic_direct_parent_irq,
				  irq_get_trigger_type(aplic_direct_parent_irq));
	}

	return 0;
}

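/*
 * Resolve the parent hart and per-hart hwirq for the given IDC index,
 * using the ACPI RINTC mapping or, on DT systems, the corresponding
 * parent interrupt specifier.
 */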
static int aplic_direct_parse_parent_hwirq(struct device *dev, u32 index,
					   u32 *parent_hwirq, unsigned long *parent_hartid,
					   struct aplic_priv *priv)
{
	struct of_phandle_args parent;
	unsigned long hartid;
	int rc;

	if (!is_of_node(dev->fwnode)) {
		hartid = acpi_rintc_ext_parent_to_hartid(priv->acpi_aplic_id, index);
		if (hartid == INVALID_HARTID)
			return -ENODEV;

		*parent_hartid = hartid;
		*parent_hwirq = RV_IRQ_EXT;
		return 0;
	}

	rc = of_irq_parse_one(to_of_node(dev->fwnode), index, &parent);
	if (rc)
		return rc;

	rc = riscv_of_parent_hartid(parent.np, parent_hartid);
	if (rc)
		return rc;

	*parent_hwirq = parent.args[0];
	return 0;
}

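/*
 * Set up direct delivery mode: probe the per-CPU IDCs, enable interrupt
 * delivery on each, chain the handler onto the per-hart external interrupt,
 * and create the APLIC IRQ domain.
 */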
int aplic_direct_setup(struct device *dev, void __iomem *regs)
{
	int i, j, rc, cpu, current_cpu, setup_count = 0;
	struct aplic_direct *direct;
	struct irq_domain *domain;
	struct aplic_priv *priv;
	struct aplic_idc *idc;
	unsigned long hartid;
	u32 v, hwirq;

	direct = devm_kzalloc(dev, sizeof(*direct), GFP_KERNEL);
	if (!direct)
		return -ENOMEM;
	priv = &direct->priv;

	rc = aplic_setup_priv(priv, dev, regs);
	if (rc) {
		dev_err(dev, "failed to create APLIC context\n");
		return rc;
	}

	/* Set up the per-CPU IDCs and the target CPU mask */
	current_cpu = get_cpu();
	for (i = 0; i < priv->nr_idcs; i++) {
		rc = aplic_direct_parse_parent_hwirq(dev, i, &hwirq, &hartid, priv);
		if (rc) {
			dev_warn(dev, "parent irq for IDC%d not found\n", i);
			continue;
		}

		/*
		 * Skip interrupts other than external interrupts for the
		 * current privilege level.
		 */
		if (hwirq != RV_IRQ_EXT)
			continue;

		cpu = riscv_hartid_to_cpuid(hartid);
		if (cpu < 0) {
			dev_warn(dev, "invalid cpuid for IDC%d\n", i);
			continue;
		}

		cpumask_set_cpu(cpu, &direct->lmask);

		idc = per_cpu_ptr(&aplic_idcs, cpu);
		idc->hart_index = i;
		idc->regs = priv->regs + APLIC_IDC_BASE + i * APLIC_IDC_SIZE;
		idc->direct = direct;

		aplic_idc_set_delivery(idc, true);

		/*
		 * The boot CPU might not have APLIC hart_index = 0, so check
		 * and update the target registers of all interrupts.
		 */
		if (cpu == current_cpu && idc->hart_index) {
			v = FIELD_PREP(APLIC_TARGET_HART_IDX, idc->hart_index);
			v |= FIELD_PREP(APLIC_TARGET_IPRIO, APLIC_DEFAULT_PRIORITY);
			for (j = 1; j <= priv->nr_irqs; j++)
				writel(v, priv->regs + APLIC_TARGET_BASE + (j - 1) * sizeof(u32));
		}

		setup_count++;
	}
	put_cpu();

	/* Find parent domain and register chained handler */
	domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(),
					  DOMAIN_BUS_ANY);
	if (!aplic_direct_parent_irq && domain) {
		aplic_direct_parent_irq = irq_create_mapping(domain, RV_IRQ_EXT);
		if (aplic_direct_parent_irq) {
			irq_set_chained_handler(aplic_direct_parent_irq,
						aplic_direct_handle_irq);

			/*
			 * Set up a CPUHP notifier to enable the parent
			 * interrupt on all CPUs.
			 */
			cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
					  "irqchip/riscv/aplic:starting",
					  aplic_direct_starting_cpu,
					  aplic_direct_dying_cpu);
		}
	}

	/* Fail if we could not set up an IDC for any CPU */
	if (!setup_count)
		return -ENODEV;

	/* Set up global config and interrupt delivery */
	aplic_init_hw_global(priv, false);

	/* Create irq domain instance for the APLIC */
	direct->irqdomain = irq_domain_create_linear(dev->fwnode, priv->nr_irqs + 1,
						     &aplic_direct_irqdomain_ops, priv);
	if (!direct->irqdomain) {
		dev_err(dev, "failed to create direct irq domain\n");
		return -ENOMEM;
	}

	/* Advertise the interrupt controller */
	dev_info(dev, "%d interrupts directly connected to %d CPUs\n",
		 priv->nr_irqs, priv->nr_idcs);

	return 0;
}