xref: /linux/drivers/irqchip/irq-riscv-imsic-early.c (revision 7fc2cd2e4b398c57c9cf961cfea05eadbf34c05c)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2021 Western Digital Corporation or its affiliates.
4  * Copyright (C) 2022 Ventana Micro Systems Inc.
5  */
6 
7 #define pr_fmt(fmt) "riscv-imsic: " fmt
8 #include <linux/acpi.h>
9 #include <linux/cpu.h>
10 #include <linux/export.h>
11 #include <linux/interrupt.h>
12 #include <linux/init.h>
13 #include <linux/io.h>
14 #include <linux/irq.h>
15 #include <linux/irqchip.h>
16 #include <linux/irqchip/chained_irq.h>
17 #include <linux/irqchip/riscv-imsic.h>
18 #include <linux/module.h>
19 #include <linux/pci.h>
20 #include <linux/spinlock.h>
21 #include <linux/smp.h>
22 
23 #include "irq-riscv-imsic-state.h"
24 
/* Linux virq of the per-CPU parent (external) interrupt coming from the INTC. */
static int imsic_parent_irq;
/*
 * Set via the "irqchip.riscv_imsic_noipi" early parameter; when true the
 * IMSIC is not used for IPIs. Non-static: also read by the shared IMSIC
 * state code (see irq-riscv-imsic-state.h).
 */
bool imsic_noipi __ro_after_init;
27 
/*
 * Handler for the "irqchip.riscv_imsic_noipi" kernel command-line
 * parameter: opt out of using the IMSIC for IPIs. The parameter takes
 * no value, so @buf is ignored. Always returns 0 (parsed OK).
 */
static int __init imsic_noipi_cfg(char *buf)
{
	imsic_noipi = true;
	return 0;
}
early_param("irqchip.riscv_imsic_noipi", imsic_noipi_cfg);
34 
#ifdef CONFIG_SMP
/*
 * Raise an IPI on @cpu by writing the reserved IPI ID into that CPU's
 * IMSIC MMIO page; the device turns the MSI write into a local interrupt
 * with ID IMSIC_IPI_ID on the target hart.
 */
static void imsic_ipi_send(unsigned int cpu)
{
	struct imsic_local_config *local = per_cpu_ptr(imsic->global.local, cpu);

	writel(IMSIC_IPI_ID, local->msi_va);
}

/* Runs on each CPU coming online; no-op when IPIs were disabled via noipi. */
static void imsic_ipi_starting_cpu(void)
{
	if (imsic_noipi)
		return;

	/* Enable IPIs for current CPU. */
	__imsic_id_set_enable(IMSIC_IPI_ID);
}

/* Runs on each CPU going offline; no-op when IPIs were disabled via noipi. */
static void imsic_ipi_dying_cpu(void)
{
	if (imsic_noipi)
		return;

	/* Disable IPIs for current CPU. */
	__imsic_id_clear_enable(IMSIC_IPI_ID);
}

/*
 * Register the IMSIC as IPI provider: multiplex IMSIC_NR_IPI virtual IPIs
 * over the single reserved interrupt ID using the generic IPI-mux layer.
 *
 * Returns 0 on success (and when IPIs are disabled by the
 * "irqchip.riscv_imsic_noipi" parameter), a negative errno otherwise.
 */
static int __init imsic_ipi_domain_init(void)
{
	int virq;

	if (imsic_noipi)
		return 0;

	/* Create IMSIC IPI multiplexing */
	virq = ipi_mux_create(IMSIC_NR_IPI, imsic_ipi_send);
	if (virq <= 0)
		return virq < 0 ? virq : -ENOMEM;

	/* Set vIRQ range */
	riscv_ipi_set_virq_range(virq, IMSIC_NR_IPI);

	/* Announce that IMSIC is providing IPIs */
	pr_info("%pfwP: providing IPIs using interrupt %d\n", imsic->fwnode, IMSIC_IPI_ID);

	return 0;
}
#else
/* !CONFIG_SMP: IPIs do not exist, hooks are empty stubs. */
static void imsic_ipi_starting_cpu(void) { }
static void imsic_ipi_dying_cpu(void) { }
static int __init imsic_ipi_domain_init(void) { return 0; }
#endif
86 
/*
 * To handle an interrupt, we read the TOPEI CSR and write zero in one
 * instruction. If TOPEI CSR is non-zero then we translate TOPEI.ID to
 * Linux interrupt number and let Linux IRQ subsystem handle it.
 */
static void imsic_handle_irq(struct irq_desc *desc)
{
	struct imsic_local_priv *lpriv = this_cpu_ptr(imsic->lpriv);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	unsigned long local_id;

	/*
	 * Process pending local synchronization instead of waiting
	 * for per-CPU local timer to expire.
	 */
	imsic_local_sync_all(false);

	chained_irq_enter(chip, desc);

	/* Claim-and-clear: csr_swap() atomically reads TOPEI and writes 0. */
	while ((local_id = csr_swap(CSR_TOPEI, 0))) {
		/* TOPEI keeps the interrupt ID in its upper bits. */
		local_id >>= TOPEI_ID_SHIFT;

		/* IPIs arrive on the reserved ID and go through the IPI-mux. */
		if (!imsic_noipi && local_id == IMSIC_IPI_ID) {
			if (IS_ENABLED(CONFIG_SMP))
				ipi_mux_process();
			continue;
		}

		/* IDs beyond the advertised range have no vector to dispatch. */
		if (unlikely(local_id > imsic->global.nr_ids)) {
			pr_warn_ratelimited("vector not found for local ID 0x%lx\n", local_id);
			continue;
		}

		generic_handle_irq(lpriv->vectors[local_id].irq);
	}

	chained_irq_exit(chip, desc);
}
125 
/*
 * CPU hotplug "starting" callback: bring this CPU's IMSIC file online.
 * Runs on the incoming CPU. The sequence is ordered: mark state online,
 * unmask the parent interrupt, enable IPIs, sync enables, then enable
 * delivery. @cpu is unused (callback always acts on the local CPU).
 * Always returns 0.
 */
static int imsic_starting_cpu(unsigned int cpu)
{
	/* Mark per-CPU IMSIC state as online */
	imsic_state_online();

	/* Enable per-CPU parent interrupt */
	enable_percpu_irq(imsic_parent_irq, irq_get_trigger_type(imsic_parent_irq));

	/* Setup IPIs */
	imsic_ipi_starting_cpu();

	/*
	 * Interrupts identities might have been enabled/disabled while
	 * this CPU was not running so sync-up local enable/disable state.
	 */
	imsic_local_sync_all(true);

	/* Enable local interrupt delivery */
	imsic_local_delivery(true);

	return 0;
}
148 
/*
 * CPU hotplug "dying" callback, counterpart of imsic_starting_cpu():
 * disable IPIs then mark the per-CPU state offline. Note the per-CPU
 * IMSIC file itself is intentionally left enabled (see the comment at
 * the cpuhp_setup_state() call site). @cpu is unused; always returns 0.
 */
static int imsic_dying_cpu(unsigned int cpu)
{
	/* Cleanup IPIs */
	imsic_ipi_dying_cpu();

	/* Mark per-CPU IMSIC state as offline */
	imsic_state_offline();

	return 0;
}
159 
/*
 * Common early bring-up shared by the DT and ACPI paths: map the external
 * interrupt from the per-CPU INTC, initialize the IPI domain, install the
 * chained handler, and register the CPU hotplug callbacks.
 *
 * @fwnode is used only for log messages here. Returns 0 on success or a
 * negative errno. Must run after imsic_setup_state().
 */
static int __init imsic_early_probe(struct fwnode_handle *fwnode)
{
	struct irq_domain *domain;
	int rc;

	/* Find parent domain and register chained handler */
	domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(), DOMAIN_BUS_ANY);
	if (!domain) {
		pr_err("%pfwP: Failed to find INTC domain\n", fwnode);
		return -ENOENT;
	}
	imsic_parent_irq = irq_create_mapping(domain, RV_IRQ_EXT);
	if (!imsic_parent_irq) {
		pr_err("%pfwP: Failed to create INTC mapping\n", fwnode);
		return -ENOENT;
	}

	/* Initialize IPI domain */
	rc = imsic_ipi_domain_init();
	if (rc) {
		pr_err("%pfwP: Failed to initialize IPI domain\n", fwnode);
		return rc;
	}

	/* Setup chained handler to the parent domain interrupt */
	irq_set_chained_handler(imsic_parent_irq, imsic_handle_irq);

	/*
	 * Setup cpuhp state (must be done after setting imsic_parent_irq)
	 *
	 * Don't disable per-CPU IMSIC file when CPU goes offline
	 * because this affects IPI and the masking/unmasking of
	 * virtual IPIs is done via generic IPI-Mux
	 */
	cpuhp_setup_state(CPUHP_AP_IRQ_RISCV_IMSIC_STARTING, "irqchip/riscv/imsic:starting",
			  imsic_starting_cpu, imsic_dying_cpu);

	return 0;
}
199 
200 static int __init imsic_early_dt_init(struct device_node *node, struct device_node *parent)
201 {
202 	struct fwnode_handle *fwnode = &node->fwnode;
203 	int rc;
204 
205 	/* Setup IMSIC state */
206 	rc = imsic_setup_state(fwnode, NULL);
207 	if (rc) {
208 		pr_err("%pfwP: failed to setup state (error %d)\n", fwnode, rc);
209 		return rc;
210 	}
211 
212 	/* Do early setup of IPIs */
213 	rc = imsic_early_probe(fwnode);
214 	if (rc)
215 		return rc;
216 
217 	/* Ensure that OF platform device gets probed */
218 	of_node_clear_flag(node, OF_POPULATED);
219 	return 0;
220 }
221 
222 IRQCHIP_DECLARE(riscv_imsic, "riscv,imsics", imsic_early_dt_init);
223 
224 #ifdef CONFIG_ACPI
225 
/* fwnode of the IMSIC irqdomain created during ACPI early init. */
static struct fwnode_handle *imsic_acpi_fwnode;

/*
 * Return the IMSIC fwnode so other subsystems (e.g. PCI MSI) can locate
 * the IMSIC MSI domain. @dev is unused. NULL until ACPI early init has
 * allocated the fwnode.
 */
struct fwnode_handle *imsic_acpi_get_fwnode(struct device *dev)
{
	return imsic_acpi_fwnode;
}
EXPORT_SYMBOL_GPL(imsic_acpi_get_fwnode);
233 
234 static int __init imsic_early_acpi_init(union acpi_subtable_headers *header,
235 					const unsigned long end)
236 {
237 	struct acpi_madt_imsic *imsic = (struct acpi_madt_imsic *)header;
238 	int rc;
239 
240 	imsic_acpi_fwnode = irq_domain_alloc_named_fwnode("imsic");
241 	if (!imsic_acpi_fwnode) {
242 		pr_err("unable to allocate IMSIC FW node\n");
243 		return -ENOMEM;
244 	}
245 
246 	/* Setup IMSIC state */
247 	rc = imsic_setup_state(imsic_acpi_fwnode, imsic);
248 	if (rc) {
249 		pr_err("%pfwP: failed to setup state (error %d)\n", imsic_acpi_fwnode, rc);
250 		return rc;
251 	}
252 
253 	/* Do early setup of IMSIC state and IPIs */
254 	rc = imsic_early_probe(imsic_acpi_fwnode);
255 	if (rc) {
256 		irq_domain_free_fwnode(imsic_acpi_fwnode);
257 		imsic_acpi_fwnode = NULL;
258 		return rc;
259 	}
260 
261 	rc = imsic_platform_acpi_probe(imsic_acpi_fwnode);
262 
263 #ifdef CONFIG_PCI
264 	if (!rc)
265 		pci_msi_register_fwnode_provider(&imsic_acpi_get_fwnode);
266 #endif
267 
268 	if (rc)
269 		pr_err("%pfwP: failed to register IMSIC for MSI functionality (error %d)\n",
270 		       imsic_acpi_fwnode, rc);
271 
272 	/*
273 	 * Even if imsic_platform_acpi_probe() fails, the IPI part of IMSIC can
274 	 * continue to work. So, no need to return failure. This is similar to
275 	 * DT where IPI works but MSI probe fails for some reason.
276 	 */
277 	return 0;
278 }
279 
280 IRQCHIP_ACPI_DECLARE(riscv_imsic, ACPI_MADT_TYPE_IMSIC, NULL,
281 		     1, imsic_early_acpi_init);
282 #endif
283