// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 * Copyright (C) 2022 Ventana Micro Systems Inc.
 */

#define pr_fmt(fmt) "riscv-imsic: " fmt
#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqchip/riscv-imsic.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/smp.h>

#include "irq-riscv-imsic-state.h"

static int imsic_parent_irq;
bool imsic_noipi __ro_after_init;

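/*
 * Handler for the "irqchip.riscv_imsic_noipi" kernel parameter, which
 * prevents the IMSIC driver from providing IPIs.
 */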
static int __init imsic_noipi_cfg(char *buf)
{
	imsic_noipi = true;
	return 0;
}
early_param("irqchip.riscv_imsic_noipi", imsic_noipi_cfg);

#ifdef CONFIG_SMP
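/* Inject an IPI by writing IMSIC_IPI_ID into the target CPU's IMSIC interrupt file. */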
static void imsic_ipi_send(unsigned int cpu)
{
	struct imsic_local_config *local = per_cpu_ptr(imsic->global.local, cpu);

	writel(IMSIC_IPI_ID, local->msi_va);
}

static void imsic_ipi_starting_cpu(void)
{
	if (imsic_noipi)
		return;

	/* Enable IPIs for current CPU. */
	__imsic_id_set_enable(IMSIC_IPI_ID);
}

static void imsic_ipi_dying_cpu(void)
{
	if (imsic_noipi)
		return;

	/* Disable IPIs for current CPU. */
	__imsic_id_clear_enable(IMSIC_IPI_ID);
}

static int __init imsic_ipi_domain_init(void)
{
	int virq;

	if (imsic_noipi)
		return 0;

	/* Create IMSIC IPI multiplexing */
	virq = ipi_mux_create(IMSIC_NR_IPI, imsic_ipi_send);
	if (virq <= 0)
		return virq < 0 ? virq : -ENOMEM;

	/* Set vIRQ range */
	riscv_ipi_set_virq_range(virq, IMSIC_NR_IPI);

	/* Announce that IMSIC is providing IPIs */
	pr_info("%pfwP: providing IPIs using interrupt %d\n", imsic->fwnode, IMSIC_IPI_ID);

	return 0;
}
#else
static void imsic_ipi_starting_cpu(void) { }
static void imsic_ipi_dying_cpu(void) { }
static int __init imsic_ipi_domain_init(void) { return 0; }
#endif

/*
 * To handle an interrupt, we read the TOPEI CSR and write zero to it in a
 * single instruction. If the TOPEI CSR is non-zero, we translate TOPEI.ID
 * into a Linux interrupt number and let the Linux IRQ subsystem handle it.
 */
static void imsic_handle_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	int cpu = smp_processor_id();
	struct imsic_vector *vec;
	unsigned long local_id;

	/*
	 * Process any pending local synchronization instead of waiting
	 * for the per-CPU local timer to expire.
	 */
	imsic_local_sync_all(false);

	chained_irq_enter(chip, desc);

	while ((local_id = csr_swap(CSR_TOPEI, 0))) {
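		/* TOPEI reports the interrupt identity in its upper bits; shift it down to get the raw ID. */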
		local_id >>= TOPEI_ID_SHIFT;

		if (!imsic_noipi && local_id == IMSIC_IPI_ID) {
			if (IS_ENABLED(CONFIG_SMP))
				ipi_mux_process();
			continue;
		}

		if (unlikely(!imsic->base_domain))
			continue;

		vec = imsic_vector_from_local_id(cpu, local_id);
		if (!vec) {
			pr_warn_ratelimited("vector not found for local ID 0x%lx\n", local_id);
			continue;
		}

		generic_handle_irq(vec->irq);
	}

	chained_irq_exit(chip, desc);
}

static int imsic_starting_cpu(unsigned int cpu)
{
	/* Mark per-CPU IMSIC state as online */
	imsic_state_online();

	/* Enable per-CPU parent interrupt */
	enable_percpu_irq(imsic_parent_irq, irq_get_trigger_type(imsic_parent_irq));

	/* Setup IPIs */
	imsic_ipi_starting_cpu();

	/*
	 * Interrupt identities might have been enabled/disabled while
	 * this CPU was not running, so sync up the local enable/disable
	 * state.
	 */
	imsic_local_sync_all(true);

	/* Enable local interrupt delivery */
	imsic_local_delivery(true);

	return 0;
}

static int imsic_dying_cpu(unsigned int cpu)
{
	/* Cleanup IPIs */
	imsic_ipi_dying_cpu();

	/* Mark per-CPU IMSIC state as offline */
	imsic_state_offline();

	return 0;
}

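/*
 * Common early setup shared by the DT and ACPI probe paths: hook the
 * chained handler onto the per-CPU external interrupt, create the IPI
 * multiplexer, and register the CPU hotplug callbacks.
 */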
static int __init imsic_early_probe(struct fwnode_handle *fwnode)
{
	struct irq_domain *domain;
	int rc;

	/* Find the parent INTC domain and create the parent interrupt mapping */
	domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(), DOMAIN_BUS_ANY);
	if (!domain) {
		pr_err("%pfwP: Failed to find INTC domain\n", fwnode);
		return -ENOENT;
	}
	imsic_parent_irq = irq_create_mapping(domain, RV_IRQ_EXT);
	if (!imsic_parent_irq) {
		pr_err("%pfwP: Failed to create INTC mapping\n", fwnode);
		return -ENOENT;
	}

	/* Initialize IPI domain */
	rc = imsic_ipi_domain_init();
	if (rc) {
		pr_err("%pfwP: Failed to initialize IPI domain\n", fwnode);
		return rc;
	}

	/* Set up the chained handler for the parent domain interrupt */
	irq_set_chained_handler(imsic_parent_irq, imsic_handle_irq);

	/*
	 * Setup cpuhp state (must be done after setting imsic_parent_irq).
	 *
	 * Don't disable the per-CPU IMSIC file when a CPU goes offline
	 * because this would affect IPIs, and the masking/unmasking of
	 * virtual IPIs is done via the generic IPI mux.
	 */
	cpuhp_setup_state(CPUHP_AP_IRQ_RISCV_IMSIC_STARTING, "irqchip/riscv/imsic:starting",
			  imsic_starting_cpu, imsic_dying_cpu);

	return 0;
}

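/*
 * Early DT entry point for the "riscv,imsics" node. Only the state and IPI
 * setup is done here; the MSI parts are handled later when the corresponding
 * platform device is probed, which is why OF_POPULATED is cleared below.
 */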
static int __init imsic_early_dt_init(struct device_node *node, struct device_node *parent)
{
	struct fwnode_handle *fwnode = &node->fwnode;
	int rc;

	/* Setup IMSIC state */
	rc = imsic_setup_state(fwnode, NULL);
	if (rc) {
		pr_err("%pfwP: failed to setup state (error %d)\n", fwnode, rc);
		return rc;
	}

	/* Do early setup of IPIs */
	rc = imsic_early_probe(fwnode);
	if (rc)
		return rc;

	/* Ensure that the OF platform device gets probed */
	of_node_clear_flag(node, OF_POPULATED);
	return 0;
}

IRQCHIP_DECLARE(riscv_imsic, "riscv,imsics", imsic_early_dt_init);

#ifdef CONFIG_ACPI

static struct fwnode_handle *imsic_acpi_fwnode;

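/* Used (e.g. by the PCI/MSI layer via pci_msi_register_fwnode_provider()) to locate the IMSIC MSI domain. */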
struct fwnode_handle *imsic_acpi_get_fwnode(struct device *dev)
{
	return imsic_acpi_fwnode;
}
EXPORT_SYMBOL_GPL(imsic_acpi_get_fwnode);

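/* MADT IMSIC subtable handler: sets up IMSIC state, IPIs and the MSI functionality. */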
static int __init imsic_early_acpi_init(union acpi_subtable_headers *header,
					const unsigned long end)
{
	struct acpi_madt_imsic *imsic = (struct acpi_madt_imsic *)header;
	int rc;

	imsic_acpi_fwnode = irq_domain_alloc_named_fwnode("imsic");
	if (!imsic_acpi_fwnode) {
		pr_err("unable to allocate IMSIC FW node\n");
		return -ENOMEM;
	}

	/* Setup IMSIC state */
	rc = imsic_setup_state(imsic_acpi_fwnode, imsic);
	if (rc) {
		pr_err("%pfwP: failed to setup state (error %d)\n", imsic_acpi_fwnode, rc);
		return rc;
	}

	/* Do early setup of IPIs */
	rc = imsic_early_probe(imsic_acpi_fwnode);
	if (rc) {
		irq_domain_free_fwnode(imsic_acpi_fwnode);
		imsic_acpi_fwnode = NULL;
		return rc;
	}

	rc = imsic_platform_acpi_probe(imsic_acpi_fwnode);

#ifdef CONFIG_PCI
	if (!rc)
		pci_msi_register_fwnode_provider(&imsic_acpi_get_fwnode);
#endif

	if (rc)
		pr_err("%pfwP: failed to register IMSIC for MSI functionality (error %d)\n",
		       imsic_acpi_fwnode, rc);

	/*
	 * Even if imsic_platform_acpi_probe() fails, the IPI part of the IMSIC
	 * can continue to work, so there is no need to return a failure. This
	 * matches the DT case, where IPIs keep working even if the MSI probe
	 * fails for some reason.
	 */
	return 0;
}

IRQCHIP_ACPI_DECLARE(riscv_imsic, ACPI_MADT_TYPE_IMSIC, NULL,
		     1, imsic_early_acpi_init);
#endif