// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 * Copyright (C) 2022 Ventana Micro Systems Inc.
 */

#define pr_fmt(fmt) "riscv-imsic: " fmt
#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqchip/riscv-imsic.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/smp.h>

#include "irq-riscv-imsic-state.h"

static int imsic_parent_irq;
bool imsic_noipi __ro_after_init;

/*
 * Early parameter handler for "irqchip.riscv_imsic_noipi".
 *
 * When the option is present on the kernel command line, mark IMSIC-based
 * IPIs as disabled: the IPI multiplexer is never created and the
 * IMSIC_IPI_ID interrupt identity is never enabled on any CPU.
 */
static int __init imsic_noipi_cfg(char *buf)
{
	imsic_noipi = true;
	return 0;
}
early_param("irqchip.riscv_imsic_noipi", imsic_noipi_cfg);

#ifdef CONFIG_SMP
/*
 * Send an IPI to @cpu by writing the IPI interrupt identity to that CPU's
 * IMSIC MMIO page (an MSI write to self-owned address space).
 */
static void imsic_ipi_send(unsigned int cpu)
{
	struct imsic_local_config *local = per_cpu_ptr(imsic->global.local, cpu);

	writel(IMSIC_IPI_ID, local->msi_va);
}
/* Enable the IPI interrupt identity on the CPU that is coming online. */
static void imsic_ipi_starting_cpu(void)
{
	/* Nothing to do when IMSIC IPIs are disabled on the command line. */
	if (imsic_noipi)
		return;

	/* Enable IPIs for current CPU. */
	__imsic_id_set_enable(IMSIC_IPI_ID);
}
/* Disable the IPI interrupt identity on the CPU that is going offline. */
static void imsic_ipi_dying_cpu(void)
{
	/* Nothing to do when IMSIC IPIs are disabled on the command line. */
	if (imsic_noipi)
		return;

	/* Disable IPIs for current CPU. */
	__imsic_id_clear_enable(IMSIC_IPI_ID);
}
/*
 * Create the virtual IPI multiplexer on top of the single IMSIC_IPI_ID
 * interrupt identity and hand the resulting virq range to the arch IPI
 * code.
 *
 * Returns 0 on success (including the case where IMSIC IPIs are disabled
 * via "irqchip.riscv_imsic_noipi"), a negative errno otherwise.
 */
static int __init imsic_ipi_domain_init(void)
{
	int virq;

	if (imsic_noipi)
		return 0;

	/* Create IMSIC IPI multiplexing */
	virq = ipi_mux_create(IMSIC_NR_IPI, imsic_ipi_send);
	if (virq <= 0)
		return virq < 0 ? virq : -ENOMEM;

	/* Set vIRQ range */
	riscv_ipi_set_virq_range(virq, IMSIC_NR_IPI);

	/* Announce that IMSIC is providing IPIs */
	pr_info("%pfwP: providing IPIs using interrupt %d\n", imsic->fwnode, IMSIC_IPI_ID);

	return 0;
}
#else
/* !CONFIG_SMP: there are no IPIs, so the IPI hooks reduce to no-ops. */
static void imsic_ipi_starting_cpu(void) { }
static void imsic_ipi_dying_cpu(void) { }
static int __init imsic_ipi_domain_init(void) { return 0; }
#endif

/*
 * To handle an interrupt, we read the TOPEI CSR and write zero in one
 * instruction. If TOPEI CSR is non-zero then we translate TOPEI.ID to
 * Linux interrupt number and let Linux IRQ subsystem handle it.
 */
static void imsic_handle_irq(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	int cpu = smp_processor_id();
	struct imsic_vector *vec;
	unsigned long local_id;

	/*
	 * Process pending local synchronization instead of waiting
	 * for per-CPU local timer to expire.
	 */
	imsic_local_sync_all(false);

	chained_irq_enter(chip, desc);

	/* Claim-and-clear the highest-priority pending identity until none remain. */
	while ((local_id = csr_swap(CSR_TOPEI, 0))) {
		/* TOPEI packs the identity in the upper bits; extract the raw ID. */
		local_id >>= TOPEI_ID_SHIFT;

		/* IPIs are demultiplexed by the generic IPI-Mux, not by an irq domain. */
		if (!imsic_noipi && local_id == IMSIC_IPI_ID) {
			if (IS_ENABLED(CONFIG_SMP))
				ipi_mux_process();
			continue;
		}

		/* MSI domain not set up yet (early boot); drop non-IPI interrupts. */
		if (unlikely(!imsic->base_domain))
			continue;

		vec = imsic_vector_from_local_id(cpu, local_id);
		if (!vec) {
			pr_warn_ratelimited("vector not found for local ID 0x%lx\n", local_id);
			continue;
		}

		generic_handle_irq(vec->irq);
	}

	chained_irq_exit(chip, desc);
}
/*
 * CPU hotplug "starting" callback: bring the per-CPU IMSIC file online.
 * The ordering below matters: state must be online before enabling the
 * parent interrupt and local delivery. Always returns 0.
 */
static int imsic_starting_cpu(unsigned int cpu)
{
	/* Mark per-CPU IMSIC state as online */
	imsic_state_online();

	/* Enable per-CPU parent interrupt */
	enable_percpu_irq(imsic_parent_irq, irq_get_trigger_type(imsic_parent_irq));

	/* Setup IPIs */
	imsic_ipi_starting_cpu();

	/*
	 * Interrupts identities might have been enabled/disabled while
	 * this CPU was not running so sync-up local enable/disable state.
	 */
	imsic_local_sync_all(true);

	/* Enable local interrupt delivery */
	imsic_local_delivery(true);

	return 0;
}
/*
 * CPU hotplug "dying" callback: tear down IPI state, then mark the
 * per-CPU IMSIC state offline. The local IMSIC file itself is left
 * enabled (see the comment at the cpuhp_setup_state() call site).
 * Always returns 0.
 */
static int imsic_dying_cpu(unsigned int cpu)
{
	/* Cleanup IPIs */
	imsic_ipi_dying_cpu();

	/* Mark per-CPU IMSIC state as offline */
	imsic_state_offline();

	return 0;
}
/*
 * Common early-boot setup shared by the DT and ACPI paths: wire the IMSIC
 * to the per-CPU INTC external interrupt, create the IPI domain, install
 * the chained handler, and register the CPU hotplug callbacks.
 *
 * @fwnode is used for log messages only here. Returns 0 on success or a
 * negative errno on failure.
 */
static int __init imsic_early_probe(struct fwnode_handle *fwnode)
{
	struct irq_domain *domain;
	int rc;

	/* Find parent domain and register chained handler */
	domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(), DOMAIN_BUS_ANY);
	if (!domain) {
		pr_err("%pfwP: Failed to find INTC domain\n", fwnode);
		return -ENOENT;
	}
	imsic_parent_irq = irq_create_mapping(domain, RV_IRQ_EXT);
	if (!imsic_parent_irq) {
		pr_err("%pfwP: Failed to create INTC mapping\n", fwnode);
		return -ENOENT;
	}

	/* Initialize IPI domain */
	rc = imsic_ipi_domain_init();
	if (rc) {
		pr_err("%pfwP: Failed to initialize IPI domain\n", fwnode);
		return rc;
	}

	/* Setup chained handler to the parent domain interrupt */
	irq_set_chained_handler(imsic_parent_irq, imsic_handle_irq);

	/*
	 * Setup cpuhp state (must be done after setting imsic_parent_irq)
	 *
	 * Don't disable per-CPU IMSIC file when CPU goes offline
	 * because this affects IPI and the masking/unmasking of
	 * virtual IPIs is done via generic IPI-Mux
	 */
	cpuhp_setup_state(CPUHP_AP_IRQ_RISCV_IMSIC_STARTING, "irqchip/riscv/imsic:starting",
			  imsic_starting_cpu, imsic_dying_cpu);

	return 0;
}
/*
 * Devicetree entry point for the "riscv,imsics" node: parse the node into
 * the global IMSIC state, then perform the shared early probe (INTC
 * chaining + IPIs). Returns 0 on success or a negative errno.
 */
static int __init imsic_early_dt_init(struct device_node *node, struct device_node *parent)
{
	struct fwnode_handle *fwnode = &node->fwnode;
	int rc;

	/* Setup IMSIC state */
	rc = imsic_setup_state(fwnode, NULL);
	if (rc) {
		pr_err("%pfwP: failed to setup state (error %d)\n", fwnode, rc);
		return rc;
	}

	/* Do early setup of IPIs */
	rc = imsic_early_probe(fwnode);
	if (rc)
		return rc;

	/*
	 * Ensure that OF platform device gets probed so the MSI parts of
	 * the driver can be set up later as a platform device.
	 */
	of_node_clear_flag(node, OF_POPULATED);
	return 0;
}

IRQCHIP_DECLARE(riscv_imsic, "riscv,imsics", imsic_early_dt_init);

#ifdef CONFIG_ACPI

static struct fwnode_handle *imsic_acpi_fwnode;

/*
 * Return the software fwnode allocated for the IMSIC on ACPI systems
 * (registered as the PCI MSI fwnode provider below); @dev is unused.
 * May be NULL if early ACPI init has not run or failed.
 */
struct fwnode_handle *imsic_acpi_get_fwnode(struct device *dev)
{
	return imsic_acpi_fwnode;
}
236
imsic_early_acpi_init(union acpi_subtable_headers * header,const unsigned long end)237 static int __init imsic_early_acpi_init(union acpi_subtable_headers *header,
238 const unsigned long end)
239 {
240 struct acpi_madt_imsic *imsic = (struct acpi_madt_imsic *)header;
241 int rc;
242
243 imsic_acpi_fwnode = irq_domain_alloc_named_fwnode("imsic");
244 if (!imsic_acpi_fwnode) {
245 pr_err("unable to allocate IMSIC FW node\n");
246 return -ENOMEM;
247 }
248
249 /* Setup IMSIC state */
250 rc = imsic_setup_state(imsic_acpi_fwnode, imsic);
251 if (rc) {
252 pr_err("%pfwP: failed to setup state (error %d)\n", imsic_acpi_fwnode, rc);
253 return rc;
254 }
255
256 /* Do early setup of IMSIC state and IPIs */
257 rc = imsic_early_probe(imsic_acpi_fwnode);
258 if (rc) {
259 irq_domain_free_fwnode(imsic_acpi_fwnode);
260 imsic_acpi_fwnode = NULL;
261 return rc;
262 }
263
264 rc = imsic_platform_acpi_probe(imsic_acpi_fwnode);
265
266 #ifdef CONFIG_PCI
267 if (!rc)
268 pci_msi_register_fwnode_provider(&imsic_acpi_get_fwnode);
269 #endif
270
271 if (rc)
272 pr_err("%pfwP: failed to register IMSIC for MSI functionality (error %d)\n",
273 imsic_acpi_fwnode, rc);
274
275 /*
276 * Even if imsic_platform_acpi_probe() fails, the IPI part of IMSIC can
277 * continue to work. So, no need to return failure. This is similar to
278 * DT where IPI works but MSI probe fails for some reason.
279 */
280 return 0;
281 }
282
283 IRQCHIP_ACPI_DECLARE(riscv_imsic, ACPI_MADT_TYPE_IMSIC, NULL,
284 1, imsic_early_acpi_init);
#endif
