// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2024 Loongson Technologies, Inc.
 */

#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/msi.h>
#include <linux/radix-tree.h>
#include <linux/spinlock.h>

#include <asm/loongarch.h>
#include <asm/setup.h>

#include "irq-msi-lib.h"
#include "irq-loongson.h"

#define VECTORS_PER_REG		64
#define IRR_VECTOR_MASK		0xffUL
#define IRR_INVALID_MASK	0x80000000UL
#define AVEC_MSG_OFFSET		0x100000
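
/*
 * Each CPU has NR_VECTORS advanced extended interrupt vectors whose
 * in-service state is spread across four 64-bit ISR registers
 * (LOONGARCH_CSR_ISR0..ISR3), hence VECTORS_PER_REG == 64: for example,
 * vector 0x85 lives in ISR2 bit 5 (0x85 / 64 == 2, 0x85 % 64 == 5).
 * LOONGARCH_CSR_IRR reports the pending vector number in its low byte
 * (IRR_VECTOR_MASK) and sets bit 31 (IRR_INVALID_MASK) when nothing is
 * pending.
 */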

#ifdef CONFIG_SMP
struct pending_list {
	struct list_head	head;
};

static struct cpumask intersect_mask;
static DEFINE_PER_CPU(struct pending_list, pending_list);
#endif

static DEFINE_PER_CPU(struct irq_desc * [NR_VECTORS], irq_map);

struct avecintc_chip {
	raw_spinlock_t		lock;
	struct fwnode_handle	*fwnode;
	struct irq_domain	*domain;
	struct irq_matrix	*vector_matrix;
	phys_addr_t		msi_base_addr;
};

static struct avecintc_chip loongarch_avec;

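/*
 * Per-interrupt state: @cpu/@vec identify the current target, while
 * @prev_cpu/@prev_vec keep the old target alive during an affinity
 * change. @moving is set while the interrupt sits on the previous CPU's
 * pending_list waiting for complete_irq_moving() to retire it.
 */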
struct avecintc_data {
	struct list_head	entry;
	unsigned int		cpu;
	unsigned int		vec;
	unsigned int		prev_cpu;
	unsigned int		prev_vec;
	unsigned int		moving;
};

static inline void avecintc_enable(void)
{
	u64 value;

	value = iocsr_read64(LOONGARCH_IOCSR_MISC_FUNC);
	value |= IOCSR_MISC_FUNC_AVEC_EN;
	iocsr_write64(value, LOONGARCH_IOCSR_MISC_FUNC);
}

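/*
 * AVEC vectors are message-based and handled as edge interrupts
 * (handle_edge_irq), so there is nothing to ack at the controller and
 * per-vector masking is presumably left to the originating device or
 * bridge; these stubs only satisfy the irq_chip interface.
 */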
static inline void avecintc_ack_irq(struct irq_data *d)
{
}

static inline void avecintc_mask_irq(struct irq_data *d)
{
}

static inline void avecintc_unmask_irq(struct irq_data *d)
{
}

#ifdef CONFIG_SMP
static inline void pending_list_init(int cpu)
{
	struct pending_list *plist = per_cpu_ptr(&pending_list, cpu);

	INIT_LIST_HEAD(&plist->head);
}

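/*
 * Queue an interrupt whose affinity just changed on its previous CPU's
 * pending_list and kick that CPU with ACTION_CLEAR_VECTOR: the old
 * vector cannot be freed until the previous CPU confirms, in
 * complete_irq_moving(), that the vector is no longer in service there.
 */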
static void avecintc_sync(struct avecintc_data *adata)
{
	struct pending_list *plist;

	if (cpu_online(adata->prev_cpu)) {
		plist = per_cpu_ptr(&pending_list, adata->prev_cpu);
		list_add_tail(&adata->entry, &plist->head);
		adata->moving = 1;
		mp_ops.send_ipi_single(adata->prev_cpu, ACTION_CLEAR_VECTOR);
	}
}

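/*
 * Move an interrupt to a new CPU/vector: allocate a fresh vector from
 * the matrix within the intersection of the requested and online masks,
 * point the new CPU's irq_map[] slot at the descriptor, then hand the
 * old vector to avecintc_sync() for deferred release.
 */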
static int avecintc_set_affinity(struct irq_data *data, const struct cpumask *dest, bool force)
{
	int cpu, ret, vector;
	struct avecintc_data *adata;

	scoped_guard(raw_spinlock, &loongarch_avec.lock) {
		adata = irq_data_get_irq_chip_data(data);

		if (adata->moving)
			return -EBUSY;

		if (cpu_online(adata->cpu) && cpumask_test_cpu(adata->cpu, dest))
			return 0;

		cpumask_and(&intersect_mask, dest, cpu_online_mask);

		ret = irq_matrix_alloc(loongarch_avec.vector_matrix, &intersect_mask, false, &cpu);
		if (ret < 0)
			return ret;

		vector = ret;
		adata->cpu = cpu;
		adata->vec = vector;
		per_cpu_ptr(irq_map, adata->cpu)[adata->vec] = irq_data_to_desc(data);
		avecintc_sync(adata);
	}

	irq_data_update_effective_affinity(data, cpumask_of(cpu));

	return IRQ_SET_MASK_OK;
}

static int avecintc_cpu_online(unsigned int cpu)
{
	if (!loongarch_avec.vector_matrix)
		return 0;

	guard(raw_spinlock)(&loongarch_avec.lock);

	avecintc_enable();

	irq_matrix_online(loongarch_avec.vector_matrix);

	pending_list_init(cpu);

	return 0;
}

static int avecintc_cpu_offline(unsigned int cpu)
{
	struct pending_list *plist = per_cpu_ptr(&pending_list, cpu);

	if (!loongarch_avec.vector_matrix)
		return 0;

	guard(raw_spinlock)(&loongarch_avec.lock);

	if (!list_empty(&plist->head))
		pr_warn("CPU#%d vector is busy\n", cpu);

	irq_matrix_offline(loongarch_avec.vector_matrix);

	return 0;
}

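/*
 * Invoked on the previous CPU via the ACTION_CLEAR_VECTOR IPI to retire
 * moved interrupts: if the old vector is still in service in this CPU's
 * ISR, re-send the IPI and retry later; otherwise free the old vector
 * and clear @moving so a further affinity change may proceed.
 */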
void complete_irq_moving(void)
{
	struct pending_list *plist = this_cpu_ptr(&pending_list);
	struct avecintc_data *adata, *tdata;
	int cpu, vector, bias;
	uint64_t isr;

	guard(raw_spinlock)(&loongarch_avec.lock);

	list_for_each_entry_safe(adata, tdata, &plist->head, entry) {
		cpu = adata->prev_cpu;
		vector = adata->prev_vec;
		bias = vector / VECTORS_PER_REG;
		switch (bias) {
		case 0:
			isr = csr_read64(LOONGARCH_CSR_ISR0);
			break;
		case 1:
			isr = csr_read64(LOONGARCH_CSR_ISR1);
			break;
		case 2:
			isr = csr_read64(LOONGARCH_CSR_ISR2);
			break;
		case 3:
			isr = csr_read64(LOONGARCH_CSR_ISR3);
			break;
		}

		if (isr & (1UL << (vector % VECTORS_PER_REG))) {
			mp_ops.send_ipi_single(cpu, ACTION_CLEAR_VECTOR);
			continue;
		}
		list_del(&adata->entry);
		irq_matrix_free(loongarch_avec.vector_matrix, cpu, vector, false);
		this_cpu_write(irq_map[vector], NULL);
		adata->moving = 0;
		adata->prev_cpu = adata->cpu;
		adata->prev_vec = adata->vec;
	}
}
#endif

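/*
 * An AVEC MSI is a write of 0 to a per-CPU, per-vector doorbell:
 * address = msi_base_addr | (vector << 4) | (logical cpu << 12).
 * As an illustrative example, vector 0x30 routed to logical CPU 2
 * would target msi_base_addr | 0x300 | 0x2000.
 */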
static void avecintc_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct avecintc_data *adata = irq_data_get_irq_chip_data(d);

	msg->address_hi = 0x0;
	msg->address_lo = (loongarch_avec.msi_base_addr | (adata->vec & 0xff) << 4)
			  | ((cpu_logical_map(adata->cpu & 0xffff)) << 12);
	msg->data = 0x0;
}

static struct irq_chip avec_irq_controller = {
	.name			= "AVECINTC",
	.irq_ack		= avecintc_ack_irq,
	.irq_mask		= avecintc_mask_irq,
	.irq_unmask		= avecintc_unmask_irq,
#ifdef CONFIG_SMP
	.irq_set_affinity	= avecintc_set_affinity,
#endif
	.irq_compose_msi_msg	= avecintc_compose_msi_msg,
};

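/*
 * Chained handler for the parent INT_AVEC line: drain LOONGARCH_CSR_IRR
 * until it reports no pending vector (IRR_INVALID_MASK set), dispatching
 * each vector through this CPU's irq_map[].
 */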
static void avecintc_irq_dispatch(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irq_desc *d;

	chained_irq_enter(chip, desc);

	while (true) {
		unsigned long vector = csr_read64(LOONGARCH_CSR_IRR);
		if (vector & IRR_INVALID_MASK)
			break;

		vector &= IRR_VECTOR_MASK;

		d = this_cpu_read(irq_map[vector]);
		if (d) {
			generic_handle_irq_desc(d);
		} else {
			spurious_interrupt();
			pr_warn("Unexpected IRQ on CPU#%d [vector %ld]\n", smp_processor_id(), vector);
		}
	}

	chained_irq_exit(chip, desc);
}

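/*
 * Initial allocation: pick any online CPU from the matrix and make the
 * prev_* fields mirror the initial placement, so the interrupt starts
 * out not moving.
 */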
static int avecintc_alloc_vector(struct irq_data *irqd, struct avecintc_data *adata)
{
	int cpu, ret;

	guard(raw_spinlock_irqsave)(&loongarch_avec.lock);

	ret = irq_matrix_alloc(loongarch_avec.vector_matrix, cpu_online_mask, false, &cpu);
	if (ret < 0)
		return ret;

	adata->prev_cpu = adata->cpu = cpu;
	adata->prev_vec = adata->vec = ret;
	per_cpu_ptr(irq_map, adata->cpu)[adata->vec] = irq_data_to_desc(irqd);

	return 0;
}

static int avecintc_domain_alloc(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *arg)
{
	for (unsigned int i = 0; i < nr_irqs; i++) {
		struct irq_data *irqd = irq_domain_get_irq_data(domain, virq + i);
		struct avecintc_data *adata = kzalloc(sizeof(*adata), GFP_KERNEL);
		int ret;

		if (!adata)
			return -ENOMEM;

		ret = avecintc_alloc_vector(irqd, adata);
		if (ret < 0) {
			kfree(adata);
			return ret;
		}

		irq_domain_set_info(domain, virq + i, virq + i, &avec_irq_controller,
				    adata, handle_edge_irq, NULL, NULL);
		irqd_set_single_target(irqd);
		irqd_set_affinity_on_activate(irqd);
	}

	return 0;
}

static void avecintc_free_vector(struct irq_data *irqd, struct avecintc_data *adata)
{
	guard(raw_spinlock_irqsave)(&loongarch_avec.lock);

	per_cpu(irq_map, adata->cpu)[adata->vec] = NULL;
	irq_matrix_free(loongarch_avec.vector_matrix, adata->cpu, adata->vec, false);

#ifdef CONFIG_SMP
	if (!adata->moving)
		return;

	per_cpu(irq_map, adata->prev_cpu)[adata->prev_vec] = NULL;
	irq_matrix_free(loongarch_avec.vector_matrix, adata->prev_cpu, adata->prev_vec, false);
	list_del_init(&adata->entry);
#endif
}

static void avecintc_domain_free(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs)
{
	for (unsigned int i = 0; i < nr_irqs; i++) {
		struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);

		if (d) {
			struct avecintc_data *adata = irq_data_get_irq_chip_data(d);

			avecintc_free_vector(d, adata);
			irq_domain_reset_irq_data(d);
			kfree(adata);
		}
	}
}

static const struct irq_domain_ops avecintc_domain_ops = {
	.alloc		= avecintc_domain_alloc,
	.free		= avecintc_domain_free,
	.select		= msi_lib_irq_domain_select,
};

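/*
 * The matrix tracks all NR_VECTORS per-CPU vectors. The first
 * NR_LEGACY_VECTORS entries are reserved as system vectors so they are
 * never handed out for MSIs, then the boot CPU is marked online in the
 * matrix.
 */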
static int __init irq_matrix_init(void)
{
	loongarch_avec.vector_matrix = irq_alloc_matrix(NR_VECTORS, 0, NR_VECTORS);
	if (!loongarch_avec.vector_matrix)
		return -ENOMEM;

	for (int i = 0; i < NR_LEGACY_VECTORS; i++)
		irq_matrix_assign_system(loongarch_avec.vector_matrix, i, false);

	irq_matrix_online(loongarch_avec.vector_matrix);

	return 0;
}

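/*
 * Bring-up order matters here: the domain and parent INT_AVEC mapping
 * are created first, the vector matrix must exist before the chained
 * handler is installed, and the hotplug callbacks are registered before
 * AVEC delivery is enabled on the boot CPU.
 */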
static int __init avecintc_init(struct irq_domain *parent)
{
	int ret, parent_irq;

	raw_spin_lock_init(&loongarch_avec.lock);

	loongarch_avec.fwnode = irq_domain_alloc_named_fwnode("AVECINTC");
	if (!loongarch_avec.fwnode) {
		pr_err("Unable to allocate domain handle\n");
		ret = -ENOMEM;
		goto out;
	}

	loongarch_avec.domain = irq_domain_create_tree(loongarch_avec.fwnode,
						       &avecintc_domain_ops, NULL);
	if (!loongarch_avec.domain) {
		pr_err("Unable to create IRQ domain\n");
		ret = -ENOMEM;
		goto out_free_handle;
	}

	parent_irq = irq_create_mapping(parent, INT_AVEC);
	if (!parent_irq) {
		pr_err("Failed to map hwirq\n");
		ret = -EINVAL;
		goto out_remove_domain;
	}

	ret = irq_matrix_init();
	if (ret < 0) {
		pr_err("Failed to init irq matrix\n");
		goto out_remove_domain;
	}
	irq_set_chained_handler_and_data(parent_irq, avecintc_irq_dispatch, NULL);

#ifdef CONFIG_SMP
	pending_list_init(0);
	cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_AVECINTC_STARTING,
				  "irqchip/loongarch/avecintc:starting",
				  avecintc_cpu_online, avecintc_cpu_offline);
#endif
	avecintc_enable();

	return ret;

out_remove_domain:
	irq_domain_remove(loongarch_avec.domain);
out_free_handle:
	irq_domain_free_fwnode(loongarch_avec.fwnode);
out:
	return ret;
}

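/*
 * The AVEC doorbell region sits at a fixed offset (AVEC_MSG_OFFSET)
 * below the PCH MSI message address advertised in the MADT, so the base
 * is recovered from the ACPI MSI PIC entry before the PCH MSI driver is
 * initialized on top of this domain.
 */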
static int __init pch_msi_parse_madt(union acpi_subtable_headers *header,
				     const unsigned long end)
{
	struct acpi_madt_msi_pic *pchmsi_entry = (struct acpi_madt_msi_pic *)header;

	loongarch_avec.msi_base_addr = pchmsi_entry->msg_address - AVEC_MSG_OFFSET;

	return pch_msi_acpi_init_avec(loongarch_avec.domain);
}

static inline int __init acpi_cascade_irqdomain_init(void)
{
	return acpi_table_parse_madt(ACPI_MADT_TYPE_MSI_PIC, pch_msi_parse_madt, 1);
}

int __init avecintc_acpi_init(struct irq_domain *parent)
{
	int ret = avecintc_init(parent);

	if (ret < 0) {
		pr_err("Failed to init IRQ domain\n");
		return ret;
	}

	ret = acpi_cascade_irqdomain_init();
	if (ret < 0) {
		pr_err("Failed to init cascade IRQ domain\n");
		return ret;
	}

	return ret;
}