// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2024 Loongson Technologies, Inc.
 */

#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
#include <linux/msi.h>
#include <linux/radix-tree.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <asm/loongarch.h>
#include <asm/setup.h>

#include <linux/irqchip/irq-msi-lib.h>
#include "irq-loongson.h"

#define VECTORS_PER_REG		64
#define IRR_VECTOR_MASK		0xffUL
#define IRR_INVALID_MASK	0x80000000UL
#define AVEC_MSG_OFFSET		0x100000

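/*
 * Vector migration bookkeeping (SMP only): when an interrupt moves to a
 * new CPU, the old (cpu, vector) pair cannot be recycled until the old
 * CPU has handled any interrupt still in flight toward it. Each CPU keeps
 * a pending_list of avecintc_data entries whose previous vector is still
 * awaiting cleanup; see avecintc_sync() and complete_irq_moving() below.
 */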
#ifdef CONFIG_SMP
struct pending_list {
	struct list_head	head;
};

static struct cpumask intersect_mask;
static DEFINE_PER_CPU(struct pending_list, pending_list);
#endif

static DEFINE_PER_CPU(struct irq_desc * [NR_VECTORS], irq_map);

struct avecintc_chip {
	raw_spinlock_t		lock;
	struct fwnode_handle	*fwnode;
	struct irq_domain	*domain;
	struct irq_matrix	*vector_matrix;
	phys_addr_t		msi_base_addr;
};

static struct avecintc_chip loongarch_avec;

struct avecintc_data {
	struct list_head	entry;
	unsigned int		cpu;
	unsigned int		vec;
	unsigned int		prev_cpu;
	unsigned int		prev_vec;
	unsigned int		moving;
};

static inline void avecintc_enable(void)
{
	u64 value;

	value = iocsr_read64(LOONGARCH_IOCSR_MISC_FUNC);
	value |= IOCSR_MISC_FUNC_AVEC_EN;
	iocsr_write64(value, LOONGARCH_IOCSR_MISC_FUNC);
}

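/*
 * AVEC vectors are message-signalled and flow through handle_edge_irq
 * (see avecintc_domain_alloc()), so there is no per-vector ack/mask
 * state to manage at this level; the callbacks below are intentionally
 * empty stubs that satisfy the irq_chip interface.
 */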
static inline void avecintc_ack_irq(struct irq_data *d)
{
}

static inline void avecintc_mask_irq(struct irq_data *d)
{
}

static inline void avecintc_unmask_irq(struct irq_data *d)
{
}

#ifdef CONFIG_SMP
static inline void pending_list_init(int cpu)
{
	struct pending_list *plist = per_cpu_ptr(&pending_list, cpu);

	INIT_LIST_HEAD(&plist->head);
}

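/*
 * Park the interrupt's old (prev_cpu, prev_vec) pair on the previous
 * CPU's pending list and kick that CPU with ACTION_CLEAR_VECTOR so it
 * can release the old vector once nothing is pending on it (see
 * complete_irq_moving()). If the previous CPU is offline, no interrupt
 * can be in flight and the sync is skipped.
 */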
static void avecintc_sync(struct avecintc_data *adata)
{
	struct pending_list *plist;

	if (cpu_online(adata->prev_cpu)) {
		plist = per_cpu_ptr(&pending_list, adata->prev_cpu);
		list_add_tail(&adata->entry, &plist->head);
		adata->moving = 1;
		mp_ops.send_ipi_single(adata->prev_cpu, ACTION_CLEAR_VECTOR);
	}
}

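/*
 * Move an interrupt to a new CPU: allocate a fresh vector from the
 * matrix, restricted to the intersection of the requested mask and the
 * online CPUs, publish the new (cpu, vector) -> desc mapping, then
 * defer release of the old vector via avecintc_sync(). A move that is
 * already in progress is refused with -EBUSY.
 */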
static int avecintc_set_affinity(struct irq_data *data, const struct cpumask *dest, bool force)
{
	int cpu, ret, vector;
	struct avecintc_data *adata;

	scoped_guard(raw_spinlock, &loongarch_avec.lock) {
		adata = irq_data_get_irq_chip_data(data);

		if (adata->moving)
			return -EBUSY;

		if (cpu_online(adata->cpu) && cpumask_test_cpu(adata->cpu, dest))
			return 0;

		cpumask_and(&intersect_mask, dest, cpu_online_mask);

		ret = irq_matrix_alloc(loongarch_avec.vector_matrix, &intersect_mask, false, &cpu);
		if (ret < 0)
			return ret;

		vector = ret;
		adata->cpu = cpu;
		adata->vec = vector;
		per_cpu_ptr(irq_map, adata->cpu)[adata->vec] = irq_data_to_desc(data);
		avecintc_sync(adata);
	}

	irq_data_update_effective_affinity(data, cpumask_of(cpu));

	return IRQ_SET_MASK_OK;
}

static int avecintc_cpu_online(unsigned int cpu)
{
	if (!loongarch_avec.vector_matrix)
		return 0;

	guard(raw_spinlock)(&loongarch_avec.lock);

	avecintc_enable();

	irq_matrix_online(loongarch_avec.vector_matrix);

	pending_list_init(cpu);

	return 0;
}

static int avecintc_cpu_offline(unsigned int cpu)
{
	struct pending_list *plist = per_cpu_ptr(&pending_list, cpu);

	if (!loongarch_avec.vector_matrix)
		return 0;

	guard(raw_spinlock)(&loongarch_avec.lock);

	if (!list_empty(&plist->head))
		pr_warn("CPU#%d vector is busy\n", cpu);

	irq_matrix_offline(loongarch_avec.vector_matrix);

	return 0;
}

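/*
 * Runs on the CPU that received ACTION_CLEAR_VECTOR. For every entry
 * parked on this CPU's pending list, check the interrupt status
 * register covering the old vector (ISR0-ISR3 hold 64 vectors each):
 * if the bit is still set the interrupt has not been handled yet, so
 * re-send the IPI and retry later; otherwise free the old vector and
 * finalize the migration.
 */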
void complete_irq_moving(void)
{
	struct pending_list *plist = this_cpu_ptr(&pending_list);
	struct avecintc_data *adata, *tdata;
	int cpu, vector, bias;
	uint64_t isr;

	guard(raw_spinlock)(&loongarch_avec.lock);

	list_for_each_entry_safe(adata, tdata, &plist->head, entry) {
		cpu = adata->prev_cpu;
		vector = adata->prev_vec;
		bias = vector / VECTORS_PER_REG;
		switch (bias) {
		case 0:
			isr = csr_read64(LOONGARCH_CSR_ISR0);
			break;
		case 1:
			isr = csr_read64(LOONGARCH_CSR_ISR1);
			break;
		case 2:
			isr = csr_read64(LOONGARCH_CSR_ISR2);
			break;
		case 3:
			isr = csr_read64(LOONGARCH_CSR_ISR3);
			break;
		default:
			/* Vectors always fall within ISR0-ISR3; not reachable. */
			continue;
		}

		if (isr & (1UL << (vector % VECTORS_PER_REG))) {
			/* Old vector still pending here; ask to be called again. */
			mp_ops.send_ipi_single(cpu, ACTION_CLEAR_VECTOR);
			continue;
		}

		list_del(&adata->entry);
		irq_matrix_free(loongarch_avec.vector_matrix, cpu, vector, false);
		this_cpu_write(irq_map[vector], NULL);
		adata->moving = 0;
		adata->prev_cpu = adata->cpu;
		adata->prev_vec = adata->vec;
	}
}
#endif

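/*
 * Encode the target in the MSI address: the destination CPU's logical
 * id and the allocated vector are packed into address_lo above the
 * msi_base_addr taken from the MADT (see pch_msi_parse_madt()), while
 * address_hi and the data payload stay zero, so the vector travels in
 * the address rather than in the usual MSI data word.
 */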
static void avecintc_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct avecintc_data *adata = irq_data_get_irq_chip_data(d);

	msg->address_hi = 0x0;
	msg->address_lo = (loongarch_avec.msi_base_addr |
			   (adata->vec & AVEC_IRQ_MASK) << AVEC_IRQ_SHIFT) |
			  ((cpu_logical_map(adata->cpu & AVEC_CPU_MASK)) << AVEC_CPU_SHIFT);
	msg->data = 0x0;
}

static struct irq_chip avec_irq_controller = {
	.name			= "AVECINTC",
	.irq_ack		= avecintc_ack_irq,
	.irq_mask		= avecintc_mask_irq,
	.irq_unmask		= avecintc_unmask_irq,
#ifdef CONFIG_SMP
	.irq_set_affinity	= avecintc_set_affinity,
#endif
	.irq_compose_msi_msg	= avecintc_compose_msi_msg,
};

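/*
 * Chained handler for the parent INT_AVEC line: repeatedly pop vector
 * numbers from the IRR CSR until it reports an invalid entry, and hand
 * each one to the irq_desc registered in this CPU's irq_map. Vectors
 * with no mapping are flagged as spurious.
 */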
static void avecintc_irq_dispatch(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct irq_desc *d;

	chained_irq_enter(chip, desc);

	while (true) {
		unsigned long vector = csr_read64(LOONGARCH_CSR_IRR);

		if (vector & IRR_INVALID_MASK)
			break;

		vector &= IRR_VECTOR_MASK;

		d = this_cpu_read(irq_map[vector]);
		if (d) {
			generic_handle_irq_desc(d);
		} else {
			spurious_interrupt();
			pr_warn("Unexpected IRQ on CPU#%d [vector %ld]\n", smp_processor_id(), vector);
		}
	}

	chained_irq_exit(chip, desc);
}

static int avecintc_alloc_vector(struct irq_data *irqd, struct avecintc_data *adata)
{
	int cpu, ret;

	guard(raw_spinlock_irqsave)(&loongarch_avec.lock);

	ret = irq_matrix_alloc(loongarch_avec.vector_matrix, cpu_online_mask, false, &cpu);
	if (ret < 0)
		return ret;

	adata->prev_cpu = adata->cpu = cpu;
	adata->prev_vec = adata->vec = ret;
	per_cpu_ptr(irq_map, adata->cpu)[adata->vec] = irq_data_to_desc(irqd);

	return 0;
}

static int avecintc_domain_alloc(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *arg)
{
	for (unsigned int i = 0; i < nr_irqs; i++) {
		struct irq_data *irqd = irq_domain_get_irq_data(domain, virq + i);
		struct avecintc_data *adata = kzalloc(sizeof(*adata), GFP_KERNEL);
		int ret;

		if (!adata)
			return -ENOMEM;

		ret = avecintc_alloc_vector(irqd, adata);
		if (ret < 0) {
			kfree(adata);
			return ret;
		}

		irq_domain_set_info(domain, virq + i, virq + i, &avec_irq_controller,
				    adata, handle_edge_irq, NULL, NULL);
		irqd_set_single_target(irqd);
		irqd_set_affinity_on_activate(irqd);
	}

	return 0;
}

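/*
 * Release a vector back to the matrix. If a migration was still in
 * flight, the previous (cpu, vector) pair must be released as well and
 * the entry unlinked from its pending list.
 */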
static void avecintc_free_vector(struct irq_data *irqd, struct avecintc_data *adata)
{
	guard(raw_spinlock_irqsave)(&loongarch_avec.lock);

	per_cpu(irq_map, adata->cpu)[adata->vec] = NULL;
	irq_matrix_free(loongarch_avec.vector_matrix, adata->cpu, adata->vec, false);

#ifdef CONFIG_SMP
	if (!adata->moving)
		return;

	per_cpu(irq_map, adata->prev_cpu)[adata->prev_vec] = NULL;
	irq_matrix_free(loongarch_avec.vector_matrix, adata->prev_cpu, adata->prev_vec, false);
	list_del_init(&adata->entry);
#endif
}

static void avecintc_domain_free(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs)
{
	for (unsigned int i = 0; i < nr_irqs; i++) {
		struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);

		if (d) {
			struct avecintc_data *adata = irq_data_get_irq_chip_data(d);

			avecintc_free_vector(d, adata);
			irq_domain_reset_irq_data(d);
			kfree(adata);
		}
	}
}

static const struct irq_domain_ops avecintc_domain_ops = {
	.alloc		= avecintc_domain_alloc,
	.free		= avecintc_domain_free,
	.select		= msi_lib_irq_domain_select,
};

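/*
 * The matrix manages NR_VECTORS per-CPU vectors. The first
 * NR_LEGACY_VECTORS are reserved as system vectors so they are never
 * handed out to devices, and the boot CPU is brought online in the
 * matrix immediately.
 */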
static int __init irq_matrix_init(void)
{
	loongarch_avec.vector_matrix = irq_alloc_matrix(NR_VECTORS, 0, NR_VECTORS);
	if (!loongarch_avec.vector_matrix)
		return -ENOMEM;

	for (int i = 0; i < NR_LEGACY_VECTORS; i++)
		irq_matrix_assign_system(loongarch_avec.vector_matrix, i, false);

	irq_matrix_online(loongarch_avec.vector_matrix);

	return 0;
}

static int __init avecintc_init(struct irq_domain *parent)
{
	int ret, parent_irq;

	raw_spin_lock_init(&loongarch_avec.lock);

	loongarch_avec.fwnode = irq_domain_alloc_named_fwnode("AVECINTC");
	if (!loongarch_avec.fwnode) {
		pr_err("Unable to allocate domain handle\n");
		ret = -ENOMEM;
		goto out;
	}

	loongarch_avec.domain = irq_domain_create_tree(loongarch_avec.fwnode,
						       &avecintc_domain_ops, NULL);
	if (!loongarch_avec.domain) {
		pr_err("Unable to create IRQ domain\n");
		ret = -ENOMEM;
		goto out_free_handle;
	}

	parent_irq = irq_create_mapping(parent, INT_AVEC);
	if (!parent_irq) {
		pr_err("Failed to map hwirq\n");
		ret = -EINVAL;
		goto out_remove_domain;
	}

	ret = irq_matrix_init();
	if (ret < 0) {
		pr_err("Failed to init irq matrix\n");
		goto out_remove_domain;
	}

	irq_set_chained_handler_and_data(parent_irq, avecintc_irq_dispatch, NULL);

#ifdef CONFIG_SMP
	pending_list_init(0);
	cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_AVECINTC_STARTING,
				  "irqchip/loongarch/avecintc:starting",
				  avecintc_cpu_online, avecintc_cpu_offline);
#endif

	avecintc_enable();

	return ret;

out_remove_domain:
	irq_domain_remove(loongarch_avec.domain);
out_free_handle:
	irq_domain_free_fwnode(loongarch_avec.fwnode);
out:
	return ret;
}

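/*
 * The MADT MSI PIC entry advertises the message address used for PCH
 * MSI; the AVEC message window sits AVEC_MSG_OFFSET below it, which
 * yields the msi_base_addr consumed by avecintc_compose_msi_msg().
 */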
static int __init pch_msi_parse_madt(union acpi_subtable_headers *header,
				     const unsigned long end)
{
	struct acpi_madt_msi_pic *pchmsi_entry = (struct acpi_madt_msi_pic *)header;

	loongarch_avec.msi_base_addr = pchmsi_entry->msg_address - AVEC_MSG_OFFSET;

	return pch_msi_acpi_init_avec(loongarch_avec.domain);
}

static inline int __init acpi_cascade_irqdomain_init(void)
{
	return acpi_table_parse_madt(ACPI_MADT_TYPE_MSI_PIC, pch_msi_parse_madt, 1);
}

int __init avecintc_acpi_init(struct irq_domain *parent)
{
	int ret = avecintc_init(parent);

	if (ret < 0) {
		pr_err("Failed to init IRQ domain\n");
		return ret;
	}

	ret = acpi_cascade_irqdomain_init();
	if (ret < 0)
		pr_err("Failed to init cascade IRQ domain\n");

	return ret;
}