// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020-2024 Loongson Technologies, Inc.
 */
5
6 #include <linux/cpuhotplug.h>
7 #include <linux/init.h>
8 #include <linux/interrupt.h>
9 #include <linux/irq.h>
10 #include <linux/irqchip.h>
11 #include <linux/irqchip/chained_irq.h>
12 #include <linux/irqdomain.h>
13 #include <linux/kernel.h>
14 #include <linux/msi.h>
15 #include <linux/radix-tree.h>
16 #include <linux/spinlock.h>
17
18 #include <asm/loongarch.h>
19 #include <asm/setup.h>
20
21 #include <linux/irqchip/irq-msi-lib.h>
22 #include "irq-loongson.h"
23
24 #define VECTORS_PER_REG 64
25 #define IRR_VECTOR_MASK 0xffUL
26 #define IRR_INVALID_MASK 0x80000000UL
27 #define AVEC_MSG_OFFSET 0x100000
28
29 #ifdef CONFIG_SMP
30 struct pending_list {
31 struct list_head head;
32 };
33
34 static struct cpumask intersect_mask;
35 static DEFINE_PER_CPU(struct pending_list, pending_list);
36 #endif
37
38 static DEFINE_PER_CPU(struct irq_desc * [NR_VECTORS], irq_map);
39
/* Global controller state; 'lock' serializes matrix/pending-list updates. */
struct avecintc_chip {
	raw_spinlock_t lock;
	struct fwnode_handle *fwnode;
	struct irq_domain *domain;
	struct irq_matrix *vector_matrix;
	phys_addr_t msi_base_addr;
};

static struct avecintc_chip loongarch_avec;
49
/*
 * Per-IRQ routing state: the current (cpu, vec) target plus the previous
 * one, which must be kept alive until a pending move completes.
 */
struct avecintc_data {
	struct list_head entry;		/* link on the previous CPU's pending_list */
	unsigned int cpu;
	unsigned int vec;
	unsigned int prev_cpu;
	unsigned int prev_vec;
	unsigned int moving;		/* non-zero while a vector move is in flight */
};
58
/* Turn on AVEC delivery by setting the enable bit in the MISC_FUNC IOCSR. */
static inline void avecintc_enable(void)
{
#ifdef CONFIG_MACH_LOONGSON64
	iocsr_write64(iocsr_read64(LOONGARCH_IOCSR_MISC_FUNC) | IOCSR_MISC_FUNC_AVEC_EN,
		      LOONGARCH_IOCSR_MISC_FUNC);
#endif
}
69
/* Intentionally empty: registered as ->irq_ack so the flow handler has a callback. */
static inline void avecintc_ack_irq(struct irq_data *d)
{
}
73
/* Intentionally empty: this controller exposes no per-vector mask operation. */
static inline void avecintc_mask_irq(struct irq_data *d)
{
}
77
/* Intentionally empty: counterpart of avecintc_mask_irq(). */
static inline void avecintc_unmask_irq(struct irq_data *d)
{
}
81
82 #ifdef CONFIG_SMP
pending_list_init(int cpu)83 static inline void pending_list_init(int cpu)
84 {
85 struct pending_list *plist = per_cpu_ptr(&pending_list, cpu);
86
87 INIT_LIST_HEAD(&plist->head);
88 }
89
avecintc_sync(struct avecintc_data * adata)90 static void avecintc_sync(struct avecintc_data *adata)
91 {
92 struct pending_list *plist;
93
94 if (cpu_online(adata->prev_cpu)) {
95 plist = per_cpu_ptr(&pending_list, adata->prev_cpu);
96 list_add_tail(&adata->entry, &plist->head);
97 adata->moving = 1;
98 mp_ops.send_ipi_single(adata->prev_cpu, ACTION_CLEAR_VECTOR);
99 }
100 }
101
/*
 * irq_chip::irq_set_affinity callback: allocate a fresh vector on a CPU in
 * 'dest' and hand the old (cpu, vector) pair off for deferred release.
 */
static int avecintc_set_affinity(struct irq_data *data, const struct cpumask *dest, bool force)
{
	int cpu, ret, vector;
	struct avecintc_data *adata;

	scoped_guard(raw_spinlock, &loongarch_avec.lock) {
		adata = irq_data_get_irq_chip_data(data);

		/* Refuse while a previous move is still in flight. */
		if (adata->moving)
			return -EBUSY;

		/* Current target already satisfies the request; nothing to do. */
		if (cpu_online(adata->cpu) && cpumask_test_cpu(adata->cpu, dest))
			return 0;

		/* intersect_mask is shared scratch space, protected by the lock. */
		cpumask_and(&intersect_mask, dest, cpu_online_mask);

		ret = irq_matrix_alloc(loongarch_avec.vector_matrix, &intersect_mask, false, &cpu);
		if (ret < 0)
			return ret;

		vector = ret;
		adata->cpu = cpu;
		adata->vec = vector;
		per_cpu_ptr(irq_map, adata->cpu)[adata->vec] = irq_data_to_desc(data);
		/* Queue the old vector for release on the previous CPU. */
		avecintc_sync(adata);
	}

	irq_data_update_effective_affinity(data, cpumask_of(cpu));

	return IRQ_SET_MASK_OK;
}
133
/* CPU hotplug 'starting' callback: bring this CPU into the vector matrix. */
static int avecintc_cpu_online(unsigned int cpu)
{
	/* Controller not initialized yet (hotplug state registered before matrix). */
	if (!loongarch_avec.vector_matrix)
		return 0;

	guard(raw_spinlock)(&loongarch_avec.lock);

	avecintc_enable();

	irq_matrix_online(loongarch_avec.vector_matrix);

	pending_list_init(cpu);

	return 0;
}
149
/* CPU hotplug teardown callback: remove this CPU from the vector matrix. */
static int avecintc_cpu_offline(unsigned int cpu)
{
	struct pending_list *plist = per_cpu_ptr(&pending_list, cpu);

	if (!loongarch_avec.vector_matrix)
		return 0;

	guard(raw_spinlock)(&loongarch_avec.lock);

	/* Moves queued here can no longer complete on this CPU; warn only. */
	if (!list_empty(&plist->head))
		pr_warn("CPU#%d vector is busy\n", cpu);

	irq_matrix_offline(loongarch_avec.vector_matrix);

	return 0;
}
166
complete_irq_moving(void)167 void complete_irq_moving(void)
168 {
169 struct pending_list *plist = this_cpu_ptr(&pending_list);
170 struct avecintc_data *adata, *tdata;
171 int cpu, vector, bias;
172 unsigned long isr;
173
174 guard(raw_spinlock)(&loongarch_avec.lock);
175
176 list_for_each_entry_safe(adata, tdata, &plist->head, entry) {
177 cpu = adata->prev_cpu;
178 vector = adata->prev_vec;
179 bias = vector / VECTORS_PER_REG;
180 switch (bias) {
181 case 0:
182 isr = csr_read(LOONGARCH_CSR_ISR0);
183 break;
184 case 1:
185 isr = csr_read(LOONGARCH_CSR_ISR1);
186 break;
187 case 2:
188 isr = csr_read(LOONGARCH_CSR_ISR2);
189 break;
190 case 3:
191 isr = csr_read(LOONGARCH_CSR_ISR3);
192 break;
193 }
194
195 if (isr & (1UL << (vector % VECTORS_PER_REG))) {
196 mp_ops.send_ipi_single(cpu, ACTION_CLEAR_VECTOR);
197 continue;
198 }
199 list_del(&adata->entry);
200 irq_matrix_free(loongarch_avec.vector_matrix, cpu, vector, false);
201 this_cpu_write(irq_map[vector], NULL);
202 adata->moving = 0;
203 adata->prev_cpu = adata->cpu;
204 adata->prev_vec = adata->vec;
205 }
206 }
207 #endif
208
/*
 * Build the MSI message encoding the target CPU and vector into the
 * doorbell address; the data payload is unused by this controller.
 */
static void avecintc_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
{
	struct avecintc_data *adata = irq_data_get_irq_chip_data(d);

	msg->address_hi = 0x0;
	/*
	 * NOTE(review): AVEC_CPU_MASK is applied to the logical CPU number
	 * *before* cpu_logical_map(), not to the mapped physical id —
	 * verify this masking order is intended.
	 */
	msg->address_lo = (loongarch_avec.msi_base_addr |
			   (adata->vec & AVEC_IRQ_MASK) << AVEC_IRQ_SHIFT) |
			  ((cpu_logical_map(adata->cpu & AVEC_CPU_MASK)) << AVEC_CPU_SHIFT);
	msg->data = 0x0;
}
219
/* irq_chip for AVEC vectors; ack/mask/unmask are no-op stubs. */
static struct irq_chip avec_irq_controller = {
	.name = "AVECINTC",
	.irq_ack = avecintc_ack_irq,
	.irq_mask = avecintc_mask_irq,
	.irq_unmask = avecintc_unmask_irq,
#ifdef CONFIG_SMP
	.irq_set_affinity = avecintc_set_affinity,
#endif
	.irq_compose_msi_msg = avecintc_compose_msi_msg,
};
230
avecintc_irq_dispatch(struct irq_desc * desc)231 static void avecintc_irq_dispatch(struct irq_desc *desc)
232 {
233 struct irq_chip *chip = irq_desc_get_chip(desc);
234 struct irq_desc *d;
235
236 chained_irq_enter(chip, desc);
237
238 while (true) {
239 unsigned long vector = csr_read(LOONGARCH_CSR_IRR);
240 if (vector & IRR_INVALID_MASK)
241 break;
242
243 vector &= IRR_VECTOR_MASK;
244
245 d = this_cpu_read(irq_map[vector]);
246 if (d) {
247 generic_handle_irq_desc(d);
248 } else {
249 spurious_interrupt();
250 pr_warn("Unexpected IRQ occurs on CPU#%d [vector %ld]\n", smp_processor_id(), vector);
251 }
252 }
253
254 chained_irq_exit(chip, desc);
255 }
256
avecintc_alloc_vector(struct irq_data * irqd,struct avecintc_data * adata)257 static int avecintc_alloc_vector(struct irq_data *irqd, struct avecintc_data *adata)
258 {
259 int cpu, ret;
260
261 guard(raw_spinlock_irqsave)(&loongarch_avec.lock);
262
263 ret = irq_matrix_alloc(loongarch_avec.vector_matrix, cpu_online_mask, false, &cpu);
264 if (ret < 0)
265 return ret;
266
267 adata->prev_cpu = adata->cpu = cpu;
268 adata->prev_vec = adata->vec = ret;
269 per_cpu_ptr(irq_map, adata->cpu)[adata->vec] = irq_data_to_desc(irqd);
270
271 return 0;
272 }
273
/*
 * irq_domain alloc callback: for each requested IRQ, allocate per-IRQ
 * routing state plus a matrix vector and wire up the edge flow handler.
 */
static int avecintc_domain_alloc(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *arg)
{
	for (unsigned int i = 0; i < nr_irqs; i++) {
		struct irq_data *irqd = irq_domain_get_irq_data(domain, virq + i);
		/*
		 * NOTE(review): kzalloc_obj() is presumably a zeroing
		 * sizeof(*adata) allocation helper — confirm it exists in
		 * this tree's slab API and which GFP flags it implies.
		 */
		struct avecintc_data *adata = kzalloc_obj(*adata);
		int ret;

		if (!adata)
			return -ENOMEM;

		ret = avecintc_alloc_vector(irqd, adata);
		if (ret < 0) {
			kfree(adata);
			return ret;
		}

		irq_domain_set_info(domain, virq + i, virq + i, &avec_irq_controller,
				    adata, handle_edge_irq, NULL, NULL);
		irqd_set_single_target(irqd);
		irqd_set_affinity_on_activate(irqd);
	}

	return 0;
}
299
/*
 * Release the (cpu, vec) slot held by an interrupt, plus the previous
 * slot if a vector move was still in flight when the IRQ was freed.
 */
static void avecintc_free_vector(struct irq_data *irqd, struct avecintc_data *adata)
{
	guard(raw_spinlock_irqsave)(&loongarch_avec.lock);

	per_cpu(irq_map, adata->cpu)[adata->vec] = NULL;
	irq_matrix_free(loongarch_avec.vector_matrix, adata->cpu, adata->vec, false);

#ifdef CONFIG_SMP
	if (!adata->moving)
		return;

	/* Pending move never completed: the old slot is still occupied. */
	per_cpu(irq_map, adata->prev_cpu)[adata->prev_vec] = NULL;
	irq_matrix_free(loongarch_avec.vector_matrix, adata->prev_cpu, adata->prev_vec, false);
	list_del_init(&adata->entry);
#endif
}
316
/*
 * irq_domain free callback: undo avecintc_domain_alloc() for each IRQ in
 * the range — release its vector(s), detach the irq_data, and free the
 * per-IRQ state.
 */
static void avecintc_domain_free(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs)
{
	for (unsigned int i = 0; i < nr_irqs; i++) {
		struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);
		struct avecintc_data *adata;

		if (!d)
			continue;

		adata = irq_data_get_irq_chip_data(d);
		avecintc_free_vector(d, adata);
		irq_domain_reset_irq_data(d);
		kfree(adata);
	}
}
332
/* Domain ops; .select lets the generic MSI library pick this domain. */
static const struct irq_domain_ops avecintc_domain_ops = {
	.alloc = avecintc_domain_alloc,
	.free = avecintc_domain_free,
	.select = msi_lib_irq_domain_select,
};
338
/*
 * Create the global vector matrix, reserve the legacy vectors so the
 * allocator never hands them out, and mark the boot CPU online in it.
 */
static int __init irq_matrix_init(void)
{
	loongarch_avec.vector_matrix = irq_alloc_matrix(NR_VECTORS, 0, NR_VECTORS);
	if (!loongarch_avec.vector_matrix)
		return -ENOMEM;

	for (int i = 0; i < NR_LEGACY_VECTORS; i++)
		irq_matrix_assign_system(loongarch_avec.vector_matrix, i, false);

	irq_matrix_online(loongarch_avec.vector_matrix);

	return 0;
}
352
/*
 * Core bring-up: create the AVEC irq_domain under 'parent', map the
 * cascade interrupt, initialize the vector matrix, and register CPU
 * hotplug callbacks. Uses goto-based unwind on failure.
 */
static int __init avecintc_init(struct irq_domain *parent)
{
	int ret, parent_irq;

	raw_spin_lock_init(&loongarch_avec.lock);

	loongarch_avec.fwnode = irq_domain_alloc_named_fwnode("AVECINTC");
	if (!loongarch_avec.fwnode) {
		pr_err("Unable to allocate domain handle\n");
		ret = -ENOMEM;
		goto out;
	}

	loongarch_avec.domain = irq_domain_create_tree(loongarch_avec.fwnode,
						       &avecintc_domain_ops, NULL);
	if (!loongarch_avec.domain) {
		pr_err("Unable to create IRQ domain\n");
		ret = -ENOMEM;
		goto out_free_handle;
	}

	parent_irq = irq_create_mapping(parent, INT_AVEC);
	if (!parent_irq) {
		pr_err("Failed to mapping hwirq\n");
		ret = -EINVAL;
		goto out_remove_domain;
	}

	ret = irq_matrix_init();
	if (ret < 0) {
		pr_err("Failed to init irq matrix\n");
		goto out_remove_domain;
	}
	irq_set_chained_handler_and_data(parent_irq, avecintc_irq_dispatch, NULL);

#ifdef CONFIG_SMP
	/* Boot CPU never goes through the hotplug callback; init it here. */
	pending_list_init(0);
	cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_AVECINTC_STARTING,
				  "irqchip/loongarch/avecintc:starting",
				  avecintc_cpu_online, avecintc_cpu_offline);
#endif
	avecintc_enable();

	return ret;

out_remove_domain:
	irq_domain_remove(loongarch_avec.domain);
out_free_handle:
	irq_domain_free_fwnode(loongarch_avec.fwnode);
out:
	return ret;
}
405
pch_msi_parse_madt(union acpi_subtable_headers * header,const unsigned long end)406 static int __init pch_msi_parse_madt(union acpi_subtable_headers *header,
407 const unsigned long end)
408 {
409 struct acpi_madt_msi_pic *pchmsi_entry = (struct acpi_madt_msi_pic *)header;
410
411 loongarch_avec.msi_base_addr = pchmsi_entry->msg_address - AVEC_MSG_OFFSET;
412
413 return pch_msi_acpi_init_avec(loongarch_avec.domain);
414 }
415
/* Walk the MADT MSI-PIC entries (at most 1) to set up the cascade domain. */
static inline int __init acpi_cascade_irqdomain_init(void)
{
	return acpi_table_parse_madt(ACPI_MADT_TYPE_MSI_PIC, pch_msi_parse_madt, 1);
}
420
avecintc_acpi_init(struct irq_domain * parent)421 int __init avecintc_acpi_init(struct irq_domain *parent)
422 {
423 int ret = avecintc_init(parent);
424 if (ret < 0) {
425 pr_err("Failed to init IRQ domain\n");
426 return ret;
427 }
428
429 ret = acpi_cascade_irqdomain_init();
430 if (ret < 0) {
431 pr_err("Failed to init cascade IRQ domain\n");
432 return ret;
433 }
434
435 return ret;
436 }
437