// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2021 Western Digital Corporation or its affiliates.
 * Copyright (C) 2022 Ventana Micro Systems Inc.
 */

#define pr_fmt(fmt) "riscv-imsic: " fmt
#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/bitmap.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/smp.h>
#include <asm/hwcap.h>

#include "irq-riscv-imsic-state.h"

#define IMSIC_DISABLE_EIDELIVERY	0
#define IMSIC_ENABLE_EIDELIVERY		1
#define IMSIC_DISABLE_EITHRESHOLD	1
#define IMSIC_ENABLE_EITHRESHOLD	0
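/*
 * A brief note on the values above, per the RISC-V AIA specification:
 * EIDELIVERY = 1 enables MSI delivery from the local interrupt file and
 * 0 disables it, while a non-zero EITHRESHOLD P blocks all interrupt
 * identities >= P. A threshold of 1 therefore blocks every valid
 * identity, and a threshold of 0 blocks none.
 */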

static inline void imsic_csr_write(unsigned long reg, unsigned long val)
{
	csr_write(CSR_ISELECT, reg);
	csr_write(CSR_IREG, val);
}

static inline unsigned long imsic_csr_read(unsigned long reg)
{
	csr_write(CSR_ISELECT, reg);
	return csr_read(CSR_IREG);
}

static inline unsigned long imsic_csr_read_clear(unsigned long reg, unsigned long val)
{
	csr_write(CSR_ISELECT, reg);
	return csr_read_clear(CSR_IREG, val);
}

static inline void imsic_csr_set(unsigned long reg, unsigned long val)
{
	csr_write(CSR_ISELECT, reg);
	csr_set(CSR_IREG, val);
}

static inline void imsic_csr_clear(unsigned long reg, unsigned long val)
{
	csr_write(CSR_ISELECT, reg);
	csr_clear(CSR_IREG, val);
}
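
/*
 * Illustrative sketch (not called anywhere): with the accessors above,
 * reading and clearing the pending bit of interrupt identity 5 on RV64
 * amounts to:
 *
 *	csr_write(CSR_ISELECT, IMSIC_EIP0);
 *	pending = csr_read_clear(CSR_IREG, BIT(5)) & BIT(5);
 */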

struct imsic_priv *imsic;

const struct imsic_global_config *imsic_get_global_config(void)
{
	return imsic ? &imsic->global : NULL;
}
EXPORT_SYMBOL_GPL(imsic_get_global_config);

static bool __imsic_eix_read_clear(unsigned long id, bool pend)
{
	unsigned long isel, imask;

	isel = id / BITS_PER_LONG;
	isel *= BITS_PER_LONG / IMSIC_EIPx_BITS;
	isel += pend ? IMSIC_EIP0 : IMSIC_EIE0;
	imask = BIT(id & (__riscv_xlen - 1));

	return !!(imsic_csr_read_clear(isel, imask) & imask);
}

static inline bool __imsic_id_read_clear_enabled(unsigned long id)
{
	return __imsic_eix_read_clear(id, false);
}

static inline bool __imsic_id_read_clear_pending(unsigned long id)
{
	return __imsic_eix_read_clear(id, true);
}

void __imsic_eix_update(unsigned long base_id, unsigned long num_id, bool pend, bool val)
{
	unsigned long id = base_id, last_id = base_id + num_id;
	unsigned long i, isel, ireg;

	while (id < last_id) {
		isel = id / BITS_PER_LONG;
		isel *= BITS_PER_LONG / IMSIC_EIPx_BITS;
		isel += pend ? IMSIC_EIP0 : IMSIC_EIE0;

		/*
		 * Prepare the ID mask to be programmed in the
		 * IMSIC EIEx and EIPx registers. These registers
		 * are XLEN-wide and we must not touch IDs which
		 * are < base_id and >= (base_id + num_id).
		 */
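		/*
		 * Illustrative example (assuming XLEN == 64 and
		 * IMSIC_EIPx_BITS == 32): updating enable bits for
		 * IDs 62..65 takes two passes of this loop, touching
		 * isel = IMSIC_EIE0 with ireg = BIT(62) | BIT(63),
		 * then isel = IMSIC_EIE2 with ireg = BIT(0) | BIT(1).
		 * Only even-numbered EIEx/EIPx registers are selected
		 * on RV64 since each access covers a 64-bit pair.
		 */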
		ireg = 0;
		for (i = id & (__riscv_xlen - 1); id < last_id && i < __riscv_xlen; i++) {
			ireg |= BIT(i);
			id++;
		}

		/*
		 * The IMSIC EIEx and EIPx registers are accessed
		 * indirectly via the ISELECT and IREG CSRs, so we
		 * must not be preempted between the two accesses.
		 *
		 * All existing users call this function with local
		 * IRQs disabled, so we don't need to do anything
		 * special here.
		 */
		if (val)
			imsic_csr_set(isel, ireg);
		else
			imsic_csr_clear(isel, ireg);
	}
}
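
/*
 * Callers typically reach this through the single-ID wrappers used in
 * __imsic_local_sync() below; __imsic_id_set_enable(id), for instance,
 * presumably expands to __imsic_eix_update(id, 1, false, true) in
 * irq-riscv-imsic-state.h.
 */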

static void __imsic_local_sync(struct imsic_local_priv *lpriv)
{
	struct imsic_local_config *mlocal;
	struct imsic_vector *vec, *mvec;
	int i;

	lockdep_assert_held(&lpriv->lock);

	for_each_set_bit(i, lpriv->dirty_bitmap, imsic->global.nr_ids + 1) {
		if (!i || i == IMSIC_IPI_ID)
			goto skip;
		vec = &lpriv->vectors[i];

		if (READ_ONCE(vec->enable))
			__imsic_id_set_enable(i);
		else
			__imsic_id_clear_enable(i);

		/*
		 * If the vector was being moved to a new ID on some other
		 * CPU, we may have received an MSI during the movement, so
		 * check the old ID's pending bit and, if set, re-trigger
		 * the new ID on the other CPU via an MMIO write.
		 */
		mvec = READ_ONCE(vec->move);
		WRITE_ONCE(vec->move, NULL);
		if (mvec && mvec != vec) {
			if (__imsic_id_read_clear_pending(i)) {
				mlocal = per_cpu_ptr(imsic->global.local, mvec->cpu);
				writel_relaxed(mvec->local_id, mlocal->msi_va);
			}

			imsic_vector_free(&lpriv->vectors[i]);
		}

skip:
		bitmap_clear(lpriv->dirty_bitmap, i, 1);
	}
}

void imsic_local_sync_all(void)
{
	struct imsic_local_priv *lpriv = this_cpu_ptr(imsic->lpriv);
	unsigned long flags;

	raw_spin_lock_irqsave(&lpriv->lock, flags);
	bitmap_fill(lpriv->dirty_bitmap, imsic->global.nr_ids + 1);
	__imsic_local_sync(lpriv);
	raw_spin_unlock_irqrestore(&lpriv->lock, flags);
}

void imsic_local_delivery(bool enable)
{
	if (enable) {
		imsic_csr_write(IMSIC_EITHRESHOLD, IMSIC_ENABLE_EITHRESHOLD);
		imsic_csr_write(IMSIC_EIDELIVERY, IMSIC_ENABLE_EIDELIVERY);
		return;
	}

	imsic_csr_write(IMSIC_EIDELIVERY, IMSIC_DISABLE_EIDELIVERY);
	imsic_csr_write(IMSIC_EITHRESHOLD, IMSIC_DISABLE_EITHRESHOLD);
}
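
/*
 * Note the ordering above: when enabling, the threshold is opened before
 * delivery is switched on; when disabling, delivery is switched off
 * before the threshold is raised. Delivery is thus the outer gate, and
 * no identity can be delivered while the interrupt file is only
 * partially configured.
 */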

#ifdef CONFIG_SMP
static void imsic_local_timer_callback(struct timer_list *timer)
{
	struct imsic_local_priv *lpriv = this_cpu_ptr(imsic->lpriv);
	unsigned long flags;

	raw_spin_lock_irqsave(&lpriv->lock, flags);
	__imsic_local_sync(lpriv);
	raw_spin_unlock_irqrestore(&lpriv->lock, flags);
}

static void __imsic_remote_sync(struct imsic_local_priv *lpriv, unsigned int cpu)
{
	lockdep_assert_held(&lpriv->lock);

	/*
	 * The spinlock acquire/release semantics ensure that changes
	 * to vector enable, vector move and dirty bitmap are visible
	 * to the target CPU.
	 */

	/*
	 * We schedule a timer on the target CPU if the target CPU is
	 * not the same as the current CPU. An offline CPU will
	 * unconditionally synchronize IDs through imsic_starting_cpu()
	 * when it is brought up.
	 */
	if (cpu_online(cpu)) {
		if (cpu == smp_processor_id()) {
			__imsic_local_sync(lpriv);
			return;
		}

		if (!timer_pending(&lpriv->timer)) {
			lpriv->timer.expires = jiffies + 1;
			add_timer_on(&lpriv->timer, cpu);
		}
	}
}
#else
static void __imsic_remote_sync(struct imsic_local_priv *lpriv, unsigned int cpu)
{
	lockdep_assert_held(&lpriv->lock);
	__imsic_local_sync(lpriv);
}
#endif

void imsic_vector_mask(struct imsic_vector *vec)
{
	struct imsic_local_priv *lpriv;

	lpriv = per_cpu_ptr(imsic->lpriv, vec->cpu);
	if (WARN_ON_ONCE(&lpriv->vectors[vec->local_id] != vec))
		return;

	/*
	 * This function is called through the Linux irq subsystem
	 * with irqs disabled, so there is no need to save/restore
	 * irq flags.
	 */

	raw_spin_lock(&lpriv->lock);

	WRITE_ONCE(vec->enable, false);
	bitmap_set(lpriv->dirty_bitmap, vec->local_id, 1);
	__imsic_remote_sync(lpriv, vec->cpu);

	raw_spin_unlock(&lpriv->lock);
}

void imsic_vector_unmask(struct imsic_vector *vec)
{
	struct imsic_local_priv *lpriv;

	lpriv = per_cpu_ptr(imsic->lpriv, vec->cpu);
	if (WARN_ON_ONCE(&lpriv->vectors[vec->local_id] != vec))
		return;

	/*
	 * This function is called through the Linux irq subsystem
	 * with irqs disabled, so there is no need to save/restore
	 * irq flags.
	 */

	raw_spin_lock(&lpriv->lock);

	WRITE_ONCE(vec->enable, true);
	bitmap_set(lpriv->dirty_bitmap, vec->local_id, 1);
	__imsic_remote_sync(lpriv, vec->cpu);

	raw_spin_unlock(&lpriv->lock);
}

static bool imsic_vector_move_update(struct imsic_local_priv *lpriv, struct imsic_vector *vec,
				     bool new_enable, struct imsic_vector *new_move)
{
	unsigned long flags;
	bool enabled;

	raw_spin_lock_irqsave(&lpriv->lock, flags);

	/* Update enable and move details */
	enabled = READ_ONCE(vec->enable);
	WRITE_ONCE(vec->enable, new_enable);
	WRITE_ONCE(vec->move, new_move);

	/* Mark the vector as dirty and synchronize */
	bitmap_set(lpriv->dirty_bitmap, vec->local_id, 1);
	__imsic_remote_sync(lpriv, vec->cpu);

	raw_spin_unlock_irqrestore(&lpriv->lock, flags);

	return enabled;
}

void imsic_vector_move(struct imsic_vector *old_vec, struct imsic_vector *new_vec)
{
	struct imsic_local_priv *old_lpriv, *new_lpriv;
	bool enabled;

	if (WARN_ON_ONCE(old_vec->cpu == new_vec->cpu))
		return;

	old_lpriv = per_cpu_ptr(imsic->lpriv, old_vec->cpu);
	if (WARN_ON_ONCE(&old_lpriv->vectors[old_vec->local_id] != old_vec))
		return;

	new_lpriv = per_cpu_ptr(imsic->lpriv, new_vec->cpu);
	if (WARN_ON_ONCE(&new_lpriv->vectors[new_vec->local_id] != new_vec))
		return;

	/*
	 * Move and re-trigger the new vector based on the pending
	 * state of the old vector, because we might get a device
	 * interrupt on the old vector while the device was being
	 * moved to the new vector.
	 */
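	/*
	 * Rough sequence (illustrative): the old CPU disables the old
	 * vector and records new_vec as its move target; its next local
	 * sync read-clears the old pending bit and, if it was set,
	 * re-triggers the interrupt by writing new_vec->local_id to the
	 * new CPU's MSI page. The new vector inherits the old vector's
	 * enable state via the 'enabled' return value below.
	 */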
	enabled = imsic_vector_move_update(old_lpriv, old_vec, false, new_vec);
	imsic_vector_move_update(new_lpriv, new_vec, enabled, new_vec);
}

#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
void imsic_vector_debug_show(struct seq_file *m, struct imsic_vector *vec, int ind)
{
	struct imsic_local_priv *lpriv;
	struct imsic_vector *mvec;
	bool is_enabled;

	lpriv = per_cpu_ptr(imsic->lpriv, vec->cpu);
	if (WARN_ON_ONCE(&lpriv->vectors[vec->local_id] != vec))
		return;

	is_enabled = imsic_vector_isenabled(vec);
	mvec = imsic_vector_get_move(vec);

	seq_printf(m, "%*starget_cpu      : %5u\n", ind, "", vec->cpu);
	seq_printf(m, "%*starget_local_id : %5u\n", ind, "", vec->local_id);
	seq_printf(m, "%*sis_reserved     : %5u\n", ind, "",
		   (vec->local_id <= IMSIC_IPI_ID) ? 1 : 0);
	seq_printf(m, "%*sis_enabled      : %5u\n", ind, "", is_enabled ? 1 : 0);
	seq_printf(m, "%*sis_move_pending : %5u\n", ind, "", mvec ? 1 : 0);
	if (mvec) {
		seq_printf(m, "%*smove_cpu        : %5u\n", ind, "", mvec->cpu);
		seq_printf(m, "%*smove_local_id   : %5u\n", ind, "", mvec->local_id);
	}
}

void imsic_vector_debug_show_summary(struct seq_file *m, int ind)
{
	irq_matrix_debug_show(m, imsic->matrix, ind);
}
#endif

struct imsic_vector *imsic_vector_from_local_id(unsigned int cpu, unsigned int local_id)
{
	struct imsic_local_priv *lpriv = per_cpu_ptr(imsic->lpriv, cpu);

	if (!lpriv || imsic->global.nr_ids < local_id)
		return NULL;

	return &lpriv->vectors[local_id];
}

struct imsic_vector *imsic_vector_alloc(unsigned int hwirq, const struct cpumask *mask)
{
	struct imsic_vector *vec = NULL;
	struct imsic_local_priv *lpriv;
	unsigned long flags;
	unsigned int cpu;
	int local_id;

	raw_spin_lock_irqsave(&imsic->matrix_lock, flags);
	local_id = irq_matrix_alloc(imsic->matrix, mask, false, &cpu);
	raw_spin_unlock_irqrestore(&imsic->matrix_lock, flags);
	if (local_id < 0)
		return NULL;

	lpriv = per_cpu_ptr(imsic->lpriv, cpu);
	vec = &lpriv->vectors[local_id];
	vec->hwirq = hwirq;
	vec->enable = false;
	vec->move = NULL;

	return vec;
}

void imsic_vector_free(struct imsic_vector *vec)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&imsic->matrix_lock, flags);
	vec->hwirq = UINT_MAX;
	irq_matrix_free(imsic->matrix, vec->cpu, vec->local_id, false);
	raw_spin_unlock_irqrestore(&imsic->matrix_lock, flags);
}

static void __init imsic_local_cleanup(void)
{
	struct imsic_local_priv *lpriv;
	int cpu;

	for_each_possible_cpu(cpu) {
		lpriv = per_cpu_ptr(imsic->lpriv, cpu);

		bitmap_free(lpriv->dirty_bitmap);
		kfree(lpriv->vectors);
	}

	free_percpu(imsic->lpriv);
}

static int __init imsic_local_init(void)
{
	struct imsic_global_config *global = &imsic->global;
	struct imsic_local_priv *lpriv;
	struct imsic_vector *vec;
	int cpu, i;

	/* Allocate per-CPU private state */
	imsic->lpriv = alloc_percpu(typeof(*imsic->lpriv));
	if (!imsic->lpriv)
		return -ENOMEM;

	/* Setup per-CPU private state */
	for_each_possible_cpu(cpu) {
		lpriv = per_cpu_ptr(imsic->lpriv, cpu);

		raw_spin_lock_init(&lpriv->lock);

		/* Allocate dirty bitmap */
		lpriv->dirty_bitmap = bitmap_zalloc(global->nr_ids + 1, GFP_KERNEL);
		if (!lpriv->dirty_bitmap)
			goto fail_local_cleanup;

#ifdef CONFIG_SMP
		/* Setup lazy timer for synchronization */
		timer_setup(&lpriv->timer, imsic_local_timer_callback, TIMER_PINNED);
#endif

		/* Allocate vector array */
		lpriv->vectors = kcalloc(global->nr_ids + 1, sizeof(*lpriv->vectors),
					 GFP_KERNEL);
		if (!lpriv->vectors)
			goto fail_local_cleanup;

		/* Setup vector array */
		for (i = 0; i <= global->nr_ids; i++) {
			vec = &lpriv->vectors[i];
			vec->cpu = cpu;
			vec->local_id = i;
			vec->hwirq = UINT_MAX;
		}
	}

	return 0;

fail_local_cleanup:
	imsic_local_cleanup();
	return -ENOMEM;
}

void imsic_state_online(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&imsic->matrix_lock, flags);
	irq_matrix_online(imsic->matrix);
	raw_spin_unlock_irqrestore(&imsic->matrix_lock, flags);
}

void imsic_state_offline(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&imsic->matrix_lock, flags);
	irq_matrix_offline(imsic->matrix);
	raw_spin_unlock_irqrestore(&imsic->matrix_lock, flags);

#ifdef CONFIG_SMP
	struct imsic_local_priv *lpriv = this_cpu_ptr(imsic->lpriv);

	raw_spin_lock_irqsave(&lpriv->lock, flags);
	WARN_ON_ONCE(try_to_del_timer_sync(&lpriv->timer) < 0);
	raw_spin_unlock_irqrestore(&lpriv->lock, flags);
#endif
}

static int __init imsic_matrix_init(void)
{
	struct imsic_global_config *global = &imsic->global;

	raw_spin_lock_init(&imsic->matrix_lock);
	imsic->matrix = irq_alloc_matrix(global->nr_ids + 1,
					 0, global->nr_ids + 1);
	if (!imsic->matrix)
		return -ENOMEM;

	/* Reserve ID#0 because it is special and never implemented */
	irq_matrix_assign_system(imsic->matrix, 0, false);

	/* Reserve IPI ID because it is special and used internally */
	irq_matrix_assign_system(imsic->matrix, IMSIC_IPI_ID, false);

	return 0;
}

static int __init imsic_populate_global_dt(struct fwnode_handle *fwnode,
					   struct imsic_global_config *global,
					   u32 *nr_parent_irqs)
{
	int rc;

	/* Find number of guest index bits in MSI address */
	rc = of_property_read_u32(to_of_node(fwnode), "riscv,guest-index-bits",
				  &global->guest_index_bits);
	if (rc)
		global->guest_index_bits = 0;

	/* Find number of HART index bits */
	rc = of_property_read_u32(to_of_node(fwnode), "riscv,hart-index-bits",
				  &global->hart_index_bits);
	if (rc) {
		/* Assume default value */
		global->hart_index_bits = __fls(*nr_parent_irqs);
		if (BIT(global->hart_index_bits) < *nr_parent_irqs)
			global->hart_index_bits++;
	}

	/* Find number of group index bits */
	rc = of_property_read_u32(to_of_node(fwnode), "riscv,group-index-bits",
				  &global->group_index_bits);
	if (rc)
		global->group_index_bits = 0;

	/*
	 * Find the first bit position of the group index.
	 * If not specified, assume the default APLIC-IMSIC configuration.
	 */
	rc = of_property_read_u32(to_of_node(fwnode), "riscv,group-index-shift",
				  &global->group_index_shift);
	if (rc)
		global->group_index_shift = IMSIC_MMIO_PAGE_SHIFT * 2;

	/* Find number of interrupt identities */
	rc = of_property_read_u32(to_of_node(fwnode), "riscv,num-ids",
				  &global->nr_ids);
	if (rc) {
		pr_err("%pfwP: number of interrupt identities not found\n", fwnode);
		return rc;
	}

	/* Find number of guest interrupt identities */
	rc = of_property_read_u32(to_of_node(fwnode), "riscv,num-guest-ids",
				  &global->nr_guest_ids);
	if (rc)
		global->nr_guest_ids = global->nr_ids;

	return 0;
}
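
/*
 * An illustrative devicetree fragment carrying the properties parsed
 * above (values are made up; see the riscv,imsics binding for the
 * authoritative schema):
 *
 *	imsic: interrupt-controller@28000000 {
 *		compatible = "riscv,imsics";
 *		reg = <0x0 0x28000000 0x0 0x4000>;
 *		interrupts-extended = <&cpu0_intc 9>, <&cpu1_intc 9>;
 *		interrupt-controller;
 *		#interrupt-cells = <0>;
 *		msi-controller;
 *		#msi-cells = <0>;
 *		riscv,num-ids = <127>;
 *	};
 */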

static int __init imsic_populate_global_acpi(struct fwnode_handle *fwnode,
					     struct imsic_global_config *global,
					     u32 *nr_parent_irqs, void *opaque)
{
	struct acpi_madt_imsic *imsic = (struct acpi_madt_imsic *)opaque;

	global->guest_index_bits = imsic->guest_index_bits;
	global->hart_index_bits = imsic->hart_index_bits;
	global->group_index_bits = imsic->group_index_bits;
	global->group_index_shift = imsic->group_index_shift;
	global->nr_ids = imsic->num_ids;
	global->nr_guest_ids = imsic->num_guest_ids;
	return 0;
}

static int __init imsic_get_parent_hartid(struct fwnode_handle *fwnode,
					  u32 index, unsigned long *hartid)
{
	struct of_phandle_args parent;
	int rc;

	if (!is_of_node(fwnode)) {
		if (hartid)
			*hartid = acpi_rintc_index_to_hartid(index);

		if (!hartid || (*hartid == INVALID_HARTID))
			return -EINVAL;

		return 0;
	}

	rc = of_irq_parse_one(to_of_node(fwnode), index, &parent);
	if (rc)
		return rc;

	/*
	 * Skip interrupts other than external interrupts for
	 * the current privilege level.
	 */
	if (parent.args[0] != RV_IRQ_EXT)
		return -EINVAL;

	return riscv_of_parent_hartid(parent.np, hartid);
}

static int __init imsic_get_mmio_resource(struct fwnode_handle *fwnode,
					  u32 index, struct resource *res)
{
	if (!is_of_node(fwnode))
		return acpi_rintc_get_imsic_mmio_info(index, res);

	return of_address_to_resource(to_of_node(fwnode), index, res);
}

static int __init imsic_parse_fwnode(struct fwnode_handle *fwnode,
				     struct imsic_global_config *global,
				     u32 *nr_parent_irqs,
				     u32 *nr_mmios,
				     void *opaque)
{
	unsigned long hartid;
	struct resource res;
	int rc;
	u32 i;

	*nr_parent_irqs = 0;
	*nr_mmios = 0;

	/* Find number of parent interrupts */
	while (!imsic_get_parent_hartid(fwnode, *nr_parent_irqs, &hartid))
		(*nr_parent_irqs)++;
	if (!*nr_parent_irqs) {
		pr_err("%pfwP: no parent irqs available\n", fwnode);
		return -EINVAL;
	}

	if (is_of_node(fwnode))
		rc = imsic_populate_global_dt(fwnode, global, nr_parent_irqs);
	else
		rc = imsic_populate_global_acpi(fwnode, global, nr_parent_irqs, opaque);

	if (rc)
		return rc;

	/* Sanity check guest index bits */
	i = BITS_PER_LONG - IMSIC_MMIO_PAGE_SHIFT;
	if (i < global->guest_index_bits) {
		pr_err("%pfwP: guest index bits too big\n", fwnode);
		return -EINVAL;
	}

	/* Sanity check HART index bits */
	i = BITS_PER_LONG - IMSIC_MMIO_PAGE_SHIFT - global->guest_index_bits;
	if (i < global->hart_index_bits) {
		pr_err("%pfwP: HART index bits too big\n", fwnode);
		return -EINVAL;
	}

	/* Sanity check group index bits */
	i = BITS_PER_LONG - IMSIC_MMIO_PAGE_SHIFT -
	    global->guest_index_bits - global->hart_index_bits;
	if (i < global->group_index_bits) {
		pr_err("%pfwP: group index bits too big\n", fwnode);
		return -EINVAL;
	}

	/* Sanity check group index shift */
	i = global->group_index_bits + global->group_index_shift - 1;
	if (i >= BITS_PER_LONG) {
		pr_err("%pfwP: group index shift too big\n", fwnode);
		return -EINVAL;
	}

	/* Sanity check number of interrupt identities */
	if (global->nr_ids < IMSIC_MIN_ID ||
	    global->nr_ids >= IMSIC_MAX_ID ||
	    (global->nr_ids & IMSIC_MIN_ID) != IMSIC_MIN_ID) {
		pr_err("%pfwP: invalid number of interrupt identities\n", fwnode);
		return -EINVAL;
	}

	/* Sanity check number of guest interrupt identities */
	if (global->nr_guest_ids < IMSIC_MIN_ID ||
	    global->nr_guest_ids >= IMSIC_MAX_ID ||
	    (global->nr_guest_ids & IMSIC_MIN_ID) != IMSIC_MIN_ID) {
		pr_err("%pfwP: invalid number of guest interrupt identities\n", fwnode);
		return -EINVAL;
	}
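
	/*
	 * Assuming IMSIC_MIN_ID is 63 (the AIA minimum), the two
	 * "& IMSIC_MIN_ID" checks above require the low six bits of
	 * nr_ids and nr_guest_ids to be set, i.e. the count of valid
	 * identities plus one must be a multiple of 64: 63, 127, 191,
	 * ..., 2047 pass, while e.g. 64 or 100 are rejected.
	 */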

	/* Compute base address */
	rc = imsic_get_mmio_resource(fwnode, 0, &res);
	if (rc) {
		pr_err("%pfwP: first MMIO resource not found\n", fwnode);
		return -EINVAL;
	}
	global->base_addr = res.start;
	global->base_addr &= ~(BIT(global->guest_index_bits +
				   global->hart_index_bits +
				   IMSIC_MMIO_PAGE_SHIFT) - 1);
	global->base_addr &= ~((BIT(global->group_index_bits) - 1) <<
			       global->group_index_shift);
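
	/*
	 * Illustrative example (assuming IMSIC_MMIO_PAGE_SHIFT == 12):
	 * with guest_index_bits == 0, hart_index_bits == 1 and
	 * group_index_bits == 0, a resource starting at 0x28001000 is
	 * masked down to a base_addr of 0x28000000, i.e. the per-hart
	 * and per-guest page offsets are stripped off.
	 */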

	/* Find number of MMIO register sets */
	while (!imsic_get_mmio_resource(fwnode, *nr_mmios, &res))
		(*nr_mmios)++;

	return 0;
}

int __init imsic_setup_state(struct fwnode_handle *fwnode, void *opaque)
{
	u32 i, j, index, nr_parent_irqs, nr_mmios, nr_handlers = 0;
	struct imsic_global_config *global;
	struct imsic_local_config *local;
	void __iomem **mmios_va = NULL;
	struct resource *mmios = NULL;
	unsigned long reloff, hartid;
	phys_addr_t base_addr;
	int rc, cpu;

	/*
	 * Only one IMSIC instance is allowed per platform for a clean
	 * implementation of SMP IRQ affinity and per-CPU IPIs.
	 *
	 * This means that on a multi-socket (or multi-die) platform we
	 * will have multiple MMIO regions for one IMSIC instance.
	 */
	if (imsic) {
		pr_err("%pfwP: already initialized hence ignoring\n", fwnode);
		return -EALREADY;
	}

	if (!riscv_isa_extension_available(NULL, SxAIA)) {
		pr_err("%pfwP: AIA support not available\n", fwnode);
		return -ENODEV;
	}

	imsic = kzalloc(sizeof(*imsic), GFP_KERNEL);
	if (!imsic)
		return -ENOMEM;
	imsic->fwnode = fwnode;
	global = &imsic->global;

	global->local = alloc_percpu(typeof(*global->local));
	if (!global->local) {
		rc = -ENOMEM;
		goto out_free_priv;
	}

	/* Parse IMSIC fwnode */
	rc = imsic_parse_fwnode(fwnode, global, &nr_parent_irqs, &nr_mmios, opaque);
	if (rc)
		goto out_free_local;

	/* Allocate MMIO resource array */
	mmios = kcalloc(nr_mmios, sizeof(*mmios), GFP_KERNEL);
	if (!mmios) {
		rc = -ENOMEM;
		goto out_free_local;
	}

	/* Allocate MMIO virtual address array */
	mmios_va = kcalloc(nr_mmios, sizeof(*mmios_va), GFP_KERNEL);
	if (!mmios_va) {
		rc = -ENOMEM;
		goto out_iounmap;
	}

	/* Parse and map MMIO register sets */
	for (i = 0; i < nr_mmios; i++) {
		rc = imsic_get_mmio_resource(fwnode, i, &mmios[i]);
		if (rc) {
			pr_err("%pfwP: unable to parse MMIO regset %d\n", fwnode, i);
			goto out_iounmap;
		}

		base_addr = mmios[i].start;
		base_addr &= ~(BIT(global->guest_index_bits +
				   global->hart_index_bits +
				   IMSIC_MMIO_PAGE_SHIFT) - 1);
		base_addr &= ~((BIT(global->group_index_bits) - 1) <<
			       global->group_index_shift);
		if (base_addr != global->base_addr) {
			rc = -EINVAL;
			pr_err("%pfwP: address mismatch for regset %d\n", fwnode, i);
			goto out_iounmap;
		}

		mmios_va[i] = ioremap(mmios[i].start, resource_size(&mmios[i]));
		if (!mmios_va[i]) {
			rc = -EIO;
			pr_err("%pfwP: unable to map MMIO regset %d\n", fwnode, i);
			goto out_iounmap;
		}
	}

	/* Initialize local (or per-CPU) state */
	rc = imsic_local_init();
	if (rc) {
		pr_err("%pfwP: failed to initialize local state\n", fwnode);
		goto out_iounmap;
	}

	/* Configure handlers for target CPUs */
	for (i = 0; i < nr_parent_irqs; i++) {
		rc = imsic_get_parent_hartid(fwnode, i, &hartid);
		if (rc) {
			pr_warn("%pfwP: hart ID for parent irq%d not found\n", fwnode, i);
			continue;
		}

		cpu = riscv_hartid_to_cpuid(hartid);
		if (cpu < 0) {
			pr_warn("%pfwP: invalid cpuid for parent irq%d\n", fwnode, i);
			continue;
		}

		/* Find MMIO location of MSI page */
		index = nr_mmios;
		reloff = i * BIT(global->guest_index_bits) *
			 IMSIC_MMIO_PAGE_SZ;
		for (j = 0; j < nr_mmios; j++) {
			if (reloff < resource_size(&mmios[j])) {
				index = j;
				break;
			}

			/*
			 * MMIO region size may not be aligned to
			 * BIT(global->guest_index_bits) * IMSIC_MMIO_PAGE_SZ
			 * if holes are present.
			 */
			reloff -= ALIGN(resource_size(&mmios[j]),
					BIT(global->guest_index_bits) * IMSIC_MMIO_PAGE_SZ);
		}
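		/*
		 * Illustrative walk-through (assuming guest_index_bits == 0
		 * and IMSIC_MMIO_PAGE_SZ == 0x1000): with two register sets
		 * of 4 pages each and i == 5, reloff starts at 0x5000, does
		 * not fit in regset 0 (size 0x4000), drops to 0x1000 and
		 * lands in regset 1, i.e. index == 1.
		 */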
		if (index >= nr_mmios) {
			pr_warn("%pfwP: MMIO not found for parent irq%d\n", fwnode, i);
			continue;
		}

		local = per_cpu_ptr(global->local, cpu);
		local->msi_pa = mmios[index].start + reloff;
		local->msi_va = mmios_va[index] + reloff;

		nr_handlers++;
	}

	/* If no CPU handlers were found then we can't take interrupts */
	if (!nr_handlers) {
		pr_err("%pfwP: No CPU handlers found\n", fwnode);
		rc = -ENODEV;
		goto out_local_cleanup;
	}

	/* Initialize matrix allocator */
	rc = imsic_matrix_init();
	if (rc) {
		pr_err("%pfwP: failed to create matrix allocator\n", fwnode);
		goto out_local_cleanup;
	}

	/* We don't need the MMIO arrays anymore so let's free them up */
	kfree(mmios_va);
	kfree(mmios);

	return 0;

out_local_cleanup:
	imsic_local_cleanup();
out_iounmap:
	/* mmios_va may still be NULL if its allocation failed above */
	for (i = 0; mmios_va && i < nr_mmios; i++) {
		if (mmios_va[i])
			iounmap(mmios_va[i]);
	}
	kfree(mmios_va);
	kfree(mmios);
out_free_local:
	free_percpu(imsic->global.local);
out_free_priv:
	kfree(imsic);
	imsic = NULL;
	return rc;
}