xref: /linux/drivers/irqchip/irq-mips-gic.c (revision 9d796e66230205cd3366f5660387bd9ecca9d336)
1 /*
2  * This file is subject to the terms and conditions of the GNU General Public
3  * License.  See the file "COPYING" in the main directory of this archive
4  * for more details.
5  *
6  * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org)
7  * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
8  */
9 #include <linux/bitmap.h>
10 #include <linux/clocksource.h>
11 #include <linux/init.h>
12 #include <linux/interrupt.h>
13 #include <linux/irq.h>
14 #include <linux/irqchip/mips-gic.h>
15 #include <linux/of_address.h>
16 #include <linux/sched.h>
17 #include <linux/smp.h>
18 
19 #include <asm/mips-cm.h>
20 #include <asm/setup.h>
21 #include <asm/traps.h>
22 
23 #include <dt-bindings/interrupt-controller/mips-gic.h>
24 
25 #include "irqchip.h"
26 
unsigned int gic_present;	/* non-zero once a GIC has been detected */

/* Per-CPU bitmap of which shared interrupts are routed to that CPU. */
struct gic_pcpu_mask {
	DECLARE_BITMAP(pcpu_mask, GIC_MAX_INTRS);
};

static void __iomem *gic_base;		/* ioremapped GIC register block */
static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
static DEFINE_SPINLOCK(gic_lock);	/* serialises GIC register updates */
static struct irq_domain *gic_irq_domain;
static int gic_shared_intrs;		/* number of shared interrupts */
static int gic_vpes;			/* number of VPEs served by the GIC */
static unsigned int gic_cpu_pin;	/* CPU pin shared interrupts use */
static unsigned int timer_cpu_pin;	/* CPU pin the local timer uses */
static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;

static void __gic_irq_dispatch(void);
44 
/* Read the 32-bit GIC register at byte offset @reg. */
static inline unsigned int gic_read(unsigned int reg)
{
	return __raw_readl(gic_base + reg);
}
49 
/* Write @val to the 32-bit GIC register at byte offset @reg. */
static inline void gic_write(unsigned int reg, unsigned int val)
{
	__raw_writel(val, gic_base + reg);
}
54 
/*
 * Read-modify-write the GIC register at @reg: clear the bits in @mask,
 * then OR in @val. Callers must provide any locking required.
 */
static inline void gic_update_bits(unsigned int reg, unsigned int mask,
				   unsigned int val)
{
	unsigned int cur;

	cur = gic_read(reg);
	cur = (cur & ~mask) | val;
	gic_write(reg, cur);
}
65 
/* Mask (disable) shared interrupt @intr via the reset-mask register. */
static inline void gic_reset_mask(unsigned int intr)
{
	gic_write(GIC_REG(SHARED, GIC_SH_RMASK) + GIC_INTR_OFS(intr),
		  1 << GIC_INTR_BIT(intr));
}
71 
/* Unmask (enable) shared interrupt @intr via the set-mask register. */
static inline void gic_set_mask(unsigned int intr)
{
	gic_write(GIC_REG(SHARED, GIC_SH_SMASK) + GIC_INTR_OFS(intr),
		  1 << GIC_INTR_BIT(intr));
}
77 
/* Set the polarity (GIC_POL_POS or GIC_POL_NEG) of shared interrupt @intr. */
static inline void gic_set_polarity(unsigned int intr, unsigned int pol)
{
	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_POLARITY) +
			GIC_INTR_OFS(intr), 1 << GIC_INTR_BIT(intr),
			pol << GIC_INTR_BIT(intr));
}
84 
/* Select level (GIC_TRIG_LEVEL) or edge (GIC_TRIG_EDGE) triggering for @intr. */
static inline void gic_set_trigger(unsigned int intr, unsigned int trig)
{
	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_TRIGGER) +
			GIC_INTR_OFS(intr), 1 << GIC_INTR_BIT(intr),
			trig << GIC_INTR_BIT(intr));
}
91 
/* Enable or disable dual-edge (trigger on both edges) mode for @intr. */
static inline void gic_set_dual_edge(unsigned int intr, unsigned int dual)
{
	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_DUAL) + GIC_INTR_OFS(intr),
			1 << GIC_INTR_BIT(intr),
			dual << GIC_INTR_BIT(intr));
}
98 
/*
 * Route shared interrupt @intr to CPU interrupt pin @pin, setting the
 * map-valid bit at the same time.
 */
static inline void gic_map_to_pin(unsigned int intr, unsigned int pin)
{
	gic_write(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_PIN_BASE) +
		  GIC_SH_MAP_TO_PIN(intr), GIC_MAP_TO_PIN_MSK | pin);
}
104 
/* Route shared interrupt @intr to VPE (CPU) @vpe. */
static inline void gic_map_to_vpe(unsigned int intr, unsigned int vpe)
{
	gic_write(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_VPE_BASE) +
		  GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe),
		  GIC_SH_MAP_TO_VPE_REG_BIT(vpe));
}
111 
112 #ifdef CONFIG_CLKSRC_MIPS_GIC
/*
 * Read the 64-bit GIC shared counter. The high word is re-read until it
 * is stable across the low-word read, so a carry between the two 32-bit
 * accesses cannot produce a torn value.
 */
cycle_t gic_read_count(void)
{
	unsigned int hi, hi2, lo;

	do {
		hi = gic_read(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
		lo = gic_read(GIC_REG(SHARED, GIC_SH_COUNTER_31_00));
		hi2 = gic_read(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
	} while (hi2 != hi);

	return (((cycle_t) hi) << 32) + lo;
}
125 
126 unsigned int gic_get_count_width(void)
127 {
128 	unsigned int bits, config;
129 
130 	config = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
131 	bits = 32 + 4 * ((config & GIC_SH_CONFIG_COUNTBITS_MSK) >>
132 			 GIC_SH_CONFIG_COUNTBITS_SHF);
133 
134 	return bits;
135 }
136 
/*
 * Program the current VPE's 64-bit timer compare value. The two 32-bit
 * halves are written separately; callers must tolerate or prevent a
 * match firing between the writes.
 */
void gic_write_compare(cycle_t cnt)
{
	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI),
				(int)(cnt >> 32));
	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO),
				(int)(cnt & 0xffffffff));
}
144 
/*
 * Program the timer compare value of another CPU's VPE. Interrupts are
 * disabled so the write to GIC_VPE_OTHER_ADDR and the subsequent
 * accesses through the VPE_OTHER window cannot be interleaved with
 * another user of that window on this CPU.
 */
void gic_write_cpu_compare(cycle_t cnt, int cpu)
{
	unsigned long flags;

	local_irq_save(flags);

	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), cpu);
	gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_HI),
				(int)(cnt >> 32));
	gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_LO),
				(int)(cnt & 0xffffffff));

	local_irq_restore(flags);
}
159 
/*
 * Read back the current VPE's 64-bit compare value. Unlike the counter,
 * the compare value only changes under software control, so no retry
 * loop is needed.
 */
cycle_t gic_read_compare(void)
{
	unsigned int hi, lo;

	hi = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI));
	lo = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO));

	return (((cycle_t) hi) << 32) + lo;
}
169 
170 void gic_start_count(void)
171 {
172 	u32 gicconfig;
173 
174 	/* Start the counter */
175 	gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
176 	gicconfig &= ~(1 << GIC_SH_CONFIG_COUNTSTOP_SHF);
177 	gic_write(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
178 }
179 
180 void gic_stop_count(void)
181 {
182 	u32 gicconfig;
183 
184 	/* Stop the counter */
185 	gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
186 	gicconfig |= 1 << GIC_SH_CONFIG_COUNTSTOP_SHF;
187 	gic_write(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
188 }
189 
190 #endif
191 
/*
 * Return true if local interrupt @intr can be routed through the GIC on
 * the current VPE. In EIC mode everything is routable; otherwise the
 * per-VPE control register advertises which sources are routable.
 */
static bool gic_local_irq_is_routable(int intr)
{
	u32 vpe_ctl;

	/* All local interrupts are routable in EIC mode. */
	if (cpu_has_veic)
		return true;

	vpe_ctl = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_CTL));
	switch (intr) {
	case GIC_LOCAL_INT_TIMER:
		return vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK;
	case GIC_LOCAL_INT_PERFCTR:
		return vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK;
	case GIC_LOCAL_INT_FDC:
		return vpe_ctl & GIC_VPE_CTL_FDC_RTBL_MSK;
	case GIC_LOCAL_INT_SWINT0:
	case GIC_LOCAL_INT_SWINT1:
		return vpe_ctl & GIC_VPE_CTL_SWINT_RTBL_MSK;
	default:
		/* All other local sources have no routability bit. */
		return true;
	}
}
215 
/*
 * EIC mode: select the register shadow set @set to be used when interrupt
 * vector @irq is taken on the current VPE.
 */
static void gic_bind_eic_interrupt(int irq, int set)
{
	/* Convert irq vector # to hw int # */
	irq -= GIC_PIN_TO_VEC_OFFSET;

	/* Set irq to use shadow set */
	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_EIC_SHADOW_SET_BASE) +
		  GIC_VPE_EIC_SS(irq), set);
}
225 
/* Raise shared interrupt @intr by setting its bit in the WEDGE register. */
void gic_send_ipi(unsigned int intr)
{
	gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_SET(intr));
}
230 
/*
 * Return the Linux IRQ to use for the CP0 count/compare timer: the GIC
 * local timer mapping when it is routable, otherwise the raw CP0
 * compare interrupt.
 */
int gic_get_c0_compare_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER))
		return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_TIMER));
}
238 
/*
 * Return the Linux IRQ to use for the performance counter overflow
 * interrupt, or -1 when no dedicated interrupt exists.
 */
int gic_get_c0_perfcount_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_PERFCTR)) {
		/* Is the performance counter shared with the timer? */
		if (cp0_perfcount_irq < 0)
			return -1;
		return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
	}
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_PERFCTR));
}
250 
251 static void gic_handle_shared_int(void)
252 {
253 	unsigned int i, intr, virq;
254 	unsigned long *pcpu_mask;
255 	unsigned long pending_reg, intrmask_reg;
256 	DECLARE_BITMAP(pending, GIC_MAX_INTRS);
257 	DECLARE_BITMAP(intrmask, GIC_MAX_INTRS);
258 
259 	/* Get per-cpu bitmaps */
260 	pcpu_mask = pcpu_masks[smp_processor_id()].pcpu_mask;
261 
262 	pending_reg = GIC_REG(SHARED, GIC_SH_PEND);
263 	intrmask_reg = GIC_REG(SHARED, GIC_SH_MASK);
264 
265 	for (i = 0; i < BITS_TO_LONGS(gic_shared_intrs); i++) {
266 		pending[i] = gic_read(pending_reg);
267 		intrmask[i] = gic_read(intrmask_reg);
268 		pending_reg += 0x4;
269 		intrmask_reg += 0x4;
270 	}
271 
272 	bitmap_and(pending, pending, intrmask, gic_shared_intrs);
273 	bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs);
274 
275 	intr = find_first_bit(pending, gic_shared_intrs);
276 	while (intr != gic_shared_intrs) {
277 		virq = irq_linear_revmap(gic_irq_domain,
278 					 GIC_SHARED_TO_HWIRQ(intr));
279 		do_IRQ(virq);
280 
281 		/* go to next pending bit */
282 		bitmap_clear(pending, intr, 1);
283 		intr = find_first_bit(pending, gic_shared_intrs);
284 	}
285 }
286 
/* irq_chip callback: mask a shared interrupt. */
static void gic_mask_irq(struct irq_data *d)
{
	gic_reset_mask(GIC_HWIRQ_TO_SHARED(d->hwirq));
}
291 
/* irq_chip callback: unmask a shared interrupt. */
static void gic_unmask_irq(struct irq_data *d)
{
	gic_set_mask(GIC_HWIRQ_TO_SHARED(d->hwirq));
}
296 
/* irq_chip callback: acknowledge an edge interrupt by clearing its WEDGE bit. */
static void gic_ack_irq(struct irq_data *d)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);

	gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_CLR(irq));
}
303 
/*
 * irq_chip callback: program polarity/trigger/dual-edge for a shared
 * interrupt and switch its irq_chip and flow handler to the edge or
 * level variant. Unsupported types fall through to level/high.
 */
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	unsigned long flags;
	bool is_edge;

	spin_lock_irqsave(&gic_lock, flags);
	switch (type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_EDGE_FALLING:
		gic_set_polarity(irq, GIC_POL_NEG);
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_EDGE_RISING:
		gic_set_polarity(irq, GIC_POL_POS);
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_EDGE_BOTH:
		/* polarity is irrelevant in this case */
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_ENABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		gic_set_polarity(irq, GIC_POL_NEG);
		gic_set_trigger(irq, GIC_TRIG_LEVEL);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = false;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
	default:
		gic_set_polarity(irq, GIC_POL_POS);
		gic_set_trigger(irq, GIC_TRIG_LEVEL);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = false;
		break;
	}

	/* Edge interrupts need the ack-capable chip and edge flow handler. */
	if (is_edge) {
		__irq_set_chip_handler_name_locked(d->irq,
						   &gic_edge_irq_controller,
						   handle_edge_irq, NULL);
	} else {
		__irq_set_chip_handler_name_locked(d->irq,
						   &gic_level_irq_controller,
						   handle_level_irq, NULL);
	}
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}
358 
359 #ifdef CONFIG_SMP
/*
 * irq_chip callback: route a shared interrupt to the first online CPU in
 * @cpumask. The GIC delivers a shared interrupt to a single VPE only,
 * so just that one CPU's bit is set in the pcpu_masks.
 *
 * NOTE(review): cpus_empty()/first_cpu() are legacy cpumask helpers;
 * newer kernels spell these cpumask_empty()/cpumask_first().
 */
static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
			    bool force)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	cpumask_t	tmp = CPU_MASK_NONE;
	unsigned long	flags;
	int		i;

	cpumask_and(&tmp, cpumask, cpu_online_mask);
	if (cpus_empty(tmp))
		return -EINVAL;

	/* Assumption : cpumask refers to a single CPU */
	spin_lock_irqsave(&gic_lock, flags);

	/* Re-route this IRQ */
	gic_map_to_vpe(irq, first_cpu(tmp));

	/* Update the pcpu_masks */
	for (i = 0; i < NR_CPUS; i++)
		clear_bit(irq, pcpu_masks[i].pcpu_mask);
	set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);

	cpumask_copy(d->affinity, cpumask);
	spin_unlock_irqrestore(&gic_lock, flags);

	return IRQ_SET_MASK_OK_NOCOPY;
}
388 #endif
389 
/* irq_chip for level-triggered shared interrupts (no ack needed). */
static struct irq_chip gic_level_irq_controller = {
	.name			=	"MIPS GIC",
	.irq_mask		=	gic_mask_irq,
	.irq_unmask		=	gic_unmask_irq,
	.irq_set_type		=	gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	=	gic_set_affinity,
#endif
};
399 
/* irq_chip for edge-triggered shared interrupts; acks clear the WEDGE bit. */
static struct irq_chip gic_edge_irq_controller = {
	.name			=	"MIPS GIC",
	.irq_ack		=	gic_ack_irq,
	.irq_mask		=	gic_mask_irq,
	.irq_unmask		=	gic_unmask_irq,
	.irq_set_type		=	gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	=	gic_set_affinity,
#endif
};
410 
411 static void gic_handle_local_int(void)
412 {
413 	unsigned long pending, masked;
414 	unsigned int intr, virq;
415 
416 	pending = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_PEND));
417 	masked = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_MASK));
418 
419 	bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS);
420 
421 	intr = find_first_bit(&pending, GIC_NUM_LOCAL_INTRS);
422 	while (intr != GIC_NUM_LOCAL_INTRS) {
423 		virq = irq_linear_revmap(gic_irq_domain,
424 					 GIC_LOCAL_TO_HWIRQ(intr));
425 		do_IRQ(virq);
426 
427 		/* go to next pending bit */
428 		bitmap_clear(&pending, intr, 1);
429 		intr = find_first_bit(&pending, GIC_NUM_LOCAL_INTRS);
430 	}
431 }
432 
/* irq_chip callback: mask a local interrupt on the current VPE only. */
static void gic_mask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_RMASK), 1 << intr);
}
439 
/* irq_chip callback: unmask a local interrupt on the current VPE only. */
static void gic_unmask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), 1 << intr);
}
446 
/* irq_chip for local interrupts managed per-VPE (percpu devid flow). */
static struct irq_chip gic_local_irq_controller = {
	.name			=	"MIPS GIC Local",
	.irq_mask		=	gic_mask_local_irq,
	.irq_unmask		=	gic_unmask_local_irq,
};
452 
/*
 * Mask a local interrupt on every VPE, via the VPE_OTHER register
 * window. Used for sources the rest of the kernel does not handle with
 * the percpu IRQ API (timer, perf counter). gic_lock serialises the
 * OTHER_ADDR/VPE_OTHER access sequence.
 */
static void gic_mask_local_irq_all_vpes(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	int i;
	unsigned long flags;

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
		gic_write(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << intr);
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}
466 
/*
 * Unmask a local interrupt on every VPE, via the VPE_OTHER register
 * window. Counterpart of gic_mask_local_irq_all_vpes(); gic_lock
 * serialises the OTHER_ADDR/VPE_OTHER access sequence.
 */
static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	int i;
	unsigned long flags;

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
		gic_write(GIC_REG(VPE_OTHER, GIC_VPE_SMASK), 1 << intr);
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}
480 
/* irq_chip for local interrupts that are masked/unmasked on all VPEs at once. */
static struct irq_chip gic_all_vpes_local_irq_controller = {
	.name			=	"MIPS GIC Local",
	.irq_mask		=	gic_mask_local_irq_all_vpes,
	.irq_unmask		=	gic_unmask_local_irq_all_vpes,
};
486 
/* Top-level GIC dispatch: handle local interrupts first, then shared ones. */
static void __gic_irq_dispatch(void)
{
	gic_handle_local_int();
	gic_handle_shared_int();
}
492 
/* Chained-handler wrapper around __gic_irq_dispatch() for non-EIC mode. */
static void gic_irq_dispatch(unsigned int irq, struct irq_desc *desc)
{
	__gic_irq_dispatch();
}
497 
498 #ifdef CONFIG_MIPS_GIC_IPI
499 static int gic_resched_int_base;
500 static int gic_call_int_base;
501 
/* Return the shared GIC interrupt used for @cpu's reschedule IPI. */
unsigned int plat_ipi_resched_int_xlate(unsigned int cpu)
{
	return gic_resched_int_base + cpu;
}
506 
/* Return the shared GIC interrupt used for @cpu's function-call IPI. */
unsigned int plat_ipi_call_int_xlate(unsigned int cpu)
{
	return gic_call_int_base + cpu;
}
511 
/* IRQ handler for the reschedule IPI. */
static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
	scheduler_ipi();

	return IRQ_HANDLED;
}
518 
/* IRQ handler for the smp_call_function IPI. */
static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
	smp_call_function_interrupt();

	return IRQ_HANDLED;
}
525 
/* irqactions installed on every CPU's resched/call IPI vector. */
static struct irqaction irq_resched = {
	.handler	= ipi_resched_interrupt,
	.flags		= IRQF_PERCPU,
	.name		= "IPI resched"
};

static struct irqaction irq_call = {
	.handler	= ipi_call_interrupt,
	.flags		= IRQF_PERCPU,
	.name		= "IPI call"
};
537 
/*
 * Set up one IPI: map shared interrupt @intr into the IRQ domain, route
 * it exclusively to @cpu, configure it rising-edge, and install @action
 * with the percpu flow handler.
 */
static __init void gic_ipi_init_one(unsigned int intr, int cpu,
				    struct irqaction *action)
{
	int virq = irq_create_mapping(gic_irq_domain,
				      GIC_SHARED_TO_HWIRQ(intr));
	int i;

	gic_map_to_vpe(intr, cpu);
	/* Only the target CPU may handle this interrupt. */
	for (i = 0; i < NR_CPUS; i++)
		clear_bit(intr, pcpu_masks[i].pcpu_mask);
	set_bit(intr, pcpu_masks[cpu].pcpu_mask);

	irq_set_irq_type(virq, IRQ_TYPE_EDGE_RISING);

	irq_set_handler(virq, handle_percpu_irq);
	setup_irq(virq, action);
}
555 
/*
 * Reserve the top shared interrupts for IPIs: one resched and one call
 * interrupt per possible CPU.
 */
static __init void gic_ipi_init(void)
{
	int i;

	/* Use last 2 * nr_cpu_ids interrupts as IPIs */
	gic_resched_int_base = gic_shared_intrs - nr_cpu_ids;
	gic_call_int_base = gic_resched_int_base - nr_cpu_ids;

	for (i = 0; i < nr_cpu_ids; i++) {
		gic_ipi_init_one(gic_call_int_base + i, i, &irq_call);
		gic_ipi_init_one(gic_resched_int_base + i, i, &irq_resched);
	}
}
569 #else
/* IPI support not configured: nothing to set up. */
static inline void gic_ipi_init(void)
{
}
573 #endif
574 
/*
 * Bring the GIC to a known state: all shared interrupts level/high and
 * masked, and every routable local interrupt masked on every VPE.
 */
static void __init gic_basic_init(void)
{
	unsigned int i;

	board_bind_eic_interrupt = &gic_bind_eic_interrupt;

	/* Setup defaults */
	for (i = 0; i < gic_shared_intrs; i++) {
		gic_set_polarity(i, GIC_POL_POS);
		gic_set_trigger(i, GIC_TRIG_LEVEL);
		gic_reset_mask(i);
	}

	/* Mask all routable local interrupts on each VPE. */
	for (i = 0; i < gic_vpes; i++) {
		unsigned int j;

		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
		for (j = 0; j < GIC_NUM_LOCAL_INTRS; j++) {
			if (!gic_local_irq_is_routable(j))
				continue;
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << j);
		}
	}
}
599 
/*
 * Map a local hwirq into the IRQ domain: pick the appropriate percpu
 * irq_chip/handler pair and program the per-VPE map register for this
 * source on every VPE. Returns -EPERM when the source is not routable
 * through the GIC, -EINVAL for an unknown source.
 */
static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
				    irq_hw_number_t hw)
{
	int intr = GIC_HWIRQ_TO_LOCAL(hw);
	int ret = 0;
	int i;
	unsigned long flags;

	if (!gic_local_irq_is_routable(intr))
		return -EPERM;

	/*
	 * HACK: These are all really percpu interrupts, but the rest
	 * of the MIPS kernel code does not use the percpu IRQ API for
	 * the CP0 timer and performance counter interrupts.
	 */
	if (intr != GIC_LOCAL_INT_TIMER && intr != GIC_LOCAL_INT_PERFCTR) {
		irq_set_chip_and_handler(virq,
					 &gic_local_irq_controller,
					 handle_percpu_devid_irq);
		irq_set_percpu_devid(virq);
	} else {
		irq_set_chip_and_handler(virq,
					 &gic_all_vpes_local_irq_controller,
					 handle_percpu_irq);
	}

	/* Program this source's map register on every VPE. */
	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		u32 val = GIC_MAP_TO_PIN_MSK | gic_cpu_pin;

		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);

		switch (intr) {
		case GIC_LOCAL_INT_WD:
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_WD_MAP), val);
			break;
		case GIC_LOCAL_INT_COMPARE:
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_MAP), val);
			break;
		case GIC_LOCAL_INT_TIMER:
			/* CONFIG_MIPS_CMP workaround (see __gic_init) */
			val = GIC_MAP_TO_PIN_MSK | timer_cpu_pin;
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP), val);
			break;
		case GIC_LOCAL_INT_PERFCTR:
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP), val);
			break;
		case GIC_LOCAL_INT_SWINT0:
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_SWINT0_MAP), val);
			break;
		case GIC_LOCAL_INT_SWINT1:
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_SWINT1_MAP), val);
			break;
		case GIC_LOCAL_INT_FDC:
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_FDC_MAP), val);
			break;
		default:
			pr_err("Invalid local IRQ %d\n", intr);
			ret = -EINVAL;
			break;
		}
	}
	spin_unlock_irqrestore(&gic_lock, flags);

	return ret;
}
667 
/*
 * Map a shared hwirq into the IRQ domain: level/high flow by default,
 * routed to the configured CPU pin and to VPE 0 until affinity is set.
 */
static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
				     irq_hw_number_t hw)
{
	int intr = GIC_HWIRQ_TO_SHARED(hw);
	unsigned long flags;

	irq_set_chip_and_handler(virq, &gic_level_irq_controller,
				 handle_level_irq);

	spin_lock_irqsave(&gic_lock, flags);
	gic_map_to_pin(intr, gic_cpu_pin);
	/* Map to VPE 0 by default */
	gic_map_to_vpe(intr, 0);
	set_bit(intr, pcpu_masks[0].pcpu_mask);
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}
686 
/* Domain .map: dispatch to the local or shared mapper based on hwirq range. */
static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
			      irq_hw_number_t hw)
{
	if (GIC_HWIRQ_TO_LOCAL(hw) < GIC_NUM_LOCAL_INTRS)
		return gic_local_irq_domain_map(d, virq, hw);
	return gic_shared_irq_domain_map(d, virq, hw);
}
694 
695 static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
696 				const u32 *intspec, unsigned int intsize,
697 				irq_hw_number_t *out_hwirq,
698 				unsigned int *out_type)
699 {
700 	if (intsize != 3)
701 		return -EINVAL;
702 
703 	if (intspec[0] == GIC_SHARED)
704 		*out_hwirq = GIC_SHARED_TO_HWIRQ(intspec[1]);
705 	else if (intspec[0] == GIC_LOCAL)
706 		*out_hwirq = GIC_LOCAL_TO_HWIRQ(intspec[1]);
707 	else
708 		return -EINVAL;
709 	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
710 
711 	return 0;
712 }
713 
714 static struct irq_domain_ops gic_irq_domain_ops = {
715 	.map = gic_irq_domain_map,
716 	.xlate = gic_irq_domain_xlate,
717 };
718 
/*
 * Core GIC initialisation.
 * @gic_base_addr: physical base of the GIC register block
 * @gic_addrspace_size: size of that register block
 * @cpu_vec: CPU interrupt vector the GIC is cascaded to (non-EIC mode)
 * @irqbase: first Linux IRQ number to use (0 lets the core allocate)
 * @node: DT node, or NULL for legacy (non-DT) callers
 */
static void __init __gic_init(unsigned long gic_base_addr,
			      unsigned long gic_addrspace_size,
			      unsigned int cpu_vec, unsigned int irqbase,
			      struct device_node *node)
{
	unsigned int gicconfig;

	gic_base = ioremap_nocache(gic_base_addr, gic_addrspace_size);

	/* NUMINTRS encodes the shared interrupt count as (field + 1) * 8. */
	gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	gic_shared_intrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >>
		   GIC_SH_CONFIG_NUMINTRS_SHF;
	gic_shared_intrs = ((gic_shared_intrs + 1) * 8);

	gic_vpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >>
		  GIC_SH_CONFIG_NUMVPES_SHF;
	gic_vpes = gic_vpes + 1;

	if (cpu_has_veic) {
		/* Always use vector 1 in EIC mode */
		gic_cpu_pin = 0;
		timer_cpu_pin = gic_cpu_pin;
		set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET,
			       __gic_irq_dispatch);
	} else {
		gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET;
		irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec,
					gic_irq_dispatch);
		/*
		 * With the CMP implementation of SMP (deprecated), other CPUs
		 * are started by the bootloader and put into a timer based
		 * waiting poll loop. We must not re-route those CPU's local
		 * timer interrupts as the wait instruction will never finish,
		 * so just handle whatever CPU interrupt it is routed to by
		 * default.
		 *
		 * This workaround should be removed when CMP support is
		 * dropped.
		 */
		if (IS_ENABLED(CONFIG_MIPS_CMP) &&
		    gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER)) {
			timer_cpu_pin = gic_read(GIC_REG(VPE_LOCAL,
							 GIC_VPE_TIMER_MAP)) &
					GIC_MAP_MSK;
			irq_set_chained_handler(MIPS_CPU_IRQ_BASE +
						GIC_CPU_PIN_OFFSET +
						timer_cpu_pin,
						gic_irq_dispatch);
		} else {
			timer_cpu_pin = gic_cpu_pin;
		}
	}

	/* One hwirq per local interrupt plus one per shared interrupt. */
	gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS +
					       gic_shared_intrs, irqbase,
					       &gic_irq_domain_ops, NULL);
	if (!gic_irq_domain)
		panic("Failed to add GIC IRQ domain");

	gic_basic_init();

	gic_ipi_init();
}
782 
/* Legacy (non-DT) entry point; see __gic_init() for parameter meanings. */
void __init gic_init(unsigned long gic_base_addr,
		     unsigned long gic_addrspace_size,
		     unsigned int cpu_vec, unsigned int irqbase)
{
	__gic_init(gic_base_addr, gic_addrspace_size, cpu_vec, irqbase, NULL);
}
789 
/*
 * Device-tree probe: choose the first CPU vector not listed in
 * "mti,reserved-cpu-vectors", locate the register block (from the DT
 * "reg" property, falling back to the CM GCR base), enable the GIC in
 * the CM and initialise it.
 */
static int __init gic_of_init(struct device_node *node,
			      struct device_node *parent)
{
	struct resource res;
	unsigned int cpu_vec, i = 0, reserved = 0;
	phys_addr_t gic_base;
	size_t gic_len;

	/* Find the first available CPU vector. */
	while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors",
					   i++, &cpu_vec))
		reserved |= BIT(cpu_vec);
	for (cpu_vec = 2; cpu_vec < 8; cpu_vec++) {
		if (!(reserved & BIT(cpu_vec)))
			break;
	}
	if (cpu_vec == 8) {
		pr_err("No CPU vectors available for GIC\n");
		return -ENODEV;
	}

	if (of_address_to_resource(node, 0, &res)) {
		/*
		 * Probe the CM for the GIC base address if not specified
		 * in the device-tree.
		 */
		if (mips_cm_present()) {
			gic_base = read_gcr_gic_base() &
				~CM_GCR_GIC_BASE_GICEN_MSK;
			gic_len = 0x20000;
		} else {
			pr_err("Failed to get GIC memory range\n");
			return -ENODEV;
		}
	} else {
		gic_base = res.start;
		gic_len = resource_size(&res);
	}

	/* Program the GIC base into the CM and enable it. */
	if (mips_cm_present())
		write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN_MSK);
	gic_present = true;

	__gic_init(gic_base, gic_len, cpu_vec, 0, node);

	return 0;
}
837 IRQCHIP_DECLARE(mips_gic, "mti,gic", gic_of_init);
838