/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/bitmap.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip/mips-gic.h>
#include <linux/of_address.h>
#include <linux/sched.h>
#include <linux/smp.h>

#include <asm/mips-cm.h>
#include <asm/setup.h>
#include <asm/traps.h>

#include <dt-bindings/interrupt-controller/mips-gic.h>

#include "irqchip.h"

unsigned int gic_present;

struct gic_pcpu_mask {
	DECLARE_BITMAP(pcpu_mask, GIC_MAX_INTRS);
};

static void __iomem *gic_base;
static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
static DEFINE_SPINLOCK(gic_lock);
static struct irq_domain *gic_irq_domain;
static int gic_shared_intrs;
static int gic_vpes;
static unsigned int gic_cpu_pin;
static unsigned int timer_cpu_pin;
static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;

static void __gic_irq_dispatch(void);

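/*
 * All GIC registers are memory-mapped; these accessors perform 32-bit
 * reads and writes at the given offset from the ioremapped GIC base.
 */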
static inline unsigned int gic_read(unsigned int reg)
{
	return __raw_readl(gic_base + reg);
}

static inline void gic_write(unsigned int reg, unsigned int val)
{
	__raw_writel(val, gic_base + reg);
}

static inline void gic_update_bits(unsigned int reg, unsigned int mask,
				   unsigned int val)
{
	unsigned int regval;

	regval = gic_read(reg);
	regval &= ~mask;
	regval |= val;
	gic_write(reg, regval);
}

static inline void gic_reset_mask(unsigned int intr)
{
	gic_write(GIC_REG(SHARED, GIC_SH_RMASK) + GIC_INTR_OFS(intr),
		  1 << GIC_INTR_BIT(intr));
}

static inline void gic_set_mask(unsigned int intr)
{
	gic_write(GIC_REG(SHARED, GIC_SH_SMASK) + GIC_INTR_OFS(intr),
		  1 << GIC_INTR_BIT(intr));
}

static inline void gic_set_polarity(unsigned int intr, unsigned int pol)
{
	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_POLARITY) +
			GIC_INTR_OFS(intr), 1 << GIC_INTR_BIT(intr),
			pol << GIC_INTR_BIT(intr));
}

static inline void gic_set_trigger(unsigned int intr, unsigned int trig)
{
	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_TRIGGER) +
			GIC_INTR_OFS(intr), 1 << GIC_INTR_BIT(intr),
			trig << GIC_INTR_BIT(intr));
}

static inline void gic_set_dual_edge(unsigned int intr, unsigned int dual)
{
	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_DUAL) + GIC_INTR_OFS(intr),
			1 << GIC_INTR_BIT(intr),
			dual << GIC_INTR_BIT(intr));
}

static inline void gic_map_to_pin(unsigned int intr, unsigned int pin)
{
	gic_write(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_PIN_BASE) +
		  GIC_SH_MAP_TO_PIN(intr), GIC_MAP_TO_PIN_MSK | pin);
}

static inline void gic_map_to_vpe(unsigned int intr, unsigned int vpe)
{
	gic_write(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_VPE_BASE) +
		  GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe),
		  GIC_SH_MAP_TO_VPE_REG_BIT(vpe));
}

#ifdef CONFIG_CLKSRC_MIPS_GIC
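/*
 * The shared counter is 64 bits wide but only readable as two 32-bit
 * halves; re-reading the high word detects a carry out of the low word
 * between the two reads, in which case both halves are read again.
 */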
cycle_t gic_read_count(void)
{
	unsigned int hi, hi2, lo;

	do {
		hi = gic_read(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
		lo = gic_read(GIC_REG(SHARED, GIC_SH_COUNTER_31_00));
		hi2 = gic_read(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
	} while (hi2 != hi);

	return (((cycle_t) hi) << 32) + lo;
}

unsigned int gic_get_count_width(void)
{
	unsigned int bits, config;

	config = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	bits = 32 + 4 * ((config & GIC_SH_CONFIG_COUNTBITS_MSK) >>
			 GIC_SH_CONFIG_COUNTBITS_SHF);

	return bits;
}

void gic_write_compare(cycle_t cnt)
{
	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI),
				(int)(cnt >> 32));
	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO),
				(int)(cnt & 0xffffffff));
}

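/*
 * Writing a VPE number to GIC_VPE_OTHER_ADDR makes the VPE_OTHER register
 * block alias that VPE's local registers. Interrupts are disabled across
 * the sequence so a handler on this CPU cannot re-target VPE_OTHER
 * between the writes.
 */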
void gic_write_cpu_compare(cycle_t cnt, int cpu)
{
	unsigned long flags;

	local_irq_save(flags);

	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), cpu);
	gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_HI),
				(int)(cnt >> 32));
	gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_LO),
				(int)(cnt & 0xffffffff));

	local_irq_restore(flags);
}

cycle_t gic_read_compare(void)
{
	unsigned int hi, lo;

	hi = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI));
	lo = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO));

	return (((cycle_t) hi) << 32) + lo;
}
#endif

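/*
 * A local interrupt is "routable" when the GIC, rather than the CPU core
 * itself, delivers it. GIC_VPE_CTL reports this per source; in EIC mode
 * the GIC owns the routing of all local interrupts.
 */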
static bool gic_local_irq_is_routable(int intr)
{
	u32 vpe_ctl;

	/* All local interrupts are routable in EIC mode. */
	if (cpu_has_veic)
		return true;

	vpe_ctl = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_CTL));
	switch (intr) {
	case GIC_LOCAL_INT_TIMER:
		return vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK;
	case GIC_LOCAL_INT_PERFCTR:
		return vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK;
	case GIC_LOCAL_INT_FDC:
		return vpe_ctl & GIC_VPE_CTL_FDC_RTBL_MSK;
	case GIC_LOCAL_INT_SWINT0:
	case GIC_LOCAL_INT_SWINT1:
		return vpe_ctl & GIC_VPE_CTL_SWINT_RTBL_MSK;
	default:
		return true;
	}
}

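/*
 * In EIC mode each interrupt vector may select one of the CPU's shadow
 * register sets; this hook tells the GIC which set to use for a vector.
 */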
static void gic_bind_eic_interrupt(int irq, int set)
{
	/* Convert irq vector # to hw int # */
	irq -= GIC_PIN_TO_VEC_OFFSET;

	/* Set irq to use shadow set */
	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_EIC_SHADOW_SET_BASE) +
		  GIC_VPE_EIC_SS(irq), set);
}

void gic_send_ipi(unsigned int intr)
{
	gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_SET(intr));
}

int gic_get_c0_compare_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER))
		return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_TIMER));
}

int gic_get_c0_perfcount_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_PERFCTR)) {
		/* Is the performance counter shared with the timer? */
		if (cp0_perfcount_irq < 0)
			return -1;
		return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
	}
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_PERFCTR));
}

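/*
 * Handle all shared interrupts pending for this CPU: the pending and mask
 * register banks are read 32 bits at a time, then ANDed together and with
 * this CPU's pcpu_mask so that only interrupts routed here are serviced.
 */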
static void gic_handle_shared_int(void)
{
	unsigned int i, intr, virq;
	unsigned long *pcpu_mask;
	unsigned long pending_reg, intrmask_reg;
	DECLARE_BITMAP(pending, GIC_MAX_INTRS);
	DECLARE_BITMAP(intrmask, GIC_MAX_INTRS);

	/* Get per-cpu bitmaps */
	pcpu_mask = pcpu_masks[smp_processor_id()].pcpu_mask;

	pending_reg = GIC_REG(SHARED, GIC_SH_PEND);
	intrmask_reg = GIC_REG(SHARED, GIC_SH_MASK);

	for (i = 0; i < BITS_TO_LONGS(gic_shared_intrs); i++) {
		pending[i] = gic_read(pending_reg);
		intrmask[i] = gic_read(intrmask_reg);
		pending_reg += 0x4;
		intrmask_reg += 0x4;
	}

	bitmap_and(pending, pending, intrmask, gic_shared_intrs);
	bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs);

	intr = find_first_bit(pending, gic_shared_intrs);
	while (intr != gic_shared_intrs) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_SHARED_TO_HWIRQ(intr));
		do_IRQ(virq);

		/* go to next pending bit */
		bitmap_clear(pending, intr, 1);
		intr = find_first_bit(pending, gic_shared_intrs);
	}
}

static void gic_mask_irq(struct irq_data *d)
{
	gic_reset_mask(GIC_HWIRQ_TO_SHARED(d->hwirq));
}

static void gic_unmask_irq(struct irq_data *d)
{
	gic_set_mask(GIC_HWIRQ_TO_SHARED(d->hwirq));
}

static void gic_ack_irq(struct irq_data *d)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);

	gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_CLR(irq));
}

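/*
 * Translate a Linux trigger type into the GIC's per-interrupt polarity,
 * trigger and dual-edge configuration, and switch the flow handler
 * between the edge and level irq_chip variants to match.
 */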
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	unsigned long flags;
	bool is_edge;

	spin_lock_irqsave(&gic_lock, flags);
	switch (type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_EDGE_FALLING:
		gic_set_polarity(irq, GIC_POL_NEG);
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_EDGE_RISING:
		gic_set_polarity(irq, GIC_POL_POS);
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_EDGE_BOTH:
		/* polarity is irrelevant in this case */
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_ENABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		gic_set_polarity(irq, GIC_POL_NEG);
		gic_set_trigger(irq, GIC_TRIG_LEVEL);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = false;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
	default:
		gic_set_polarity(irq, GIC_POL_POS);
		gic_set_trigger(irq, GIC_TRIG_LEVEL);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = false;
		break;
	}

	if (is_edge) {
		__irq_set_chip_handler_name_locked(d->irq,
						   &gic_edge_irq_controller,
						   handle_edge_irq, NULL);
	} else {
		__irq_set_chip_handler_name_locked(d->irq,
						   &gic_level_irq_controller,
						   handle_level_irq, NULL);
	}
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}

#ifdef CONFIG_SMP
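/*
 * The GIC routes each shared interrupt to exactly one VPE, so affinity is
 * effectively restricted to a single CPU: the first online CPU in the
 * requested mask gets the interrupt and the pcpu_masks are updated to
 * match.
 */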
static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
			    bool force)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	cpumask_t	tmp = CPU_MASK_NONE;
	unsigned long	flags;
	int		i;

	cpumask_and(&tmp, cpumask, cpu_online_mask);
	if (cpus_empty(tmp))
		return -EINVAL;

	/* Assumption : cpumask refers to a single CPU */
	spin_lock_irqsave(&gic_lock, flags);

	/* Re-route this IRQ */
	gic_map_to_vpe(irq, first_cpu(tmp));

	/* Update the pcpu_masks */
	for (i = 0; i < NR_CPUS; i++)
		clear_bit(irq, pcpu_masks[i].pcpu_mask);
	set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);

	cpumask_copy(d->affinity, cpumask);
	spin_unlock_irqrestore(&gic_lock, flags);

	return IRQ_SET_MASK_OK_NOCOPY;
}
#endif

static struct irq_chip gic_level_irq_controller = {
	.name			=	"MIPS GIC",
	.irq_mask		=	gic_mask_irq,
	.irq_unmask		=	gic_unmask_irq,
	.irq_set_type		=	gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	=	gic_set_affinity,
#endif
};

static struct irq_chip gic_edge_irq_controller = {
	.name			=	"MIPS GIC",
	.irq_ack		=	gic_ack_irq,
	.irq_mask		=	gic_mask_irq,
	.irq_unmask		=	gic_unmask_irq,
	.irq_set_type		=	gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	=	gic_set_affinity,
#endif
};

static void gic_handle_local_int(void)
{
	unsigned long pending, masked;
	unsigned int intr, virq;

	pending = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_PEND));
	masked = gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_MASK));

	bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS);

	intr = find_first_bit(&pending, GIC_NUM_LOCAL_INTRS);
	while (intr != GIC_NUM_LOCAL_INTRS) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_LOCAL_TO_HWIRQ(intr));
		do_IRQ(virq);

		/* go to next pending bit */
		bitmap_clear(&pending, intr, 1);
		intr = find_first_bit(&pending, GIC_NUM_LOCAL_INTRS);
	}
}

static void gic_mask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_RMASK), 1 << intr);
}

static void gic_unmask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), 1 << intr);
}

static struct irq_chip gic_local_irq_controller = {
	.name			=	"MIPS GIC Local",
	.irq_mask		=	gic_mask_local_irq,
	.irq_unmask		=	gic_unmask_local_irq,
};

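/*
 * Variants of the local mask/unmask operations that act on every VPE via
 * the VPE_OTHER alias; used for the local interrupts (CP0 timer and perf
 * counter) that the rest of the kernel does not treat as per-CPU IRQs.
 */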
static void gic_mask_local_irq_all_vpes(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	int i;
	unsigned long flags;

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
		gic_write(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << intr);
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}

static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	int i;
	unsigned long flags;

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
		gic_write(GIC_REG(VPE_OTHER, GIC_VPE_SMASK), 1 << intr);
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}

static struct irq_chip gic_all_vpes_local_irq_controller = {
	.name			=	"MIPS GIC Local",
	.irq_mask		=	gic_mask_local_irq_all_vpes,
	.irq_unmask		=	gic_unmask_local_irq_all_vpes,
};

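/* Top-level dispatch: service local interrupts first, then shared ones. */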
static void __gic_irq_dispatch(void)
{
	gic_handle_local_int();
	gic_handle_shared_int();
}

static void gic_irq_dispatch(unsigned int irq, struct irq_desc *desc)
{
	__gic_irq_dispatch();
}

#ifdef CONFIG_MIPS_GIC_IPI
static int gic_resched_int_base;
static int gic_call_int_base;

unsigned int plat_ipi_resched_int_xlate(unsigned int cpu)
{
	return gic_resched_int_base + cpu;
}

unsigned int plat_ipi_call_int_xlate(unsigned int cpu)
{
	return gic_call_int_base + cpu;
}

static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
	scheduler_ipi();

	return IRQ_HANDLED;
}

static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
	smp_call_function_interrupt();

	return IRQ_HANDLED;
}

static struct irqaction irq_resched = {
	.handler	= ipi_resched_interrupt,
	.flags		= IRQF_PERCPU,
	.name		= "IPI resched"
};

static struct irqaction irq_call = {
	.handler	= ipi_call_interrupt,
	.flags		= IRQF_PERCPU,
	.name		= "IPI call"
};

static __init void gic_ipi_init_one(unsigned int intr, int cpu,
				    struct irqaction *action)
{
	int virq = irq_create_mapping(gic_irq_domain,
				      GIC_SHARED_TO_HWIRQ(intr));
	int i;

	gic_map_to_vpe(intr, cpu);
	for (i = 0; i < NR_CPUS; i++)
		clear_bit(intr, pcpu_masks[i].pcpu_mask);
	set_bit(intr, pcpu_masks[cpu].pcpu_mask);

	irq_set_irq_type(virq, IRQ_TYPE_EDGE_RISING);

	irq_set_handler(virq, handle_percpu_irq);
	setup_irq(virq, action);
}

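/*
 * Carve the top 2 * nr_cpu_ids shared interrupts out for IPIs: the
 * resched IPIs occupy the very top of the range and the call IPIs sit
 * directly below them. For example, with 256 shared interrupts and 4
 * CPUs, interrupts 252-255 become resched IPIs and 248-251 call IPIs.
 */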
static __init void gic_ipi_init(void)
{
	int i;

	/* Use last 2 * NR_CPUS interrupts as IPIs */
	gic_resched_int_base = gic_shared_intrs - nr_cpu_ids;
	gic_call_int_base = gic_resched_int_base - nr_cpu_ids;

	for (i = 0; i < nr_cpu_ids; i++) {
		gic_ipi_init_one(gic_call_int_base + i, i, &irq_call);
		gic_ipi_init_one(gic_resched_int_base + i, i, &irq_resched);
	}
}
#else
static inline void gic_ipi_init(void)
{
}
#endif

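/*
 * Bring the GIC to a known state: every shared interrupt level-triggered,
 * active-high and masked, and every routable local interrupt masked on
 * each VPE.
 */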
static void __init gic_basic_init(void)
{
	unsigned int i;

	board_bind_eic_interrupt = &gic_bind_eic_interrupt;

	/* Setup defaults */
	for (i = 0; i < gic_shared_intrs; i++) {
		gic_set_polarity(i, GIC_POL_POS);
		gic_set_trigger(i, GIC_TRIG_LEVEL);
		gic_reset_mask(i);
	}

	for (i = 0; i < gic_vpes; i++) {
		unsigned int j;

		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);
		for (j = 0; j < GIC_NUM_LOCAL_INTRS; j++) {
			if (!gic_local_irq_is_routable(j))
				continue;
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << j);
		}
	}
}

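/*
 * Map a local interrupt: pick the appropriate irq_chip and flow handler,
 * then program the interrupt's MAP register on every VPE so it is raised
 * on the chosen CPU pin.
 */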
static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
				    irq_hw_number_t hw)
{
	int intr = GIC_HWIRQ_TO_LOCAL(hw);
	int ret = 0;
	int i;
	unsigned long flags;

	if (!gic_local_irq_is_routable(intr))
		return -EPERM;

	/*
	 * HACK: These are all really percpu interrupts, but the rest
	 * of the MIPS kernel code does not use the percpu IRQ API for
	 * the CP0 timer and performance counter interrupts.
	 */
	if (intr != GIC_LOCAL_INT_TIMER && intr != GIC_LOCAL_INT_PERFCTR) {
		irq_set_chip_and_handler(virq,
					 &gic_local_irq_controller,
					 handle_percpu_devid_irq);
		irq_set_percpu_devid(virq);
	} else {
		irq_set_chip_and_handler(virq,
					 &gic_all_vpes_local_irq_controller,
					 handle_percpu_irq);
	}

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		u32 val = GIC_MAP_TO_PIN_MSK | gic_cpu_pin;

		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), i);

		switch (intr) {
		case GIC_LOCAL_INT_WD:
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_WD_MAP), val);
			break;
		case GIC_LOCAL_INT_COMPARE:
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_MAP), val);
			break;
		case GIC_LOCAL_INT_TIMER:
			/* CONFIG_MIPS_CMP workaround (see __gic_init) */
			val = GIC_MAP_TO_PIN_MSK | timer_cpu_pin;
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP), val);
			break;
		case GIC_LOCAL_INT_PERFCTR:
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP), val);
			break;
		case GIC_LOCAL_INT_SWINT0:
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_SWINT0_MAP), val);
			break;
		case GIC_LOCAL_INT_SWINT1:
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_SWINT1_MAP), val);
			break;
		case GIC_LOCAL_INT_FDC:
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_FDC_MAP), val);
			break;
		default:
			pr_err("Invalid local IRQ %d\n", intr);
			ret = -EINVAL;
			break;
		}
	}
	spin_unlock_irqrestore(&gic_lock, flags);

	return ret;
}

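/*
 * Map a shared interrupt: level-triggered by default, raised on the GIC's
 * CPU pin and routed to VPE 0 until gic_set_affinity() moves it.
 */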
static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
				     irq_hw_number_t hw)
{
	int intr = GIC_HWIRQ_TO_SHARED(hw);
	unsigned long flags;

	irq_set_chip_and_handler(virq, &gic_level_irq_controller,
				 handle_level_irq);

	spin_lock_irqsave(&gic_lock, flags);
	gic_map_to_pin(intr, gic_cpu_pin);
	/* Map to VPE 0 by default */
	gic_map_to_vpe(intr, 0);
	set_bit(intr, pcpu_masks[0].pcpu_mask);
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}

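/*
 * The domain's hwirq space places the GIC_NUM_LOCAL_INTRS local
 * interrupts first and the shared interrupts after them;
 * GIC_LOCAL_TO_HWIRQ() and GIC_SHARED_TO_HWIRQ() apply the offsets.
 */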
static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
			      irq_hw_number_t hw)
{
	if (GIC_HWIRQ_TO_LOCAL(hw) < GIC_NUM_LOCAL_INTRS)
		return gic_local_irq_domain_map(d, virq, hw);
	return gic_shared_irq_domain_map(d, virq, hw);
}

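/*
 * Device-tree interrupt specifiers use three cells: the class (GIC_SHARED
 * or GIC_LOCAL), the interrupt number within that class, and the trigger
 * type. A hypothetical device node might therefore contain, e.g.:
 *
 *	interrupt-parent = <&gic>;
 *	interrupts = <GIC_SHARED 29 IRQ_TYPE_LEVEL_HIGH>;
 */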
static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	if (intsize != 3)
		return -EINVAL;

	if (intspec[0] == GIC_SHARED)
		*out_hwirq = GIC_SHARED_TO_HWIRQ(intspec[1]);
	else if (intspec[0] == GIC_LOCAL)
		*out_hwirq = GIC_LOCAL_TO_HWIRQ(intspec[1]);
	else
		return -EINVAL;
	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;

	return 0;
}

static struct irq_domain_ops gic_irq_domain_ops = {
	.map = gic_irq_domain_map,
	.xlate = gic_irq_domain_xlate,
};

static void __init __gic_init(unsigned long gic_base_addr,
			      unsigned long gic_addrspace_size,
			      unsigned int cpu_vec, unsigned int irqbase,
			      struct device_node *node)
{
	unsigned int gicconfig;

	gic_base = ioremap_nocache(gic_base_addr, gic_addrspace_size);

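	/*
	 * GIC_SH_CONFIG encodes the number of shared interrupts in units of
	 * 8 minus one (a field value of n means (n + 1) * 8 interrupts) and
	 * the number of VPEs as the count minus one.
	 */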
	gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	gic_shared_intrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >>
		   GIC_SH_CONFIG_NUMINTRS_SHF;
	gic_shared_intrs = ((gic_shared_intrs + 1) * 8);

	gic_vpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >>
		  GIC_SH_CONFIG_NUMVPES_SHF;
	gic_vpes = gic_vpes + 1;

	if (cpu_has_veic) {
		/* Always use vector 1 in EIC mode */
		gic_cpu_pin = 0;
		timer_cpu_pin = gic_cpu_pin;
		set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET,
			       __gic_irq_dispatch);
	} else {
		gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET;
		irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec,
					gic_irq_dispatch);
		/*
		 * With the CMP implementation of SMP (deprecated), other CPUs
		 * are started by the bootloader and put into a timer-based
		 * waiting poll loop. We must not re-route those CPUs' local
		 * timer interrupts as the wait instruction will never finish,
		 * so just handle whatever CPU interrupt it is routed to by
		 * default.
		 *
		 * This workaround should be removed when CMP support is
		 * dropped.
		 */
		if (IS_ENABLED(CONFIG_MIPS_CMP) &&
		    gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER)) {
			timer_cpu_pin = gic_read(GIC_REG(VPE_LOCAL,
							 GIC_VPE_TIMER_MAP)) &
					GIC_MAP_MSK;
			irq_set_chained_handler(MIPS_CPU_IRQ_BASE +
						GIC_CPU_PIN_OFFSET +
						timer_cpu_pin,
						gic_irq_dispatch);
		} else {
			timer_cpu_pin = gic_cpu_pin;
		}
	}

	gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS +
					       gic_shared_intrs, irqbase,
					       &gic_irq_domain_ops, NULL);
	if (!gic_irq_domain)
		panic("Failed to add GIC IRQ domain");

	gic_basic_init();

	gic_ipi_init();
}

void __init gic_init(unsigned long gic_base_addr,
		     unsigned long gic_addrspace_size,
		     unsigned int cpu_vec, unsigned int irqbase)
{
	__gic_init(gic_base_addr, gic_addrspace_size, cpu_vec, irqbase, NULL);
}

static int __init gic_of_init(struct device_node *node,
			      struct device_node *parent)
{
	struct resource res;
	unsigned int cpu_vec, i = 0, reserved = 0;
	phys_addr_t gic_base;
	size_t gic_len;

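	/*
	 * CPU vectors 0 and 1 are the core's software interrupts; hardware
	 * interrupt vectors run from 2 to 7, and any vector listed in the
	 * "mti,reserved-cpu-vectors" property is claimed by the platform.
	 */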
	/* Find the first available CPU vector. */
	while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors",
					   i++, &cpu_vec))
		reserved |= BIT(cpu_vec);
	for (cpu_vec = 2; cpu_vec < 8; cpu_vec++) {
		if (!(reserved & BIT(cpu_vec)))
			break;
	}
	if (cpu_vec == 8) {
		pr_err("No CPU vectors available for GIC\n");
		return -ENODEV;
	}

	if (of_address_to_resource(node, 0, &res)) {
		/*
		 * Probe the CM for the GIC base address if not specified
		 * in the device-tree.
		 */
		if (mips_cm_present()) {
			gic_base = read_gcr_gic_base() &
				~CM_GCR_GIC_BASE_GICEN_MSK;
			gic_len = 0x20000;
		} else {
			pr_err("Failed to get GIC memory range\n");
			return -ENODEV;
		}
	} else {
		gic_base = res.start;
		gic_len = resource_size(&res);
	}

	if (mips_cm_present())
		write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN_MSK);
	gic_present = true;

	__gic_init(gic_base, gic_len, cpu_vec, 0, node);

	return 0;
}
IRQCHIP_DECLARE(mips_gic, "mti,gic", gic_of_init);