/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2008 Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2012 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/bitmap.h>
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/mips-gic.h>
#include <linux/of_address.h>
#include <linux/sched.h>
#include <linux/smp.h>

#include <asm/mips-cm.h>
#include <asm/setup.h>
#include <asm/traps.h>

#include <dt-bindings/interrupt-controller/mips-gic.h>

unsigned int gic_present;

struct gic_pcpu_mask {
	DECLARE_BITMAP(pcpu_mask, GIC_MAX_INTRS);
};

static unsigned long __gic_base_addr;

static void __iomem *gic_base;
static struct gic_pcpu_mask pcpu_masks[NR_CPUS];
static DEFINE_SPINLOCK(gic_lock);
static struct irq_domain *gic_irq_domain;
static struct irq_domain *gic_ipi_domain;
static int gic_shared_intrs;
static int gic_vpes;
static unsigned int gic_cpu_pin;
static unsigned int timer_cpu_pin;
static struct irq_chip gic_level_irq_controller, gic_edge_irq_controller;
static DECLARE_BITMAP(ipi_resrv, GIC_MAX_INTRS);
static DECLARE_BITMAP(ipi_available, GIC_MAX_INTRS);

static void __gic_irq_dispatch(void);

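/*
 * GIC register accessors. The GIC may need to be accessed using either
 * 32-bit or 64-bit reads & writes, matching the width of the CM GCRs;
 * mips_cm_is64 selects between them. gic_read() & gic_write() operate
 * on native-width registers, whilst the explicitly sized variants are
 * used for registers of a known fixed width.
 */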
static inline u32 gic_read32(unsigned int reg)
{
	return __raw_readl(gic_base + reg);
}

static inline u64 gic_read64(unsigned int reg)
{
	return __raw_readq(gic_base + reg);
}

static inline unsigned long gic_read(unsigned int reg)
{
	if (!mips_cm_is64)
		return gic_read32(reg);
	else
		return gic_read64(reg);
}

static inline void gic_write32(unsigned int reg, u32 val)
{
	return __raw_writel(val, gic_base + reg);
}

static inline void gic_write64(unsigned int reg, u64 val)
{
	return __raw_writeq(val, gic_base + reg);
}

static inline void gic_write(unsigned int reg, unsigned long val)
{
	if (!mips_cm_is64)
		return gic_write32(reg, (u32)val);
	else
		return gic_write64(reg, (u64)val);
}

static inline void gic_update_bits(unsigned int reg, unsigned long mask,
				   unsigned long val)
{
	unsigned long regval;

	regval = gic_read(reg);
	regval &= ~mask;
	regval |= val;
	gic_write(reg, regval);
}

static inline void gic_reset_mask(unsigned int intr)
{
	gic_write(GIC_REG(SHARED, GIC_SH_RMASK) + GIC_INTR_OFS(intr),
		  1ul << GIC_INTR_BIT(intr));
}

static inline void gic_set_mask(unsigned int intr)
{
	gic_write(GIC_REG(SHARED, GIC_SH_SMASK) + GIC_INTR_OFS(intr),
		  1ul << GIC_INTR_BIT(intr));
}

static inline void gic_set_polarity(unsigned int intr, unsigned int pol)
{
	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_POLARITY) +
			GIC_INTR_OFS(intr), 1ul << GIC_INTR_BIT(intr),
			(unsigned long)pol << GIC_INTR_BIT(intr));
}

static inline void gic_set_trigger(unsigned int intr, unsigned int trig)
{
	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_TRIGGER) +
			GIC_INTR_OFS(intr), 1ul << GIC_INTR_BIT(intr),
			(unsigned long)trig << GIC_INTR_BIT(intr));
}

static inline void gic_set_dual_edge(unsigned int intr, unsigned int dual)
{
	gic_update_bits(GIC_REG(SHARED, GIC_SH_SET_DUAL) + GIC_INTR_OFS(intr),
			1ul << GIC_INTR_BIT(intr),
			(unsigned long)dual << GIC_INTR_BIT(intr));
}

static inline void gic_map_to_pin(unsigned int intr, unsigned int pin)
{
	gic_write32(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_PIN_BASE) +
		    GIC_SH_MAP_TO_PIN(intr), GIC_MAP_TO_PIN_MSK | pin);
}

static inline void gic_map_to_vpe(unsigned int intr, unsigned int vpe)
{
	gic_write(GIC_REG(SHARED, GIC_SH_INTR_MAP_TO_VPE_BASE) +
		  GIC_SH_MAP_TO_VPE_REG_OFF(intr, vpe),
		  GIC_SH_MAP_TO_VPE_REG_BIT(vpe));
}

#ifdef CONFIG_CLKSRC_MIPS_GIC
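/*
 * Read the 64-bit free-running GIC counter. With only 32-bit register
 * access the two halves cannot be read atomically, so the high half is
 * re-read until it is stable across the read of the low half.
 */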
u64 notrace gic_read_count(void)
{
	unsigned int hi, hi2, lo;

	if (mips_cm_is64)
		return (u64)gic_read(GIC_REG(SHARED, GIC_SH_COUNTER));

	do {
		hi = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
		lo = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_31_00));
		hi2 = gic_read32(GIC_REG(SHARED, GIC_SH_COUNTER_63_32));
	} while (hi2 != hi);

	return (((u64) hi) << 32) + lo;
}

unsigned int gic_get_count_width(void)
{
	unsigned int bits, config;

	config = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	bits = 32 + 4 * ((config & GIC_SH_CONFIG_COUNTBITS_MSK) >>
			 GIC_SH_CONFIG_COUNTBITS_SHF);

	return bits;
}

void notrace gic_write_compare(u64 cnt)
{
	if (mips_cm_is64) {
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE), cnt);
	} else {
		gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI),
					(int)(cnt >> 32));
		gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO),
					(int)(cnt & 0xffffffff));
	}
}

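/*
 * Write the compare register of another VP(E) by pointing this CPU's
 * "other" register block at the target first. Local interrupts are
 * disabled so that the VPE_OTHER_ADDR redirection cannot be disturbed
 * by a concurrent user on this CPU.
 */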
void notrace gic_write_cpu_compare(u64 cnt, int cpu)
{
	unsigned long flags;

	local_irq_save(flags);

	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR), mips_cm_vp_id(cpu));

	if (mips_cm_is64) {
		gic_write(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE), cnt);
	} else {
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_HI),
					(int)(cnt >> 32));
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_LO),
					(int)(cnt & 0xffffffff));
	}

	local_irq_restore(flags);
}

u64 gic_read_compare(void)
{
	unsigned int hi, lo;

	if (mips_cm_is64)
		return (u64)gic_read(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE));

	hi = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_HI));
	lo = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_COMPARE_LO));

	return (((u64) hi) << 32) + lo;
}

void gic_start_count(void)
{
	u32 gicconfig;

	/* Start the counter */
	gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	gicconfig &= ~(1 << GIC_SH_CONFIG_COUNTSTOP_SHF);
	gic_write(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
}

void gic_stop_count(void)
{
	u32 gicconfig;

	/* Stop the counter */
	gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	gicconfig |= 1 << GIC_SH_CONFIG_COUNTSTOP_SHF;
	gic_write(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
}

#endif /* CONFIG_CLKSRC_MIPS_GIC */

unsigned int gic_read_local_vp_id(void)
{
	unsigned long ident;

	ident = gic_read(GIC_REG(VPE_LOCAL, GIC_VP_IDENT));
	return ident & GIC_VP_IDENT_VCNUM_MSK;
}

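/*
 * Determine whether a local interrupt can be routed to a CPU interrupt
 * pin. In EIC mode every local interrupt is routable; otherwise the
 * routability bits reported in GIC_VPE_CTL are consulted.
 */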
static bool gic_local_irq_is_routable(int intr)
{
	u32 vpe_ctl;

	/* All local interrupts are routable in EIC mode. */
	if (cpu_has_veic)
		return true;

	vpe_ctl = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_CTL));
	switch (intr) {
	case GIC_LOCAL_INT_TIMER:
		return vpe_ctl & GIC_VPE_CTL_TIMER_RTBL_MSK;
	case GIC_LOCAL_INT_PERFCTR:
		return vpe_ctl & GIC_VPE_CTL_PERFCNT_RTBL_MSK;
	case GIC_LOCAL_INT_FDC:
		return vpe_ctl & GIC_VPE_CTL_FDC_RTBL_MSK;
	case GIC_LOCAL_INT_SWINT0:
	case GIC_LOCAL_INT_SWINT1:
		return vpe_ctl & GIC_VPE_CTL_SWINT_RTBL_MSK;
	default:
		return true;
	}
}

static void gic_bind_eic_interrupt(int irq, int set)
{
	/* Convert irq vector # to hw int # */
	irq -= GIC_PIN_TO_VEC_OFFSET;

	/* Set irq to use shadow set */
	gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_EIC_SHADOW_SET_BASE) +
		  GIC_VPE_EIC_SS(irq), set);
}

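/*
 * Raise an IPI by writing the target shared interrupt number to the
 * WEDGE register with its set bit, asserting the corresponding
 * edge-triggered interrupt on the CPU it is routed to.
 */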
static void gic_send_ipi(struct irq_data *d, unsigned int cpu)
{
	irq_hw_number_t hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(d));

	gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_SET(hwirq));
}

int gic_get_c0_compare_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER))
		return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_TIMER));
}

int gic_get_c0_perfcount_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_PERFCTR)) {
		/* Is the performance counter shared with the timer? */
		if (cp0_perfcount_irq < 0)
			return -1;
		return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
	}
	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_PERFCTR));
}

int gic_get_c0_fdc_int(void)
{
	if (!gic_local_irq_is_routable(GIC_LOCAL_INT_FDC)) {
		/* Is the FDC IRQ even present? */
		if (cp0_fdc_irq < 0)
			return -1;
		return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
	}

	return irq_create_mapping(gic_irq_domain,
				  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC));
}

int gic_get_usm_range(struct resource *gic_usm_res)
{
	if (!gic_present)
		return -1;

	gic_usm_res->start = __gic_base_addr + USM_VISIBLE_SECTION_OFS;
	gic_usm_res->end = gic_usm_res->start + (USM_VISIBLE_SECTION_SIZE - 1);

	return 0;
}

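/*
 * Dispatch any shared interrupts that are pending, unmasked & routed
 * to this CPU. The pending & mask registers are gathered into bitmaps
 * (using two 32-bit reads per long when a 64-bit kernel runs on a
 * 32-bit GIC), ANDed with this CPU's routing mask, and each remaining
 * bit is handed to its mapped virq.
 */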
static void gic_handle_shared_int(bool chained)
{
	unsigned int i, intr, virq, gic_reg_step = mips_cm_is64 ? 8 : 4;
	unsigned long *pcpu_mask;
	unsigned long pending_reg, intrmask_reg;
	DECLARE_BITMAP(pending, GIC_MAX_INTRS);
	DECLARE_BITMAP(intrmask, GIC_MAX_INTRS);

	/* Get per-cpu bitmaps */
	pcpu_mask = pcpu_masks[smp_processor_id()].pcpu_mask;

	pending_reg = GIC_REG(SHARED, GIC_SH_PEND);
	intrmask_reg = GIC_REG(SHARED, GIC_SH_MASK);

	for (i = 0; i < BITS_TO_LONGS(gic_shared_intrs); i++) {
		pending[i] = gic_read(pending_reg);
		intrmask[i] = gic_read(intrmask_reg);
		pending_reg += gic_reg_step;
		intrmask_reg += gic_reg_step;

		if (!IS_ENABLED(CONFIG_64BIT) || mips_cm_is64)
			continue;

		pending[i] |= (u64)gic_read(pending_reg) << 32;
		intrmask[i] |= (u64)gic_read(intrmask_reg) << 32;
		pending_reg += gic_reg_step;
		intrmask_reg += gic_reg_step;
	}

	bitmap_and(pending, pending, intrmask, gic_shared_intrs);
	bitmap_and(pending, pending, pcpu_mask, gic_shared_intrs);

	for_each_set_bit(intr, pending, gic_shared_intrs) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_SHARED_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);
	}
}

static void gic_mask_irq(struct irq_data *d)
{
	gic_reset_mask(GIC_HWIRQ_TO_SHARED(d->hwirq));
}

static void gic_unmask_irq(struct irq_data *d)
{
	gic_set_mask(GIC_HWIRQ_TO_SHARED(d->hwirq));
}

static void gic_ack_irq(struct irq_data *d)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);

	gic_write(GIC_REG(SHARED, GIC_SH_WEDGE), GIC_SH_WEDGE_CLR(irq));
}

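/*
 * Program the polarity, trigger & dual-edge registers of a shared
 * interrupt to match the requested IRQ type, switching between the
 * level & edge irqchips and flow handlers as appropriate.
 */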
static int gic_set_type(struct irq_data *d, unsigned int type)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	unsigned long flags;
	bool is_edge;

	spin_lock_irqsave(&gic_lock, flags);
	switch (type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_EDGE_FALLING:
		gic_set_polarity(irq, GIC_POL_NEG);
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_EDGE_RISING:
		gic_set_polarity(irq, GIC_POL_POS);
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_EDGE_BOTH:
		/* polarity is irrelevant in this case */
		gic_set_trigger(irq, GIC_TRIG_EDGE);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_ENABLE);
		is_edge = true;
		break;
	case IRQ_TYPE_LEVEL_LOW:
		gic_set_polarity(irq, GIC_POL_NEG);
		gic_set_trigger(irq, GIC_TRIG_LEVEL);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = false;
		break;
	case IRQ_TYPE_LEVEL_HIGH:
	default:
		gic_set_polarity(irq, GIC_POL_POS);
		gic_set_trigger(irq, GIC_TRIG_LEVEL);
		gic_set_dual_edge(irq, GIC_TRIG_DUAL_DISABLE);
		is_edge = false;
		break;
	}

	if (is_edge)
		irq_set_chip_handler_name_locked(d, &gic_edge_irq_controller,
						 handle_edge_irq, NULL);
	else
		irq_set_chip_handler_name_locked(d, &gic_level_irq_controller,
						 handle_level_irq, NULL);
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}

#ifdef CONFIG_SMP
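/*
 * Route a shared interrupt to a single CPU. Although the GIC can route
 * a shared interrupt to multiple VP(E)s, this driver only targets the
 * first online CPU of the requested mask & updates the per-CPU
 * dispatch masks to match.
 */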
static int gic_set_affinity(struct irq_data *d, const struct cpumask *cpumask,
			    bool force)
{
	unsigned int irq = GIC_HWIRQ_TO_SHARED(d->hwirq);
	cpumask_t	tmp = CPU_MASK_NONE;
	unsigned long	flags;
	int		i, cpu;

	cpumask_and(&tmp, cpumask, cpu_online_mask);
	if (cpumask_empty(&tmp))
		return -EINVAL;

	cpu = cpumask_first(&tmp);

	/* Assumption: cpumask refers to a single CPU */
	spin_lock_irqsave(&gic_lock, flags);

	/* Re-route this IRQ */
	gic_map_to_vpe(irq, mips_cm_vp_id(cpu));

	/* Update the pcpu_masks */
	for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
		clear_bit(irq, pcpu_masks[i].pcpu_mask);
	set_bit(irq, pcpu_masks[cpu].pcpu_mask);

	cpumask_copy(irq_data_get_affinity_mask(d), cpumask);
	irq_data_update_effective_affinity(d, cpumask_of(cpu));
	spin_unlock_irqrestore(&gic_lock, flags);

	return IRQ_SET_MASK_OK_NOCOPY;
}
#endif

static struct irq_chip gic_level_irq_controller = {
	.name			=	"MIPS GIC",
	.irq_mask		=	gic_mask_irq,
	.irq_unmask		=	gic_unmask_irq,
	.irq_set_type		=	gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	=	gic_set_affinity,
#endif
};

static struct irq_chip gic_edge_irq_controller = {
	.name			=	"MIPS GIC",
	.irq_ack		=	gic_ack_irq,
	.irq_mask		=	gic_mask_irq,
	.irq_unmask		=	gic_unmask_irq,
	.irq_set_type		=	gic_set_type,
#ifdef CONFIG_SMP
	.irq_set_affinity	=	gic_set_affinity,
#endif
	.ipi_send_single	=	gic_send_ipi,
};

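/*
 * Dispatch any local (per-VP) interrupts that are both pending &
 * unmasked on the current CPU.
 */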
static void gic_handle_local_int(bool chained)
{
	unsigned long pending, masked;
	unsigned int intr, virq;

	pending = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_PEND));
	masked = gic_read32(GIC_REG(VPE_LOCAL, GIC_VPE_MASK));

	bitmap_and(&pending, &pending, &masked, GIC_NUM_LOCAL_INTRS);

	for_each_set_bit(intr, &pending, GIC_NUM_LOCAL_INTRS) {
		virq = irq_linear_revmap(gic_irq_domain,
					 GIC_LOCAL_TO_HWIRQ(intr));
		if (chained)
			generic_handle_irq(virq);
		else
			do_IRQ(virq);
	}
}

static void gic_mask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_RMASK), 1 << intr);
}

static void gic_unmask_local_irq(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);

	gic_write32(GIC_REG(VPE_LOCAL, GIC_VPE_SMASK), 1 << intr);
}

static struct irq_chip gic_local_irq_controller = {
	.name			=	"MIPS GIC Local",
	.irq_mask		=	gic_mask_local_irq,
	.irq_unmask		=	gic_unmask_local_irq,
};

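/*
 * Mask or unmask a local interrupt on every VP(E) by iterating over
 * them through the "other" register block. Used for local interrupts
 * which the rest of the kernel does not treat as percpu IRQs; see the
 * GIC_LOCAL_INT_TIMER et al. handling in gic_irq_domain_map().
 */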
static void gic_mask_local_irq_all_vpes(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	int i;
	unsigned long flags;

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
			  mips_cm_vp_id(i));
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << intr);
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}

static void gic_unmask_local_irq_all_vpes(struct irq_data *d)
{
	int intr = GIC_HWIRQ_TO_LOCAL(d->hwirq);
	int i;
	unsigned long flags;

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
			  mips_cm_vp_id(i));
		gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SMASK), 1 << intr);
	}
	spin_unlock_irqrestore(&gic_lock, flags);
}

static struct irq_chip gic_all_vpes_local_irq_controller = {
	.name			=	"MIPS GIC Local",
	.irq_mask		=	gic_mask_local_irq_all_vpes,
	.irq_unmask		=	gic_unmask_local_irq_all_vpes,
};

static void __gic_irq_dispatch(void)
{
	gic_handle_local_int(false);
	gic_handle_shared_int(false);
}

static void gic_irq_dispatch(struct irq_desc *desc)
{
	gic_handle_local_int(true);
	gic_handle_shared_int(true);
}

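/*
 * Reset the GIC to a sane default state: shared interrupts are level
 * triggered, active high & masked, and every routable local interrupt
 * is masked on all VP(E)s.
 */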
static void __init gic_basic_init(void)
{
	unsigned int i;

	board_bind_eic_interrupt = &gic_bind_eic_interrupt;

	/* Setup defaults */
	for (i = 0; i < gic_shared_intrs; i++) {
		gic_set_polarity(i, GIC_POL_POS);
		gic_set_trigger(i, GIC_TRIG_LEVEL);
		gic_reset_mask(i);
	}

	for (i = 0; i < gic_vpes; i++) {
		unsigned int j;

		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
			  mips_cm_vp_id(i));
		for (j = 0; j < GIC_NUM_LOCAL_INTRS; j++) {
			if (!gic_local_irq_is_routable(j))
				continue;
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_RMASK), 1 << j);
		}
	}
}

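/*
 * Map a local interrupt by writing its CPU pin routing to the
 * corresponding map register on every VP(E). The timer interrupt may
 * use a different pin to the other local interrupts as a workaround
 * for CONFIG_MIPS_CMP (see __gic_init()).
 */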
static int gic_local_irq_domain_map(struct irq_domain *d, unsigned int virq,
				    irq_hw_number_t hw)
{
	int intr = GIC_HWIRQ_TO_LOCAL(hw);
	int ret = 0;
	int i;
	unsigned long flags;

	if (!gic_local_irq_is_routable(intr))
		return -EPERM;

	spin_lock_irqsave(&gic_lock, flags);
	for (i = 0; i < gic_vpes; i++) {
		u32 val = GIC_MAP_TO_PIN_MSK | gic_cpu_pin;

		gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
			  mips_cm_vp_id(i));

		switch (intr) {
		case GIC_LOCAL_INT_WD:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_WD_MAP), val);
			break;
		case GIC_LOCAL_INT_COMPARE:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_COMPARE_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_TIMER:
			/* CONFIG_MIPS_CMP workaround (see __gic_init) */
			val = GIC_MAP_TO_PIN_MSK | timer_cpu_pin;
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_TIMER_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_PERFCTR:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_PERFCTR_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_SWINT0:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SWINT0_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_SWINT1:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_SWINT1_MAP),
				    val);
			break;
		case GIC_LOCAL_INT_FDC:
			gic_write32(GIC_REG(VPE_OTHER, GIC_VPE_FDC_MAP), val);
			break;
		default:
			pr_err("Invalid local IRQ %d\n", intr);
			ret = -EINVAL;
			break;
		}
	}
	spin_unlock_irqrestore(&gic_lock, flags);

	return ret;
}

static int gic_shared_irq_domain_map(struct irq_domain *d, unsigned int virq,
				     irq_hw_number_t hw, unsigned int vpe)
{
	int intr = GIC_HWIRQ_TO_SHARED(hw);
	unsigned long flags;
	int i;

	spin_lock_irqsave(&gic_lock, flags);
	gic_map_to_pin(intr, gic_cpu_pin);
	gic_map_to_vpe(intr, mips_cm_vp_id(vpe));
	for (i = 0; i < min(gic_vpes, NR_CPUS); i++)
		clear_bit(intr, pcpu_masks[i].pcpu_mask);
	set_bit(intr, pcpu_masks[vpe].pcpu_mask);
	spin_unlock_irqrestore(&gic_lock, flags);

	return 0;
}

static int gic_irq_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	if (intsize != 3)
		return -EINVAL;

	if (intspec[0] == GIC_SHARED)
		*out_hwirq = GIC_SHARED_TO_HWIRQ(intspec[1]);
	else if (intspec[0] == GIC_LOCAL)
		*out_hwirq = GIC_LOCAL_TO_HWIRQ(intspec[1]);
	else
		return -EINVAL;
	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;

	return 0;
}

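/*
 * Map a hwirq to a Linux virq. Shared interrupts are given the level
 * irqchip by default (gic_set_type() may later switch them to the edge
 * variant) & are initially routed to VP(E) 0. Local interrupts are set
 * up as percpu IRQs.
 */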
static int gic_irq_domain_map(struct irq_domain *d, unsigned int virq,
			      irq_hw_number_t hwirq)
{
	int err;

	if (hwirq >= GIC_SHARED_HWIRQ_BASE) {
		/* verify that shared irqs don't conflict with an IPI irq */
		if (test_bit(GIC_HWIRQ_TO_SHARED(hwirq), ipi_resrv))
			return -EBUSY;

		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_level_irq_controller,
						    NULL);
		if (err)
			return err;

		irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq)));
		return gic_shared_irq_domain_map(d, virq, hwirq, 0);
	}

	switch (GIC_HWIRQ_TO_LOCAL(hwirq)) {
	case GIC_LOCAL_INT_TIMER:
	case GIC_LOCAL_INT_PERFCTR:
	case GIC_LOCAL_INT_FDC:
		/*
		 * HACK: These are all really percpu interrupts, but
		 * the rest of the MIPS kernel code does not use the
		 * percpu IRQ API for them.
		 */
		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_all_vpes_local_irq_controller,
						    NULL);
		if (err)
			return err;

		irq_set_handler(virq, handle_percpu_irq);
		break;

	default:
		err = irq_domain_set_hwirq_and_chip(d, virq, hwirq,
						    &gic_local_irq_controller,
						    NULL);
		if (err)
			return err;

		irq_set_handler(virq, handle_percpu_devid_irq);
		irq_set_percpu_devid(virq);
		break;
	}

	return gic_local_irq_domain_map(d, virq, hwirq);
}

static int gic_irq_domain_alloc(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	struct irq_fwspec *fwspec = arg;
	irq_hw_number_t hwirq;

	if (fwspec->param[0] == GIC_SHARED)
		hwirq = GIC_SHARED_TO_HWIRQ(fwspec->param[1]);
	else
		hwirq = GIC_LOCAL_TO_HWIRQ(fwspec->param[1]);

	return gic_irq_domain_map(d, virq, hwirq);
}

static void gic_irq_domain_free(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs)
{
}

static const struct irq_domain_ops gic_irq_domain_ops = {
	.xlate = gic_irq_domain_xlate,
	.alloc = gic_irq_domain_alloc,
	.free = gic_irq_domain_free,
	.map = gic_irq_domain_map,
};

static int gic_ipi_domain_xlate(struct irq_domain *d, struct device_node *ctrlr,
				const u32 *intspec, unsigned int intsize,
				irq_hw_number_t *out_hwirq,
				unsigned int *out_type)
{
	/*
	 * There's nothing to translate here. hwirq is dynamically allocated and
	 * the irq type is always edge triggered.
	 */
	*out_hwirq = 0;
	*out_type = IRQ_TYPE_EDGE_RISING;

	return 0;
}

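/*
 * Allocate a block of consecutive shared interrupts from the reserved
 * IPI range & map one to each CPU in the supplied mask.
 */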
static int gic_ipi_domain_alloc(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	struct cpumask *ipimask = arg;
	irq_hw_number_t hwirq, base_hwirq;
	int cpu, ret, i;

	base_hwirq = find_first_bit(ipi_available, gic_shared_intrs);
	if (base_hwirq == gic_shared_intrs)
		return -ENOMEM;

	/* check that we have enough space */
	for (i = base_hwirq; i < base_hwirq + nr_irqs; i++) {
		if (!test_bit(i, ipi_available))
			return -EBUSY;
	}
	bitmap_clear(ipi_available, base_hwirq, nr_irqs);

	/* map the hwirq for each cpu consecutively */
	i = 0;
	for_each_cpu(cpu, ipimask) {
		hwirq = GIC_SHARED_TO_HWIRQ(base_hwirq + i);

		ret = irq_domain_set_hwirq_and_chip(d, virq + i, hwirq,
						    &gic_edge_irq_controller,
						    NULL);
		if (ret)
			goto error;

		ret = irq_domain_set_hwirq_and_chip(d->parent, virq + i, hwirq,
						    &gic_edge_irq_controller,
						    NULL);
		if (ret)
			goto error;

		ret = irq_set_irq_type(virq + i, IRQ_TYPE_EDGE_RISING);
		if (ret)
			goto error;

		ret = gic_shared_irq_domain_map(d, virq + i, hwirq, cpu);
		if (ret)
			goto error;

		i++;
	}

	return 0;
error:
	bitmap_set(ipi_available, base_hwirq, nr_irqs);
	return ret;
}

static void gic_ipi_domain_free(struct irq_domain *d, unsigned int virq,
				unsigned int nr_irqs)
{
	irq_hw_number_t base_hwirq;
	struct irq_data *data;

	data = irq_get_irq_data(virq);
	if (!data)
		return;

	base_hwirq = GIC_HWIRQ_TO_SHARED(irqd_to_hwirq(data));
	bitmap_set(ipi_available, base_hwirq, nr_irqs);
}

static int gic_ipi_domain_match(struct irq_domain *d, struct device_node *node,
				enum irq_domain_bus_token bus_token)
{
	bool is_ipi;

	switch (bus_token) {
	case DOMAIN_BUS_IPI:
		is_ipi = d->bus_token == bus_token;
		return (!node || to_of_node(d->fwnode) == node) && is_ipi;
	default:
		return 0;
	}
}

static const struct irq_domain_ops gic_ipi_domain_ops = {
	.xlate = gic_ipi_domain_xlate,
	.alloc = gic_ipi_domain_alloc,
	.free = gic_ipi_domain_free,
	.match = gic_ipi_domain_match,
};

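/*
 * Common GIC initialisation. Discovers the number of shared interrupts
 * & VP(E)s from GIC_SH_CONFIG, selects the CPU pin(s) used to dispatch
 * GIC interrupts, then creates the main & IPI IRQ domains.
 */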
static void __init __gic_init(unsigned long gic_base_addr,
			      unsigned long gic_addrspace_size,
			      unsigned int cpu_vec, unsigned int irqbase,
			      struct device_node *node)
{
	unsigned int gicconfig, cpu;
	unsigned int v[2];

	__gic_base_addr = gic_base_addr;

	gic_base = ioremap_nocache(gic_base_addr, gic_addrspace_size);

	gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
	gic_shared_intrs = (gicconfig & GIC_SH_CONFIG_NUMINTRS_MSK) >>
		   GIC_SH_CONFIG_NUMINTRS_SHF;
	gic_shared_intrs = ((gic_shared_intrs + 1) * 8);

	gic_vpes = (gicconfig & GIC_SH_CONFIG_NUMVPES_MSK) >>
		  GIC_SH_CONFIG_NUMVPES_SHF;
	gic_vpes = gic_vpes + 1;

	if (cpu_has_veic) {
		/* Set EIC mode for all VPEs */
		for_each_present_cpu(cpu) {
			gic_write(GIC_REG(VPE_LOCAL, GIC_VPE_OTHER_ADDR),
				  mips_cm_vp_id(cpu));
			gic_write(GIC_REG(VPE_OTHER, GIC_VPE_CTL),
				  GIC_VPE_CTL_EIC_MODE_MSK);
		}

		/* Always use vector 1 (pin 0 + GIC_PIN_TO_VEC_OFFSET) in EIC mode */
		gic_cpu_pin = 0;
		timer_cpu_pin = gic_cpu_pin;
		set_vi_handler(gic_cpu_pin + GIC_PIN_TO_VEC_OFFSET,
			       __gic_irq_dispatch);
	} else {
		gic_cpu_pin = cpu_vec - GIC_CPU_PIN_OFFSET;
		irq_set_chained_handler(MIPS_CPU_IRQ_BASE + cpu_vec,
					gic_irq_dispatch);
		/*
		 * With the CMP implementation of SMP (deprecated), other CPUs
		 * are started by the bootloader and put into a timer based
		 * waiting poll loop. We must not re-route those CPUs' local
		 * timer interrupts as the wait instruction will never finish,
		 * so just handle whatever CPU interrupt it is routed to by
		 * default.
		 *
		 * This workaround should be removed when CMP support is
		 * dropped.
		 */
		if (IS_ENABLED(CONFIG_MIPS_CMP) &&
		    gic_local_irq_is_routable(GIC_LOCAL_INT_TIMER)) {
			timer_cpu_pin = gic_read32(GIC_REG(VPE_LOCAL,
							 GIC_VPE_TIMER_MAP)) &
					GIC_MAP_MSK;
			irq_set_chained_handler(MIPS_CPU_IRQ_BASE +
						GIC_CPU_PIN_OFFSET +
						timer_cpu_pin,
						gic_irq_dispatch);
		} else {
			timer_cpu_pin = gic_cpu_pin;
		}
	}

	gic_irq_domain = irq_domain_add_simple(node, GIC_NUM_LOCAL_INTRS +
					       gic_shared_intrs, irqbase,
					       &gic_irq_domain_ops, NULL);
	if (!gic_irq_domain)
		panic("Failed to add GIC IRQ domain");

	gic_ipi_domain = irq_domain_add_hierarchy(gic_irq_domain,
						  IRQ_DOMAIN_FLAG_IPI_PER_CPU,
						  GIC_NUM_LOCAL_INTRS + gic_shared_intrs,
						  node, &gic_ipi_domain_ops, NULL);
	if (!gic_ipi_domain)
		panic("Failed to add GIC IPI domain");

	irq_domain_update_bus_token(gic_ipi_domain, DOMAIN_BUS_IPI);

	if (node &&
	    !of_property_read_u32_array(node, "mti,reserved-ipi-vectors", v, 2)) {
		bitmap_set(ipi_resrv, v[0], v[1]);
	} else {
		/* Make the last 2 * gic_vpes available for IPIs */
		bitmap_set(ipi_resrv,
			   gic_shared_intrs - 2 * gic_vpes,
			   2 * gic_vpes);
	}

	bitmap_copy(ipi_available, ipi_resrv, GIC_MAX_INTRS);
	gic_basic_init();
}

void __init gic_init(unsigned long gic_base_addr,
		     unsigned long gic_addrspace_size,
		     unsigned int cpu_vec, unsigned int irqbase)
{
	__gic_init(gic_base_addr, gic_addrspace_size, cpu_vec, irqbase, NULL);
}

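/*
 * Device-tree probe. Picks the first CPU vector not listed in the
 * "mti,reserved-cpu-vectors" property, locates the GIC registers
 * (falling back to the CM GCRs when no "reg" property is present) &
 * enables the GIC region before initialising.
 */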
static int __init gic_of_init(struct device_node *node,
			      struct device_node *parent)
{
	struct resource res;
	unsigned int cpu_vec, i = 0, reserved = 0;
	phys_addr_t gic_base;
	size_t gic_len;

	/* Find the first available CPU vector. */
	while (!of_property_read_u32_index(node, "mti,reserved-cpu-vectors",
					   i++, &cpu_vec))
		reserved |= BIT(cpu_vec);
	for (cpu_vec = 2; cpu_vec < 8; cpu_vec++) {
		if (!(reserved & BIT(cpu_vec)))
			break;
	}
	if (cpu_vec == 8) {
		pr_err("No CPU vectors available for GIC\n");
		return -ENODEV;
	}

	if (of_address_to_resource(node, 0, &res)) {
		/*
		 * Probe the CM for the GIC base address if not specified
		 * in the device-tree.
		 */
		if (mips_cm_present()) {
			gic_base = read_gcr_gic_base() &
				~CM_GCR_GIC_BASE_GICEN_MSK;
			gic_len = 0x20000;
		} else {
			pr_err("Failed to get GIC memory range\n");
			return -ENODEV;
		}
	} else {
		gic_base = res.start;
		gic_len = resource_size(&res);
	}

	if (mips_cm_present()) {
		write_gcr_gic_base(gic_base | CM_GCR_GIC_BASE_GICEN_MSK);
		/* Ensure GIC region is enabled before trying to access it */
		__sync();
	}
	gic_present = true;

	__gic_init(gic_base, gic_len, cpu_vec, 0, node);

	return 0;
}
IRQCHIP_DECLARE(mips_gic, "mti,gic", gic_of_init);