/*
 * Xtensa MX interrupt distributor
 *
 * Copyright (C) 2002 - 2013 Tensilica, Inc.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/interrupt.h>
#include <linux/irqdomain.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/xtensa-mx.h>
#include <linux/of.h>

#include <asm/mxregs.h>

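/*
 * Interrupt number layout used below: hwirqs below HW_IRQ_IPI_COUNT are
 * the per-CPU IPIs; external interrupt numbers at or above HW_IRQ_MX_BASE
 * are controlled through the MX distributor registers (MIENG/MIENGSET/
 * MIROUT); external interrupt numbers taken from the device tree are
 * offset by HW_IRQ_EXTERN_BASE during translation.
 */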
#define HW_IRQ_IPI_COUNT 2
#define HW_IRQ_MX_BASE 2
#define HW_IRQ_EXTERN_BASE 3

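/* Per-CPU software copy of the INTENABLE special register contents. */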
static DEFINE_PER_CPU(unsigned int, cached_irq_mask);

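/*
 * The first HW_IRQ_IPI_COUNT hwirqs map as per-CPU IPIs; all other hwirqs
 * map as ordinary Xtensa interrupts that target a single CPU at a time.
 */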
static int xtensa_mx_irq_map(struct irq_domain *d, unsigned int irq,
		irq_hw_number_t hw)
{
	if (hw < HW_IRQ_IPI_COUNT) {
		struct irq_chip *irq_chip = d->host_data;

		irq_set_chip_and_handler_name(irq, irq_chip,
				handle_percpu_irq, "ipi");
		irq_set_status_flags(irq, IRQ_LEVEL);
		return 0;
	}
	irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
	return xtensa_irq_map(d, irq, hw);
}

/*
 * Device Tree IRQ specifier translation function which works with one or
 * two cell bindings. The first cell value maps directly to the hwirq
 * number; the second cell, if present, specifies whether the interrupt
 * number is external (1) or internal (0). See the example specifiers
 * below the function.
 */
static int xtensa_mx_irq_domain_xlate(struct irq_domain *d,
		struct device_node *ctrlr,
		const u32 *intspec, unsigned int intsize,
		unsigned long *out_hwirq, unsigned int *out_type)
{
	return xtensa_irq_domain_xlate(intspec, intsize,
			intspec[0], intspec[0] + HW_IRQ_EXTERN_BASE,
			out_hwirq, out_type);
}
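
/*
 * Example interrupt specifiers (an assumed DT fragment, for illustration
 * only; real numbers depend on the core configuration):
 *
 *	interrupts = <6>;	one cell: internal IRQ 6
 *	interrupts = <6 0>;	two cells, internal: same as above
 *	interrupts = <1 1>;	two cells, external: IRQ 1, offset by
 *				HW_IRQ_EXTERN_BASE before translation
 */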

static const struct irq_domain_ops xtensa_mx_irq_domain_ops = {
	.xlate = xtensa_mx_irq_domain_xlate,
	.map = xtensa_mx_irq_map,
};

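/*
 * Enable all external interrupts in this CPU's cached mask and INTENABLE
 * special register. Called for the boot CPU from xtensa_mx_init_common()
 * and for secondary CPUs during SMP bringup, hence not static.
 */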
void secondary_init_irq(void)
{
	__this_cpu_write(cached_irq_mask,
			XCHAL_INTTYPE_MASK_EXTERN_EDGE |
			XCHAL_INTTYPE_MASK_EXTERN_LEVEL);
	xtensa_set_sr(XCHAL_INTTYPE_MASK_EXTERN_EDGE |
			XCHAL_INTTYPE_MASK_EXTERN_LEVEL, intenable);
}

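/*
 * Interrupts routed through the MX distributor are masked globally via
 * MIENG (mask) and MIENGSET (unmask); everything else is masked per CPU
 * in the INTENABLE special register, using cached_irq_mask as the
 * software copy.
 */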
static void xtensa_mx_irq_mask(struct irq_data *d)
{
	unsigned int mask = 1u << d->hwirq;

	if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE |
		    XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) {
		unsigned int ext_irq = xtensa_get_ext_irq_no(d->hwirq);

		if (ext_irq >= HW_IRQ_MX_BASE) {
			set_er(1u << (ext_irq - HW_IRQ_MX_BASE), MIENG);
			return;
		}
	}
	mask = __this_cpu_read(cached_irq_mask) & ~mask;
	__this_cpu_write(cached_irq_mask, mask);
	xtensa_set_sr(mask, intenable);
}

static void xtensa_mx_irq_unmask(struct irq_data *d)
{
	unsigned int mask = 1u << d->hwirq;

	if (mask & (XCHAL_INTTYPE_MASK_EXTERN_EDGE |
		    XCHAL_INTTYPE_MASK_EXTERN_LEVEL)) {
		unsigned int ext_irq = xtensa_get_ext_irq_no(d->hwirq);

		if (ext_irq >= HW_IRQ_MX_BASE) {
			set_er(1u << (ext_irq - HW_IRQ_MX_BASE), MIENGSET);
			return;
		}
	}
	mask |= __this_cpu_read(cached_irq_mask);
	__this_cpu_write(cached_irq_mask, mask);
	xtensa_set_sr(mask, intenable);
}

static void xtensa_mx_irq_enable(struct irq_data *d)
{
	xtensa_mx_irq_unmask(d);
}

static void xtensa_mx_irq_disable(struct irq_data *d)
{
	xtensa_mx_irq_mask(d);
}

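/* Acknowledge an interrupt by clearing its pending bit via INTCLEAR. */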
static void xtensa_mx_irq_ack(struct irq_data *d)
{
	xtensa_set_sr(1u << d->hwirq, intclear);
}

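/*
 * Only software interrupts can be retriggered from the CPU, by setting
 * their pending bit through the INTSET special register; warn and bail
 * out for anything else.
 */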
static int xtensa_mx_irq_retrigger(struct irq_data *d)
{
	unsigned int mask = 1u << d->hwirq;

	if (WARN_ON(mask & ~XCHAL_INTTYPE_MASK_SOFTWARE))
		return 0;
	xtensa_set_sr(mask, intset);
	return 1;
}

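/*
 * MX-distributed interrupts are single-target: pick one online CPU from
 * the requested mask and write a one-hot CPU bitmap to the interrupt's
 * MIROUT register.
 */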
static int xtensa_mx_irq_set_affinity(struct irq_data *d,
		const struct cpumask *dest, bool force)
{
	int cpu = cpumask_any_and(dest, cpu_online_mask);
	unsigned int mask = 1u << cpu;

	set_er(mask, MIROUT(d->hwirq - HW_IRQ_MX_BASE));
	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	return 0;
}

static struct irq_chip xtensa_mx_irq_chip = {
	.name		= "xtensa-mx",
	.irq_enable	= xtensa_mx_irq_enable,
	.irq_disable	= xtensa_mx_irq_disable,
	.irq_mask	= xtensa_mx_irq_mask,
	.irq_unmask	= xtensa_mx_irq_unmask,
	.irq_ack	= xtensa_mx_irq_ack,
	.irq_retrigger	= xtensa_mx_irq_retrigger,
	.irq_set_affinity = xtensa_mx_irq_set_affinity,
};

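/*
 * Initialization shared by the legacy and device tree probe paths: make
 * this the default IRQ domain and set up the boot CPU's interrupt mask
 * and the default external interrupt routing.
 */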
static void __init xtensa_mx_init_common(struct irq_domain *root_domain)
{
	unsigned int i;

	irq_set_default_host(root_domain);
	secondary_init_irq();

	/* Initialize default IRQ routing to CPU 0 */
	for (i = 0; i < XCHAL_NUM_EXTINTERRUPTS; ++i)
		set_er(1, MIROUT(i));
}

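/*
 * Legacy (non-device-tree) entry point: pre-allocates mappings for hwirqs
 * 0..NR_IRQS - 2 starting at Linux IRQ 1.
 */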
int __init xtensa_mx_init_legacy(struct device_node *interrupt_parent)
{
	struct irq_domain *root_domain =
		irq_domain_add_legacy(NULL, NR_IRQS - 1, 1, 0,
				&xtensa_mx_irq_domain_ops,
				&xtensa_mx_irq_chip);

	xtensa_mx_init_common(root_domain);
	return 0;
}

static int __init xtensa_mx_init(struct device_node *np,
		struct device_node *interrupt_parent)
{
	struct irq_domain *root_domain =
		irq_domain_add_linear(np, NR_IRQS, &xtensa_mx_irq_domain_ops,
				&xtensa_mx_irq_chip);

	xtensa_mx_init_common(root_domain);
	return 0;
}
IRQCHIP_DECLARE(xtensa_mx_irq_chip, "cdns,xtensa-mx", xtensa_mx_init);