xref: /linux/drivers/irqchip/irq-sunxi-nmi.c (revision 7a9b709e7cc5ce1ffb84ce07bf6d157e1de758df)
1 /*
2  * Allwinner A20/A31 SoCs NMI IRQ chip driver.
3  *
4  * Carlo Caione <carlo.caione@gmail.com>
5  *
6  * This file is licensed under the terms of the GNU General Public
7  * License version 2.  This program is licensed "as is" without any
8  * warranty of any kind, whether express or implied.
9  */
10 
11 #define DRV_NAME	"sunxi-nmi"
12 #define pr_fmt(fmt)	DRV_NAME ": " fmt
13 
14 #include <linux/bitops.h>
15 #include <linux/device.h>
16 #include <linux/io.h>
17 #include <linux/irq.h>
18 #include <linux/interrupt.h>
19 #include <linux/irqdomain.h>
20 #include <linux/of_irq.h>
21 #include <linux/of_address.h>
22 #include <linux/irqchip.h>
23 #include <linux/irqchip/chained_irq.h>
24 
/* Bits [1:0] of the control register select the trigger type. */
#define SUNXI_NMI_SRC_TYPE_MASK	0x00000003

/* The single NMI input occupies bit 0 of the pending/enable registers. */
#define SUNXI_NMI_IRQ_BIT	BIT(0)

/*
 * For deprecated sun6i-a31-sc-nmi compatible.
 */
#define SUN6I_NMI_CTRL		0x00
#define SUN6I_NMI_PENDING	0x04
#define SUN6I_NMI_ENABLE	0x34

/* A20 layout: enable register directly follows pending. */
#define SUN7I_NMI_CTRL		0x00
#define SUN7I_NMI_PENDING	0x04
#define SUN7I_NMI_ENABLE	0x08

/* A80 layout: enable and pending registers are swapped vs. the A20. */
#define SUN9I_NMI_CTRL		0x00
#define SUN9I_NMI_ENABLE	0x04
#define SUN9I_NMI_PENDING	0x08

/*
 * Hardware encodings of the trigger type, written into the
 * SUNXI_NMI_SRC_TYPE_MASK field of the control register.
 */
enum {
	SUNXI_SRC_TYPE_LEVEL_LOW = 0,
	SUNXI_SRC_TYPE_EDGE_FALLING,
	SUNXI_SRC_TYPE_LEVEL_HIGH,
	SUNXI_SRC_TYPE_EDGE_RISING,
};
50 
/*
 * Per-SoC description of the NMI controller: register offsets plus any
 * extra bits that must be written to the enable register.
 */
struct sunxi_sc_nmi_data {
	struct {
		u32	ctrl;	/* trigger-type control register */
		u32	pend;	/* pending (ack) register */
		u32	enable;	/* interrupt enable register */
	} reg_offs;
	/*
	 * Value written to the enable register when masking everything at
	 * probe time; BIT(31) on the A523 (presumably a global-enable bit
	 * — confirm against the SoC manual), zero on all other variants.
	 */
	u32		enable_val;
};
59 
/* A31 (deprecated binding): enable register at the odd 0x34 offset. */
static const struct sunxi_sc_nmi_data sun6i_data __initconst = {
	.reg_offs.ctrl		= SUN6I_NMI_CTRL,
	.reg_offs.pend		= SUN6I_NMI_PENDING,
	.reg_offs.enable	= SUN6I_NMI_ENABLE,
};

/* A20 register layout. */
static const struct sunxi_sc_nmi_data sun7i_data __initconst = {
	.reg_offs.ctrl		= SUN7I_NMI_CTRL,
	.reg_offs.pend		= SUN7I_NMI_PENDING,
	.reg_offs.enable	= SUN7I_NMI_ENABLE,
};

/* A80 register layout (pending/enable swapped vs. A20). */
static const struct sunxi_sc_nmi_data sun9i_data __initconst = {
	.reg_offs.ctrl		= SUN9I_NMI_CTRL,
	.reg_offs.pend		= SUN9I_NMI_PENDING,
	.reg_offs.enable	= SUN9I_NMI_ENABLE,
};

/* A523 reuses the A80 layout but additionally needs BIT(31) in enable. */
static const struct sunxi_sc_nmi_data sun55i_a523_data __initconst = {
	.reg_offs.ctrl		= SUN9I_NMI_CTRL,
	.reg_offs.pend		= SUN9I_NMI_PENDING,
	.reg_offs.enable	= SUN9I_NMI_ENABLE,
	.enable_val		= BIT(31),
};
84 
/* MMIO helper: write @val to register @off of the generic chip @gc. */
static inline void sunxi_sc_nmi_write(struct irq_chip_generic *gc, u32 off, u32 val)
{
	irq_reg_writel(gc, val, off);
}
89 
/* MMIO helper: read register @off of the generic chip @gc. */
static inline u32 sunxi_sc_nmi_read(struct irq_chip_generic *gc, u32 off)
{
	return irq_reg_readl(gc, off);
}
94 
/*
 * Chained handler for the parent interrupt: demultiplex into our own
 * domain, which has exactly one hwirq (0).
 */
static void sunxi_sc_nmi_handle_irq(struct irq_desc *desc)
{
	struct irq_chip *parent_chip = irq_desc_get_chip(desc);
	struct irq_domain *nmi_domain = irq_desc_get_handler_data(desc);

	chained_irq_enter(parent_chip, desc);
	generic_handle_domain_irq(nmi_domain, 0);
	chained_irq_exit(parent_chip, desc);
}
104 
105 static int sunxi_sc_nmi_set_type(struct irq_data *data, unsigned int flow_type)
106 {
107 	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
108 	struct irq_chip_type *ct = gc->chip_types;
109 	u32 src_type_reg;
110 	u32 ctrl_off = ct->regs.type;
111 	unsigned int src_type;
112 	unsigned int i;
113 
114 	irq_gc_lock(gc);
115 
116 	switch (flow_type & IRQF_TRIGGER_MASK) {
117 	case IRQ_TYPE_EDGE_FALLING:
118 		src_type = SUNXI_SRC_TYPE_EDGE_FALLING;
119 		break;
120 	case IRQ_TYPE_EDGE_RISING:
121 		src_type = SUNXI_SRC_TYPE_EDGE_RISING;
122 		break;
123 	case IRQ_TYPE_LEVEL_HIGH:
124 		src_type = SUNXI_SRC_TYPE_LEVEL_HIGH;
125 		break;
126 	case IRQ_TYPE_NONE:
127 	case IRQ_TYPE_LEVEL_LOW:
128 		src_type = SUNXI_SRC_TYPE_LEVEL_LOW;
129 		break;
130 	default:
131 		irq_gc_unlock(gc);
132 		pr_err("Cannot assign multiple trigger modes to IRQ %d.\n",
133 			data->irq);
134 		return -EBADR;
135 	}
136 
137 	irqd_set_trigger_type(data, flow_type);
138 	irq_setup_alt_chip(data, flow_type);
139 
140 	for (i = 0; i < gc->num_ct; i++, ct++)
141 		if (ct->type & flow_type)
142 			ctrl_off = ct->regs.type;
143 
144 	src_type_reg = sunxi_sc_nmi_read(gc, ctrl_off);
145 	src_type_reg &= ~SUNXI_NMI_SRC_TYPE_MASK;
146 	src_type_reg |= src_type;
147 	sunxi_sc_nmi_write(gc, ctrl_off, src_type_reg);
148 
149 	irq_gc_unlock(gc);
150 
151 	return IRQ_SET_MASK_OK;
152 }
153 
154 static int __init sunxi_sc_nmi_irq_init(struct device_node *node,
155 					const struct sunxi_sc_nmi_data *data)
156 {
157 	unsigned int irq, clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;
158 	struct irq_chip_generic *gc;
159 	struct irq_domain *domain;
160 	int ret;
161 
162 	domain = irq_domain_add_linear(node, 1, &irq_generic_chip_ops, NULL);
163 	if (!domain) {
164 		pr_err("Could not register interrupt domain.\n");
165 		return -ENOMEM;
166 	}
167 
168 	ret = irq_alloc_domain_generic_chips(domain, 1, 2, DRV_NAME,
169 					     handle_fasteoi_irq, clr, 0,
170 					     IRQ_GC_INIT_MASK_CACHE);
171 	if (ret) {
172 		pr_err("Could not allocate generic interrupt chip.\n");
173 		goto fail_irqd_remove;
174 	}
175 
176 	irq = irq_of_parse_and_map(node, 0);
177 	if (irq <= 0) {
178 		pr_err("unable to parse irq\n");
179 		ret = -EINVAL;
180 		goto fail_irqd_remove;
181 	}
182 
183 	gc = irq_get_domain_generic_chip(domain, 0);
184 	gc->reg_base = of_io_request_and_map(node, 0, of_node_full_name(node));
185 	if (IS_ERR(gc->reg_base)) {
186 		pr_err("unable to map resource\n");
187 		ret = PTR_ERR(gc->reg_base);
188 		goto fail_irqd_remove;
189 	}
190 
191 	gc->chip_types[0].type			= IRQ_TYPE_LEVEL_MASK;
192 	gc->chip_types[0].chip.irq_mask		= irq_gc_mask_clr_bit;
193 	gc->chip_types[0].chip.irq_unmask	= irq_gc_mask_set_bit;
194 	gc->chip_types[0].chip.irq_eoi		= irq_gc_ack_set_bit;
195 	gc->chip_types[0].chip.irq_set_type	= sunxi_sc_nmi_set_type;
196 	gc->chip_types[0].chip.flags		= IRQCHIP_EOI_THREADED |
197 						  IRQCHIP_EOI_IF_HANDLED |
198 						  IRQCHIP_SKIP_SET_WAKE;
199 	gc->chip_types[0].regs.ack		= data->reg_offs.pend;
200 	gc->chip_types[0].regs.mask		= data->reg_offs.enable;
201 	gc->chip_types[0].regs.type		= data->reg_offs.ctrl;
202 
203 	gc->chip_types[1].type			= IRQ_TYPE_EDGE_BOTH;
204 	gc->chip_types[1].chip.irq_ack		= irq_gc_ack_set_bit;
205 	gc->chip_types[1].chip.irq_mask		= irq_gc_mask_clr_bit;
206 	gc->chip_types[1].chip.irq_unmask	= irq_gc_mask_set_bit;
207 	gc->chip_types[1].chip.irq_set_type	= sunxi_sc_nmi_set_type;
208 	gc->chip_types[1].regs.ack		= data->reg_offs.pend;
209 	gc->chip_types[1].regs.mask		= data->reg_offs.enable;
210 	gc->chip_types[1].regs.type		= data->reg_offs.ctrl;
211 	gc->chip_types[1].handler		= handle_edge_irq;
212 
213 	/* Disable any active interrupts */
214 	sunxi_sc_nmi_write(gc, data->reg_offs.enable, data->enable_val);
215 
216 	/* Clear any pending NMI interrupts */
217 	sunxi_sc_nmi_write(gc, data->reg_offs.pend, SUNXI_NMI_IRQ_BIT);
218 
219 	irq_set_chained_handler_and_data(irq, sunxi_sc_nmi_handle_irq, domain);
220 
221 	return 0;
222 
223 fail_irqd_remove:
224 	irq_domain_remove(domain);
225 
226 	return ret;
227 }
228 
/* Probe entry for the deprecated A31 binding (enable reg at 0x34). */
static int __init sun6i_sc_nmi_irq_init(struct device_node *node,
					struct device_node *parent)
{
	return sunxi_sc_nmi_irq_init(node, &sun6i_data);
}
IRQCHIP_DECLARE(sun6i_sc_nmi, "allwinner,sun6i-a31-sc-nmi", sun6i_sc_nmi_irq_init);
235 
/* Probe entry for the A20 binding. */
static int __init sun7i_sc_nmi_irq_init(struct device_node *node,
					struct device_node *parent)
{
	return sunxi_sc_nmi_irq_init(node, &sun7i_data);
}
IRQCHIP_DECLARE(sun7i_sc_nmi, "allwinner,sun7i-a20-sc-nmi", sun7i_sc_nmi_irq_init);
242 
/* Probe entry for the A80 binding. */
static int __init sun9i_nmi_irq_init(struct device_node *node,
				     struct device_node *parent)
{
	return sunxi_sc_nmi_irq_init(node, &sun9i_data);
}
IRQCHIP_DECLARE(sun9i_nmi, "allwinner,sun9i-a80-nmi", sun9i_nmi_irq_init);
249 
/* Probe entry for the A523 binding (A80 layout plus BIT(31) in enable). */
static int __init sun55i_nmi_irq_init(struct device_node *node,
				      struct device_node *parent)
{
	return sunxi_sc_nmi_irq_init(node, &sun55i_a523_data);
}
IRQCHIP_DECLARE(sun55i_nmi, "allwinner,sun55i-a523-nmi", sun55i_nmi_irq_init);
256