xref: /linux/drivers/irqchip/irq-sp7021-intc.c (revision 9f2c9170934eace462499ba0bfe042cc72900173)
1 // SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
2 /*
3  * Copyright (C) Sunplus Technology Co., Ltd.
4  *       All rights reserved.
5  */
6 #include <linux/irq.h>
7 #include <linux/irqdomain.h>
8 #include <linux/io.h>
9 #include <linux/irqchip.h>
10 #include <linux/irqchip/chained_irq.h>
11 #include <linux/of_address.h>
12 #include <linux/of_irq.h>
13 
/* Valid hardware interrupt number range handled by this controller. */
#define SP_INTC_HWIRQ_MIN	0
#define SP_INTC_HWIRQ_MAX	223

#define SP_INTC_NR_IRQS		(SP_INTC_HWIRQ_MAX - SP_INTC_HWIRQ_MIN + 1)
/* Each 32-bit register covers 32 interrupts -> 7 "groups" of registers. */
#define SP_INTC_NR_GROUPS	DIV_ROUND_UP(SP_INTC_NR_IRQS, 32)
#define SP_INTC_REG_SIZE	(SP_INTC_NR_GROUPS * 4)

/* REG_GROUP_0 regs: consecutive banks of SP_INTC_REG_SIZE bytes each. */
#define REG_INTR_TYPE		(sp_intc.g0)
#define REG_INTR_POLARITY	(REG_INTR_TYPE     + SP_INTC_REG_SIZE)
#define REG_INTR_PRIORITY	(REG_INTR_POLARITY + SP_INTC_REG_SIZE)
#define REG_INTR_MASK		(REG_INTR_PRIORITY + SP_INTC_REG_SIZE)

/* REG_GROUP_1 regs */
#define REG_INTR_CLEAR		(sp_intc.g1)
#define REG_MASKED_EXT1		(REG_INTR_CLEAR    + SP_INTC_REG_SIZE)
#define REG_MASKED_EXT0		(REG_MASKED_EXT1   + SP_INTC_REG_SIZE)
/* Group-pending status lives at a fixed offset (register 31) in group 1. */
#define REG_INTR_GROUP		(REG_INTR_CLEAR    + 31 * 4)

/* Mask of all per-group pending bits for one EXT_INT output. */
#define GROUP_MASK		(BIT(SP_INTC_NR_GROUPS) - 1)
#define GROUP_SHIFT_EXT1	(0)
#define GROUP_SHIFT_EXT0	(8)
36 
37 /*
38  * When GPIO_INT0~7 set to edge trigger, doesn't work properly.
39  * WORKAROUND: change it to level trigger, and toggle the polarity
40  * at ACK/Handler to make the HW work.
41  */
#define GPIO_INT0_HWIRQ		120
#define GPIO_INT7_HWIRQ		127
/* True when @irq is one of the GPIO_INT0..7 hwirqs needing the workaround. */
#define IS_GPIO_INT(irq)					\
({								\
	u32 i = irq;						\
	(i >= GPIO_INT0_HWIRQ) && (i <= GPIO_INT7_HWIRQ);	\
})

/* index of states */
enum {
	_IS_EDGE = 0,	/* client asked for edge trigger (we fake it as level) */
	_IS_LOW,	/* active-low / falling-edge polarity requested */
	_IS_ACTIVE	/* polarity currently inverted, waiting to be restored */
};

/* Three state bits per GPIO_INT hwirq, packed into sp_intc.states. */
#define STATE_BIT(irq, idx)		(((irq) - GPIO_INT0_HWIRQ) * 3 + (idx))
#define ASSIGN_STATE(irq, idx, v)	assign_bit(STATE_BIT(irq, idx), sp_intc.states, v)
#define TEST_STATE(irq, idx)		test_bit(STATE_BIT(irq, idx), sp_intc.states)
60 
/* Driver-wide controller state; single instance (one intc per SoC). */
static struct sp_intctl {
	/*
	 * REG_GROUP_0: include type/polarity/priority/mask regs.
	 * REG_GROUP_1: include clear/masked_ext0/masked_ext1/group regs.
	 */
	void __iomem *g0; // REG_GROUP_0 base
	void __iomem *g1; // REG_GROUP_1 base

	struct irq_domain *domain;
	raw_spinlock_t lock;	/* serializes read-modify-write of MMIO regs */

	/*
	 * store GPIO_INT states
	 * each interrupt has 3 states: is_edge, is_low, is_active
	 */
	DECLARE_BITMAP(states, (GPIO_INT7_HWIRQ - GPIO_INT0_HWIRQ + 1) * 3);
} sp_intc;

/* Forward declaration; initializer follows the callbacks below. */
static struct irq_chip sp_intc_chip;
80 
81 static void sp_intc_assign_bit(u32 hwirq, void __iomem *base, bool value)
82 {
83 	u32 offset, mask;
84 	unsigned long flags;
85 	void __iomem *reg;
86 
87 	offset = (hwirq / 32) * 4;
88 	reg = base + offset;
89 
90 	raw_spin_lock_irqsave(&sp_intc.lock, flags);
91 	mask = readl_relaxed(reg);
92 	if (value)
93 		mask |= BIT(hwirq % 32);
94 	else
95 		mask &= ~BIT(hwirq % 32);
96 	writel_relaxed(mask, reg);
97 	raw_spin_unlock_irqrestore(&sp_intc.lock, flags);
98 }
99 
100 static void sp_intc_ack_irq(struct irq_data *d)
101 {
102 	u32 hwirq = d->hwirq;
103 
104 	if (unlikely(IS_GPIO_INT(hwirq) && TEST_STATE(hwirq, _IS_EDGE))) { // WORKAROUND
105 		sp_intc_assign_bit(hwirq, REG_INTR_POLARITY, !TEST_STATE(hwirq, _IS_LOW));
106 		ASSIGN_STATE(hwirq, _IS_ACTIVE, true);
107 	}
108 
109 	sp_intc_assign_bit(hwirq, REG_INTR_CLEAR, 1);
110 }
111 
112 static void sp_intc_mask_irq(struct irq_data *d)
113 {
114 	sp_intc_assign_bit(d->hwirq, REG_INTR_MASK, 0);
115 }
116 
117 static void sp_intc_unmask_irq(struct irq_data *d)
118 {
119 	sp_intc_assign_bit(d->hwirq, REG_INTR_MASK, 1);
120 }
121 
122 static int sp_intc_set_type(struct irq_data *d, unsigned int type)
123 {
124 	u32 hwirq = d->hwirq;
125 	bool is_edge = !(type & IRQ_TYPE_LEVEL_MASK);
126 	bool is_low = (type == IRQ_TYPE_LEVEL_LOW || type == IRQ_TYPE_EDGE_FALLING);
127 
128 	irq_set_handler_locked(d, is_edge ? handle_edge_irq : handle_level_irq);
129 
130 	if (unlikely(IS_GPIO_INT(hwirq) && is_edge)) { // WORKAROUND
131 		/* store states */
132 		ASSIGN_STATE(hwirq, _IS_EDGE, is_edge);
133 		ASSIGN_STATE(hwirq, _IS_LOW, is_low);
134 		ASSIGN_STATE(hwirq, _IS_ACTIVE, false);
135 		/* change to level */
136 		is_edge = false;
137 	}
138 
139 	sp_intc_assign_bit(hwirq, REG_INTR_TYPE, is_edge);
140 	sp_intc_assign_bit(hwirq, REG_INTR_POLARITY, is_low);
141 
142 	return 0;
143 }
144 
145 static int sp_intc_get_ext_irq(int ext_num)
146 {
147 	void __iomem *base = ext_num ? REG_MASKED_EXT1 : REG_MASKED_EXT0;
148 	u32 shift = ext_num ? GROUP_SHIFT_EXT1 : GROUP_SHIFT_EXT0;
149 	u32 groups;
150 	u32 pending_group;
151 	u32 group;
152 	u32 pending_irq;
153 
154 	groups = readl_relaxed(REG_INTR_GROUP);
155 	pending_group = (groups >> shift) & GROUP_MASK;
156 	if (!pending_group)
157 		return -1;
158 
159 	group = fls(pending_group) - 1;
160 	pending_irq = readl_relaxed(base + group * 4);
161 	if (!pending_irq)
162 		return -1;
163 
164 	return (group * 32) + fls(pending_irq) - 1;
165 }
166 
167 static void sp_intc_handle_ext_cascaded(struct irq_desc *desc)
168 {
169 	struct irq_chip *chip = irq_desc_get_chip(desc);
170 	int ext_num = (uintptr_t)irq_desc_get_handler_data(desc);
171 	int hwirq;
172 
173 	chained_irq_enter(chip, desc);
174 
175 	while ((hwirq = sp_intc_get_ext_irq(ext_num)) >= 0) {
176 		if (unlikely(IS_GPIO_INT(hwirq) && TEST_STATE(hwirq, _IS_ACTIVE))) { // WORKAROUND
177 			ASSIGN_STATE(hwirq, _IS_ACTIVE, false);
178 			sp_intc_assign_bit(hwirq, REG_INTR_POLARITY, TEST_STATE(hwirq, _IS_LOW));
179 		} else {
180 			generic_handle_domain_irq(sp_intc.domain, hwirq);
181 		}
182 	}
183 
184 	chained_irq_exit(chip, desc);
185 }
186 
/* irq_chip callbacks wired to the register helpers above. */
static struct irq_chip sp_intc_chip = {
	.name = "sp_intc",
	.irq_ack = sp_intc_ack_irq,
	.irq_mask = sp_intc_mask_irq,
	.irq_unmask = sp_intc_unmask_irq,
	.irq_set_type = sp_intc_set_type,
};
194 
/* Domain .map callback: wire each virq to sp_intc_chip, level handler
 * by default (sp_intc_set_type() may switch it to edge later).
 */
static int sp_intc_irq_domain_map(struct irq_domain *domain,
				  unsigned int irq, irq_hw_number_t hwirq)
{
	irq_set_chip_and_handler(irq, &sp_intc_chip, handle_level_irq);
	/* NOTE(review): chip_data is set to the chip itself, not &sp_intc
	 * (the domain host_data); nothing in this file reads it — confirm
	 * intent before relying on it.
	 */
	irq_set_chip_data(irq, &sp_intc_chip);
	irq_set_noprobe(irq);

	return 0;
}
204 
/* Linear domain ops; DT cells are <hwirq type> (two-cell translation). */
static const struct irq_domain_ops sp_intc_dm_ops = {
	.xlate = irq_domain_xlate_twocell,
	.map = sp_intc_irq_domain_map,
};
209 
210 static int sp_intc_irq_map(struct device_node *node, int i)
211 {
212 	unsigned int irq;
213 
214 	irq = irq_of_parse_and_map(node, i);
215 	if (!irq)
216 		return -ENOENT;
217 
218 	irq_set_chained_handler_and_data(irq, sp_intc_handle_ext_cascaded, (void *)(uintptr_t)i);
219 
220 	return 0;
221 }
222 
223 static int __init sp_intc_init_dt(struct device_node *node, struct device_node *parent)
224 {
225 	int i, ret;
226 
227 	sp_intc.g0 = of_iomap(node, 0);
228 	if (!sp_intc.g0)
229 		return -ENXIO;
230 
231 	sp_intc.g1 = of_iomap(node, 1);
232 	if (!sp_intc.g1) {
233 		ret = -ENXIO;
234 		goto out_unmap0;
235 	}
236 
237 	ret = sp_intc_irq_map(node, 0); // EXT_INT0
238 	if (ret)
239 		goto out_unmap1;
240 
241 	ret = sp_intc_irq_map(node, 1); // EXT_INT1
242 	if (ret)
243 		goto out_unmap1;
244 
245 	/* initial regs */
246 	for (i = 0; i < SP_INTC_NR_GROUPS; i++) {
247 		/* all mask */
248 		writel_relaxed(0, REG_INTR_MASK + i * 4);
249 		/* all edge */
250 		writel_relaxed(~0, REG_INTR_TYPE + i * 4);
251 		/* all high-active */
252 		writel_relaxed(0, REG_INTR_POLARITY + i * 4);
253 		/* all EXT_INT0 */
254 		writel_relaxed(~0, REG_INTR_PRIORITY + i * 4);
255 		/* all clear */
256 		writel_relaxed(~0, REG_INTR_CLEAR + i * 4);
257 	}
258 
259 	sp_intc.domain = irq_domain_add_linear(node, SP_INTC_NR_IRQS,
260 					       &sp_intc_dm_ops, &sp_intc);
261 	if (!sp_intc.domain) {
262 		ret = -ENOMEM;
263 		goto out_unmap1;
264 	}
265 
266 	raw_spin_lock_init(&sp_intc.lock);
267 
268 	return 0;
269 
270 out_unmap1:
271 	iounmap(sp_intc.g1);
272 out_unmap0:
273 	iounmap(sp_intc.g0);
274 
275 	return ret;
276 }
277 
/* Match DT compatible "sunplus,sp7021-intc" and probe via sp_intc_init_dt(). */
IRQCHIP_DECLARE(sp_intc, "sunplus,sp7021-intc", sp_intc_init_dt);
279