xref: /linux/drivers/irqchip/irq-renesas-rzg2l.c (revision fc5ced75d6dffc9e2a441520b7dc587b95281f86)
// SPDX-License-Identifier: GPL-2.0
/*
 * Renesas RZ/G2L IRQC Driver
 *
 * Copyright (C) 2022 Renesas Electronics Corporation.
 *
 * Author: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
 */

#include <linux/bitfield.h>
#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/spinlock.h>
#include <linux/syscore_ops.h>

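/*
 * hwirq map used by this driver: hwirqs 1-8 are the dedicated IRQ interrupts
 * and hwirqs 9-40 are the 32 TINT (GPIO) interrupts routed through the pin
 * controller.
 */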
#define IRQC_IRQ_START			1
#define IRQC_IRQ_COUNT			8
#define IRQC_TINT_START			(IRQC_IRQ_START + IRQC_IRQ_COUNT)
#define IRQC_TINT_COUNT			32
#define IRQC_NUM_IRQ			(IRQC_TINT_START + IRQC_TINT_COUNT)

#define ISCR				0x10
#define IITSR				0x14
#define TSCR				0x20
#define TITSR(n)			(0x24 + (n) * 4)
#define TITSR0_MAX_INT			16
#define TITSEL_WIDTH			0x2
#define TSSR(n)				(0x30 + ((n) * 4))
#define TIEN				BIT(7)
#define TSSEL_SHIFT(n)			(8 * (n))
#define TSSEL_MASK			GENMASK(7, 0)
#define IRQ_MASK			0x3
#define IMSK				0x10010
#define TMSK				0x10020

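/*
 * Each 32-bit TSSR register packs four 8-bit TINT slots; a slot holds the
 * GPIOINT source select (TSSEL) plus the TIEN enable bit. For TINT n,
 * TSSR_INDEX(n) picks the register and TSSR_OFFSET(n) the slot within it.
 */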
#define TSSR_OFFSET(n)			((n) % 4)
#define TSSR_INDEX(n)			((n) / 4)

#define TITSR_TITSEL_EDGE_RISING	0
#define TITSR_TITSEL_EDGE_FALLING	1
#define TITSR_TITSEL_LEVEL_HIGH		2
#define TITSR_TITSEL_LEVEL_LOW		3

#define IITSR_IITSEL(n, sense)		((sense) << ((n) * 2))
#define IITSR_IITSEL_LEVEL_LOW		0
#define IITSR_IITSEL_EDGE_FALLING	1
#define IITSR_IITSEL_EDGE_RISING	2
#define IITSR_IITSEL_EDGE_BOTH		3
#define IITSR_IITSEL_MASK(n)		IITSR_IITSEL((n), 3)

#define TINT_EXTRACT_HWIRQ(x)		FIELD_GET(GENMASK(15, 0), (x))
#define TINT_EXTRACT_GPIOINT(x)		FIELD_GET(GENMASK(31, 16), (x))
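/*
 * The pin controller encodes both values in a single fwspec cell: the Linux
 * hwirq in bits 15-0 and the GPIOINT number in bits 31-16. For example,
 * 0x00070009 selects GPIOINT7 as the source for hwirq 9 (TINT slot 0).
 */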

/**
 * struct rzg2l_irqc_reg_cache - registers cache (necessary for suspend/resume)
 * @iitsr: IITSR register
 * @titsr: TITSR registers
 */
struct rzg2l_irqc_reg_cache {
	u32	iitsr;
	u32	titsr[2];
};

/**
 * struct rzg2l_irqc_priv - IRQ controller private data structure
 * @base:	Controller's base address
 * @irqchip:	Pointer to struct irq_chip
 * @fwspec:	IRQ firmware specific data
 * @lock:	Lock to serialize access to hardware registers
 * @cache:	Registers cache for suspend/resume
 */
static struct rzg2l_irqc_priv {
	void __iomem			*base;
	const struct irq_chip		*irqchip;
	struct irq_fwspec		fwspec[IRQC_NUM_IRQ];
	raw_spinlock_t			lock;
	struct rzg2l_irqc_reg_cache	cache;
} *rzg2l_irqc_data;

static struct rzg2l_irqc_priv *irq_data_to_priv(struct irq_data *data)
{
	return data->domain->host_data;
}

static void rzg2l_clear_irq_int(struct rzg2l_irqc_priv *priv, unsigned int hwirq)
{
	unsigned int hw_irq = hwirq - IRQC_IRQ_START;
	u32 bit = BIT(hw_irq);
	u32 iitsr, iscr;

	iscr = readl_relaxed(priv->base + ISCR);
	iitsr = readl_relaxed(priv->base + IITSR);

	/*
	 * ISCR can only be cleared if the type is falling-edge, rising-edge or
	 * falling/rising-edge.
	 */
	if ((iscr & bit) && (iitsr & IITSR_IITSEL_MASK(hw_irq))) {
		writel_relaxed(iscr & ~bit, priv->base + ISCR);
		/*
		 * Make sure the posted write is flushed to prevent the
		 * just-handled interrupt from being raised again.
		 */
		readl_relaxed(priv->base + ISCR);
	}
}

static void rzg2l_clear_tint_int(struct rzg2l_irqc_priv *priv, unsigned int hwirq)
{
	u32 bit = BIT(hwirq - IRQC_TINT_START);
	u32 reg;

	reg = readl_relaxed(priv->base + TSCR);
	if (reg & bit) {
		writel_relaxed(reg & ~bit, priv->base + TSCR);
		/*
		 * Make sure the posted write is flushed to prevent the
		 * just-handled interrupt from being raised again.
		 */
		readl_relaxed(priv->base + TSCR);
	}
}

static void rzg2l_irqc_eoi(struct irq_data *d)
{
	struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
	unsigned int hw_irq = irqd_to_hwirq(d);

	raw_spin_lock(&priv->lock);
	if (hw_irq >= IRQC_IRQ_START && hw_irq <= IRQC_IRQ_COUNT)
		rzg2l_clear_irq_int(priv, hw_irq);
	else if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ)
		rzg2l_clear_tint_int(priv, hw_irq);
	raw_spin_unlock(&priv->lock);
	irq_chip_eoi_parent(d);
}

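/*
 * The RZ/Five variant ("renesas,r9a07g043f-irqc") has additional IRQ (IMSK)
 * and TINT (TMSK) mask registers, so its callbacks update those in addition
 * to the regular parent-chip masking.
 */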
static void rzfive_irqc_mask_irq_interrupt(struct rzg2l_irqc_priv *priv,
					   unsigned int hwirq)
{
	u32 bit = BIT(hwirq - IRQC_IRQ_START);

	writel_relaxed(readl_relaxed(priv->base + IMSK) | bit, priv->base + IMSK);
}

static void rzfive_irqc_unmask_irq_interrupt(struct rzg2l_irqc_priv *priv,
					     unsigned int hwirq)
{
	u32 bit = BIT(hwirq - IRQC_IRQ_START);

	writel_relaxed(readl_relaxed(priv->base + IMSK) & ~bit, priv->base + IMSK);
}

static void rzfive_irqc_mask_tint_interrupt(struct rzg2l_irqc_priv *priv,
					    unsigned int hwirq)
{
	u32 bit = BIT(hwirq - IRQC_TINT_START);

	writel_relaxed(readl_relaxed(priv->base + TMSK) | bit, priv->base + TMSK);
}

static void rzfive_irqc_unmask_tint_interrupt(struct rzg2l_irqc_priv *priv,
					      unsigned int hwirq)
{
	u32 bit = BIT(hwirq - IRQC_TINT_START);

	writel_relaxed(readl_relaxed(priv->base + TMSK) & ~bit, priv->base + TMSK);
}

static void rzfive_irqc_mask(struct irq_data *d)
{
	struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
	unsigned int hwirq = irqd_to_hwirq(d);

	raw_spin_lock(&priv->lock);
	if (hwirq >= IRQC_IRQ_START && hwirq <= IRQC_IRQ_COUNT)
		rzfive_irqc_mask_irq_interrupt(priv, hwirq);
	else if (hwirq >= IRQC_TINT_START && hwirq < IRQC_NUM_IRQ)
		rzfive_irqc_mask_tint_interrupt(priv, hwirq);
	raw_spin_unlock(&priv->lock);
	irq_chip_mask_parent(d);
}

static void rzfive_irqc_unmask(struct irq_data *d)
{
	struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
	unsigned int hwirq = irqd_to_hwirq(d);

	raw_spin_lock(&priv->lock);
	if (hwirq >= IRQC_IRQ_START && hwirq <= IRQC_IRQ_COUNT)
		rzfive_irqc_unmask_irq_interrupt(priv, hwirq);
	else if (hwirq >= IRQC_TINT_START && hwirq < IRQC_NUM_IRQ)
		rzfive_irqc_unmask_tint_interrupt(priv, hwirq);
	raw_spin_unlock(&priv->lock);
	irq_chip_unmask_parent(d);
}

static void rzfive_tint_irq_endisable(struct irq_data *d, bool enable)
{
	struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
	unsigned int hwirq = irqd_to_hwirq(d);

	if (hwirq >= IRQC_TINT_START && hwirq < IRQC_NUM_IRQ) {
		u32 offset = hwirq - IRQC_TINT_START;
		u32 tssr_offset = TSSR_OFFSET(offset);
		u8 tssr_index = TSSR_INDEX(offset);
		u32 reg;

		raw_spin_lock(&priv->lock);
		if (enable)
			rzfive_irqc_unmask_tint_interrupt(priv, hwirq);
		else
			rzfive_irqc_mask_tint_interrupt(priv, hwirq);
		reg = readl_relaxed(priv->base + TSSR(tssr_index));
		if (enable)
			reg |= TIEN << TSSEL_SHIFT(tssr_offset);
		else
			reg &= ~(TIEN << TSSEL_SHIFT(tssr_offset));
		writel_relaxed(reg, priv->base + TSSR(tssr_index));
		raw_spin_unlock(&priv->lock);
	} else {
		raw_spin_lock(&priv->lock);
		if (enable)
			rzfive_irqc_unmask_irq_interrupt(priv, hwirq);
		else
			rzfive_irqc_mask_irq_interrupt(priv, hwirq);
		raw_spin_unlock(&priv->lock);
	}
}

static void rzfive_irqc_irq_disable(struct irq_data *d)
{
	irq_chip_disable_parent(d);
	rzfive_tint_irq_endisable(d, false);
}

static void rzfive_irqc_irq_enable(struct irq_data *d)
{
	rzfive_tint_irq_endisable(d, true);
	irq_chip_enable_parent(d);
}

static void rzg2l_tint_irq_endisable(struct irq_data *d, bool enable)
{
	unsigned int hw_irq = irqd_to_hwirq(d);

	if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ) {
		struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
		u32 offset = hw_irq - IRQC_TINT_START;
		u32 tssr_offset = TSSR_OFFSET(offset);
		u8 tssr_index = TSSR_INDEX(offset);
		u32 reg;

		raw_spin_lock(&priv->lock);
		reg = readl_relaxed(priv->base + TSSR(tssr_index));
		if (enable)
			reg |= TIEN << TSSEL_SHIFT(tssr_offset);
		else
			reg &= ~(TIEN << TSSEL_SHIFT(tssr_offset));
		writel_relaxed(reg, priv->base + TSSR(tssr_index));
		raw_spin_unlock(&priv->lock);
	}
}

static void rzg2l_irqc_irq_disable(struct irq_data *d)
{
	irq_chip_disable_parent(d);
	rzg2l_tint_irq_endisable(d, false);
}

static void rzg2l_irqc_irq_enable(struct irq_data *d)
{
	rzg2l_tint_irq_endisable(d, true);
	irq_chip_enable_parent(d);
}

static int rzg2l_irq_set_type(struct irq_data *d, unsigned int type)
{
	struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
	unsigned int hwirq = irqd_to_hwirq(d);
	u32 iitseln = hwirq - IRQC_IRQ_START;
	bool clear_irq_int = false;
	u16 sense, tmp;

	switch (type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_LEVEL_LOW:
		sense = IITSR_IITSEL_LEVEL_LOW;
		break;

	case IRQ_TYPE_EDGE_FALLING:
		sense = IITSR_IITSEL_EDGE_FALLING;
		clear_irq_int = true;
		break;

	case IRQ_TYPE_EDGE_RISING:
		sense = IITSR_IITSEL_EDGE_RISING;
		clear_irq_int = true;
		break;

	case IRQ_TYPE_EDGE_BOTH:
		sense = IITSR_IITSEL_EDGE_BOTH;
		clear_irq_int = true;
		break;

	default:
		return -EINVAL;
	}

	raw_spin_lock(&priv->lock);
	tmp = readl_relaxed(priv->base + IITSR);
	tmp &= ~IITSR_IITSEL_MASK(iitseln);
	tmp |= IITSR_IITSEL(iitseln, sense);
	if (clear_irq_int)
		rzg2l_clear_irq_int(priv, hwirq);
	writel_relaxed(tmp, priv->base + IITSR);
	raw_spin_unlock(&priv->lock);

	return 0;
}

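/*
 * Select the GPIOINT source for a TINT slot while keeping TIEN clear, so that
 * the trigger type can be changed without spurious interrupts. The returned
 * value carries the slot's original TIEN state for the caller to write back
 * once the new configuration is in place.
 */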
static u32 rzg2l_disable_tint_and_set_tint_source(struct irq_data *d, struct rzg2l_irqc_priv *priv,
						  u32 reg, u32 tssr_offset, u8 tssr_index)
{
	u32 tint = (u32)(uintptr_t)irq_data_get_irq_chip_data(d);
	u32 tien = reg & (TIEN << TSSEL_SHIFT(tssr_offset));

	/* Clear the relevant byte in reg */
	reg &= ~(TSSEL_MASK << TSSEL_SHIFT(tssr_offset));
	/* Set TINT and leave TIEN clear */
	reg |= tint << TSSEL_SHIFT(tssr_offset);
	writel_relaxed(reg, priv->base + TSSR(tssr_index));

	return reg | tien;
}

static int rzg2l_tint_set_edge(struct irq_data *d, unsigned int type)
{
	struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
	unsigned int hwirq = irqd_to_hwirq(d);
	u32 titseln = hwirq - IRQC_TINT_START;
	u32 tssr_offset = TSSR_OFFSET(titseln);
	u8 tssr_index = TSSR_INDEX(titseln);
	u8 index, sense;
	u32 reg, tssr;

	switch (type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_EDGE_RISING:
		sense = TITSR_TITSEL_EDGE_RISING;
		break;

	case IRQ_TYPE_EDGE_FALLING:
		sense = TITSR_TITSEL_EDGE_FALLING;
		break;

	default:
		return -EINVAL;
	}

	index = 0;
	if (titseln >= TITSR0_MAX_INT) {
		titseln -= TITSR0_MAX_INT;
		index = 1;
	}

	raw_spin_lock(&priv->lock);
	tssr = readl_relaxed(priv->base + TSSR(tssr_index));
	tssr = rzg2l_disable_tint_and_set_tint_source(d, priv, tssr, tssr_offset, tssr_index);
	reg = readl_relaxed(priv->base + TITSR(index));
	reg &= ~(IRQ_MASK << (titseln * TITSEL_WIDTH));
	reg |= sense << (titseln * TITSEL_WIDTH);
	writel_relaxed(reg, priv->base + TITSR(index));
	rzg2l_clear_tint_int(priv, hwirq);
	writel_relaxed(tssr, priv->base + TSSR(tssr_index));
	raw_spin_unlock(&priv->lock);

	return 0;
}

static int rzg2l_irqc_set_type(struct irq_data *d, unsigned int type)
{
	unsigned int hw_irq = irqd_to_hwirq(d);
	int ret = -EINVAL;

	if (hw_irq >= IRQC_IRQ_START && hw_irq <= IRQC_IRQ_COUNT)
		ret = rzg2l_irq_set_type(d, type);
	else if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ)
		ret = rzg2l_tint_set_edge(d, type);
	if (ret)
		return ret;

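	/*
	 * The configured sense is handled by the IRQC itself; its output to
	 * the parent controller is always requested as an active-high level.
	 */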
	return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH);
}

static int rzg2l_irqc_irq_suspend(void)
{
	struct rzg2l_irqc_reg_cache *cache = &rzg2l_irqc_data->cache;
	void __iomem *base = rzg2l_irqc_data->base;

	cache->iitsr = readl_relaxed(base + IITSR);
	for (u8 i = 0; i < 2; i++)
		cache->titsr[i] = readl_relaxed(base + TITSR(i));

	return 0;
}

static void rzg2l_irqc_irq_resume(void)
{
	struct rzg2l_irqc_reg_cache *cache = &rzg2l_irqc_data->cache;
	void __iomem *base = rzg2l_irqc_data->base;

	/*
	 * Restore only the interrupt type. TSSRx will be restored at the
	 * request of the pin controller to avoid spurious interrupts due
	 * to invalid PIN states.
	 */
	for (u8 i = 0; i < 2; i++)
		writel_relaxed(cache->titsr[i], base + TITSR(i));
	writel_relaxed(cache->iitsr, base + IITSR);
}

static struct syscore_ops rzg2l_irqc_syscore_ops = {
	.suspend	= rzg2l_irqc_irq_suspend,
	.resume		= rzg2l_irqc_irq_resume,
};

static const struct irq_chip rzg2l_irqc_chip = {
	.name			= "rzg2l-irqc",
	.irq_eoi		= rzg2l_irqc_eoi,
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_disable		= rzg2l_irqc_irq_disable,
	.irq_enable		= rzg2l_irqc_irq_enable,
	.irq_get_irqchip_state	= irq_chip_get_parent_state,
	.irq_set_irqchip_state	= irq_chip_set_parent_state,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
	.irq_set_type		= rzg2l_irqc_set_type,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
	.flags			= IRQCHIP_MASK_ON_SUSPEND |
				  IRQCHIP_SET_TYPE_MASKED |
				  IRQCHIP_SKIP_SET_WAKE,
};

static const struct irq_chip rzfive_irqc_chip = {
	.name			= "rzfive-irqc",
	.irq_eoi		= rzg2l_irqc_eoi,
	.irq_mask		= rzfive_irqc_mask,
	.irq_unmask		= rzfive_irqc_unmask,
	.irq_disable		= rzfive_irqc_irq_disable,
	.irq_enable		= rzfive_irqc_irq_enable,
	.irq_get_irqchip_state	= irq_chip_get_parent_state,
	.irq_set_irqchip_state	= irq_chip_set_parent_state,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
	.irq_set_type		= rzg2l_irqc_set_type,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
	.flags			= IRQCHIP_MASK_ON_SUSPEND |
				  IRQCHIP_SET_TYPE_MASKED |
				  IRQCHIP_SKIP_SET_WAKE,
};

static int rzg2l_irqc_alloc(struct irq_domain *domain, unsigned int virq,
			    unsigned int nr_irqs, void *arg)
{
	struct rzg2l_irqc_priv *priv = domain->host_data;
	unsigned long tint = 0;
	irq_hw_number_t hwirq;
	unsigned int type;
	int ret;

	ret = irq_domain_translate_twocell(domain, arg, &hwirq, &type);
	if (ret)
		return ret;

	/*
	 * For TINT interrupts, i.e. where the pinctrl driver is a child of the
	 * IRQC domain, the hwirq and the TINT are encoded in fwspec->param[0]:
	 * TINT hwirqs range from 9-40, with the hwirq held in bits 0-15 and
	 * the TINT in bits 16-31. The TINT supplied by the pinctrl driver
	 * needs to be programmed into the IRQC registers to enable a given
	 * GPIO pin as an interrupt.
	 */
	if (hwirq > IRQC_IRQ_COUNT) {
		tint = TINT_EXTRACT_GPIOINT(hwirq);
		hwirq = TINT_EXTRACT_HWIRQ(hwirq);

		if (hwirq < IRQC_TINT_START)
			return -EINVAL;
	}

	if (hwirq > (IRQC_NUM_IRQ - 1))
		return -EINVAL;

	ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq, priv->irqchip,
					    (void *)(uintptr_t)tint);
	if (ret)
		return ret;

	return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &priv->fwspec[hwirq]);
}

static const struct irq_domain_ops rzg2l_irqc_domain_ops = {
	.alloc = rzg2l_irqc_alloc,
	.free = irq_domain_free_irqs_common,
	.translate = irq_domain_translate_twocell,
};

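/*
 * Cache the parent-domain fwspec for each of the IRQC interrupts so that
 * rzg2l_irqc_alloc() can forward allocations to the parent domain.
 */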
static int rzg2l_irqc_parse_interrupts(struct rzg2l_irqc_priv *priv,
				       struct device_node *np)
{
	struct of_phandle_args map;
	unsigned int i;
	int ret;

	for (i = 0; i < IRQC_NUM_IRQ; i++) {
		ret = of_irq_parse_one(np, i, &map);
		if (ret)
			return ret;
		of_phandle_args_to_fwspec(np, map.args, map.args_count,
					  &priv->fwspec[i]);
	}

	return 0;
}

static int rzg2l_irqc_common_init(struct device_node *node, struct device_node *parent,
				  const struct irq_chip *irq_chip)
{
	struct platform_device *pdev = of_find_device_by_node(node);
	struct device *dev __free(put_device) = pdev ? &pdev->dev : NULL;
	struct irq_domain *irq_domain, *parent_domain;
	struct reset_control *resetn;
	int ret;

	if (!pdev)
		return -ENODEV;

	parent_domain = irq_find_host(parent);
	if (!parent_domain) {
		dev_err(&pdev->dev, "cannot find parent domain\n");
		return -ENODEV;
	}

	rzg2l_irqc_data = devm_kzalloc(&pdev->dev, sizeof(*rzg2l_irqc_data), GFP_KERNEL);
	if (!rzg2l_irqc_data)
		return -ENOMEM;

	rzg2l_irqc_data->irqchip = irq_chip;

	rzg2l_irqc_data->base = devm_of_iomap(&pdev->dev, pdev->dev.of_node, 0, NULL);
	if (IS_ERR(rzg2l_irqc_data->base))
		return PTR_ERR(rzg2l_irqc_data->base);

	ret = rzg2l_irqc_parse_interrupts(rzg2l_irqc_data, node);
	if (ret) {
		dev_err(&pdev->dev, "cannot parse interrupts: %d\n", ret);
		return ret;
	}

	resetn = devm_reset_control_get_exclusive(&pdev->dev, NULL);
	if (IS_ERR(resetn))
		return PTR_ERR(resetn);

	ret = reset_control_deassert(resetn);
	if (ret) {
		dev_err(&pdev->dev, "failed to deassert resetn pin, %d\n", ret);
		return ret;
	}

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "pm_runtime_resume_and_get failed: %d\n", ret);
		goto pm_disable;
	}

	raw_spin_lock_init(&rzg2l_irqc_data->lock);

	irq_domain = irq_domain_add_hierarchy(parent_domain, 0, IRQC_NUM_IRQ,
					      node, &rzg2l_irqc_domain_ops,
					      rzg2l_irqc_data);
	if (!irq_domain) {
		dev_err(&pdev->dev, "failed to add irq domain\n");
		ret = -ENOMEM;
		goto pm_put;
	}

	register_syscore_ops(&rzg2l_irqc_syscore_ops);

	/*
	 * Prevent the cleanup function from invoking put_device by assigning
	 * NULL to dev.
	 *
	 * make coccicheck will complain about missing put_device calls, but
	 * those are false positives, as dev will be automatically "put" via
	 * __free_put_device on the failing path.
	 * On the successful path we don't actually want to "put" dev.
	 */
	dev = NULL;

	return 0;

pm_put:
	pm_runtime_put(&pdev->dev);
pm_disable:
	pm_runtime_disable(&pdev->dev);
	reset_control_assert(resetn);
	return ret;
}

static int __init rzg2l_irqc_init(struct device_node *node,
				  struct device_node *parent)
{
	return rzg2l_irqc_common_init(node, parent, &rzg2l_irqc_chip);
}

static int __init rzfive_irqc_init(struct device_node *node,
				   struct device_node *parent)
{
	return rzg2l_irqc_common_init(node, parent, &rzfive_irqc_chip);
}

IRQCHIP_PLATFORM_DRIVER_BEGIN(rzg2l_irqc)
IRQCHIP_MATCH("renesas,rzg2l-irqc", rzg2l_irqc_init)
IRQCHIP_MATCH("renesas,r9a07g043f-irqc", rzfive_irqc_init)
IRQCHIP_PLATFORM_DRIVER_END(rzg2l_irqc)
MODULE_AUTHOR("Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>");
MODULE_DESCRIPTION("Renesas RZ/G2L IRQC Driver");