// SPDX-License-Identifier: GPL-2.0
/*
 * Renesas RZ/G2L IRQC Driver
 *
 * Copyright (C) 2022 Renesas Electronics Corporation.
 *
 * Author: Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>
 */

#include <linux/bitfield.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <linux/of_platform.h>
#include <linux/pm_runtime.h>
#include <linux/reset.h>
#include <linux/spinlock.h>
#include <linux/syscore_ops.h>

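/*
 * hwirq space used by this driver: hwirq 0 is not handled here, hwirqs 1-8
 * map to the dedicated IRQ0-7 pins and hwirqs 9-40 map to the 32 GPIO
 * interrupt (TINT) lines routed through the pin controller.
 */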
#define IRQC_IRQ_START			1
#define IRQC_IRQ_COUNT			8
#define IRQC_TINT_START			(IRQC_IRQ_START + IRQC_IRQ_COUNT)
#define IRQC_TINT_COUNT			32
#define IRQC_NUM_IRQ			(IRQC_TINT_START + IRQC_TINT_COUNT)

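/*
 * Register roles as used below: ISCR/IITSR hold the status and sense
 * selection for the IRQ0-7 lines, while TSCR/TITSR/TSSR hold the status,
 * edge selection and source selection (plus the TIEN enable bit) for the
 * TINT lines. IMSK/TMSK are only touched by the RZ/Five specific helpers.
 */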
#define ISCR				0x10
#define IITSR				0x14
#define TSCR				0x20
#define TITSR(n)			(0x24 + (n) * 4)
#define TITSR0_MAX_INT			16
#define TITSEL_WIDTH			0x2
#define TSSR(n)				(0x30 + ((n) * 4))
#define TIEN				BIT(7)
#define TSSEL_SHIFT(n)			(8 * (n))
#define TSSEL_MASK			GENMASK(7, 0)
#define IRQ_MASK			0x3
#define IMSK				0x10010
#define TMSK				0x10020

#define TSSR_OFFSET(n)			((n) % 4)
#define TSSR_INDEX(n)			((n) / 4)

#define TITSR_TITSEL_EDGE_RISING	0
#define TITSR_TITSEL_EDGE_FALLING	1
#define TITSR_TITSEL_LEVEL_HIGH		2
#define TITSR_TITSEL_LEVEL_LOW		3

#define IITSR_IITSEL(n, sense)		((sense) << ((n) * 2))
#define IITSR_IITSEL_LEVEL_LOW		0
#define IITSR_IITSEL_EDGE_FALLING	1
#define IITSR_IITSEL_EDGE_RISING	2
#define IITSR_IITSEL_EDGE_BOTH		3
#define IITSR_IITSEL_MASK(n)		IITSR_IITSEL((n), 3)

#define TINT_EXTRACT_HWIRQ(x)		FIELD_GET(GENMASK(15, 0), (x))
#define TINT_EXTRACT_GPIOINT(x)		FIELD_GET(GENMASK(31, 16), (x))

/**
 * struct rzg2l_irqc_reg_cache - registers cache (necessary for suspend/resume)
 * @iitsr: IITSR register
 * @titsr: TITSR registers
 */
struct rzg2l_irqc_reg_cache {
	u32	iitsr;
	u32	titsr[2];
};

/**
 * struct rzg2l_irqc_priv - IRQ controller private data structure
 * @base:	Controller's base address
 * @irqchip:	Pointer to struct irq_chip
 * @fwspec:	IRQ firmware specific data
 * @lock:	Lock to serialize access to hardware registers
 * @cache:	Registers cache for suspend/resume
 */
static struct rzg2l_irqc_priv {
	void __iomem			*base;
	const struct irq_chip		*irqchip;
	struct irq_fwspec		fwspec[IRQC_NUM_IRQ];
	raw_spinlock_t			lock;
	struct rzg2l_irqc_reg_cache	cache;
} *rzg2l_irqc_data;

static struct rzg2l_irqc_priv *irq_data_to_priv(struct irq_data *data)
{
	return data->domain->host_data;
}

static void rzg2l_clear_irq_int(struct rzg2l_irqc_priv *priv, unsigned int hwirq)
{
	unsigned int hw_irq = hwirq - IRQC_IRQ_START;
	u32 bit = BIT(hw_irq);
	u32 iitsr, iscr;

	iscr = readl_relaxed(priv->base + ISCR);
	iitsr = readl_relaxed(priv->base + IITSR);

	/*
	 * ISCR can only be cleared if the type is falling-edge, rising-edge or
	 * falling/rising-edge.
	 */
	if ((iscr & bit) && (iitsr & IITSR_IITSEL_MASK(hw_irq))) {
		writel_relaxed(iscr & ~bit, priv->base + ISCR);
		/*
		 * Make sure the posted write is flushed so that the interrupt
		 * that was just handled is not raised again.
		 */
		readl_relaxed(priv->base + ISCR);
	}
}

static void rzg2l_clear_tint_int(struct rzg2l_irqc_priv *priv, unsigned int hwirq)
{
	u32 bit = BIT(hwirq - IRQC_TINT_START);
	u32 reg;

	reg = readl_relaxed(priv->base + TSCR);
	if (reg & bit) {
		writel_relaxed(reg & ~bit, priv->base + TSCR);
		/*
		 * Make sure the posted write is flushed so that the interrupt
		 * that was just handled is not raised again.
		 */
		readl_relaxed(priv->base + TSCR);
	}
}

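/*
 * EOI first clears any latched status in the IRQC (ISCR for IRQ0-7, TSCR for
 * TINT) and then forwards the EOI to the parent interrupt controller.
 */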
static void rzg2l_irqc_eoi(struct irq_data *d)
{
	struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
	unsigned int hw_irq = irqd_to_hwirq(d);

	raw_spin_lock(&priv->lock);
	if (hw_irq >= IRQC_IRQ_START && hw_irq <= IRQC_IRQ_COUNT)
		rzg2l_clear_irq_int(priv, hw_irq);
	else if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ)
		rzg2l_clear_tint_int(priv, hw_irq);
	raw_spin_unlock(&priv->lock);
	irq_chip_eoi_parent(d);
}

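/*
 * RZ/Five additionally gates interrupts at the IRQC itself through the
 * IMSK/TMSK registers, so its mask/unmask callbacks update those on top of
 * the parent chip operations.
 */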
static void rzfive_irqc_mask_irq_interrupt(struct rzg2l_irqc_priv *priv,
					   unsigned int hwirq)
{
	u32 bit = BIT(hwirq - IRQC_IRQ_START);

	writel_relaxed(readl_relaxed(priv->base + IMSK) | bit, priv->base + IMSK);
}

static void rzfive_irqc_unmask_irq_interrupt(struct rzg2l_irqc_priv *priv,
					     unsigned int hwirq)
{
	u32 bit = BIT(hwirq - IRQC_IRQ_START);

	writel_relaxed(readl_relaxed(priv->base + IMSK) & ~bit, priv->base + IMSK);
}

static void rzfive_irqc_mask_tint_interrupt(struct rzg2l_irqc_priv *priv,
					    unsigned int hwirq)
{
	u32 bit = BIT(hwirq - IRQC_TINT_START);

	writel_relaxed(readl_relaxed(priv->base + TMSK) | bit, priv->base + TMSK);
}

static void rzfive_irqc_unmask_tint_interrupt(struct rzg2l_irqc_priv *priv,
					      unsigned int hwirq)
{
	u32 bit = BIT(hwirq - IRQC_TINT_START);

	writel_relaxed(readl_relaxed(priv->base + TMSK) & ~bit, priv->base + TMSK);
}

static void rzfive_irqc_mask(struct irq_data *d)
{
	struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
	unsigned int hwirq = irqd_to_hwirq(d);

	raw_spin_lock(&priv->lock);
	if (hwirq >= IRQC_IRQ_START && hwirq <= IRQC_IRQ_COUNT)
		rzfive_irqc_mask_irq_interrupt(priv, hwirq);
	else if (hwirq >= IRQC_TINT_START && hwirq < IRQC_NUM_IRQ)
		rzfive_irqc_mask_tint_interrupt(priv, hwirq);
	raw_spin_unlock(&priv->lock);
	irq_chip_mask_parent(d);
}

static void rzfive_irqc_unmask(struct irq_data *d)
{
	struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
	unsigned int hwirq = irqd_to_hwirq(d);

	raw_spin_lock(&priv->lock);
	if (hwirq >= IRQC_IRQ_START && hwirq <= IRQC_IRQ_COUNT)
		rzfive_irqc_unmask_irq_interrupt(priv, hwirq);
	else if (hwirq >= IRQC_TINT_START && hwirq < IRQC_NUM_IRQ)
		rzfive_irqc_unmask_tint_interrupt(priv, hwirq);
	raw_spin_unlock(&priv->lock);
	irq_chip_unmask_parent(d);
}

static void rzfive_tint_irq_endisable(struct irq_data *d, bool enable)
{
	struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
	unsigned int hwirq = irqd_to_hwirq(d);

	if (hwirq >= IRQC_TINT_START && hwirq < IRQC_NUM_IRQ) {
		u32 offset = hwirq - IRQC_TINT_START;
		u32 tssr_offset = TSSR_OFFSET(offset);
		u8 tssr_index = TSSR_INDEX(offset);
		u32 reg;

		raw_spin_lock(&priv->lock);
		if (enable)
			rzfive_irqc_unmask_tint_interrupt(priv, hwirq);
		else
			rzfive_irqc_mask_tint_interrupt(priv, hwirq);
		reg = readl_relaxed(priv->base + TSSR(tssr_index));
		if (enable)
			reg |= TIEN << TSSEL_SHIFT(tssr_offset);
		else
			reg &= ~(TIEN << TSSEL_SHIFT(tssr_offset));
		writel_relaxed(reg, priv->base + TSSR(tssr_index));
		raw_spin_unlock(&priv->lock);
	} else {
		raw_spin_lock(&priv->lock);
		if (enable)
			rzfive_irqc_unmask_irq_interrupt(priv, hwirq);
		else
			rzfive_irqc_mask_irq_interrupt(priv, hwirq);
		raw_spin_unlock(&priv->lock);
	}
}

static void rzfive_irqc_irq_disable(struct irq_data *d)
{
	irq_chip_disable_parent(d);
	rzfive_tint_irq_endisable(d, false);
}

static void rzfive_irqc_irq_enable(struct irq_data *d)
{
	rzfive_tint_irq_endisable(d, true);
	irq_chip_enable_parent(d);
}

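/*
 * On RZ/G2L only the TINT lines have a per-interrupt enable at the IRQC (the
 * TIEN bit in TSSR); the IRQ0-7 lines are controlled solely via the parent
 * chip.
 */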
static void rzg2l_tint_irq_endisable(struct irq_data *d, bool enable)
{
	unsigned int hw_irq = irqd_to_hwirq(d);

	if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ) {
		struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
		u32 offset = hw_irq - IRQC_TINT_START;
		u32 tssr_offset = TSSR_OFFSET(offset);
		u8 tssr_index = TSSR_INDEX(offset);
		u32 reg;

		raw_spin_lock(&priv->lock);
		reg = readl_relaxed(priv->base + TSSR(tssr_index));
		if (enable)
			reg |= TIEN << TSSEL_SHIFT(tssr_offset);
		else
			reg &= ~(TIEN << TSSEL_SHIFT(tssr_offset));
		writel_relaxed(reg, priv->base + TSSR(tssr_index));
		raw_spin_unlock(&priv->lock);
	}
}

static void rzg2l_irqc_irq_disable(struct irq_data *d)
{
	irq_chip_disable_parent(d);
	rzg2l_tint_irq_endisable(d, false);
}

static void rzg2l_irqc_irq_enable(struct irq_data *d)
{
	rzg2l_tint_irq_endisable(d, true);
	irq_chip_enable_parent(d);
}

static int rzg2l_irq_set_type(struct irq_data *d, unsigned int type)
{
	struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
	unsigned int hwirq = irqd_to_hwirq(d);
	u32 iitseln = hwirq - IRQC_IRQ_START;
	bool clear_irq_int = false;
	u16 sense, tmp;

	switch (type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_LEVEL_LOW:
		sense = IITSR_IITSEL_LEVEL_LOW;
		break;

	case IRQ_TYPE_EDGE_FALLING:
		sense = IITSR_IITSEL_EDGE_FALLING;
		clear_irq_int = true;
		break;

	case IRQ_TYPE_EDGE_RISING:
		sense = IITSR_IITSEL_EDGE_RISING;
		clear_irq_int = true;
		break;

	case IRQ_TYPE_EDGE_BOTH:
		sense = IITSR_IITSEL_EDGE_BOTH;
		clear_irq_int = true;
		break;

	default:
		return -EINVAL;
	}

	raw_spin_lock(&priv->lock);
	tmp = readl_relaxed(priv->base + IITSR);
	tmp &= ~IITSR_IITSEL_MASK(iitseln);
	tmp |= IITSR_IITSEL(iitseln, sense);
	if (clear_irq_int)
		rzg2l_clear_irq_int(priv, hwirq);
	writel_relaxed(tmp, priv->base + IITSR);
	raw_spin_unlock(&priv->lock);

	return 0;
}

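/*
 * Write TSSR with TIEN cleared while updating the TINT source, and return the
 * value with the original TIEN state so that the caller can restore it once
 * the edge selection has been programmed and any stale status cleared.
 */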
static u32 rzg2l_disable_tint_and_set_tint_source(struct irq_data *d, struct rzg2l_irqc_priv *priv,
						  u32 reg, u32 tssr_offset, u8 tssr_index)
{
	u32 tint = (u32)(uintptr_t)irq_data_get_irq_chip_data(d);
	u32 tien = reg & (TIEN << TSSEL_SHIFT(tssr_offset));

	/* Clear the relevant byte in reg */
	reg &= ~(TSSEL_MASK << TSSEL_SHIFT(tssr_offset));
	/* Set TINT and leave TIEN clear */
	reg |= tint << TSSEL_SHIFT(tssr_offset);
	writel_relaxed(reg, priv->base + TSSR(tssr_index));

	return reg | tien;
}

static int rzg2l_tint_set_edge(struct irq_data *d, unsigned int type)
{
	struct rzg2l_irqc_priv *priv = irq_data_to_priv(d);
	unsigned int hwirq = irqd_to_hwirq(d);
	u32 titseln = hwirq - IRQC_TINT_START;
	u32 tssr_offset = TSSR_OFFSET(titseln);
	u8 tssr_index = TSSR_INDEX(titseln);
	u8 index, sense;
	u32 reg, tssr;

	switch (type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_EDGE_RISING:
		sense = TITSR_TITSEL_EDGE_RISING;
		break;

	case IRQ_TYPE_EDGE_FALLING:
		sense = TITSR_TITSEL_EDGE_FALLING;
		break;

	default:
		return -EINVAL;
	}

	index = 0;
	if (titseln >= TITSR0_MAX_INT) {
		titseln -= TITSR0_MAX_INT;
		index = 1;
	}

	raw_spin_lock(&priv->lock);
	tssr = readl_relaxed(priv->base + TSSR(tssr_index));
	tssr = rzg2l_disable_tint_and_set_tint_source(d, priv, tssr, tssr_offset, tssr_index);
	reg = readl_relaxed(priv->base + TITSR(index));
	reg &= ~(IRQ_MASK << (titseln * TITSEL_WIDTH));
	reg |= sense << (titseln * TITSEL_WIDTH);
	writel_relaxed(reg, priv->base + TITSR(index));
	rzg2l_clear_tint_int(priv, hwirq);
	writel_relaxed(tssr, priv->base + TSSR(tssr_index));
	raw_spin_unlock(&priv->lock);

	return 0;
}

static int rzg2l_irqc_set_type(struct irq_data *d, unsigned int type)
{
	unsigned int hw_irq = irqd_to_hwirq(d);
	int ret = -EINVAL;

	if (hw_irq >= IRQC_IRQ_START && hw_irq <= IRQC_IRQ_COUNT)
		ret = rzg2l_irq_set_type(d, type);
	else if (hw_irq >= IRQC_TINT_START && hw_irq < IRQC_NUM_IRQ)
		ret = rzg2l_tint_set_edge(d, type);
	if (ret)
		return ret;

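	/*
	 * The requested sense is handled entirely by the IRQC; the parent
	 * input is always programmed as a high level.
	 */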
	return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH);
}

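/*
 * Only IITSR and the two TITSR registers are cached across suspend; see the
 * comment in rzg2l_irqc_irq_resume() for why TSSRx is left to the pin
 * controller.
 */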
static int rzg2l_irqc_irq_suspend(void)
{
	struct rzg2l_irqc_reg_cache *cache = &rzg2l_irqc_data->cache;
	void __iomem *base = rzg2l_irqc_data->base;

	cache->iitsr = readl_relaxed(base + IITSR);
	for (u8 i = 0; i < 2; i++)
		cache->titsr[i] = readl_relaxed(base + TITSR(i));

	return 0;
}

static void rzg2l_irqc_irq_resume(void)
{
	struct rzg2l_irqc_reg_cache *cache = &rzg2l_irqc_data->cache;
	void __iomem *base = rzg2l_irqc_data->base;

	/*
	 * Restore only interrupt type. TSSRx will be restored at the
	 * request of pin controller to avoid spurious interrupts due
	 * to invalid PIN states.
	 */
	for (u8 i = 0; i < 2; i++)
		writel_relaxed(cache->titsr[i], base + TITSR(i));
	writel_relaxed(cache->iitsr, base + IITSR);
}

static struct syscore_ops rzg2l_irqc_syscore_ops = {
	.suspend	= rzg2l_irqc_irq_suspend,
	.resume		= rzg2l_irqc_irq_resume,
};

static const struct irq_chip rzg2l_irqc_chip = {
	.name			= "rzg2l-irqc",
	.irq_eoi		= rzg2l_irqc_eoi,
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_disable		= rzg2l_irqc_irq_disable,
	.irq_enable		= rzg2l_irqc_irq_enable,
	.irq_get_irqchip_state	= irq_chip_get_parent_state,
	.irq_set_irqchip_state	= irq_chip_set_parent_state,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
	.irq_set_type		= rzg2l_irqc_set_type,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
	.flags			= IRQCHIP_MASK_ON_SUSPEND |
				  IRQCHIP_SET_TYPE_MASKED |
				  IRQCHIP_SKIP_SET_WAKE,
};

static const struct irq_chip rzfive_irqc_chip = {
	.name			= "rzfive-irqc",
	.irq_eoi		= rzg2l_irqc_eoi,
	.irq_mask		= rzfive_irqc_mask,
	.irq_unmask		= rzfive_irqc_unmask,
	.irq_disable		= rzfive_irqc_irq_disable,
	.irq_enable		= rzfive_irqc_irq_enable,
	.irq_get_irqchip_state	= irq_chip_get_parent_state,
	.irq_set_irqchip_state	= irq_chip_set_parent_state,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
	.irq_set_type		= rzg2l_irqc_set_type,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
	.flags			= IRQCHIP_MASK_ON_SUSPEND |
				  IRQCHIP_SET_TYPE_MASKED |
				  IRQCHIP_SKIP_SET_WAKE,
};

static int rzg2l_irqc_alloc(struct irq_domain *domain, unsigned int virq,
			    unsigned int nr_irqs, void *arg)
{
	struct rzg2l_irqc_priv *priv = domain->host_data;
	unsigned long tint = 0;
	irq_hw_number_t hwirq;
	unsigned int type;
	int ret;

	ret = irq_domain_translate_twocell(domain, arg, &hwirq, &type);
	if (ret)
		return ret;

	/*
	 * For TINT interrupts, i.e. where the pinctrl driver is a child of the
	 * irqc domain, the hwirq and TINT are both encoded in fwspec->param[0]:
	 * the hwirq (9-40 for TINT) sits in bits 0-15 and the TINT value in
	 * bits 16-31. The TINT value supplied by the pinctrl driver must be
	 * programmed into the IRQC registers to enable a given GPIO pin as an
	 * interrupt.
	 */
	if (hwirq > IRQC_IRQ_COUNT) {
		tint = TINT_EXTRACT_GPIOINT(hwirq);
		hwirq = TINT_EXTRACT_HWIRQ(hwirq);

		if (hwirq < IRQC_TINT_START)
			return -EINVAL;
	}

	if (hwirq > (IRQC_NUM_IRQ - 1))
		return -EINVAL;

	ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq, priv->irqchip,
					    (void *)(uintptr_t)tint);
	if (ret)
		return ret;

	return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &priv->fwspec[hwirq]);
}

static const struct irq_domain_ops rzg2l_irqc_domain_ops = {
	.alloc = rzg2l_irqc_alloc,
	.free = irq_domain_free_irqs_common,
	.translate = irq_domain_translate_twocell,
};

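/*
 * Pre-parse all entries of the "interrupts" property into fwspecs for the
 * parent domain, so that rzg2l_irqc_alloc() can forward allocations per
 * hwirq.
 */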
static int rzg2l_irqc_parse_interrupts(struct rzg2l_irqc_priv *priv,
				       struct device_node *np)
{
	struct of_phandle_args map;
	unsigned int i;
	int ret;

	for (i = 0; i < IRQC_NUM_IRQ; i++) {
		ret = of_irq_parse_one(np, i, &map);
		if (ret)
			return ret;
		of_phandle_args_to_fwspec(np, map.args, map.args_count,
					  &priv->fwspec[i]);
	}

	return 0;
}

static int rzg2l_irqc_common_init(struct device_node *node, struct device_node *parent,
				  const struct irq_chip *irq_chip)
{
	struct irq_domain *irq_domain, *parent_domain;
	struct platform_device *pdev;
	struct reset_control *resetn;
	int ret;

	pdev = of_find_device_by_node(node);
	if (!pdev)
		return -ENODEV;

	parent_domain = irq_find_host(parent);
	if (!parent_domain) {
		dev_err(&pdev->dev, "cannot find parent domain\n");
		return -ENODEV;
	}

	rzg2l_irqc_data = devm_kzalloc(&pdev->dev, sizeof(*rzg2l_irqc_data), GFP_KERNEL);
	if (!rzg2l_irqc_data)
		return -ENOMEM;

	rzg2l_irqc_data->irqchip = irq_chip;

	rzg2l_irqc_data->base = devm_of_iomap(&pdev->dev, pdev->dev.of_node, 0, NULL);
	if (IS_ERR(rzg2l_irqc_data->base))
		return PTR_ERR(rzg2l_irqc_data->base);

	ret = rzg2l_irqc_parse_interrupts(rzg2l_irqc_data, node);
	if (ret) {
		dev_err(&pdev->dev, "cannot parse interrupts: %d\n", ret);
		return ret;
	}

	resetn = devm_reset_control_get_exclusive(&pdev->dev, NULL);
	if (IS_ERR(resetn))
		return PTR_ERR(resetn);

	ret = reset_control_deassert(resetn);
	if (ret) {
		dev_err(&pdev->dev, "failed to deassert resetn pin, %d\n", ret);
		return ret;
	}

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_resume_and_get(&pdev->dev);
	if (ret < 0) {
		dev_err(&pdev->dev, "pm_runtime_resume_and_get failed: %d\n", ret);
		goto pm_disable;
	}

	raw_spin_lock_init(&rzg2l_irqc_data->lock);

	irq_domain = irq_domain_add_hierarchy(parent_domain, 0, IRQC_NUM_IRQ,
					      node, &rzg2l_irqc_domain_ops,
					      rzg2l_irqc_data);
	if (!irq_domain) {
		dev_err(&pdev->dev, "failed to add irq domain\n");
		ret = -ENOMEM;
		goto pm_put;
	}

	register_syscore_ops(&rzg2l_irqc_syscore_ops);

	return 0;

pm_put:
	pm_runtime_put(&pdev->dev);
pm_disable:
	pm_runtime_disable(&pdev->dev);
	reset_control_assert(resetn);
	return ret;
}

static int __init rzg2l_irqc_init(struct device_node *node,
				  struct device_node *parent)
{
	return rzg2l_irqc_common_init(node, parent, &rzg2l_irqc_chip);
}

static int __init rzfive_irqc_init(struct device_node *node,
				   struct device_node *parent)
{
	return rzg2l_irqc_common_init(node, parent, &rzfive_irqc_chip);
}

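/*
 * "renesas,rzg2l-irqc" uses the plain RZ/G2L irq_chip, while the RZ/Five
 * (R9A07G043F) compatible uses the variant with the additional IMSK/TMSK
 * handling.
 */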
IRQCHIP_PLATFORM_DRIVER_BEGIN(rzg2l_irqc)
IRQCHIP_MATCH("renesas,rzg2l-irqc", rzg2l_irqc_init)
IRQCHIP_MATCH("renesas,r9a07g043f-irqc", rzfive_irqc_init)
IRQCHIP_PLATFORM_DRIVER_END(rzg2l_irqc)
MODULE_AUTHOR("Lad Prabhakar <prabhakar.mahadev-lad.rj@bp.renesas.com>");
MODULE_DESCRIPTION("Renesas RZ/G2L IRQC Driver");