1 // SPDX-License-Identifier: GPL-2.0
2 /*
3 * Renesas RZ/V2H(P) ICU Driver
4 *
5 * Based on irq-renesas-rzg2l.c
6 *
7 * Copyright (C) 2024 Renesas Electronics Corporation.
8 *
9 * Author: Fabrizio Castro <fabrizio.castro.jz@renesas.com>
10 */
11
12 #include <linux/bitfield.h>
13 #include <linux/cleanup.h>
14 #include <linux/err.h>
15 #include <linux/io.h>
16 #include <linux/irqchip.h>
17 #include <linux/irqchip/irq-renesas-rzv2h.h>
18 #include <linux/irqdomain.h>
19 #include <linux/of_platform.h>
20 #include <linux/pm_runtime.h>
21 #include <linux/reset.h>
22 #include <linux/spinlock.h>
23 #include <linux/syscore_ops.h>
24
/* DT "interrupts" indexes */
#define ICU_IRQ_START				1
#define ICU_IRQ_COUNT				16
#define ICU_TINT_START				(ICU_IRQ_START + ICU_IRQ_COUNT)
#define ICU_TINT_COUNT				32
#define ICU_NUM_IRQ				(ICU_TINT_START + ICU_TINT_COUNT)

/* Registers */
#define ICU_NSCNT				0x00
#define ICU_NSCLR				0x04
#define ICU_NITSR				0x08
#define ICU_ISCTR				0x10
#define ICU_ISCLR				0x14
#define ICU_IITSR				0x18
#define ICU_TSCTR				0x20
#define ICU_TSCLR				0x24
#define ICU_TITSR(k)				(0x28 + (k) * 4)
#define ICU_TSSR(k)				(0x30 + (k) * 4)
#define ICU_DMkSELy(k, y)			(0x420 + (k) * 0x20 + (y) * 4)
#define ICU_DMACKSELk(k)			(0x500 + (k) * 4)

/* NMI detection sense values (ICU_NITSR) */
#define ICU_NMI_EDGE_FALLING			0
#define ICU_NMI_EDGE_RISING			1

#define ICU_NSCLR_NCLR				BIT(0)

/* IRQ detection sense values (2-bit IITSEL fields in ICU_IITSR) */
#define ICU_IRQ_LEVEL_LOW			0
#define ICU_IRQ_EDGE_FALLING			1
#define ICU_IRQ_EDGE_RISING			2
#define ICU_IRQ_EDGE_BOTH			3

/* Each IRQ line owns a 2-bit IITSEL field at bit position n * 2 */
#define ICU_IITSR_IITSEL_PREP(iitsel, n)	((iitsel) << ((n) * 2))
#define ICU_IITSR_IITSEL_GET(iitsr, n)		(((iitsr) >> ((n) * 2)) & 0x03)
#define ICU_IITSR_IITSEL_MASK(n)		ICU_IITSR_IITSEL_PREP(0x03, n)

/* TINT detection sense values (2-bit TITSEL fields in ICU_TITSRk) */
#define ICU_TINT_EDGE_RISING			0
#define ICU_TINT_EDGE_FALLING			1
#define ICU_TINT_LEVEL_HIGH			2
#define ICU_TINT_LEVEL_LOW			3

/*
 * ICU_TSSRk packs one field of 'field_width' bits per TINT: the low
 * (field_width - 1) bits select the interrupt source (TSSEL) and the MSB
 * of each field is TIEN, the per-TINT enable bit.
 */
#define ICU_TSSR_TSSEL_PREP(tssel, n, field_width)	((tssel) << ((n) * (field_width)))
#define ICU_TSSR_TSSEL_MASK(n, field_width)	\
({\
	typeof(field_width) (_field_width) = (field_width); \
	ICU_TSSR_TSSEL_PREP((GENMASK(((_field_width) - 2), 0)), (n), _field_width); \
})

#define ICU_TSSR_TIEN(n, field_width)	\
({\
	typeof(field_width) (_field_width) = (field_width); \
	BIT((_field_width) - 1) << ((n) * (_field_width)); \
})

/* 16 TITSEL fields (2 bits each) fit in one 32-bit ICU_TITSRk register */
#define ICU_TITSR_K(tint_nr)			((tint_nr) / 16)
#define ICU_TITSR_TITSEL_N(tint_nr)		((tint_nr) % 16)
#define ICU_TITSR_TITSEL_PREP(titsel, n)	ICU_IITSR_IITSEL_PREP(titsel, n)
#define ICU_TITSR_TITSEL_MASK(n)		ICU_IITSR_IITSEL_MASK(n)
#define ICU_TITSR_TITSEL_GET(titsr, n)		ICU_IITSR_IITSEL_GET(titsr, n)

/* DT TINT cells encode hwirq in bits [15:0] and the GPIOINT number in [31:16] */
#define ICU_TINT_EXTRACT_HWIRQ(x)		FIELD_GET(GENMASK(15, 0), (x))
#define ICU_TINT_EXTRACT_GPIOINT(x)		FIELD_GET(GENMASK(31, 16), (x))
#define ICU_RZG3E_TINT_OFFSET			0x800
#define ICU_RZG3E_TSSEL_MAX_VAL			0x8c
#define ICU_RZV2H_TSSEL_MAX_VAL			0x55
92
93 /**
94 * struct rzv2h_irqc_reg_cache - registers cache (necessary for suspend/resume)
95 * @nitsr: ICU_NITSR register
96 * @iitsr: ICU_IITSR register
97 * @titsr: ICU_TITSR registers
98 */
99 struct rzv2h_irqc_reg_cache {
100 u32 nitsr;
101 u32 iitsr;
102 u32 titsr[2];
103 };
104
105 /**
106 * struct rzv2h_hw_info - Interrupt Control Unit controller hardware info structure.
107 * @tssel_lut: TINT lookup table
108 * @t_offs: TINT offset
109 * @max_tssel: TSSEL max value
110 * @field_width: TSSR field width
111 */
112 struct rzv2h_hw_info {
113 const u8 *tssel_lut;
114 u16 t_offs;
115 u8 max_tssel;
116 u8 field_width;
117 };
118
/* DMAC */
#define ICU_DMAC_DkRQ_SEL_MASK			GENMASK(9, 0)

/* Each ICU_DMkSELy register holds two 16-bit request slots; 'up' selects the upper one */
#define ICU_DMAC_DMAREQ_SHIFT(up)		((up) * 16)
#define ICU_DMAC_DMAREQ_MASK(up)		(ICU_DMAC_DkRQ_SEL_MASK \
						 << ICU_DMAC_DMAREQ_SHIFT(up))
#define ICU_DMAC_PREP_DMAREQ(sel, up)		(FIELD_PREP(ICU_DMAC_DkRQ_SEL_MASK, (sel)) \
						 << ICU_DMAC_DMAREQ_SHIFT(up))
127
128 /**
129 * struct rzv2h_icu_priv - Interrupt Control Unit controller private data structure.
130 * @base: Controller's base address
131 * @fwspec: IRQ firmware specific data
132 * @lock: Lock to serialize access to hardware registers
133 * @info: Pointer to struct rzv2h_hw_info
134 * @cache: Registers cache for suspend/resume
135 */
136 static struct rzv2h_icu_priv {
137 void __iomem *base;
138 struct irq_fwspec fwspec[ICU_NUM_IRQ];
139 raw_spinlock_t lock;
140 const struct rzv2h_hw_info *info;
141 struct rzv2h_irqc_reg_cache cache;
142 } *rzv2h_icu_data;
143
/*
 * rzv2h_icu_register_dma_req() - route a DMA request number to a DMAC channel.
 * @icu_dev: ICU platform device
 * @dmac_index: DMAC unit index (selects the ICU_DMkSELy register bank)
 * @dmac_channel: DMAC channel; two consecutive channels share one register
 * @req_no: DMA request number to program
 *
 * Performs a locked read-modify-write of the 16-bit slot of ICU_DMkSELy that
 * belongs to @dmac_channel (even channels use the lower half-word, odd
 * channels the upper one).
 */
void rzv2h_icu_register_dma_req(struct platform_device *icu_dev, u8 dmac_index, u8 dmac_channel,
				u16 req_no)
{
	struct rzv2h_icu_priv *priv = platform_get_drvdata(icu_dev);
	u32 icu_dmksely, dmareq, dmareq_mask;
	u8 y, upper;

	/* Two channels per register: 'y' is the register, 'upper' the half-word. */
	y = dmac_channel / 2;
	upper = dmac_channel % 2;

	dmareq = ICU_DMAC_PREP_DMAREQ(req_no, upper);
	dmareq_mask = ICU_DMAC_DMAREQ_MASK(upper);

	guard(raw_spinlock_irqsave)(&priv->lock);

	icu_dmksely = readl(priv->base + ICU_DMkSELy(dmac_index, y));
	icu_dmksely = (icu_dmksely & ~dmareq_mask) | dmareq;
	writel(icu_dmksely, priv->base + ICU_DMkSELy(dmac_index, y));
}
EXPORT_SYMBOL_GPL(rzv2h_icu_register_dma_req);
164
irq_data_to_priv(struct irq_data * data)165 static inline struct rzv2h_icu_priv *irq_data_to_priv(struct irq_data *data)
166 {
167 return data->domain->host_data;
168 }
169
/*
 * rzv2h_icu_eoi() - end-of-interrupt handling for NMI, IRQ and TINT lines.
 *
 * Edge-triggered IRQ/TINT lines latch a status flag in the ICU that must be
 * cleared by software; level-sensed lines clear automatically when the
 * source de-asserts, so they are skipped. The NMI status flag is always
 * cleared. The EOI is then propagated to the parent irqchip.
 */
static void rzv2h_icu_eoi(struct irq_data *d)
{
	struct rzv2h_icu_priv *priv = irq_data_to_priv(d);
	unsigned int hw_irq = irqd_to_hwirq(d);
	unsigned int tintirq_nr;
	u32 bit;

	scoped_guard(raw_spinlock, &priv->lock) {
		if (hw_irq >= ICU_TINT_START) {
			/* TINT: clear the per-line edge-detect status flag */
			tintirq_nr = hw_irq - ICU_TINT_START;
			bit = BIT(tintirq_nr);
			if (!irqd_is_level_type(d))
				writel_relaxed(bit, priv->base + priv->info->t_offs + ICU_TSCLR);
		} else if (hw_irq >= ICU_IRQ_START) {
			/* External IRQ: same, via ICU_ISCLR */
			tintirq_nr = hw_irq - ICU_IRQ_START;
			bit = BIT(tintirq_nr);
			if (!irqd_is_level_type(d))
				writel_relaxed(bit, priv->base + ICU_ISCLR);
		} else {
			/* NMI (hwirq 0): always edge-detected, always clear */
			writel_relaxed(ICU_NSCLR_NCLR, priv->base + ICU_NSCLR);
		}
	}

	irq_chip_eoi_parent(d);
}
195
/*
 * rzv2h_tint_irq_endisable() - set or clear the TIEN bit of a TINT line.
 * @d: irq data of the line
 * @enable: true to set TIEN (enable), false to clear it (disable)
 *
 * No-op for non-TINT interrupts. Performs a locked read-modify-write of the
 * ICU_TSSRk field that carries this TINT's enable bit.
 */
static void rzv2h_tint_irq_endisable(struct irq_data *d, bool enable)
{
	struct rzv2h_icu_priv *priv = irq_data_to_priv(d);
	unsigned int hw_irq = irqd_to_hwirq(d);
	u32 tint_nr, tssel_n, k, tssr;
	u8 nr_tint;

	if (hw_irq < ICU_TINT_START)
		return;

	/* Locate the TSSR register (k) and field index (tssel_n) for this TINT */
	tint_nr = hw_irq - ICU_TINT_START;
	nr_tint = 32 / priv->info->field_width;
	k = tint_nr / nr_tint;
	tssel_n = tint_nr % nr_tint;

	guard(raw_spinlock)(&priv->lock);
	tssr = readl_relaxed(priv->base + priv->info->t_offs + ICU_TSSR(k));
	if (enable)
		tssr |= ICU_TSSR_TIEN(tssel_n, priv->info->field_width);
	else
		tssr &= ~ICU_TSSR_TIEN(tssel_n, priv->info->field_width);
	writel_relaxed(tssr, priv->base + priv->info->t_offs + ICU_TSSR(k));

	/*
	 * A glitch in the edge detection circuit can cause a spurious
	 * interrupt. Clear the status flag after setting the ICU_TSSRk
	 * registers, which is recommended by the hardware manual as a
	 * countermeasure.
	 */
	writel_relaxed(BIT(tint_nr), priv->base + priv->info->t_offs + ICU_TSCLR);
}
227
/* Disable: mask at the parent first, then drop TIEN in the ICU (TINT only). */
static void rzv2h_icu_irq_disable(struct irq_data *d)
{
	irq_chip_disable_parent(d);
	rzv2h_tint_irq_endisable(d, false);
}
233
/* Enable: set TIEN in the ICU first (TINT only), then unmask at the parent. */
static void rzv2h_icu_irq_enable(struct irq_data *d)
{
	rzv2h_tint_irq_endisable(d, true);
	irq_chip_enable_parent(d);
}
239
rzv2h_nmi_set_type(struct irq_data * d,unsigned int type)240 static int rzv2h_nmi_set_type(struct irq_data *d, unsigned int type)
241 {
242 struct rzv2h_icu_priv *priv = irq_data_to_priv(d);
243 u32 sense;
244
245 switch (type & IRQ_TYPE_SENSE_MASK) {
246 case IRQ_TYPE_EDGE_FALLING:
247 sense = ICU_NMI_EDGE_FALLING;
248 break;
249
250 case IRQ_TYPE_EDGE_RISING:
251 sense = ICU_NMI_EDGE_RISING;
252 break;
253
254 default:
255 return -EINVAL;
256 }
257
258 writel_relaxed(sense, priv->base + ICU_NITSR);
259
260 return 0;
261 }
262
/*
 * rzv2h_clear_irq_int() - clear a pending edge-detected external IRQ.
 * @priv: driver private data
 * @hwirq: hardware interrupt number (ICU_IRQ_START based)
 *
 * Caller must hold priv->lock (called from rzv2h_irq_set_type()).
 */
static void rzv2h_clear_irq_int(struct rzv2h_icu_priv *priv, unsigned int hwirq)
{
	unsigned int irq_nr = hwirq - ICU_IRQ_START;
	u32 isctr, iitsr, iitsel;
	u32 bit = BIT(irq_nr);

	isctr = readl_relaxed(priv->base + ICU_ISCTR);
	iitsr = readl_relaxed(priv->base + ICU_IITSR);
	iitsel = ICU_IITSR_IITSEL_GET(iitsr, irq_nr);

	/*
	 * When level sensing is used, the interrupt flag gets automatically cleared when the
	 * interrupt signal is de-asserted by the source of the interrupt request, therefore clear
	 * the interrupt only for edge triggered interrupts.
	 */
	if ((isctr & bit) && (iitsel != ICU_IRQ_LEVEL_LOW))
		writel_relaxed(bit, priv->base + ICU_ISCLR);
}
281
/*
 * rzv2h_irq_set_type() - program the detection sense of an external IRQ line.
 *
 * Maps the generic trigger type onto the 2-bit IITSEL encoding, then, under
 * the register lock, clears any stale edge status before committing the new
 * sense to ICU_IITSR. Returns -EINVAL for unsupported types
 * (IRQ_TYPE_LEVEL_HIGH is not supported by the hardware encoding).
 */
static int rzv2h_irq_set_type(struct irq_data *d, unsigned int type)
{
	struct rzv2h_icu_priv *priv = irq_data_to_priv(d);
	unsigned int hwirq = irqd_to_hwirq(d);
	u32 irq_nr = hwirq - ICU_IRQ_START;
	u32 iitsr, sense;

	switch (type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_LEVEL_LOW:
		sense = ICU_IRQ_LEVEL_LOW;
		break;

	case IRQ_TYPE_EDGE_FALLING:
		sense = ICU_IRQ_EDGE_FALLING;
		break;

	case IRQ_TYPE_EDGE_RISING:
		sense = ICU_IRQ_EDGE_RISING;
		break;

	case IRQ_TYPE_EDGE_BOTH:
		sense = ICU_IRQ_EDGE_BOTH;
		break;

	default:
		return -EINVAL;
	}

	guard(raw_spinlock)(&priv->lock);
	iitsr = readl_relaxed(priv->base + ICU_IITSR);
	iitsr &= ~ICU_IITSR_IITSEL_MASK(irq_nr);
	iitsr |= ICU_IITSR_IITSEL_PREP(sense, irq_nr);
	/* Clear any latched edge before the new sense takes effect */
	rzv2h_clear_irq_int(priv, hwirq);
	writel_relaxed(iitsr, priv->base + ICU_IITSR);

	return 0;
}
319
/*
 * rzv2h_clear_tint_int() - clear a pending edge-detected TINT.
 * @priv: driver private data
 * @hwirq: hardware interrupt number (ICU_TINT_START based)
 *
 * Caller must hold priv->lock (called from rzv2h_tint_set_type()).
 */
static void rzv2h_clear_tint_int(struct rzv2h_icu_priv *priv, unsigned int hwirq)
{
	unsigned int tint_nr = hwirq - ICU_TINT_START;
	int titsel_n = ICU_TITSR_TITSEL_N(tint_nr);
	u32 tsctr, titsr, titsel;
	u32 bit = BIT(tint_nr);
	int k = tint_nr / 16;

	tsctr = readl_relaxed(priv->base + priv->info->t_offs + ICU_TSCTR);
	titsr = readl_relaxed(priv->base + priv->info->t_offs + ICU_TITSR(k));
	titsel = ICU_TITSR_TITSEL_GET(titsr, titsel_n);

	/*
	 * Writing 1 to the corresponding flag from register ICU_TSCTR only has effect if
	 * TSTATn = 1b and if it's a rising edge or a falling edge interrupt.
	 */
	if ((tsctr & bit) && ((titsel == ICU_TINT_EDGE_RISING) ||
			      (titsel == ICU_TINT_EDGE_FALLING)))
		writel_relaxed(bit, priv->base + priv->info->t_offs + ICU_TSCLR);
}
340
/*
 * rzv2h_tint_set_type() - select source and detection sense of a TINT line.
 *
 * The GPIOINT number carried in the irq chip data is (optionally, via the
 * per-SoC LUT) translated into a TSSEL value, then the ICU_TSSRk source
 * field and the ICU_TITSRk sense field of this TINT are reprogrammed.
 * The line is reprogrammed with TIEN cleared, any stale edge status is
 * flushed, and TIEN is restored last to avoid spurious interrupts.
 */
static int rzv2h_tint_set_type(struct irq_data *d, unsigned int type)
{
	u32 titsr, titsr_k, titsel_n, tien;
	struct rzv2h_icu_priv *priv;
	u32 tssr, tssr_k, tssel_n;
	u32 titsr_cur, tssr_cur;
	unsigned int hwirq;
	u32 tint, sense;
	int tint_nr;
	u8 nr_tint;

	/* Map the generic trigger type onto the 2-bit TITSEL encoding */
	switch (type & IRQ_TYPE_SENSE_MASK) {
	case IRQ_TYPE_LEVEL_LOW:
		sense = ICU_TINT_LEVEL_LOW;
		break;

	case IRQ_TYPE_LEVEL_HIGH:
		sense = ICU_TINT_LEVEL_HIGH;
		break;

	case IRQ_TYPE_EDGE_RISING:
		sense = ICU_TINT_EDGE_RISING;
		break;

	case IRQ_TYPE_EDGE_FALLING:
		sense = ICU_TINT_EDGE_FALLING;
		break;

	default:
		return -EINVAL;
	}

	priv = irq_data_to_priv(d);
	/* GPIOINT number was stashed as chip data by rzv2h_icu_alloc() */
	tint = (u32)(uintptr_t)irq_data_get_irq_chip_data(d);
	if (tint > priv->info->max_tssel)
		return -EINVAL;

	if (priv->info->tssel_lut)
		tint = priv->info->tssel_lut[tint];

	hwirq = irqd_to_hwirq(d);
	tint_nr = hwirq - ICU_TINT_START;

	/* Register index / field index for both TSSR and TITSR banks */
	nr_tint = 32 / priv->info->field_width;
	tssr_k = tint_nr / nr_tint;
	tssel_n = tint_nr % nr_tint;
	tien = ICU_TSSR_TIEN(tssel_n, priv->info->field_width);

	titsr_k = ICU_TITSR_K(tint_nr);
	titsel_n = ICU_TITSR_TITSEL_N(tint_nr);

	guard(raw_spinlock)(&priv->lock);

	tssr = readl_relaxed(priv->base + priv->info->t_offs + ICU_TSSR(tssr_k));
	titsr = readl_relaxed(priv->base + priv->info->t_offs + ICU_TITSR(titsr_k));

	/* Nothing to do if both source and sense are already as requested */
	tssr_cur = field_get(ICU_TSSR_TSSEL_MASK(tssel_n, priv->info->field_width), tssr);
	titsr_cur = field_get(ICU_TITSR_TITSEL_MASK(titsel_n), titsr);
	if (tssr_cur == tint && titsr_cur == sense)
		return 0;

	/* Program the new source with TIEN cleared */
	tssr &= ~(ICU_TSSR_TSSEL_MASK(tssel_n, priv->info->field_width) | tien);
	tssr |= ICU_TSSR_TSSEL_PREP(tint, tssel_n, priv->info->field_width);

	writel_relaxed(tssr, priv->base + priv->info->t_offs + ICU_TSSR(tssr_k));

	titsr &= ~ICU_TITSR_TITSEL_MASK(titsel_n);
	titsr |= ICU_TITSR_TITSEL_PREP(sense, titsel_n);

	writel_relaxed(titsr, priv->base + priv->info->t_offs + ICU_TITSR(titsr_k));

	/* Flush any edge latched during reprogramming, then re-enable */
	rzv2h_clear_tint_int(priv, hwirq);

	writel_relaxed(tssr | tien, priv->base + priv->info->t_offs + ICU_TSSR(tssr_k));

	return 0;
}
418
rzv2h_icu_set_type(struct irq_data * d,unsigned int type)419 static int rzv2h_icu_set_type(struct irq_data *d, unsigned int type)
420 {
421 unsigned int hw_irq = irqd_to_hwirq(d);
422 int ret;
423
424 if (hw_irq >= ICU_TINT_START)
425 ret = rzv2h_tint_set_type(d, type);
426 else if (hw_irq >= ICU_IRQ_START)
427 ret = rzv2h_irq_set_type(d, type);
428 else
429 ret = rzv2h_nmi_set_type(d, type);
430
431 if (ret)
432 return ret;
433
434 return irq_chip_set_type_parent(d, IRQ_TYPE_LEVEL_HIGH);
435 }
436
rzv2h_irqc_irq_suspend(void * data)437 static int rzv2h_irqc_irq_suspend(void *data)
438 {
439 struct rzv2h_irqc_reg_cache *cache = &rzv2h_icu_data->cache;
440 void __iomem *base = rzv2h_icu_data->base;
441
442 cache->nitsr = readl_relaxed(base + ICU_NITSR);
443 cache->iitsr = readl_relaxed(base + ICU_IITSR);
444 for (unsigned int i = 0; i < 2; i++)
445 cache->titsr[i] = readl_relaxed(base + rzv2h_icu_data->info->t_offs + ICU_TITSR(i));
446
447 return 0;
448 }
449
/* Syscore resume hook: restore the sense registers saved at suspend time. */
static void rzv2h_irqc_irq_resume(void *data)
{
	struct rzv2h_irqc_reg_cache *cache = &rzv2h_icu_data->cache;
	void __iomem *base = rzv2h_icu_data->base;

	/*
	 * Restore only interrupt type. TSSRx will be restored at the
	 * request of pin controller to avoid spurious interrupts due
	 * to invalid PIN states.
	 */
	for (unsigned int i = 0; i < 2; i++)
		writel_relaxed(cache->titsr[i], base + rzv2h_icu_data->info->t_offs + ICU_TITSR(i));
	writel_relaxed(cache->iitsr, base + ICU_IITSR);
	writel_relaxed(cache->nitsr, base + ICU_NITSR);
}
465
/* Syscore hooks preserving trigger-sense configuration across system suspend. */
static const struct syscore_ops rzv2h_irqc_syscore_ops = {
	.suspend	= rzv2h_irqc_irq_suspend,
	.resume		= rzv2h_irqc_irq_resume,
};

static struct syscore rzv2h_irqc_syscore = {
	.ops = &rzv2h_irqc_syscore_ops,
};
474
/* irq_chip: ICU-specific eoi/enable/type handling, everything else forwarded to the parent. */
static const struct irq_chip rzv2h_icu_chip = {
	.name			= "rzv2h-icu",
	.irq_eoi		= rzv2h_icu_eoi,
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_disable		= rzv2h_icu_irq_disable,
	.irq_enable		= rzv2h_icu_irq_enable,
	.irq_get_irqchip_state	= irq_chip_get_parent_state,
	.irq_set_irqchip_state	= irq_chip_set_parent_state,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
	.irq_set_type		= rzv2h_icu_set_type,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
	.flags			= IRQCHIP_MASK_ON_SUSPEND |
				  IRQCHIP_SET_TYPE_MASKED |
				  IRQCHIP_SKIP_SET_WAKE,
};
491
/*
 * rzv2h_icu_alloc() - irq domain .alloc callback.
 *
 * Translates the two-cell DT specifier, decodes the combined TINT encoding,
 * validates the hwirq range, installs rzv2h_icu_chip with the GPIOINT
 * number as chip data, and allocates the corresponding parent interrupt
 * using the fwspec cached at probe time.
 */
static int rzv2h_icu_alloc(struct irq_domain *domain, unsigned int virq, unsigned int nr_irqs,
			   void *arg)
{
	struct rzv2h_icu_priv *priv = domain->host_data;
	unsigned long tint = 0;
	irq_hw_number_t hwirq;
	unsigned int type;
	int ret;

	ret = irq_domain_translate_twocell(domain, arg, &hwirq, &type);
	if (ret)
		return ret;

	/*
	 * For TINT interrupts the hwirq and TINT are encoded in
	 * fwspec->param[0].
	 * hwirq is embedded in bits 0-15.
	 * TINT is embedded in bits 16-31.
	 */
	if (hwirq >= ICU_TINT_START) {
		tint = ICU_TINT_EXTRACT_GPIOINT(hwirq);
		hwirq = ICU_TINT_EXTRACT_HWIRQ(hwirq);

		/* The decoded hwirq must still fall in the TINT range */
		if (hwirq < ICU_TINT_START)
			return -EINVAL;
	}

	if (hwirq > (ICU_NUM_IRQ - 1))
		return -EINVAL;

	/* tint is retrieved later via irq_data_get_irq_chip_data() in set_type */
	ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq, &rzv2h_icu_chip,
					    (void *)(uintptr_t)tint);
	if (ret)
		return ret;

	return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &priv->fwspec[hwirq]);
}
529
/* Hierarchical irq domain operations for the ICU. */
static const struct irq_domain_ops rzv2h_icu_domain_ops = {
	.alloc		= rzv2h_icu_alloc,
	.free		= irq_domain_free_irqs_common,
	.translate	= irq_domain_translate_twocell,
};
535
rzv2h_icu_parse_interrupts(struct rzv2h_icu_priv * priv,struct device_node * np)536 static int rzv2h_icu_parse_interrupts(struct rzv2h_icu_priv *priv, struct device_node *np)
537 {
538 struct of_phandle_args map;
539 unsigned int i;
540 int ret;
541
542 for (i = 0; i < ICU_NUM_IRQ; i++) {
543 ret = of_irq_parse_one(np, i, &map);
544 if (ret)
545 return ret;
546
547 of_phandle_args_to_fwspec(np, map.args, map.args_count, &priv->fwspec[i]);
548 }
549
550 return 0;
551 }
552
rzv2h_icu_probe_common(struct platform_device * pdev,struct device_node * parent,const struct rzv2h_hw_info * hw_info)553 static int rzv2h_icu_probe_common(struct platform_device *pdev, struct device_node *parent,
554 const struct rzv2h_hw_info *hw_info)
555 {
556 struct irq_domain *irq_domain, *parent_domain;
557 struct device_node *node = pdev->dev.of_node;
558 struct reset_control *resetn;
559 int ret;
560
561 parent_domain = irq_find_host(parent);
562 if (!parent_domain) {
563 dev_err(&pdev->dev, "cannot find parent domain\n");
564 return -ENODEV;
565 }
566
567 rzv2h_icu_data = devm_kzalloc(&pdev->dev, sizeof(*rzv2h_icu_data), GFP_KERNEL);
568 if (!rzv2h_icu_data)
569 return -ENOMEM;
570
571 platform_set_drvdata(pdev, rzv2h_icu_data);
572
573 rzv2h_icu_data->base = devm_of_iomap(&pdev->dev, pdev->dev.of_node, 0, NULL);
574 if (IS_ERR(rzv2h_icu_data->base))
575 return PTR_ERR(rzv2h_icu_data->base);
576
577 ret = rzv2h_icu_parse_interrupts(rzv2h_icu_data, node);
578 if (ret) {
579 dev_err(&pdev->dev, "cannot parse interrupts: %d\n", ret);
580 return ret;
581 }
582
583 resetn = devm_reset_control_get_exclusive_deasserted(&pdev->dev, NULL);
584 if (IS_ERR(resetn)) {
585 ret = PTR_ERR(resetn);
586 dev_err(&pdev->dev, "failed to acquire deasserted reset: %d\n", ret);
587 return ret;
588 }
589
590 ret = devm_pm_runtime_enable(&pdev->dev);
591 if (ret < 0) {
592 dev_err(&pdev->dev, "devm_pm_runtime_enable failed, %d\n", ret);
593 return ret;
594 }
595
596 ret = pm_runtime_resume_and_get(&pdev->dev);
597 if (ret < 0) {
598 dev_err(&pdev->dev, "pm_runtime_resume_and_get failed: %d\n", ret);
599 return ret;
600 }
601
602 raw_spin_lock_init(&rzv2h_icu_data->lock);
603
604 irq_domain = irq_domain_create_hierarchy(parent_domain, 0, ICU_NUM_IRQ,
605 dev_fwnode(&pdev->dev), &rzv2h_icu_domain_ops,
606 rzv2h_icu_data);
607 if (!irq_domain) {
608 dev_err(&pdev->dev, "failed to add irq domain\n");
609 ret = -ENOMEM;
610 goto pm_put;
611 }
612
613 rzv2h_icu_data->info = hw_info;
614
615 register_syscore(&rzv2h_irqc_syscore);
616
617 /*
618 * coccicheck complains about a missing put_device call before returning, but it's a false
619 * positive. We still need &pdev->dev after successfully returning from this function.
620 */
621 return 0;
622
623 pm_put:
624 pm_runtime_put(&pdev->dev);
625
626 return ret;
627 }
628
/*
 * Mapping based on port index on Table 4.2-6 and TSSEL bits on Table 4.6-4.
 * Index: GPIOINT number from the DT TINT cell; value: TSSEL to program.
 */
static const u8 rzg3e_tssel_lut[] = {
	81, 82, 83, 84, 85, 86, 87, 88, /* P00-P07 */
	89, 90, 91, 92, 93, 94, 95, 96, /* P10-P17 */
	111, 112,			/* P20-P21 */
	97, 98, 99, 100, 101, 102, 103, 104, /* P30-P37 */
	105, 106, 107, 108, 109, 110,	/* P40-P45 */
	113, 114, 115, 116, 117, 118, 119, /* P50-P56 */
	120, 121, 122, 123, 124, 125, 126, /* P60-P66 */
	127, 128, 129, 130, 131, 132, 133, 134, /* P70-P77 */
	135, 136, 137, 138, 139, 140,	/* P80-P85 */
	43, 44, 45, 46, 47, 48, 49, 50,	/* PA0-PA7 */
	51, 52, 53, 54, 55, 56, 57, 58,	/* PB0-PB7 */
	59, 60, 61,			/* PC0-PC2 */
	62, 63, 64, 65, 66, 67, 68, 69,	/* PD0-PD7 */
	70, 71, 72, 73, 74, 75, 76, 77,	/* PE0-PE7 */
	78, 79, 80,			/* PF0-PF2 */
	25, 26, 27, 28, 29, 30, 31, 32,	/* PG0-PG7 */
	33, 34, 35, 36, 37, 38,		/* PH0-PH5 */
	4, 5, 6, 7, 8,			/* PJ0-PJ4 */
	39, 40, 41, 42,			/* PK0-PK3 */
	9, 10, 11, 12, 21, 22, 23, 24,	/* PL0-PL7 */
	13, 14, 15, 16, 17, 18, 19, 20,	/* PM0-PM7 */
	0, 1, 2, 3			/* PS0-PS3 */
};
654
/* RZ/G3E: TINT registers at +0x800, 16-bit TSSR fields, TSSEL via lookup table */
static const struct rzv2h_hw_info rzg3e_hw_params = {
	.tssel_lut	= rzg3e_tssel_lut,
	.t_offs		= ICU_RZG3E_TINT_OFFSET,
	.max_tssel	= ICU_RZG3E_TSSEL_MAX_VAL,
	.field_width	= 16,
};

/* RZ/V2H(P): TINT registers at the base, 8-bit TSSR fields, direct TSSEL */
static const struct rzv2h_hw_info rzv2h_hw_params = {
	.t_offs		= 0,
	.max_tssel	= ICU_RZV2H_TSSEL_MAX_VAL,
	.field_width	= 8,
};
667
/* Probe wrapper binding the RZ/G3E hardware parameters. */
static int rzg3e_icu_probe(struct platform_device *pdev, struct device_node *parent)
{
	return rzv2h_icu_probe_common(pdev, parent, &rzg3e_hw_params);
}
672
/* Probe wrapper binding the RZ/V2H(P) hardware parameters. */
static int rzv2h_icu_probe(struct platform_device *pdev, struct device_node *parent)
{
	return rzv2h_icu_probe_common(pdev, parent, &rzv2h_hw_params);
}
677
/* Platform driver glue: match each supported SoC compatible to its probe. */
IRQCHIP_PLATFORM_DRIVER_BEGIN(rzv2h_icu)
IRQCHIP_MATCH("renesas,r9a09g047-icu", rzg3e_icu_probe)
IRQCHIP_MATCH("renesas,r9a09g056-icu", rzv2h_icu_probe)
IRQCHIP_MATCH("renesas,r9a09g057-icu", rzv2h_icu_probe)
IRQCHIP_PLATFORM_DRIVER_END(rzv2h_icu)
MODULE_AUTHOR("Fabrizio Castro <fabrizio.castro.jz@renesas.com>");
MODULE_DESCRIPTION("Renesas RZ/V2H(P) ICU Driver");
685