1 // SPDX-License-Identifier: GPL-2.0
2 // Copyright (c) 2014-2025 MediaTek Inc.
3
4 /*
5 * Library for MediaTek External Interrupt Support
6 *
7 * Author: Maoguang Meng <maoguang.meng@mediatek.com>
8 * Sean Wang <sean.wang@mediatek.com>
9 * Hao Chang <ot_chhao.chang@mediatek.com>
10 * Qingliang Li <qingliang.li@mediatek.com>
11 *
12 */
13
14 #include <linux/delay.h>
15 #include <linux/err.h>
16 #include <linux/gpio/driver.h>
17 #include <linux/io.h>
18 #include <linux/irqchip/chained_irq.h>
19 #include <linux/irqdomain.h>
20 #include <linux/module.h>
21 #include <linux/of_irq.h>
22 #include <linux/platform_device.h>
23
24 #include "mtk-eint.h"
25
26 #define MTK_EINT_EDGE_SENSITIVE 0
27 #define MTK_EINT_LEVEL_SENSITIVE 1
28 #define MTK_EINT_DBNC_SET_DBNC_BITS 4
29 #define MTK_EINT_DBNC_MAX 16
30 #define MTK_EINT_DBNC_RST_BIT (0x1 << 1)
31 #define MTK_EINT_DBNC_SET_EN (0x1 << 0)
32
/*
 * Default register layout, used whenever a SoC driver does not supply its
 * own mtk_eint_regs table (see mtk_eint_do_init()).
 */
static const struct mtk_eint_regs mtk_generic_eint_regs = {
	.stat      = 0x000,
	.ack       = 0x040,
	.mask      = 0x080,
	.mask_set  = 0x0c0,
	.mask_clr  = 0x100,
	.sens      = 0x140,
	.sens_set  = 0x180,
	.sens_clr  = 0x1c0,
	.soft      = 0x200,
	.soft_set  = 0x240,
	.soft_clr  = 0x280,
	.pol       = 0x300,
	.pol_set   = 0x340,
	.pol_clr   = 0x380,
	.dom_en    = 0x400,
	.dbnc_ctrl = 0x500,
	.dbnc_set  = 0x600,
	.dbnc_clr  = 0x700,
};
53
/*
 * Per-SoC supported debounce times in microseconds, in ascending order and
 * zero-terminated. The index of the chosen entry is the hardware encoding
 * programmed by mtk_eint_set_debounce().
 */
const unsigned int debounce_time_mt2701[] = {
	500, 1000, 16000, 32000, 64000, 128000, 256000, 0
};
EXPORT_SYMBOL_GPL(debounce_time_mt2701);

const unsigned int debounce_time_mt6765[] = {
	125, 250, 500, 1000, 16000, 32000, 64000, 128000, 256000, 512000, 0
};
EXPORT_SYMBOL_GPL(debounce_time_mt6765);

const unsigned int debounce_time_mt6795[] = {
	500, 1000, 16000, 32000, 64000, 128000, 256000, 512000, 0
};
EXPORT_SYMBOL_GPL(debounce_time_mt6795);

const unsigned int debounce_time_mt6878[] = {
	156, 313, 625, 1250, 20000, 40000, 80000, 160000, 320000, 640000, 0
};
EXPORT_SYMBOL_GPL(debounce_time_mt6878);
73
mtk_eint_get_offset(struct mtk_eint * eint,unsigned int eint_num,unsigned int offset)74 static void __iomem *mtk_eint_get_offset(struct mtk_eint *eint,
75 unsigned int eint_num,
76 unsigned int offset)
77 {
78 unsigned int idx = eint->pins[eint_num].index;
79 unsigned int inst = eint->pins[eint_num].instance;
80 void __iomem *reg;
81
82 reg = eint->base[inst] + offset + (idx / 32 * 4);
83
84 return reg;
85 }
86
mtk_eint_can_en_debounce(struct mtk_eint * eint,unsigned int eint_num)87 static unsigned int mtk_eint_can_en_debounce(struct mtk_eint *eint,
88 unsigned int eint_num)
89 {
90 unsigned int sens;
91 unsigned int bit = BIT(eint->pins[eint_num].index % 32);
92 void __iomem *reg = mtk_eint_get_offset(eint, eint_num,
93 eint->regs->sens);
94
95 if (readl(reg) & bit)
96 sens = MTK_EINT_LEVEL_SENSITIVE;
97 else
98 sens = MTK_EINT_EDGE_SENSITIVE;
99
100 if (eint->pins[eint_num].debounce && sens != MTK_EINT_EDGE_SENSITIVE)
101 return 1;
102 else
103 return 0;
104 }
105
mtk_eint_flip_edge(struct mtk_eint * eint,int hwirq)106 static int mtk_eint_flip_edge(struct mtk_eint *eint, int hwirq)
107 {
108 int start_level, curr_level;
109 unsigned int reg_offset;
110 unsigned int mask = BIT(eint->pins[hwirq].index & 0x1f);
111 unsigned int port = (eint->pins[hwirq].index >> 5) & eint->hw->port_mask;
112 void __iomem *reg = eint->base[eint->pins[hwirq].instance] + (port << 2);
113
114 curr_level = eint->gpio_xlate->get_gpio_state(eint->pctl, hwirq);
115
116 do {
117 start_level = curr_level;
118 if (start_level)
119 reg_offset = eint->regs->pol_clr;
120 else
121 reg_offset = eint->regs->pol_set;
122 writel(mask, reg + reg_offset);
123
124 curr_level = eint->gpio_xlate->get_gpio_state(eint->pctl,
125 hwirq);
126 } while (start_level != curr_level);
127
128 return start_level;
129 }
130
mtk_eint_mask(struct irq_data * d)131 static void mtk_eint_mask(struct irq_data *d)
132 {
133 struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
134 unsigned int idx = eint->pins[d->hwirq].index;
135 unsigned int inst = eint->pins[d->hwirq].instance;
136 unsigned int mask = BIT(idx & 0x1f);
137 void __iomem *reg = mtk_eint_get_offset(eint, d->hwirq,
138 eint->regs->mask_set);
139
140 eint->cur_mask[inst][idx >> 5] &= ~mask;
141
142 writel(mask, reg);
143 }
144
mtk_eint_unmask(struct irq_data * d)145 static void mtk_eint_unmask(struct irq_data *d)
146 {
147 struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
148 unsigned int idx = eint->pins[d->hwirq].index;
149 unsigned int inst = eint->pins[d->hwirq].instance;
150 unsigned int mask = BIT(idx & 0x1f);
151 void __iomem *reg = mtk_eint_get_offset(eint, d->hwirq,
152 eint->regs->mask_clr);
153
154 eint->cur_mask[inst][idx >> 5] |= mask;
155
156 writel(mask, reg);
157
158 if (eint->pins[d->hwirq].dual_edge)
159 mtk_eint_flip_edge(eint, d->hwirq);
160 }
161
mtk_eint_get_mask(struct mtk_eint * eint,unsigned int eint_num)162 static unsigned int mtk_eint_get_mask(struct mtk_eint *eint,
163 unsigned int eint_num)
164 {
165 unsigned int bit = BIT(eint->pins[eint_num].index % 32);
166 void __iomem *reg = mtk_eint_get_offset(eint, eint_num,
167 eint->regs->mask);
168
169 return !!(readl(reg) & bit);
170 }
171
mtk_eint_ack(struct irq_data * d)172 static void mtk_eint_ack(struct irq_data *d)
173 {
174 struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
175 unsigned int mask = BIT(eint->pins[d->hwirq].index & 0x1f);
176 void __iomem *reg = mtk_eint_get_offset(eint, d->hwirq,
177 eint->regs->ack);
178
179 writel(mask, reg);
180 }
181
mtk_eint_set_type(struct irq_data * d,unsigned int type)182 static int mtk_eint_set_type(struct irq_data *d, unsigned int type)
183 {
184 struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
185 bool masked;
186 unsigned int mask = BIT(eint->pins[d->hwirq].index & 0x1f);
187 void __iomem *reg;
188
189 if (((type & IRQ_TYPE_EDGE_BOTH) && (type & IRQ_TYPE_LEVEL_MASK)) ||
190 ((type & IRQ_TYPE_LEVEL_MASK) == IRQ_TYPE_LEVEL_MASK)) {
191 dev_err(eint->dev,
192 "Can't configure IRQ%d (EINT%lu) for type 0x%X\n",
193 d->irq, d->hwirq, type);
194 return -EINVAL;
195 }
196
197 if ((type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
198 eint->pins[d->hwirq].dual_edge = 1;
199 else
200 eint->pins[d->hwirq].dual_edge = 0;
201
202 if (!mtk_eint_get_mask(eint, d->hwirq)) {
203 mtk_eint_mask(d);
204 masked = false;
205 } else {
206 masked = true;
207 }
208
209 if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING)) {
210 reg = mtk_eint_get_offset(eint, d->hwirq, eint->regs->pol_clr);
211 writel(mask, reg);
212 } else {
213 reg = mtk_eint_get_offset(eint, d->hwirq, eint->regs->pol_set);
214 writel(mask, reg);
215 }
216
217 if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
218 reg = mtk_eint_get_offset(eint, d->hwirq, eint->regs->sens_clr);
219 writel(mask, reg);
220 } else {
221 reg = mtk_eint_get_offset(eint, d->hwirq, eint->regs->sens_set);
222 writel(mask, reg);
223 }
224
225 mtk_eint_ack(d);
226 if (!masked)
227 mtk_eint_unmask(d);
228
229 return 0;
230 }
231
mtk_eint_irq_set_wake(struct irq_data * d,unsigned int on)232 static int mtk_eint_irq_set_wake(struct irq_data *d, unsigned int on)
233 {
234 struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
235 unsigned int idx = eint->pins[d->hwirq].index;
236 unsigned int inst = eint->pins[d->hwirq].instance;
237 unsigned int shift = idx & 0x1f;
238 unsigned int port = idx >> 5;
239
240 if (on)
241 eint->wake_mask[inst][port] |= BIT(shift);
242 else
243 eint->wake_mask[inst][port] &= ~BIT(shift);
244
245 return 0;
246 }
247
/*
 * Push a software mask snapshot (@buf) to hardware: bits set in @buf are
 * unmasked, clear bits are masked, for every port of every instance.
 */
static void mtk_eint_chip_write_mask(const struct mtk_eint *eint,
				     void __iomem *base, unsigned int **buf)
{
	int inst, port;

	for (inst = 0; inst < eint->nbase; inst++) {
		int nports = DIV_ROUND_UP(eint->base_pin_num[inst], 32);

		for (port = 0; port < nports; port++) {
			void __iomem *reg = eint->base[inst] + (port << 2);

			writel_relaxed(~buf[inst][port], reg + eint->regs->mask_set);
			writel_relaxed(buf[inst][port], reg + eint->regs->mask_clr);
		}
	}
}
263
mtk_eint_irq_request_resources(struct irq_data * d)264 static int mtk_eint_irq_request_resources(struct irq_data *d)
265 {
266 struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
267 struct gpio_chip *gpio_c;
268 unsigned int gpio_n;
269 int err;
270
271 err = eint->gpio_xlate->get_gpio_n(eint->pctl, d->hwirq,
272 &gpio_n, &gpio_c);
273 if (err < 0) {
274 dev_err(eint->dev, "Can not find pin\n");
275 return err;
276 }
277
278 err = gpiochip_lock_as_irq(gpio_c, gpio_n);
279 if (err < 0) {
280 dev_err(eint->dev, "unable to lock HW IRQ %lu for IRQ\n",
281 irqd_to_hwirq(d));
282 return err;
283 }
284
285 err = eint->gpio_xlate->set_gpio_as_eint(eint->pctl, d->hwirq);
286 if (err < 0) {
287 dev_err(eint->dev, "Can not eint mode\n");
288 return err;
289 }
290
291 return 0;
292 }
293
mtk_eint_irq_release_resources(struct irq_data * d)294 static void mtk_eint_irq_release_resources(struct irq_data *d)
295 {
296 struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
297 struct gpio_chip *gpio_c;
298 unsigned int gpio_n;
299
300 eint->gpio_xlate->get_gpio_n(eint->pctl, d->hwirq, &gpio_n,
301 &gpio_c);
302
303 gpiochip_unlock_as_irq(gpio_c, gpio_n);
304 }
305
/*
 * irq_chip for all EINT virqs. .irq_disable reuses the mask hook, so a
 * disabled interrupt is simply a masked one.
 */
static struct irq_chip mtk_eint_irq_chip = {
	.name = "mt-eint",
	.irq_disable = mtk_eint_mask,
	.irq_mask = mtk_eint_mask,
	.irq_unmask = mtk_eint_unmask,
	.irq_ack = mtk_eint_ack,
	.irq_set_type = mtk_eint_set_type,
	.irq_set_wake = mtk_eint_irq_set_wake,
	.irq_request_resources = mtk_eint_irq_request_resources,
	.irq_release_resources = mtk_eint_irq_release_resources,
};
317
mtk_eint_hw_init(struct mtk_eint * eint)318 static unsigned int mtk_eint_hw_init(struct mtk_eint *eint)
319 {
320 void __iomem *dom_reg, *mask_reg;
321 unsigned int i, j;
322
323 for (i = 0; i < eint->nbase; i++) {
324 dom_reg = eint->base[i] + eint->regs->dom_en;
325 mask_reg = eint->base[i] + eint->regs->mask_set;
326 for (j = 0; j < eint->base_pin_num[i]; j += 32) {
327 writel(0xffffffff, dom_reg);
328 writel(0xffffffff, mask_reg);
329 dom_reg += 4;
330 mask_reg += 4;
331 }
332 }
333
334 return 0;
335 }
336
337 static inline void
mtk_eint_debounce_process(struct mtk_eint * eint,int index)338 mtk_eint_debounce_process(struct mtk_eint *eint, int index)
339 {
340 unsigned int rst, ctrl_offset;
341 unsigned int bit, dbnc;
342 unsigned int inst = eint->pins[index].instance;
343 unsigned int idx = eint->pins[index].index;
344
345 ctrl_offset = (idx / 4) * 4 + eint->regs->dbnc_ctrl;
346 dbnc = readl(eint->base[inst] + ctrl_offset);
347 bit = MTK_EINT_DBNC_SET_EN << ((idx % 4) * 8);
348 if ((bit & dbnc) > 0) {
349 ctrl_offset = (idx / 4) * 4 + eint->regs->dbnc_set;
350 rst = MTK_EINT_DBNC_RST_BIT << ((idx % 4) * 8);
351 writel(rst, eint->base[inst] + ctrl_offset);
352 }
353 }
354
/*
 * Chained handler for the parent interrupt: scan the status registers of
 * every instance and dispatch each pending EINT to its mapped virq,
 * handling wake-only pins, dual-edge emulation and debounce on the way.
 */
static void mtk_eint_irq_handler(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct mtk_eint *eint = irq_desc_get_handler_data(desc);
	unsigned int i, j, port, status, shift, mask, eint_num;
	void __iomem *reg;
	int dual_edge, start_level, curr_level;

	chained_irq_enter(chip, desc);
	for (i = 0; i < eint->nbase; i++) {
		for (j = 0; j < eint->base_pin_num[i]; j += 32) {
			port = j >> 5;
			status = readl(eint->base[i] + port * 4 + eint->regs->stat);
			/* Service pending bits lowest-first until none remain. */
			while (status) {
				shift = __ffs(status);
				status &= ~BIT(shift);
				mask = BIT(shift);
				/* Reverse-map the hardware bit to the EINT number. */
				eint_num = eint->pin_list[i][shift + j];

				/*
				 * If we get an interrupt on pin that was only required
				 * for wake (but no real interrupt requested), mask the
				 * interrupt (as would mtk_eint_resume do anyway later
				 * in the resume sequence).
				 */
				if (eint->wake_mask[i][port] & mask &&
				    !(eint->cur_mask[i][port] & mask)) {
					reg = mtk_eint_get_offset(eint, eint_num,
								  eint->regs->mask_set);
					writel_relaxed(mask, reg);
				}

				dual_edge = eint->pins[eint_num].dual_edge;
				if (dual_edge) {
					/*
					 * Clear soft-irq in case we raised it last
					 * time.
					 */
					reg = mtk_eint_get_offset(eint, eint_num,
								  eint->regs->soft_clr);
					writel(mask, reg);

					/* Level before running the handler. */
					start_level =
					eint->gpio_xlate->get_gpio_state(eint->pctl,
									 eint_num);
				}

				generic_handle_domain_irq(eint->domain, eint_num);

				if (dual_edge) {
					curr_level = mtk_eint_flip_edge(eint, eint_num);

					/*
					 * If level changed, we might lost one edge
					 * interrupt, raised it through soft-irq.
					 */
					if (start_level != curr_level) {
						reg = mtk_eint_get_offset(eint, eint_num,
									  eint->regs->soft_set);
						writel(mask, reg);
					}
				}

				if (eint->pins[eint_num].debounce)
					mtk_eint_debounce_process(eint, eint_num);
			}
		}
	}
	chained_irq_exit(chip, desc);
}
425
/*
 * mtk_eint_do_suspend - apply the wake mask on the way into suspend
 *
 * Pins with their wake_mask bit set stay unmasked and can wake the system;
 * all other pins are masked (see mtk_eint_chip_write_mask()).
 */
int mtk_eint_do_suspend(struct mtk_eint *eint)
{
	mtk_eint_chip_write_mask(eint, eint->base, eint->wake_mask);

	return 0;
}
EXPORT_SYMBOL_GPL(mtk_eint_do_suspend);
433
/*
 * mtk_eint_do_resume - restore the runtime mask state after resume
 *
 * Re-applies cur_mask, the software view maintained by the mask/unmask
 * callbacks, undoing the wake-only configuration set at suspend.
 */
int mtk_eint_do_resume(struct mtk_eint *eint)
{
	mtk_eint_chip_write_mask(eint, eint->base, eint->cur_mask);

	return 0;
}
EXPORT_SYMBOL_GPL(mtk_eint_do_resume);
441
/*
 * mtk_eint_set_debounce - program the hardware debounce time for an EINT
 * @eint: EINT controller instance
 * @eint_num: EINT pin number
 * @debounce: requested debounce time in microseconds
 *
 * Picks the smallest table entry covering @debounce, temporarily masks the
 * interrupt while reprogramming, and resets the debounce counter.
 *
 * Returns 0 on success, -EOPNOTSUPP when the SoC has no debounce table,
 * -EINVAL when the pin cannot use debounce (edge-sensitive or not flagged
 * for debounce support).
 */
int mtk_eint_set_debounce(struct mtk_eint *eint, unsigned long eint_num,
			  unsigned int debounce)
{
	int virq, eint_offset;
	unsigned int set_offset, bit, clr_bit, clr_offset, rst, i, unmask,
		     dbnc;
	unsigned int inst = eint->pins[eint_num].instance;
	unsigned int idx = eint->pins[eint_num].index;
	struct irq_data *d;

	if (!eint->hw->db_time)
		return -EOPNOTSUPP;

	virq = irq_find_mapping(eint->domain, eint_num);
	/* Four pins share each debounce register, one byte of bits each. */
	eint_offset = (idx % 4) * 8;
	d = irq_get_irq_data(virq);

	set_offset = (idx / 4) * 4 + eint->regs->dbnc_set;
	clr_offset = (idx / 4) * 4 + eint->regs->dbnc_clr;

	if (!mtk_eint_can_en_debounce(eint, eint_num))
		return -EINVAL;

	/*
	 * Select the first entry that covers the request; if none does, dbnc
	 * stays at num_db_time. NOTE(review): with a full 16-entry table that
	 * fallback value would not fit the 4-bit DBNC field written below —
	 * presumably all tables are shorter; verify against the SoC tables.
	 */
	dbnc = eint->num_db_time;
	for (i = 0; i < eint->num_db_time; i++) {
		if (debounce <= eint->hw->db_time[i]) {
			dbnc = i;
			break;
		}
	}

	/* Mask while reprogramming; remember whether to unmask afterwards. */
	if (!mtk_eint_get_mask(eint, eint_num)) {
		mtk_eint_mask(d);
		unmask = 1;
	} else {
		unmask = 0;
	}

	/* Wipe the pin's whole control byte before writing the new value. */
	clr_bit = 0xff << eint_offset;
	writel(clr_bit, eint->base[inst] + clr_offset);

	bit = ((dbnc << MTK_EINT_DBNC_SET_DBNC_BITS) | MTK_EINT_DBNC_SET_EN) <<
		eint_offset;
	rst = MTK_EINT_DBNC_RST_BIT << eint_offset;
	writel(rst | bit, eint->base[inst] + set_offset);

	/*
	 * Delay a while (more than 2T) to wait for hw debounce counter reset
	 * work correctly.
	 */
	udelay(1);
	if (unmask == 1)
		mtk_eint_unmask(d);

	return 0;
}
EXPORT_SYMBOL_GPL(mtk_eint_set_debounce);
499
mtk_eint_find_irq(struct mtk_eint * eint,unsigned long eint_n)500 int mtk_eint_find_irq(struct mtk_eint *eint, unsigned long eint_n)
501 {
502 int irq;
503
504 irq = irq_find_mapping(eint->domain, eint_n);
505 if (!irq)
506 return -EINVAL;
507
508 return irq;
509 }
510 EXPORT_SYMBOL_GPL(mtk_eint_find_irq);
511
mtk_eint_do_init(struct mtk_eint * eint,struct mtk_eint_pin * eint_pin)512 int mtk_eint_do_init(struct mtk_eint *eint, struct mtk_eint_pin *eint_pin)
513 {
514 unsigned int size, i, port, virq, inst = 0;
515
516 /* If clients don't assign a specific regs, let's use generic one */
517 if (!eint->regs)
518 eint->regs = &mtk_generic_eint_regs;
519
520 eint->base_pin_num = devm_kmalloc_array(eint->dev, eint->nbase, sizeof(u16),
521 GFP_KERNEL | __GFP_ZERO);
522 if (!eint->base_pin_num)
523 return -ENOMEM;
524
525 if (eint_pin) {
526 eint->pins = eint_pin;
527 for (i = 0; i < eint->hw->ap_num; i++) {
528 inst = eint->pins[i].instance;
529 if (inst >= eint->nbase)
530 continue;
531 eint->base_pin_num[inst]++;
532 }
533 } else {
534 size = eint->hw->ap_num * sizeof(struct mtk_eint_pin);
535 eint->pins = devm_kmalloc(eint->dev, size, GFP_KERNEL);
536 if (!eint->pins)
537 goto err_pins;
538
539 eint->base_pin_num[inst] = eint->hw->ap_num;
540 for (i = 0; i < eint->hw->ap_num; i++) {
541 eint->pins[i].instance = inst;
542 eint->pins[i].index = i;
543 eint->pins[i].debounce = (i < eint->hw->db_cnt) ? 1 : 0;
544 }
545 }
546
547 eint->pin_list = devm_kmalloc(eint->dev, eint->nbase * sizeof(u16 *), GFP_KERNEL);
548 if (!eint->pin_list)
549 goto err_pin_list;
550
551 eint->wake_mask = devm_kmalloc(eint->dev, eint->nbase * sizeof(u32 *), GFP_KERNEL);
552 if (!eint->wake_mask)
553 goto err_wake_mask;
554
555 eint->cur_mask = devm_kmalloc(eint->dev, eint->nbase * sizeof(u32 *), GFP_KERNEL);
556 if (!eint->cur_mask)
557 goto err_cur_mask;
558
559 for (i = 0; i < eint->nbase; i++) {
560 eint->pin_list[i] = devm_kzalloc(eint->dev, eint->base_pin_num[i] * sizeof(u16),
561 GFP_KERNEL);
562 port = DIV_ROUND_UP(eint->base_pin_num[i], 32);
563 eint->wake_mask[i] = devm_kzalloc(eint->dev, port * sizeof(u32), GFP_KERNEL);
564 eint->cur_mask[i] = devm_kzalloc(eint->dev, port * sizeof(u32), GFP_KERNEL);
565 if (!eint->pin_list[i] || !eint->wake_mask[i] || !eint->cur_mask[i])
566 goto err_eint;
567 }
568
569 eint->domain = irq_domain_create_linear(dev_fwnode(eint->dev), eint->hw->ap_num,
570 &irq_domain_simple_ops, NULL);
571 if (!eint->domain)
572 goto err_eint;
573
574 if (eint->hw->db_time) {
575 for (i = 0; i < MTK_EINT_DBNC_MAX; i++)
576 if (eint->hw->db_time[i] == 0)
577 break;
578 eint->num_db_time = i;
579 }
580
581 mtk_eint_hw_init(eint);
582 for (i = 0; i < eint->hw->ap_num; i++) {
583 inst = eint->pins[i].instance;
584 if (inst >= eint->nbase)
585 continue;
586 eint->pin_list[inst][eint->pins[i].index] = i;
587 virq = irq_create_mapping(eint->domain, i);
588 irq_set_chip_and_handler(virq, &mtk_eint_irq_chip,
589 handle_level_irq);
590 irq_set_chip_data(virq, eint);
591 }
592
593 irq_set_chained_handler_and_data(eint->irq, mtk_eint_irq_handler,
594 eint);
595
596 return 0;
597
598 err_eint:
599 for (i = 0; i < eint->nbase; i++) {
600 if (eint->cur_mask[i])
601 devm_kfree(eint->dev, eint->cur_mask[i]);
602 if (eint->wake_mask[i])
603 devm_kfree(eint->dev, eint->wake_mask[i]);
604 if (eint->pin_list[i])
605 devm_kfree(eint->dev, eint->pin_list[i]);
606 }
607 devm_kfree(eint->dev, eint->cur_mask);
608 err_cur_mask:
609 devm_kfree(eint->dev, eint->wake_mask);
610 err_wake_mask:
611 devm_kfree(eint->dev, eint->pin_list);
612 err_pin_list:
613 if (!eint_pin)
614 devm_kfree(eint->dev, eint->pins);
615 err_pins:
616 devm_kfree(eint->dev, eint->base_pin_num);
617 return -ENOMEM;
618 }
619 EXPORT_SYMBOL_GPL(mtk_eint_do_init);
620
621 MODULE_LICENSE("GPL v2");
622 MODULE_DESCRIPTION("MediaTek EINT Driver");
623