// SPDX-License-Identifier: GPL-2.0
// Copyright (c) 2014-2025 MediaTek Inc.

/*
 * Library for MediaTek External Interrupt Support
 *
 * Author: Maoguang Meng <maoguang.meng@mediatek.com>
 *	   Sean Wang <sean.wang@mediatek.com>
 *	   Hao Chang <ot_chhao.chang@mediatek.com>
 *	   Qingliang Li <qingliang.li@mediatek.com>
 *
 */

#include <linux/delay.h>
#include <linux/err.h>
#include <linux/gpio/driver.h>
#include <linux/io.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>

#include "mtk-eint.h"

#define MTK_EINT_EDGE_SENSITIVE           0
#define MTK_EINT_LEVEL_SENSITIVE          1
#define MTK_EINT_DBNC_SET_DBNC_BITS	  4
#define MTK_EINT_DBNC_MAX		  16
#define MTK_EINT_DBNC_RST_BIT		  (0x1 << 1)
#define MTK_EINT_DBNC_SET_EN		  (0x1 << 0)

static const struct mtk_eint_regs mtk_generic_eint_regs = {
	.stat      = 0x000,
	.ack       = 0x040,
	.mask      = 0x080,
	.mask_set  = 0x0c0,
	.mask_clr  = 0x100,
	.sens      = 0x140,
	.sens_set  = 0x180,
	.sens_clr  = 0x1c0,
	.soft      = 0x200,
	.soft_set  = 0x240,
	.soft_clr  = 0x280,
	.pol       = 0x300,
	.pol_set   = 0x340,
	.pol_clr   = 0x380,
	.dom_en    = 0x400,
	.dbnc_ctrl = 0x500,
	.dbnc_set  = 0x600,
	.dbnc_clr  = 0x700,
};

const unsigned int debounce_time_mt2701[] = {
	500, 1000, 16000, 32000, 64000, 128000, 256000, 0
};
EXPORT_SYMBOL_GPL(debounce_time_mt2701);

const unsigned int debounce_time_mt6765[] = {
	125, 250, 500, 1000, 16000, 32000, 64000, 128000, 256000, 512000, 0
};
EXPORT_SYMBOL_GPL(debounce_time_mt6765);

const unsigned int debounce_time_mt6795[] = {
	500, 1000, 16000, 32000, 64000, 128000, 256000, 512000, 0
};
EXPORT_SYMBOL_GPL(debounce_time_mt6795);

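/*
 * Return the MMIO address of the 32-bit register word covering the given
 * EINT pin: the pin's controller instance base, plus the register block
 * offset, plus 4 bytes for every group of 32 pins.
 */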
static void __iomem *mtk_eint_get_offset(struct mtk_eint *eint,
					 unsigned int eint_num,
					 unsigned int offset)
{
	unsigned int idx = eint->pins[eint_num].index;
	unsigned int inst = eint->pins[eint_num].instance;
	void __iomem *reg;

	reg = eint->base[inst] + offset + (idx / 32 * 4);

	return reg;
}

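/*
 * Hardware debounce can only be enabled for level-sensitive pins that have
 * debounce requested; return 1 if both conditions hold, 0 otherwise.
 */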
static unsigned int mtk_eint_can_en_debounce(struct mtk_eint *eint,
					     unsigned int eint_num)
{
	unsigned int sens;
	unsigned int bit = BIT(eint->pins[eint_num].index % 32);
	void __iomem *reg = mtk_eint_get_offset(eint, eint_num,
						eint->regs->sens);

	if (readl(reg) & bit)
		sens = MTK_EINT_LEVEL_SENSITIVE;
	else
		sens = MTK_EINT_EDGE_SENSITIVE;

	if (eint->pins[eint_num].debounce && sens != MTK_EINT_EDGE_SENSITIVE)
		return 1;
	else
		return 0;
}

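/*
 * Emulate dual-edge triggering on hardware that only supports a single
 * polarity: program the polarity opposite to the current GPIO level and
 * repeat until the level stops changing underneath us. Returns the GPIO
 * level observed when it stopped changing.
 */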
static int mtk_eint_flip_edge(struct mtk_eint *eint, int hwirq)
{
	int start_level, curr_level;
	unsigned int reg_offset;
	unsigned int mask = BIT(eint->pins[hwirq].index & 0x1f);
	unsigned int port = (eint->pins[hwirq].index >> 5) & eint->hw->port_mask;
	void __iomem *reg = eint->base[eint->pins[hwirq].instance] + (port << 2);

	curr_level = eint->gpio_xlate->get_gpio_state(eint->pctl, hwirq);

	do {
		start_level = curr_level;
		if (start_level)
			reg_offset = eint->regs->pol_clr;
		else
			reg_offset = eint->regs->pol_set;
		writel(mask, reg + reg_offset);

		curr_level = eint->gpio_xlate->get_gpio_state(eint->pctl,
							      hwirq);
	} while (start_level != curr_level);

	return start_level;
}

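/* Mask one EINT interrupt and mirror the change in the cached mask state. */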
static void mtk_eint_mask(struct irq_data *d)
{
	struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
	unsigned int idx = eint->pins[d->hwirq].index;
	unsigned int inst = eint->pins[d->hwirq].instance;
	unsigned int mask = BIT(idx & 0x1f);
	void __iomem *reg = mtk_eint_get_offset(eint, d->hwirq,
						eint->regs->mask_set);

	eint->cur_mask[inst][idx >> 5] &= ~mask;

	writel(mask, reg);
}

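/*
 * Unmask one EINT interrupt, update the cached mask state and, for dual-edge
 * pins, re-arm the polarity so the next edge is not missed.
 */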
static void mtk_eint_unmask(struct irq_data *d)
{
	struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
	unsigned int idx = eint->pins[d->hwirq].index;
	unsigned int inst = eint->pins[d->hwirq].instance;
	unsigned int mask = BIT(idx & 0x1f);
	void __iomem *reg = mtk_eint_get_offset(eint, d->hwirq,
						eint->regs->mask_clr);

	eint->cur_mask[inst][idx >> 5] |= mask;

	writel(mask, reg);

	if (eint->pins[d->hwirq].dual_edge)
		mtk_eint_flip_edge(eint, d->hwirq);
}

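/* Return 1 if the given EINT is currently masked in hardware, 0 otherwise. */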
static unsigned int mtk_eint_get_mask(struct mtk_eint *eint,
				      unsigned int eint_num)
{
	unsigned int bit = BIT(eint->pins[eint_num].index % 32);
	void __iomem *reg = mtk_eint_get_offset(eint, eint_num,
						eint->regs->mask);

	return !!(readl(reg) & bit);
}

static void mtk_eint_ack(struct irq_data *d)
{
	struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
	unsigned int mask = BIT(eint->pins[d->hwirq].index & 0x1f);
	void __iomem *reg = mtk_eint_get_offset(eint, d->hwirq,
						eint->regs->ack);

	writel(mask, reg);
}

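/*
 * Program polarity and sensitivity for the requested trigger type. Both-edge
 * triggering is emulated via mtk_eint_flip_edge(), so it is only recorded in
 * the pin's dual_edge flag. The interrupt is masked around the register
 * updates and acked before being unmasked again.
 */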
static int mtk_eint_set_type(struct irq_data *d, unsigned int type)
{
	struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
	bool masked;
	unsigned int mask = BIT(eint->pins[d->hwirq].index & 0x1f);
	void __iomem *reg;

	if (((type & IRQ_TYPE_EDGE_BOTH) && (type & IRQ_TYPE_LEVEL_MASK)) ||
	    ((type & IRQ_TYPE_LEVEL_MASK) == IRQ_TYPE_LEVEL_MASK)) {
		dev_err(eint->dev,
			"Can't configure IRQ%d (EINT%lu) for type 0x%X\n",
			d->irq, d->hwirq, type);
		return -EINVAL;
	}

	if ((type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH)
		eint->pins[d->hwirq].dual_edge = 1;
	else
		eint->pins[d->hwirq].dual_edge = 0;

	if (!mtk_eint_get_mask(eint, d->hwirq)) {
		mtk_eint_mask(d);
		masked = false;
	} else {
		masked = true;
	}

	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_EDGE_FALLING)) {
		reg = mtk_eint_get_offset(eint, d->hwirq, eint->regs->pol_clr);
		writel(mask, reg);
	} else {
		reg = mtk_eint_get_offset(eint, d->hwirq, eint->regs->pol_set);
		writel(mask, reg);
	}

	if (type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING)) {
		reg = mtk_eint_get_offset(eint, d->hwirq, eint->regs->sens_clr);
		writel(mask, reg);
	} else {
		reg = mtk_eint_get_offset(eint, d->hwirq, eint->regs->sens_set);
		writel(mask, reg);
	}

	mtk_eint_ack(d);
	if (!masked)
		mtk_eint_unmask(d);

	return 0;
}

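/* Record in the per-instance wake mask whether this EINT may wake the system. */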
static int mtk_eint_irq_set_wake(struct irq_data *d, unsigned int on)
{
	struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
	unsigned int idx = eint->pins[d->hwirq].index;
	unsigned int inst = eint->pins[d->hwirq].instance;
	unsigned int shift = idx & 0x1f;
	unsigned int port = idx >> 5;

	if (on)
		eint->wake_mask[inst][port] |= BIT(shift);
	else
		eint->wake_mask[inst][port] &= ~BIT(shift);

	return 0;
}

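/*
 * Apply a complete mask snapshot (wake_mask or cur_mask) to every port of
 * every controller instance: bits set in the snapshot are unmasked, cleared
 * bits are masked.
 */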
static void mtk_eint_chip_write_mask(const struct mtk_eint *eint,
				     void __iomem *base, unsigned int **buf)
{
	int inst, port, port_num;
	void __iomem *reg;

	for (inst = 0; inst < eint->nbase; inst++) {
		port_num = DIV_ROUND_UP(eint->base_pin_num[inst], 32);
		for (port = 0; port < port_num; port++) {
			reg = eint->base[inst] + (port << 2);
			writel_relaxed(~buf[inst][port], reg + eint->regs->mask_set);
			writel_relaxed(buf[inst][port], reg + eint->regs->mask_clr);
		}
	}
}

static int mtk_eint_irq_request_resources(struct irq_data *d)
{
	struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
	struct gpio_chip *gpio_c;
	unsigned int gpio_n;
	int err;

	err = eint->gpio_xlate->get_gpio_n(eint->pctl, d->hwirq,
					   &gpio_n, &gpio_c);
	if (err < 0) {
		dev_err(eint->dev, "Cannot find pin\n");
		return err;
	}

	err = gpiochip_lock_as_irq(gpio_c, gpio_n);
	if (err < 0) {
		dev_err(eint->dev, "unable to lock HW IRQ %lu for IRQ\n",
			irqd_to_hwirq(d));
		return err;
	}

	err = eint->gpio_xlate->set_gpio_as_eint(eint->pctl, d->hwirq);
	if (err < 0) {
		dev_err(eint->dev, "Cannot set pin to EINT mode\n");
		return err;
	}

	return 0;
}

static void mtk_eint_irq_release_resources(struct irq_data *d)
{
	struct mtk_eint *eint = irq_data_get_irq_chip_data(d);
	struct gpio_chip *gpio_c;
	unsigned int gpio_n;

	eint->gpio_xlate->get_gpio_n(eint->pctl, d->hwirq, &gpio_n,
				     &gpio_c);

	gpiochip_unlock_as_irq(gpio_c, gpio_n);
}

static struct irq_chip mtk_eint_irq_chip = {
	.name = "mt-eint",
	.irq_disable = mtk_eint_mask,
	.irq_mask = mtk_eint_mask,
	.irq_unmask = mtk_eint_unmask,
	.irq_ack = mtk_eint_ack,
	.irq_set_type = mtk_eint_set_type,
	.irq_set_wake = mtk_eint_irq_set_wake,
	.irq_request_resources = mtk_eint_irq_request_resources,
	.irq_release_resources = mtk_eint_irq_release_resources,
};

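/*
 * Put the controller into a known state: enable the interrupt domain for
 * every pin group (dom_en) and mask every interrupt on every instance.
 */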
static unsigned int mtk_eint_hw_init(struct mtk_eint *eint)
{
	void __iomem *dom_reg, *mask_reg;
	unsigned int i, j;

	for (i = 0; i < eint->nbase; i++) {
		dom_reg = eint->base[i] + eint->regs->dom_en;
		mask_reg = eint->base[i] + eint->regs->mask_set;
		for (j = 0; j < eint->base_pin_num[i]; j += 32) {
			writel(0xffffffff, dom_reg);
			writel(0xffffffff, mask_reg);
			dom_reg += 4;
			mask_reg += 4;
		}
	}

	return 0;
}

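/*
 * If hardware debounce is enabled for this pin, reset its debounce counter
 * so the next event is debounced from scratch.
 */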
static inline void
mtk_eint_debounce_process(struct mtk_eint *eint, int index)
{
	unsigned int rst, ctrl_offset;
	unsigned int bit, dbnc;
	unsigned int inst = eint->pins[index].instance;
	unsigned int idx = eint->pins[index].index;

	ctrl_offset = (idx / 4) * 4 + eint->regs->dbnc_ctrl;
	dbnc = readl(eint->base[inst] + ctrl_offset);
	bit = MTK_EINT_DBNC_SET_EN << ((idx % 4) * 8);
	if ((bit & dbnc) > 0) {
		ctrl_offset = (idx / 4) * 4 + eint->regs->dbnc_set;
		rst = MTK_EINT_DBNC_RST_BIT << ((idx % 4) * 8);
		writel(rst, eint->base[inst] + ctrl_offset);
	}
}

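/*
 * Chained handler for the parent interrupt: scan the status registers of
 * every instance and dispatch each pending EINT to its mapped virtual IRQ,
 * handling wake-only masking, dual-edge emulation and debounce on the way.
 */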
static void mtk_eint_irq_handler(struct irq_desc *desc)
{
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct mtk_eint *eint = irq_desc_get_handler_data(desc);
	unsigned int i, j, port, status, shift, mask, eint_num;
	void __iomem *reg;
	int dual_edge, start_level, curr_level;

	chained_irq_enter(chip, desc);
	for (i = 0; i < eint->nbase; i++) {
		for (j = 0; j < eint->base_pin_num[i]; j += 32) {
			port = j >> 5;
			status = readl(eint->base[i] + port * 4 + eint->regs->stat);
			while (status) {
				shift = __ffs(status);
				status &= ~BIT(shift);
				mask = BIT(shift);
				eint_num = eint->pin_list[i][shift + j];

				/*
				 * If we get an interrupt on a pin that was only
				 * required for wake (but no real interrupt was
				 * requested), mask the interrupt (as
				 * mtk_eint_do_resume would do anyway later in
				 * the resume sequence).
				 */
				if (eint->wake_mask[i][port] & mask &&
				    !(eint->cur_mask[i][port] & mask)) {
					reg = mtk_eint_get_offset(eint, eint_num,
								  eint->regs->mask_set);
					writel_relaxed(mask, reg);
				}

				dual_edge = eint->pins[eint_num].dual_edge;
				if (dual_edge) {
					/*
					 * Clear the soft-irq in case we raised
					 * it last time.
					 */
					reg = mtk_eint_get_offset(eint, eint_num,
								  eint->regs->soft_clr);
					writel(mask, reg);

					start_level =
					eint->gpio_xlate->get_gpio_state(eint->pctl,
									 eint_num);
				}

				generic_handle_domain_irq(eint->domain, eint_num);

				if (dual_edge) {
					curr_level = mtk_eint_flip_edge(eint, eint_num);

					/*
					 * If the level changed, we might have
					 * lost one edge interrupt; raise it
					 * through the soft-irq.
					 */
					if (start_level != curr_level) {
						reg = mtk_eint_get_offset(eint, eint_num,
									  eint->regs->soft_set);
						writel(mask, reg);
					}
				}

				if (eint->pins[eint_num].debounce)
					mtk_eint_debounce_process(eint, eint_num);
			}
		}
	}
	chained_irq_exit(chip, desc);
}

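/*
 * On suspend, program the hardware mask from wake_mask so that only
 * wake-enabled EINTs can fire; mtk_eint_do_resume() restores cur_mask.
 */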
int mtk_eint_do_suspend(struct mtk_eint *eint)
{
	mtk_eint_chip_write_mask(eint, eint->base, eint->wake_mask);

	return 0;
}
EXPORT_SYMBOL_GPL(mtk_eint_do_suspend);

int mtk_eint_do_resume(struct mtk_eint *eint)
{
	mtk_eint_chip_write_mask(eint, eint->base, eint->cur_mask);

	return 0;
}
EXPORT_SYMBOL_GPL(mtk_eint_do_resume);

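/*
 * Configure hardware debounce for one EINT: pick the smallest supported
 * debounce time that covers the requested value, program it while the
 * interrupt is temporarily masked, then reset the debounce counter.
 */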
int mtk_eint_set_debounce(struct mtk_eint *eint, unsigned long eint_num,
			  unsigned int debounce)
{
	int virq, eint_offset;
	unsigned int set_offset, bit, clr_bit, clr_offset, rst, i, unmask,
		     dbnc;
	unsigned int inst = eint->pins[eint_num].instance;
	unsigned int idx = eint->pins[eint_num].index;
	struct irq_data *d;

	if (!eint->hw->db_time)
		return -EOPNOTSUPP;

	virq = irq_find_mapping(eint->domain, eint_num);
	eint_offset = (idx % 4) * 8;
	d = irq_get_irq_data(virq);

	set_offset = (idx / 4) * 4 + eint->regs->dbnc_set;
	clr_offset = (idx / 4) * 4 + eint->regs->dbnc_clr;

	if (!mtk_eint_can_en_debounce(eint, eint_num))
		return -EINVAL;

	dbnc = eint->num_db_time;
	for (i = 0; i < eint->num_db_time; i++) {
		if (debounce <= eint->hw->db_time[i]) {
			dbnc = i;
			break;
		}
	}

	if (!mtk_eint_get_mask(eint, eint_num)) {
		mtk_eint_mask(d);
		unmask = 1;
	} else {
		unmask = 0;
	}

	clr_bit = 0xff << eint_offset;
	writel(clr_bit, eint->base[inst] + clr_offset);

	bit = ((dbnc << MTK_EINT_DBNC_SET_DBNC_BITS) | MTK_EINT_DBNC_SET_EN) <<
		eint_offset;
	rst = MTK_EINT_DBNC_RST_BIT << eint_offset;
	writel(rst | bit, eint->base[inst] + set_offset);

	/*
	 * Delay a while (more than 2T) to wait for the hw debounce counter
	 * reset to take effect.
	 */
	udelay(1);
	if (unmask == 1)
		mtk_eint_unmask(d);

	return 0;
}
EXPORT_SYMBOL_GPL(mtk_eint_set_debounce);

int mtk_eint_find_irq(struct mtk_eint *eint, unsigned long eint_n)
{
	int irq;

	irq = irq_find_mapping(eint->domain, eint_n);
	if (!irq)
		return -EINVAL;

	return irq;
}
EXPORT_SYMBOL_GPL(mtk_eint_find_irq);

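/*
 * One-time setup of the EINT library: allocate the per-instance bookkeeping,
 * take over (or synthesize) the pin table, create the IRQ domain, initialise
 * the hardware and install the chained handler on the parent interrupt.
 */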
int mtk_eint_do_init(struct mtk_eint *eint, struct mtk_eint_pin *eint_pin)
{
	unsigned int size, i, port, virq, inst = 0;

	/* If the client didn't assign specific regs, use the generic ones */
	if (!eint->regs)
		eint->regs = &mtk_generic_eint_regs;

	eint->base_pin_num = devm_kmalloc_array(eint->dev, eint->nbase, sizeof(u16),
						GFP_KERNEL | __GFP_ZERO);
	if (!eint->base_pin_num)
		return -ENOMEM;

	if (eint_pin) {
		eint->pins = eint_pin;
		for (i = 0; i < eint->hw->ap_num; i++) {
			inst = eint->pins[i].instance;
			if (inst >= eint->nbase)
				continue;
			eint->base_pin_num[inst]++;
		}
	} else {
		size = eint->hw->ap_num * sizeof(struct mtk_eint_pin);
		eint->pins = devm_kmalloc(eint->dev, size, GFP_KERNEL);
		if (!eint->pins)
			goto err_pins;

		eint->base_pin_num[inst] = eint->hw->ap_num;
		for (i = 0; i < eint->hw->ap_num; i++) {
			eint->pins[i].instance = inst;
			eint->pins[i].index = i;
			eint->pins[i].debounce = (i < eint->hw->db_cnt) ? 1 : 0;
		}
	}

	eint->pin_list = devm_kmalloc(eint->dev, eint->nbase * sizeof(u16 *), GFP_KERNEL);
	if (!eint->pin_list)
		goto err_pin_list;

	eint->wake_mask = devm_kmalloc(eint->dev, eint->nbase * sizeof(u32 *), GFP_KERNEL);
	if (!eint->wake_mask)
		goto err_wake_mask;

	eint->cur_mask = devm_kmalloc(eint->dev, eint->nbase * sizeof(u32 *), GFP_KERNEL);
	if (!eint->cur_mask)
		goto err_cur_mask;

	for (i = 0; i < eint->nbase; i++) {
		eint->pin_list[i] = devm_kzalloc(eint->dev, eint->base_pin_num[i] * sizeof(u16),
						 GFP_KERNEL);
		port = DIV_ROUND_UP(eint->base_pin_num[i], 32);
		eint->wake_mask[i] = devm_kzalloc(eint->dev, port * sizeof(u32), GFP_KERNEL);
		eint->cur_mask[i] = devm_kzalloc(eint->dev, port * sizeof(u32), GFP_KERNEL);
		if (!eint->pin_list[i] || !eint->wake_mask[i] || !eint->cur_mask[i])
			goto err_eint;
	}

	eint->domain = irq_domain_create_linear(of_fwnode_handle(eint->dev->of_node),
						eint->hw->ap_num, &irq_domain_simple_ops, NULL);
	if (!eint->domain)
		goto err_eint;

	if (eint->hw->db_time) {
		for (i = 0; i < MTK_EINT_DBNC_MAX; i++)
			if (eint->hw->db_time[i] == 0)
				break;
		eint->num_db_time = i;
	}

	mtk_eint_hw_init(eint);
	for (i = 0; i < eint->hw->ap_num; i++) {
		inst = eint->pins[i].instance;
		if (inst >= eint->nbase)
			continue;
		eint->pin_list[inst][eint->pins[i].index] = i;
		virq = irq_create_mapping(eint->domain, i);
		irq_set_chip_and_handler(virq, &mtk_eint_irq_chip,
					 handle_level_irq);
		irq_set_chip_data(virq, eint);
	}

	irq_set_chained_handler_and_data(eint->irq, mtk_eint_irq_handler,
					 eint);

	return 0;

err_eint:
	for (i = 0; i < eint->nbase; i++) {
		if (eint->cur_mask[i])
			devm_kfree(eint->dev, eint->cur_mask[i]);
		if (eint->wake_mask[i])
			devm_kfree(eint->dev, eint->wake_mask[i]);
		if (eint->pin_list[i])
			devm_kfree(eint->dev, eint->pin_list[i]);
	}
	devm_kfree(eint->dev, eint->cur_mask);
err_cur_mask:
	devm_kfree(eint->dev, eint->wake_mask);
err_wake_mask:
	devm_kfree(eint->dev, eint->pin_list);
err_pin_list:
	if (!eint_pin)
		devm_kfree(eint->dev, eint->pins);
err_pins:
	devm_kfree(eint->dev, eint->base_pin_num);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(mtk_eint_do_init);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("MediaTek EINT Driver");