/*
 * GPIO driver for Marvell SoCs
 *
 * Copyright (C) 2012 Marvell
 *
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 * Andrew Lunn <andrew@lunn.ch>
 * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2.  This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 *
 * This driver is a fairly straightforward GPIO driver for the
 * complete family of Marvell EBU SoC platforms (Orion, Dove,
 * Kirkwood, Discovery, Armada 370/XP). The only complexity of this
 * driver is the different register layout that exists between the
 * non-SMP platforms (Orion, Dove, Kirkwood, Armada 370) and the SMP
 * platforms (MV78200 from the Discovery family and the Armada
 * XP). Therefore, this driver handles three variants of the GPIO
 * block:
 * - the basic variant, called "orion-gpio", with the simplest
 *   register set. Used on Orion, Dove, Kirkwood, Armada 370 and
 *   non-SMP Discovery systems
 * - the mv78200 variant for MV78200 Discovery systems. This variant
 *   turns the edge mask and level mask registers into CPU0 edge
 *   mask/level mask registers, and adds CPU1 edge mask/level mask
 *   registers.
 * - the armadaxp variant for Armada XP systems. This variant keeps
 *   the normal cause/edge mask/level mask registers when the global
 *   interrupts are used, but adds per-CPU cause/edge mask/level mask
 *   registers in a separate memory area for the per-CPU GPIO
 *   interrupts.
 */

#include <linux/module.h>
#include <linux/gpio.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/irqdomain.h>
#include <linux/io.h>
#include <linux/of_irq.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pinctrl/consumer.h>

/*
 * GPIO unit register offsets.
 */
#define GPIO_OUT_OFF		0x0000
#define GPIO_IO_CONF_OFF	0x0004
#define GPIO_BLINK_EN_OFF	0x0008
#define GPIO_IN_POL_OFF		0x000c
#define GPIO_DATA_IN_OFF	0x0010
#define GPIO_EDGE_CAUSE_OFF	0x0014
#define GPIO_EDGE_MASK_OFF	0x0018
#define GPIO_LEVEL_MASK_OFF	0x001c

/* The MV78200 has per-CPU registers for edge mask and level mask */
#define GPIO_EDGE_MASK_MV78200_OFF(cpu)   ((cpu) ? 0x30 : 0x18)
#define GPIO_LEVEL_MASK_MV78200_OFF(cpu)  ((cpu) ? 0x34 : 0x1C)

/*
 * The Armada XP has per-CPU registers for interrupt cause, interrupt
 * mask and interrupt level mask. Those are relative to the
 * percpu_membase.
 */
#define GPIO_EDGE_CAUSE_ARMADAXP_OFF(cpu) ((cpu) * 0x4)
#define GPIO_EDGE_MASK_ARMADAXP_OFF(cpu)  (0x10 + (cpu) * 0x4)
#define GPIO_LEVEL_MASK_ARMADAXP_OFF(cpu) (0x20 + (cpu) * 0x4)

#define MVEBU_GPIO_SOC_VARIANT_ORION    0x1
#define MVEBU_GPIO_SOC_VARIANT_MV78200  0x2
#define MVEBU_GPIO_SOC_VARIANT_ARMADAXP 0x3

#define MVEBU_MAX_GPIO_PER_BANK         32

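/*
 * Per-bank driver state: membase points at the bank's main register
 * window, percpu_membase is only used on the Armada XP variant for
 * the per-CPU cause/mask registers.
 */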
struct mvebu_gpio_chip {
	struct gpio_chip   chip;
	spinlock_t	   lock;
	void __iomem	  *membase;
	void __iomem	  *percpu_membase;
	int		   irqbase;
	struct irq_domain *domain;
	int                soc_variant;
};

/*
 * Functions returning addresses of individual registers for a given
 * GPIO controller.
 */
static inline void __iomem *mvebu_gpioreg_out(struct mvebu_gpio_chip *mvchip)
{
	return mvchip->membase + GPIO_OUT_OFF;
}

static inline void __iomem *mvebu_gpioreg_io_conf(struct mvebu_gpio_chip *mvchip)
{
	return mvchip->membase + GPIO_IO_CONF_OFF;
}

static inline void __iomem *mvebu_gpioreg_in_pol(struct mvebu_gpio_chip *mvchip)
{
	return mvchip->membase + GPIO_IN_POL_OFF;
}

static inline void __iomem *mvebu_gpioreg_data_in(struct mvebu_gpio_chip *mvchip)
{
	return mvchip->membase + GPIO_DATA_IN_OFF;
}

static inline void __iomem *mvebu_gpioreg_edge_cause(struct mvebu_gpio_chip *mvchip)
{
	int cpu;

	switch (mvchip->soc_variant) {
	case MVEBU_GPIO_SOC_VARIANT_ORION:
	case MVEBU_GPIO_SOC_VARIANT_MV78200:
		return mvchip->membase + GPIO_EDGE_CAUSE_OFF;
	case MVEBU_GPIO_SOC_VARIANT_ARMADAXP:
		cpu = smp_processor_id();
		return mvchip->percpu_membase + GPIO_EDGE_CAUSE_ARMADAXP_OFF(cpu);
	default:
		BUG();
	}
}

static inline void __iomem *mvebu_gpioreg_edge_mask(struct mvebu_gpio_chip *mvchip)
{
	int cpu;

	switch (mvchip->soc_variant) {
	case MVEBU_GPIO_SOC_VARIANT_ORION:
		return mvchip->membase + GPIO_EDGE_MASK_OFF;
	case MVEBU_GPIO_SOC_VARIANT_MV78200:
		cpu = smp_processor_id();
		return mvchip->membase + GPIO_EDGE_MASK_MV78200_OFF(cpu);
	case MVEBU_GPIO_SOC_VARIANT_ARMADAXP:
		cpu = smp_processor_id();
		return mvchip->percpu_membase + GPIO_EDGE_MASK_ARMADAXP_OFF(cpu);
	default:
		BUG();
	}
}

static void __iomem *mvebu_gpioreg_level_mask(struct mvebu_gpio_chip *mvchip)
{
	int cpu;

	switch (mvchip->soc_variant) {
	case MVEBU_GPIO_SOC_VARIANT_ORION:
		return mvchip->membase + GPIO_LEVEL_MASK_OFF;
	case MVEBU_GPIO_SOC_VARIANT_MV78200:
		cpu = smp_processor_id();
		return mvchip->membase + GPIO_LEVEL_MASK_MV78200_OFF(cpu);
	case MVEBU_GPIO_SOC_VARIANT_ARMADAXP:
		cpu = smp_processor_id();
		return mvchip->percpu_membase + GPIO_LEVEL_MASK_ARMADAXP_OFF(cpu);
	default:
		BUG();
	}
}

/*
 * Functions implementing the gpio_chip methods
 */

static int mvebu_gpio_request(struct gpio_chip *chip, unsigned pin)
{
	return pinctrl_request_gpio(chip->base + pin);
}

static void mvebu_gpio_free(struct gpio_chip *chip, unsigned pin)
{
	pinctrl_free_gpio(chip->base + pin);
}

static void mvebu_gpio_set(struct gpio_chip *chip, unsigned pin, int value)
{
	struct mvebu_gpio_chip *mvchip =
		container_of(chip, struct mvebu_gpio_chip, chip);
	unsigned long flags;
	u32 u;

	spin_lock_irqsave(&mvchip->lock, flags);
	u = readl_relaxed(mvebu_gpioreg_out(mvchip));
	if (value)
		u |= 1 << pin;
	else
		u &= ~(1 << pin);
	writel_relaxed(u, mvebu_gpioreg_out(mvchip));
	spin_unlock_irqrestore(&mvchip->lock, flags);
}

static int mvebu_gpio_get(struct gpio_chip *chip, unsigned pin)
{
	struct mvebu_gpio_chip *mvchip =
		container_of(chip, struct mvebu_gpio_chip, chip);
	u32 u;

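	/*
	 * A set bit in GPIO_IO_CONF means the line is an input: report
	 * DATA_IN with the polarity inversion undone. For outputs,
	 * report the value latched in GPIO_OUT instead.
	 */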
	if (readl_relaxed(mvebu_gpioreg_io_conf(mvchip)) & (1 << pin)) {
		u = readl_relaxed(mvebu_gpioreg_data_in(mvchip)) ^
			readl_relaxed(mvebu_gpioreg_in_pol(mvchip));
	} else {
		u = readl_relaxed(mvebu_gpioreg_out(mvchip));
	}

	return (u >> pin) & 1;
}

static int mvebu_gpio_direction_input(struct gpio_chip *chip, unsigned pin)
{
	struct mvebu_gpio_chip *mvchip =
		container_of(chip, struct mvebu_gpio_chip, chip);
	unsigned long flags;
	int ret;
	u32 u;

	/*
	 * Check with the pinctrl driver whether this pin is usable as
	 * an input GPIO.
	 */
	ret = pinctrl_gpio_direction_input(chip->base + pin);
	if (ret)
		return ret;

	spin_lock_irqsave(&mvchip->lock, flags);
	u = readl_relaxed(mvebu_gpioreg_io_conf(mvchip));
	u |= 1 << pin;
	writel_relaxed(u, mvebu_gpioreg_io_conf(mvchip));
	spin_unlock_irqrestore(&mvchip->lock, flags);

	return 0;
}

static int mvebu_gpio_direction_output(struct gpio_chip *chip, unsigned pin,
				       int value)
{
	struct mvebu_gpio_chip *mvchip =
		container_of(chip, struct mvebu_gpio_chip, chip);
	unsigned long flags;
	int ret;
	u32 u;

	/*
	 * Check with the pinctrl driver whether this pin is usable as
	 * an output GPIO.
	 */
	ret = pinctrl_gpio_direction_output(chip->base + pin);
	if (ret)
		return ret;

	/* Drive the requested value before actually enabling the output */
	mvebu_gpio_set(chip, pin, value);

	spin_lock_irqsave(&mvchip->lock, flags);
	u = readl_relaxed(mvebu_gpioreg_io_conf(mvchip));
	u &= ~(1 << pin);
	writel_relaxed(u, mvebu_gpioreg_io_conf(mvchip));
	spin_unlock_irqrestore(&mvchip->lock, flags);

	return 0;
}

static int mvebu_gpio_to_irq(struct gpio_chip *chip, unsigned pin)
{
	struct mvebu_gpio_chip *mvchip =
		container_of(chip, struct mvebu_gpio_chip, chip);
	return irq_create_mapping(mvchip->domain, pin);
}

/*
 * Functions implementing the irq_chip methods
 */
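/*
 * The edge cause register is acknowledged by writing the complemented
 * mask: the bit being acked is written as 0, all other bits are
 * written as 1 and left untouched.
 */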
static void mvebu_gpio_irq_ack(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct mvebu_gpio_chip *mvchip = gc->private;
	u32 mask = ~(1 << (d->irq - gc->irq_base));

	irq_gc_lock(gc);
	writel_relaxed(mask, mvebu_gpioreg_edge_cause(mvchip));
	irq_gc_unlock(gc);
}

static void mvebu_gpio_edge_irq_mask(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct mvebu_gpio_chip *mvchip = gc->private;
	u32 mask = 1 << (d->irq - gc->irq_base);

	irq_gc_lock(gc);
	gc->mask_cache &= ~mask;
	writel_relaxed(gc->mask_cache, mvebu_gpioreg_edge_mask(mvchip));
	irq_gc_unlock(gc);
}

static void mvebu_gpio_edge_irq_unmask(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct mvebu_gpio_chip *mvchip = gc->private;
	u32 mask = 1 << (d->irq - gc->irq_base);

	irq_gc_lock(gc);
	gc->mask_cache |= mask;
	writel_relaxed(gc->mask_cache, mvebu_gpioreg_edge_mask(mvchip));
	irq_gc_unlock(gc);
}

static void mvebu_gpio_level_irq_mask(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct mvebu_gpio_chip *mvchip = gc->private;
	u32 mask = 1 << (d->irq - gc->irq_base);

	irq_gc_lock(gc);
	gc->mask_cache &= ~mask;
	writel_relaxed(gc->mask_cache, mvebu_gpioreg_level_mask(mvchip));
	irq_gc_unlock(gc);
}

static void mvebu_gpio_level_irq_unmask(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct mvebu_gpio_chip *mvchip = gc->private;
	u32 mask = 1 << (d->irq - gc->irq_base);

	irq_gc_lock(gc);
	gc->mask_cache |= mask;
	writel_relaxed(gc->mask_cache, mvebu_gpioreg_level_mask(mvchip));
	irq_gc_unlock(gc);
}

/*****************************************************************************
 * MVEBU GPIO IRQ
 *
 * GPIO_IN_POL register controls whether GPIO_DATA_IN will hold the same
 * value of the line or the opposite value.
 *
 * Level IRQ handlers: DATA_IN is used directly as cause register.
 *                     Interrupts are masked by LEVEL_MASK registers.
 * Edge IRQ handlers:  Changes in DATA_IN are latched in EDGE_CAUSE.
 *                     Interrupts are masked by EDGE_MASK registers.
 * Both-edge handlers: Similar to regular Edge handlers, but also swap
 *                     the polarity to catch the next line transition.
 *                     This is inherently racy and might not work
 *                     correctly in some use cases.
 *
 * Every eight GPIO lines are grouped (OR'ed) before going up to the
 * main cause register.
 *
 *                    EDGE  cause    mask
 *        data-in   /--------| |-----| |----\
 *     -----| |-----                         ---- to main cause reg
 *           X      \----------------| |----/
 *        polarity    LEVEL          mask
 *
 ****************************************************************************/

static int mvebu_gpio_irq_set_type(struct irq_data *d, unsigned int type)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);
	struct mvebu_gpio_chip *mvchip = gc->private;
	int pin;
	u32 u;

	pin = d->hwirq;

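	/*
	 * Only lines configured as input (GPIO_IO_CONF bit set) can be
	 * used as interrupt sources.
	 */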
	u = readl_relaxed(mvebu_gpioreg_io_conf(mvchip)) & (1 << pin);
	if (!u)
		return -EINVAL;

	type &= IRQ_TYPE_SENSE_MASK;
	if (type == IRQ_TYPE_NONE)
		return -EINVAL;

	/* Check if we need to change chip and handler */
	if (!(ct->type & type))
		if (irq_setup_alt_chip(d, type))
			return -EINVAL;

	/*
	 * Configure interrupt polarity.
	 */
	switch (type) {
	case IRQ_TYPE_EDGE_RISING:
	case IRQ_TYPE_LEVEL_HIGH:
		u = readl_relaxed(mvebu_gpioreg_in_pol(mvchip));
		u &= ~(1 << pin);
		writel_relaxed(u, mvebu_gpioreg_in_pol(mvchip));
		break;
	case IRQ_TYPE_EDGE_FALLING:
	case IRQ_TYPE_LEVEL_LOW:
		u = readl_relaxed(mvebu_gpioreg_in_pol(mvchip));
		u |= 1 << pin;
		writel_relaxed(u, mvebu_gpioreg_in_pol(mvchip));
		break;
	case IRQ_TYPE_EDGE_BOTH: {
		u32 v;

		v = readl_relaxed(mvebu_gpioreg_in_pol(mvchip)) ^
			readl_relaxed(mvebu_gpioreg_data_in(mvchip));

		/*
		 * Set the initial polarity based on the current input level.
		 */
		u = readl_relaxed(mvebu_gpioreg_in_pol(mvchip));
		if (v & (1 << pin))
			u |= 1 << pin;		/* falling */
		else
			u &= ~(1 << pin);	/* rising */
		writel_relaxed(u, mvebu_gpioreg_in_pol(mvchip));
		break;
	}
	}
	return 0;
}

static void mvebu_gpio_irq_handler(unsigned int irq, struct irq_desc *desc)
{
	struct mvebu_gpio_chip *mvchip = irq_get_handler_data(irq);
	u32 cause, type;
	int i;

	if (mvchip == NULL)
		return;

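	/*
	 * Build the cause word from both sources: DATA_IN filtered by
	 * the level mask for level interrupts, and the latched
	 * EDGE_CAUSE bits filtered by the edge mask for edge interrupts.
	 */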
	cause = readl_relaxed(mvebu_gpioreg_data_in(mvchip)) &
		readl_relaxed(mvebu_gpioreg_level_mask(mvchip));
	cause |= readl_relaxed(mvebu_gpioreg_edge_cause(mvchip)) &
		readl_relaxed(mvebu_gpioreg_edge_mask(mvchip));

	for (i = 0; i < mvchip->chip.ngpio; i++) {
		int irq;

		irq = mvchip->irqbase + i;

		if (!(cause & (1 << i)))
			continue;

		type = irqd_get_trigger_type(irq_get_irq_data(irq));
		if ((type & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) {
			/* Swap polarity (race with GPIO line) */
			u32 polarity;

			polarity = readl_relaxed(mvebu_gpioreg_in_pol(mvchip));
			polarity ^= 1 << i;
			writel_relaxed(polarity, mvebu_gpioreg_in_pol(mvchip));
		}
		generic_handle_irq(irq);
	}
}

static struct platform_device_id mvebu_gpio_ids[] = {
	{
		.name = "orion-gpio",
	}, {
		.name = "mv78200-gpio",
	}, {
		.name = "armadaxp-gpio",
	}, {
		/* sentinel */
	},
};
MODULE_DEVICE_TABLE(platform, mvebu_gpio_ids);

static struct of_device_id mvebu_gpio_of_match[] __devinitdata = {
	{
		.compatible = "marvell,orion-gpio",
		.data       = (void *) MVEBU_GPIO_SOC_VARIANT_ORION,
	},
	{
		.compatible = "marvell,mv78200-gpio",
		.data       = (void *) MVEBU_GPIO_SOC_VARIANT_MV78200,
	},
	{
		.compatible = "marvell,armadaxp-gpio",
		.data       = (void *) MVEBU_GPIO_SOC_VARIANT_ARMADAXP,
	},
	{
		/* sentinel */
	},
};
MODULE_DEVICE_TABLE(of, mvebu_gpio_of_match);

static int __devinit mvebu_gpio_probe(struct platform_device *pdev)
{
	struct mvebu_gpio_chip *mvchip;
	const struct of_device_id *match;
	struct device_node *np = pdev->dev.of_node;
	struct resource *res;
	struct irq_chip_generic *gc;
	struct irq_chip_type *ct;
	unsigned int ngpios;
	int soc_variant;
	int i, cpu, id;

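	/* Default to the basic Orion register layout when there is no DT match */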
	match = of_match_device(mvebu_gpio_of_match, &pdev->dev);
	if (match)
		soc_variant = (int) match->data;
	else
		soc_variant = MVEBU_GPIO_SOC_VARIANT_ORION;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "Cannot get memory resource\n");
		return -ENODEV;
	}

	mvchip = devm_kzalloc(&pdev->dev, sizeof(struct mvebu_gpio_chip),
			      GFP_KERNEL);
	if (!mvchip) {
		dev_err(&pdev->dev, "Cannot allocate memory\n");
		return -ENOMEM;
	}

	if (of_property_read_u32(pdev->dev.of_node, "ngpios", &ngpios)) {
		dev_err(&pdev->dev, "Missing ngpios OF property\n");
		return -ENODEV;
	}

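	/*
	 * The "gpio" alias gives this bank's position in the global
	 * GPIO number space; each bank spans MVEBU_MAX_GPIO_PER_BANK
	 * numbers.
	 */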
	id = of_alias_get_id(pdev->dev.of_node, "gpio");
	if (id < 0) {
		dev_err(&pdev->dev, "Couldn't get OF id\n");
		return id;
	}

	mvchip->soc_variant = soc_variant;
	mvchip->chip.label = dev_name(&pdev->dev);
	mvchip->chip.dev = &pdev->dev;
	mvchip->chip.request = mvebu_gpio_request;
	mvchip->chip.free = mvebu_gpio_free;
	mvchip->chip.direction_input = mvebu_gpio_direction_input;
	mvchip->chip.get = mvebu_gpio_get;
	mvchip->chip.direction_output = mvebu_gpio_direction_output;
	mvchip->chip.set = mvebu_gpio_set;
	mvchip->chip.to_irq = mvebu_gpio_to_irq;
	mvchip->chip.base = id * MVEBU_MAX_GPIO_PER_BANK;
	mvchip->chip.ngpio = ngpios;
	mvchip->chip.can_sleep = 0;
#ifdef CONFIG_OF
	mvchip->chip.of_node = np;
#endif

	spin_lock_init(&mvchip->lock);
	mvchip->membase = devm_request_and_ioremap(&pdev->dev, res);
	if (!mvchip->membase) {
		dev_err(&pdev->dev, "Cannot ioremap\n");
		return -ENOMEM;
	}

	/*
	 * The Armada XP has a second range of registers for the
	 * per-CPU registers.
	 */
	if (soc_variant == MVEBU_GPIO_SOC_VARIANT_ARMADAXP) {
		res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
		if (!res) {
			dev_err(&pdev->dev, "Cannot get memory resource\n");
			return -ENODEV;
		}

		mvchip->percpu_membase = devm_request_and_ioremap(&pdev->dev, res);
		if (!mvchip->percpu_membase) {
			dev_err(&pdev->dev, "Cannot ioremap\n");
			return -ENOMEM;
		}
	}

	/*
	 * Mask and clear GPIO interrupts.
	 */
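	/*
	 * The loops below assume the maximum CPU count of each SMP
	 * variant: two cores on the MV78200, up to four on Armada XP.
	 */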
	switch (soc_variant) {
	case MVEBU_GPIO_SOC_VARIANT_ORION:
		writel_relaxed(0, mvchip->membase + GPIO_EDGE_CAUSE_OFF);
		writel_relaxed(0, mvchip->membase + GPIO_EDGE_MASK_OFF);
		writel_relaxed(0, mvchip->membase + GPIO_LEVEL_MASK_OFF);
		break;
	case MVEBU_GPIO_SOC_VARIANT_MV78200:
		writel_relaxed(0, mvchip->membase + GPIO_EDGE_CAUSE_OFF);
		for (cpu = 0; cpu < 2; cpu++) {
			writel_relaxed(0, mvchip->membase +
				       GPIO_EDGE_MASK_MV78200_OFF(cpu));
			writel_relaxed(0, mvchip->membase +
				       GPIO_LEVEL_MASK_MV78200_OFF(cpu));
		}
		break;
	case MVEBU_GPIO_SOC_VARIANT_ARMADAXP:
		writel_relaxed(0, mvchip->membase + GPIO_EDGE_CAUSE_OFF);
		writel_relaxed(0, mvchip->membase + GPIO_EDGE_MASK_OFF);
		writel_relaxed(0, mvchip->membase + GPIO_LEVEL_MASK_OFF);
		for (cpu = 0; cpu < 4; cpu++) {
			writel_relaxed(0, mvchip->percpu_membase +
				       GPIO_EDGE_CAUSE_ARMADAXP_OFF(cpu));
			writel_relaxed(0, mvchip->percpu_membase +
				       GPIO_EDGE_MASK_ARMADAXP_OFF(cpu));
			writel_relaxed(0, mvchip->percpu_membase +
				       GPIO_LEVEL_MASK_ARMADAXP_OFF(cpu));
		}
		break;
	default:
		BUG();
	}

	gpiochip_add(&mvchip->chip);

	/* Some gpio controllers do not provide irq support */
	if (!of_irq_count(np))
		return 0;

	/*
	 * Setup the interrupt handlers. Each chip can have up to 4
	 * interrupt handlers, with each handler dealing with 8 GPIO
	 * pins.
	 */
	for (i = 0; i < 4; i++) {
		int irq = platform_get_irq(pdev, i);

		if (irq < 0)
			continue;
		irq_set_handler_data(irq, mvchip);
		irq_set_chained_handler(irq, mvebu_gpio_irq_handler);
	}

	mvchip->irqbase = irq_alloc_descs(-1, 0, ngpios, -1);
	if (mvchip->irqbase < 0) {
		dev_err(&pdev->dev, "no irqs\n");
		return -ENOMEM;
	}

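	/*
	 * Two irq_chip_type slots are allocated: index 0 handles level
	 * interrupts (handle_level_irq), index 1 handles edge
	 * interrupts with its own ack callback (handle_edge_irq).
	 */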
	gc = irq_alloc_generic_chip("mvebu_gpio_irq", 2, mvchip->irqbase,
				    mvchip->membase, handle_level_irq);
	if (!gc) {
		dev_err(&pdev->dev, "Cannot allocate generic irq_chip\n");
		return -ENOMEM;
	}

	gc->private = mvchip;
	ct = &gc->chip_types[0];
	ct->type = IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW;
	ct->chip.irq_mask = mvebu_gpio_level_irq_mask;
	ct->chip.irq_unmask = mvebu_gpio_level_irq_unmask;
	ct->chip.irq_set_type = mvebu_gpio_irq_set_type;
	ct->chip.name = mvchip->chip.label;

	ct = &gc->chip_types[1];
	ct->type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
	ct->chip.irq_ack = mvebu_gpio_irq_ack;
	ct->chip.irq_mask = mvebu_gpio_edge_irq_mask;
	ct->chip.irq_unmask = mvebu_gpio_edge_irq_unmask;
	ct->chip.irq_set_type = mvebu_gpio_irq_set_type;
	ct->handler = handle_edge_irq;
	ct->chip.name = mvchip->chip.label;

	irq_setup_generic_chip(gc, IRQ_MSK(ngpios), IRQ_GC_INIT_MASK_CACHE,
			       IRQ_NOREQUEST, IRQ_LEVEL | IRQ_NOPROBE);

	/* Setup irq domain on top of the generic chip. */
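	/*
	 * A legacy domain is used here because the interrupt
	 * descriptors were already allocated above with
	 * irq_alloc_descs().
	 */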
	mvchip->domain = irq_domain_add_legacy(np, mvchip->chip.ngpio,
					       mvchip->irqbase, 0,
					       &irq_domain_simple_ops,
					       mvchip);
	if (!mvchip->domain) {
		dev_err(&pdev->dev, "couldn't allocate irq domain %s (DT).\n",
			mvchip->chip.label);
		irq_remove_generic_chip(gc, IRQ_MSK(ngpios), IRQ_NOREQUEST,
					IRQ_LEVEL | IRQ_NOPROBE);
		kfree(gc);
		return -ENODEV;
	}

	return 0;
}

static struct platform_driver mvebu_gpio_driver = {
	.driver		= {
		.name	        = "mvebu-gpio",
		.owner	        = THIS_MODULE,
		.of_match_table = mvebu_gpio_of_match,
	},
	.probe		= mvebu_gpio_probe,
	.id_table	= mvebu_gpio_ids,
};

static int __init mvebu_gpio_init(void)
{
	return platform_driver_register(&mvebu_gpio_driver);
}
postcore_initcall(mvebu_gpio_init);