/*
 *  linux/arch/arm/plat-pxa/gpio.c
 *
 *  Generic PXA GPIO handling
 *
 *  Author:     Nicolas Pitre
 *  Created:    Jun 15, 2001
 *  Copyright:  MontaVista Software Inc.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */
#include <linux/gpio.h>
#include <linux/gpio-pxa.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/syscore_ops.h>
#include <linux/slab.h>

/*
 * We handle the GPIOs by banks, each bank covers up to 32 GPIOs with
 * one set of registers. The register offsets are organized below:
 *
 *           GPLR    GPDR    GPSR    GPCR    GRER    GFER    GEDR
 * BANK 0 - 0x0000  0x000C  0x0018  0x0024  0x0030  0x003C  0x0048
 * BANK 1 - 0x0004  0x0010  0x001C  0x0028  0x0034  0x0040  0x004C
 * BANK 2 - 0x0008  0x0014  0x0020  0x002C  0x0038  0x0044  0x0050
 *
 * BANK 3 - 0x0100  0x010C  0x0118  0x0124  0x0130  0x013C  0x0148
 * BANK 4 - 0x0104  0x0110  0x011C  0x0128  0x0134  0x0140  0x014C
 * BANK 5 - 0x0108  0x0114  0x0120  0x012C  0x0138  0x0144  0x0150
 *
 * NOTE:
 *   BANK 3 is only available on PXA27x and later processors.
 *   BANK 4 and 5 are only available on PXA935.
 */

#define GPLR_OFFSET     0x00
#define GPDR_OFFSET     0x0C
#define GPSR_OFFSET     0x18
#define GPCR_OFFSET     0x24
#define GRER_OFFSET     0x30
#define GFER_OFFSET     0x3C
#define GEDR_OFFSET     0x48
#define GAFR_OFFSET     0x54

#define BANK_OFF(n)     (((n) < 3) ? (n) << 2 : 0x100 + (((n) - 3) << 2))
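
/*
 * For illustration only (nothing below uses this snippet): with the table
 * and macros above, BANK_OFF(1) evaluates to 0x0004 and BANK_OFF(4) to
 * 0x0104, so e.g. the GPDR register of bank 4 would be read through the
 * gpio_reg_base mapping set up by the probe routine further down as
 *
 *      gpdr = readl_relaxed(gpio_reg_base + BANK_OFF(4) + GPDR_OFFSET);
 *
 * i.e. from offset 0x0104 + 0x0C = 0x0110, matching the BANK 4 row above.
 */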

int pxa_last_gpio;

struct pxa_gpio_chip {
        struct gpio_chip chip;
        void __iomem *regbase;
        char label[10];

        unsigned long irq_mask;
        unsigned long irq_edge_rise;
        unsigned long irq_edge_fall;

#ifdef CONFIG_PM
        unsigned long saved_gplr;
        unsigned long saved_gpdr;
        unsigned long saved_grer;
        unsigned long saved_gfer;
#endif
};

enum {
        PXA25X_GPIO = 0,
        PXA26X_GPIO,
        PXA27X_GPIO,
        PXA3XX_GPIO,
        PXA93X_GPIO,
        MMP_GPIO = 0x10,
        MMP2_GPIO,
};

static DEFINE_SPINLOCK(gpio_lock);
static struct pxa_gpio_chip *pxa_gpio_chips;
static int gpio_type;
static void __iomem *gpio_reg_base;

#define for_each_gpio_chip(i, c)                        \
        for (i = 0, c = &pxa_gpio_chips[0]; i <= pxa_last_gpio; i += 32, c++)

static inline void __iomem *gpio_chip_base(struct gpio_chip *c)
{
        return container_of(c, struct pxa_gpio_chip, chip)->regbase;
}

static inline struct pxa_gpio_chip *gpio_to_pxachip(unsigned gpio)
{
        return &pxa_gpio_chips[gpio_to_bank(gpio)];
}

static inline int gpio_is_pxa_type(int type)
{
        return (type & MMP_GPIO) == 0;
}

static inline int gpio_is_mmp_type(int type)
{
        return (type & MMP_GPIO) != 0;
}

/* GPIO86/87/88/89 on PXA26x have their direction bits in PXA_GPDR(2)
 * inverted, as well as their Alternate Function value being '1' for GPIO
 * in GAFRx.
 */
static inline int __gpio_is_inverted(int gpio)
{
        if ((gpio_type == PXA26X_GPIO) && (gpio > 85))
                return 1;
        return 0;
}

/*
 * On PXA25x and PXA27x, GAFRx and GPDRx together decide the alternate
 * function of a GPIO, and GPDRx cannot be altered once configured. It
 * is attributed as "occupied" here (I know this terminology isn't
 * accurate, you are welcome to propose a better one :-)
 */
static inline int __gpio_is_occupied(unsigned gpio)
{
        struct pxa_gpio_chip *pxachip;
        void __iomem *base;
        unsigned long gafr = 0, gpdr = 0;
        int ret, af = 0, dir = 0;

        pxachip = gpio_to_pxachip(gpio);
        base = gpio_chip_base(&pxachip->chip);
        gpdr = readl_relaxed(base + GPDR_OFFSET);

        switch (gpio_type) {
        case PXA25X_GPIO:
        case PXA26X_GPIO:
        case PXA27X_GPIO:
                gafr = readl_relaxed(base + GAFR_OFFSET);
                af = (gafr >> ((gpio & 0xf) * 2)) & 0x3;
                dir = gpdr & GPIO_bit(gpio);

                if (__gpio_is_inverted(gpio))
                        ret = (af != 1) || (dir == 0);
                else
                        ret = (af != 0) || (dir != 0);
                break;
        default:
                ret = gpdr & GPIO_bit(gpio);
                break;
        }
        return ret;
}

#ifdef CONFIG_ARCH_PXA
static inline int __pxa_gpio_to_irq(int gpio)
{
        if (gpio_is_pxa_type(gpio_type))
                return PXA_GPIO_TO_IRQ(gpio);
        return -1;
}

static inline int __pxa_irq_to_gpio(int irq)
{
        if (gpio_is_pxa_type(gpio_type))
                return irq - PXA_GPIO_TO_IRQ(0);
        return -1;
}
#else
static inline int __pxa_gpio_to_irq(int gpio) { return -1; }
static inline int __pxa_irq_to_gpio(int irq) { return -1; }
#endif

#ifdef CONFIG_ARCH_MMP
static inline int __mmp_gpio_to_irq(int gpio)
{
        if (gpio_is_mmp_type(gpio_type))
                return MMP_GPIO_TO_IRQ(gpio);
        return -1;
}

static inline int __mmp_irq_to_gpio(int irq)
{
        if (gpio_is_mmp_type(gpio_type))
                return irq - MMP_GPIO_TO_IRQ(0);
        return -1;
}
#else
static inline int __mmp_gpio_to_irq(int gpio) { return -1; }
static inline int __mmp_irq_to_gpio(int irq) { return -1; }
#endif

static int pxa_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
        int gpio, ret;

        gpio = chip->base + offset;
        ret = __pxa_gpio_to_irq(gpio);
        if (ret >= 0)
                return ret;
        return __mmp_gpio_to_irq(gpio);
}

int pxa_irq_to_gpio(int irq)
{
        int ret;

        ret = __pxa_irq_to_gpio(irq);
        if (ret >= 0)
                return ret;
        return __mmp_irq_to_gpio(irq);
}

static int pxa_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
        void __iomem *base = gpio_chip_base(chip);
        uint32_t value, mask = 1 << offset;
        unsigned long flags;

        spin_lock_irqsave(&gpio_lock, flags);

        value = readl_relaxed(base + GPDR_OFFSET);
        if (__gpio_is_inverted(chip->base + offset))
                value |= mask;
        else
                value &= ~mask;
        writel_relaxed(value, base + GPDR_OFFSET);

        spin_unlock_irqrestore(&gpio_lock, flags);
        return 0;
}

static int pxa_gpio_direction_output(struct gpio_chip *chip,
                                     unsigned offset, int value)
{
        void __iomem *base = gpio_chip_base(chip);
        uint32_t tmp, mask = 1 << offset;
        unsigned long flags;

        /* set the requested level first, then switch the pin to output */
        writel_relaxed(mask, base + (value ? GPSR_OFFSET : GPCR_OFFSET));

        spin_lock_irqsave(&gpio_lock, flags);

        tmp = readl_relaxed(base + GPDR_OFFSET);
        if (__gpio_is_inverted(chip->base + offset))
                tmp &= ~mask;
        else
                tmp |= mask;
        writel_relaxed(tmp, base + GPDR_OFFSET);

        spin_unlock_irqrestore(&gpio_lock, flags);
        return 0;
}

static int pxa_gpio_get(struct gpio_chip *chip, unsigned offset)
{
        return readl_relaxed(gpio_chip_base(chip) + GPLR_OFFSET) & (1 << offset);
}

static void pxa_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
        writel_relaxed(1 << offset, gpio_chip_base(chip) +
                                (value ? GPSR_OFFSET : GPCR_OFFSET));
}
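
/*
 * Illustration only, not part of this driver: consumers normally reach the
 * callbacks above through the generic gpiolib API instead of calling them
 * directly. A board file might do something like this (the GPIO number and
 * label are made up for the example):
 *
 *      if (gpio_request(42, "example") == 0) {
 *              gpio_direction_output(42, 1);   // ends up in pxa_gpio_direction_output()
 *              gpio_set_value(42, 0);          // ends up in pxa_gpio_set()
 *              gpio_free(42);
 *      }
 */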

static int __devinit pxa_init_gpio_chip(int gpio_end)
{
        int i, gpio, nbanks = gpio_to_bank(gpio_end) + 1;
        struct pxa_gpio_chip *chips;

        chips = kzalloc(nbanks * sizeof(struct pxa_gpio_chip), GFP_KERNEL);
        if (chips == NULL) {
                pr_err("%s: failed to allocate GPIO chips\n", __func__);
                return -ENOMEM;
        }

        for (i = 0, gpio = 0; i < nbanks; i++, gpio += 32) {
                struct gpio_chip *c = &chips[i].chip;

                sprintf(chips[i].label, "gpio-%d", i);
                chips[i].regbase = gpio_reg_base + BANK_OFF(i);

                c->base  = gpio;
                c->label = chips[i].label;

                c->direction_input  = pxa_gpio_direction_input;
                c->direction_output = pxa_gpio_direction_output;
                c->get    = pxa_gpio_get;
                c->set    = pxa_gpio_set;
                c->to_irq = pxa_gpio_to_irq;

                /* number of GPIOs on last bank may be less than 32 */
                c->ngpio = (gpio + 31 > gpio_end) ? (gpio_end - gpio + 1) : 32;
                gpiochip_add(c);
        }
        pxa_gpio_chips = chips;
        return 0;
}

/* Update the GRERx and GFERx edge detection bits only for those GPIOs
 * whose bits are set in c->irq_mask; masked GPIOs keep their current
 * register values.
 */
static inline void update_edge_detect(struct pxa_gpio_chip *c)
{
        uint32_t grer, gfer;

        grer = readl_relaxed(c->regbase + GRER_OFFSET) & ~c->irq_mask;
        gfer = readl_relaxed(c->regbase + GFER_OFFSET) & ~c->irq_mask;
        grer |= c->irq_edge_rise & c->irq_mask;
        gfer |= c->irq_edge_fall & c->irq_mask;
        writel_relaxed(grer, c->regbase + GRER_OFFSET);
        writel_relaxed(gfer, c->regbase + GFER_OFFSET);
}

static int pxa_gpio_irq_type(struct irq_data *d, unsigned int type)
{
        struct pxa_gpio_chip *c;
        int gpio = pxa_irq_to_gpio(d->irq);
        unsigned long gpdr, mask = GPIO_bit(gpio);

        c = gpio_to_pxachip(gpio);

        if (type == IRQ_TYPE_PROBE) {
                /* Don't mess with enabled GPIOs using preconfigured edges or
                 * GPIOs set to alternate function or to output during probe
                 */
                if ((c->irq_edge_rise | c->irq_edge_fall) & GPIO_bit(gpio))
                        return 0;

                if (__gpio_is_occupied(gpio))
                        return 0;

                type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
        }

        gpdr = readl_relaxed(c->regbase + GPDR_OFFSET);

        if (__gpio_is_inverted(gpio))
                writel_relaxed(gpdr | mask, c->regbase + GPDR_OFFSET);
        else
                writel_relaxed(gpdr & ~mask, c->regbase + GPDR_OFFSET);

        if (type & IRQ_TYPE_EDGE_RISING)
                c->irq_edge_rise |= mask;
        else
                c->irq_edge_rise &= ~mask;

        if (type & IRQ_TYPE_EDGE_FALLING)
                c->irq_edge_fall |= mask;
        else
                c->irq_edge_fall &= ~mask;

        update_edge_detect(c);

        pr_debug("%s: IRQ%d (GPIO%d) - edge%s%s\n", __func__, d->irq, gpio,
                ((type & IRQ_TYPE_EDGE_RISING)  ? " rising"  : ""),
                ((type & IRQ_TYPE_EDGE_FALLING) ? " falling" : ""));
        return 0;
}

/* Demux the chained GPIO interrupt: dispatch every pending, unmasked edge
 * event to its per-GPIO IRQ, and keep looping until no more events are
 * pending.
 */
static void pxa_gpio_demux_handler(unsigned int irq, struct irq_desc *desc)
{
        struct pxa_gpio_chip *c;
        int loop, gpio, gpio_base, n;
        unsigned long gedr;

        do {
                loop = 0;
                for_each_gpio_chip(gpio, c) {
                        gpio_base = c->chip.base;

                        gedr = readl_relaxed(c->regbase + GEDR_OFFSET);
                        gedr = gedr & c->irq_mask;
                        writel_relaxed(gedr, c->regbase + GEDR_OFFSET);

                        n = find_first_bit(&gedr, BITS_PER_LONG);
                        while (n < BITS_PER_LONG) {
                                loop = 1;

                                generic_handle_irq(gpio_to_irq(gpio_base + n));
                                n = find_next_bit(&gedr, BITS_PER_LONG, n + 1);
                        }
                }
        } while (loop);
}

static void pxa_ack_muxed_gpio(struct irq_data *d)
{
        int gpio = pxa_irq_to_gpio(d->irq);
        struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);

        writel_relaxed(GPIO_bit(gpio), c->regbase + GEDR_OFFSET);
}

static void pxa_mask_muxed_gpio(struct irq_data *d)
{
        int gpio = pxa_irq_to_gpio(d->irq);
        struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);
        uint32_t grer, gfer;

        c->irq_mask &= ~GPIO_bit(gpio);

        grer = readl_relaxed(c->regbase + GRER_OFFSET) & ~GPIO_bit(gpio);
        gfer = readl_relaxed(c->regbase + GFER_OFFSET) & ~GPIO_bit(gpio);
        writel_relaxed(grer, c->regbase + GRER_OFFSET);
        writel_relaxed(gfer, c->regbase + GFER_OFFSET);
}

static void pxa_unmask_muxed_gpio(struct irq_data *d)
{
        int gpio = pxa_irq_to_gpio(d->irq);
        struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);

        c->irq_mask |= GPIO_bit(gpio);
        update_edge_detect(c);
}

static struct irq_chip pxa_muxed_gpio_chip = {
        .name           = "GPIO",
        .irq_ack        = pxa_ack_muxed_gpio,
        .irq_mask       = pxa_mask_muxed_gpio,
        .irq_unmask     = pxa_unmask_muxed_gpio,
        .irq_set_type   = pxa_gpio_irq_type,
};

static int pxa_gpio_nums(void)
{
        int count = 0;

#ifdef CONFIG_ARCH_PXA
        if (cpu_is_pxa25x()) {
#ifdef CONFIG_CPU_PXA26x
                count = 89;
                gpio_type = PXA26X_GPIO;
#elif defined(CONFIG_PXA25x)
                count = 84;
                gpio_type = PXA25X_GPIO;
#endif /* CONFIG_CPU_PXA26x */
        } else if (cpu_is_pxa27x()) {
                count = 120;
                gpio_type = PXA27X_GPIO;
        } else if (cpu_is_pxa93x() || cpu_is_pxa95x()) {
                count = 191;
                gpio_type = PXA93X_GPIO;
        } else if (cpu_is_pxa3xx()) {
                count = 127;
                gpio_type = PXA3XX_GPIO;
        }
#endif /* CONFIG_ARCH_PXA */

#ifdef CONFIG_ARCH_MMP
        if (cpu_is_pxa168() || cpu_is_pxa910()) {
                count = 127;
                gpio_type = MMP_GPIO;
        } else if (cpu_is_mmp2()) {
                count = 191;
                gpio_type = MMP2_GPIO;
        }
#endif /* CONFIG_ARCH_MMP */
        return count;
}

static int __devinit pxa_gpio_probe(struct platform_device *pdev)
{
        struct pxa_gpio_chip *c;
        struct resource *res;
        int gpio, irq;
        int irq0 = 0, irq1 = 0, irq_mux, gpio_offset = 0;

        pxa_last_gpio = pxa_gpio_nums();
        if (!pxa_last_gpio)
                return -EINVAL;

        irq0 = platform_get_irq_byname(pdev, "gpio0");
        irq1 = platform_get_irq_byname(pdev, "gpio1");
        irq_mux = platform_get_irq_byname(pdev, "gpio_mux");
        if ((irq0 > 0 && irq1 <= 0) || (irq0 <= 0 && irq1 > 0)
                || (irq_mux <= 0))
                return -EINVAL;
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        if (!res)
                return -EINVAL;
        gpio_reg_base = ioremap(res->start, resource_size(res));
        if (!gpio_reg_base)
                return -EINVAL;

        /* GPIO0 and GPIO1 have dedicated IRQ lines; the remaining GPIOs
         * share the muxed "gpio_mux" interrupt, so per-GPIO IRQ setup
         * below starts at 2 in that case.
         */
        if (irq0 > 0)
                gpio_offset = 2;

        /* Initialize GPIO chips */
        pxa_init_gpio_chip(pxa_last_gpio);

        /* clear all GPIO edge detects */
        for_each_gpio_chip(gpio, c) {
                writel_relaxed(0, c->regbase + GFER_OFFSET);
                writel_relaxed(0, c->regbase + GRER_OFFSET);
                writel_relaxed(~0, c->regbase + GEDR_OFFSET);
        }

#ifdef CONFIG_ARCH_PXA
        irq = gpio_to_irq(0);
        irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip,
                                 handle_edge_irq);
        set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
        irq_set_chained_handler(IRQ_GPIO0, pxa_gpio_demux_handler);

        irq = gpio_to_irq(1);
        irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip,
                                 handle_edge_irq);
        set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
        irq_set_chained_handler(IRQ_GPIO1, pxa_gpio_demux_handler);
#endif

        for (irq = gpio_to_irq(gpio_offset);
                irq <= gpio_to_irq(pxa_last_gpio); irq++) {
                irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip,
                                         handle_edge_irq);
                set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
        }

        irq_set_chained_handler(irq_mux, pxa_gpio_demux_handler);
        return 0;
}

static struct platform_driver pxa_gpio_driver = {
        .probe          = pxa_gpio_probe,
        .driver         = {
                .name   = "pxa-gpio",
        },
};

static int __init pxa_gpio_init(void)
{
        return platform_driver_register(&pxa_gpio_driver);
}
postcore_initcall(pxa_gpio_init);

#ifdef CONFIG_PM
static int pxa_gpio_suspend(void)
{
        struct pxa_gpio_chip *c;
        int gpio;

        for_each_gpio_chip(gpio, c) {
                c->saved_gplr = readl_relaxed(c->regbase + GPLR_OFFSET);
                c->saved_gpdr = readl_relaxed(c->regbase + GPDR_OFFSET);
                c->saved_grer = readl_relaxed(c->regbase + GRER_OFFSET);
                c->saved_gfer = readl_relaxed(c->regbase + GFER_OFFSET);

                /* Clear GPIO transition detect bits */
                writel_relaxed(0xffffffff, c->regbase + GEDR_OFFSET);
        }
        return 0;
}

static void pxa_gpio_resume(void)
{
        struct pxa_gpio_chip *c;
        int gpio;

        for_each_gpio_chip(gpio, c) {
                /* restore level with set/clear */
                writel_relaxed(c->saved_gplr, c->regbase + GPSR_OFFSET);
                writel_relaxed(~c->saved_gplr, c->regbase + GPCR_OFFSET);

                writel_relaxed(c->saved_grer, c->regbase + GRER_OFFSET);
                writel_relaxed(c->saved_gfer, c->regbase + GFER_OFFSET);
                writel_relaxed(c->saved_gpdr, c->regbase + GPDR_OFFSET);
        }
}
#else
#define pxa_gpio_suspend        NULL
#define pxa_gpio_resume         NULL
#endif

struct syscore_ops pxa_gpio_syscore_ops = {
        .suspend        = pxa_gpio_suspend,
        .resume         = pxa_gpio_resume,
};

static int __init pxa_gpio_sysinit(void)
{
        register_syscore_ops(&pxa_gpio_syscore_ops);
        return 0;
}
postcore_initcall(pxa_gpio_sysinit);