1 /* 2 * Support functions for OMAP GPIO 3 * 4 * Copyright (C) 2003-2005 Nokia Corporation 5 * Written by Juha Yrjölä <juha.yrjola@nokia.com> 6 * 7 * Copyright (C) 2009 Texas Instruments 8 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com> 9 * 10 * This program is free software; you can redistribute it and/or modify 11 * it under the terms of the GNU General Public License version 2 as 12 * published by the Free Software Foundation. 13 */ 14 15 #include <linux/init.h> 16 #include <linux/module.h> 17 #include <linux/interrupt.h> 18 #include <linux/syscore_ops.h> 19 #include <linux/err.h> 20 #include <linux/clk.h> 21 #include <linux/io.h> 22 #include <linux/device.h> 23 #include <linux/pm_runtime.h> 24 #include <linux/pm.h> 25 #include <linux/of.h> 26 #include <linux/of_device.h> 27 #include <linux/irqdomain.h> 28 29 #include <mach/hardware.h> 30 #include <asm/irq.h> 31 #include <mach/irqs.h> 32 #include <asm/gpio.h> 33 #include <asm/mach/irq.h> 34 35 #define OFF_MODE 1 36 37 static LIST_HEAD(omap_gpio_list); 38 39 struct gpio_regs { 40 u32 irqenable1; 41 u32 irqenable2; 42 u32 wake_en; 43 u32 ctrl; 44 u32 oe; 45 u32 leveldetect0; 46 u32 leveldetect1; 47 u32 risingdetect; 48 u32 fallingdetect; 49 u32 dataout; 50 u32 debounce; 51 u32 debounce_en; 52 }; 53 54 struct gpio_bank { 55 struct list_head node; 56 void __iomem *base; 57 u16 irq; 58 int irq_base; 59 struct irq_domain *domain; 60 u32 non_wakeup_gpios; 61 u32 enabled_non_wakeup_gpios; 62 struct gpio_regs context; 63 u32 saved_datain; 64 u32 level_mask; 65 u32 toggle_mask; 66 spinlock_t lock; 67 struct gpio_chip chip; 68 struct clk *dbck; 69 u32 mod_usage; 70 u32 dbck_enable_mask; 71 bool dbck_enabled; 72 struct device *dev; 73 bool is_mpuio; 74 bool dbck_flag; 75 bool loses_context; 76 int stride; 77 u32 width; 78 int context_loss_count; 79 int power_mode; 80 bool workaround_enabled; 81 82 void (*set_dataout)(struct gpio_bank *bank, int gpio, int enable); 83 int (*get_context_loss_count)(struct 
device *dev); 84 85 struct omap_gpio_reg_offs *regs; 86 }; 87 88 #define GPIO_INDEX(bank, gpio) (gpio % bank->width) 89 #define GPIO_BIT(bank, gpio) (1 << GPIO_INDEX(bank, gpio)) 90 #define GPIO_MOD_CTRL_BIT BIT(0) 91 92 static int irq_to_gpio(struct gpio_bank *bank, unsigned int gpio_irq) 93 { 94 return gpio_irq - bank->irq_base + bank->chip.base; 95 } 96 97 static void _set_gpio_direction(struct gpio_bank *bank, int gpio, int is_input) 98 { 99 void __iomem *reg = bank->base; 100 u32 l; 101 102 reg += bank->regs->direction; 103 l = __raw_readl(reg); 104 if (is_input) 105 l |= 1 << gpio; 106 else 107 l &= ~(1 << gpio); 108 __raw_writel(l, reg); 109 bank->context.oe = l; 110 } 111 112 113 /* set data out value using dedicate set/clear register */ 114 static void _set_gpio_dataout_reg(struct gpio_bank *bank, int gpio, int enable) 115 { 116 void __iomem *reg = bank->base; 117 u32 l = GPIO_BIT(bank, gpio); 118 119 if (enable) { 120 reg += bank->regs->set_dataout; 121 bank->context.dataout |= l; 122 } else { 123 reg += bank->regs->clr_dataout; 124 bank->context.dataout &= ~l; 125 } 126 127 __raw_writel(l, reg); 128 } 129 130 /* set data out value using mask register */ 131 static void _set_gpio_dataout_mask(struct gpio_bank *bank, int gpio, int enable) 132 { 133 void __iomem *reg = bank->base + bank->regs->dataout; 134 u32 gpio_bit = GPIO_BIT(bank, gpio); 135 u32 l; 136 137 l = __raw_readl(reg); 138 if (enable) 139 l |= gpio_bit; 140 else 141 l &= ~gpio_bit; 142 __raw_writel(l, reg); 143 bank->context.dataout = l; 144 } 145 146 static int _get_gpio_datain(struct gpio_bank *bank, int offset) 147 { 148 void __iomem *reg = bank->base + bank->regs->datain; 149 150 return (__raw_readl(reg) & (1 << offset)) != 0; 151 } 152 153 static int _get_gpio_dataout(struct gpio_bank *bank, int offset) 154 { 155 void __iomem *reg = bank->base + bank->regs->dataout; 156 157 return (__raw_readl(reg) & (1 << offset)) != 0; 158 } 159 160 static inline void _gpio_rmw(void __iomem *base, u32 
reg, u32 mask, bool set) 161 { 162 int l = __raw_readl(base + reg); 163 164 if (set) 165 l |= mask; 166 else 167 l &= ~mask; 168 169 __raw_writel(l, base + reg); 170 } 171 172 static inline void _gpio_dbck_enable(struct gpio_bank *bank) 173 { 174 if (bank->dbck_enable_mask && !bank->dbck_enabled) { 175 clk_enable(bank->dbck); 176 bank->dbck_enabled = true; 177 178 __raw_writel(bank->dbck_enable_mask, 179 bank->base + bank->regs->debounce_en); 180 } 181 } 182 183 static inline void _gpio_dbck_disable(struct gpio_bank *bank) 184 { 185 if (bank->dbck_enable_mask && bank->dbck_enabled) { 186 /* 187 * Disable debounce before cutting it's clock. If debounce is 188 * enabled but the clock is not, GPIO module seems to be unable 189 * to detect events and generate interrupts at least on OMAP3. 190 */ 191 __raw_writel(0, bank->base + bank->regs->debounce_en); 192 193 clk_disable(bank->dbck); 194 bank->dbck_enabled = false; 195 } 196 } 197 198 /** 199 * _set_gpio_debounce - low level gpio debounce time 200 * @bank: the gpio bank we're acting upon 201 * @gpio: the gpio number on this @gpio 202 * @debounce: debounce time to use 203 * 204 * OMAP's debounce time is in 31us steps so we need 205 * to convert and round up to the closest unit. 
206 */ 207 static void _set_gpio_debounce(struct gpio_bank *bank, unsigned gpio, 208 unsigned debounce) 209 { 210 void __iomem *reg; 211 u32 val; 212 u32 l; 213 214 if (!bank->dbck_flag) 215 return; 216 217 if (debounce < 32) 218 debounce = 0x01; 219 else if (debounce > 7936) 220 debounce = 0xff; 221 else 222 debounce = (debounce / 0x1f) - 1; 223 224 l = GPIO_BIT(bank, gpio); 225 226 clk_enable(bank->dbck); 227 reg = bank->base + bank->regs->debounce; 228 __raw_writel(debounce, reg); 229 230 reg = bank->base + bank->regs->debounce_en; 231 val = __raw_readl(reg); 232 233 if (debounce) 234 val |= l; 235 else 236 val &= ~l; 237 bank->dbck_enable_mask = val; 238 239 __raw_writel(val, reg); 240 clk_disable(bank->dbck); 241 /* 242 * Enable debounce clock per module. 243 * This call is mandatory because in omap_gpio_request() when 244 * *_runtime_get_sync() is called, _gpio_dbck_enable() within 245 * runtime callbck fails to turn on dbck because dbck_enable_mask 246 * used within _gpio_dbck_enable() is still not initialized at 247 * that point. Therefore we have to enable dbck here. 
248 */ 249 _gpio_dbck_enable(bank); 250 if (bank->dbck_enable_mask) { 251 bank->context.debounce = debounce; 252 bank->context.debounce_en = val; 253 } 254 } 255 256 static inline void set_gpio_trigger(struct gpio_bank *bank, int gpio, 257 unsigned trigger) 258 { 259 void __iomem *base = bank->base; 260 u32 gpio_bit = 1 << gpio; 261 262 _gpio_rmw(base, bank->regs->leveldetect0, gpio_bit, 263 trigger & IRQ_TYPE_LEVEL_LOW); 264 _gpio_rmw(base, bank->regs->leveldetect1, gpio_bit, 265 trigger & IRQ_TYPE_LEVEL_HIGH); 266 _gpio_rmw(base, bank->regs->risingdetect, gpio_bit, 267 trigger & IRQ_TYPE_EDGE_RISING); 268 _gpio_rmw(base, bank->regs->fallingdetect, gpio_bit, 269 trigger & IRQ_TYPE_EDGE_FALLING); 270 271 bank->context.leveldetect0 = 272 __raw_readl(bank->base + bank->regs->leveldetect0); 273 bank->context.leveldetect1 = 274 __raw_readl(bank->base + bank->regs->leveldetect1); 275 bank->context.risingdetect = 276 __raw_readl(bank->base + bank->regs->risingdetect); 277 bank->context.fallingdetect = 278 __raw_readl(bank->base + bank->regs->fallingdetect); 279 280 if (likely(!(bank->non_wakeup_gpios & gpio_bit))) { 281 _gpio_rmw(base, bank->regs->wkup_en, gpio_bit, trigger != 0); 282 bank->context.wake_en = 283 __raw_readl(bank->base + bank->regs->wkup_en); 284 } 285 286 /* This part needs to be executed always for OMAP{34xx, 44xx} */ 287 if (!bank->regs->irqctrl) { 288 /* On omap24xx proceed only when valid GPIO bit is set */ 289 if (bank->non_wakeup_gpios) { 290 if (!(bank->non_wakeup_gpios & gpio_bit)) 291 goto exit; 292 } 293 294 /* 295 * Log the edge gpio and manually trigger the IRQ 296 * after resume if the input level changes 297 * to avoid irq lost during PER RET/OFF mode 298 * Applies for omap2 non-wakeup gpio and all omap3 gpios 299 */ 300 if (trigger & IRQ_TYPE_EDGE_BOTH) 301 bank->enabled_non_wakeup_gpios |= gpio_bit; 302 else 303 bank->enabled_non_wakeup_gpios &= ~gpio_bit; 304 } 305 306 exit: 307 bank->level_mask = 308 __raw_readl(bank->base + 
bank->regs->leveldetect0) | 309 __raw_readl(bank->base + bank->regs->leveldetect1); 310 } 311 312 #ifdef CONFIG_ARCH_OMAP1 313 /* 314 * This only applies to chips that can't do both rising and falling edge 315 * detection at once. For all other chips, this function is a noop. 316 */ 317 static void _toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio) 318 { 319 void __iomem *reg = bank->base; 320 u32 l = 0; 321 322 if (!bank->regs->irqctrl) 323 return; 324 325 reg += bank->regs->irqctrl; 326 327 l = __raw_readl(reg); 328 if ((l >> gpio) & 1) 329 l &= ~(1 << gpio); 330 else 331 l |= 1 << gpio; 332 333 __raw_writel(l, reg); 334 } 335 #else 336 static void _toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio) {} 337 #endif 338 339 static int _set_gpio_triggering(struct gpio_bank *bank, int gpio, 340 unsigned trigger) 341 { 342 void __iomem *reg = bank->base; 343 void __iomem *base = bank->base; 344 u32 l = 0; 345 346 if (bank->regs->leveldetect0 && bank->regs->wkup_en) { 347 set_gpio_trigger(bank, gpio, trigger); 348 } else if (bank->regs->irqctrl) { 349 reg += bank->regs->irqctrl; 350 351 l = __raw_readl(reg); 352 if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH) 353 bank->toggle_mask |= 1 << gpio; 354 if (trigger & IRQ_TYPE_EDGE_RISING) 355 l |= 1 << gpio; 356 else if (trigger & IRQ_TYPE_EDGE_FALLING) 357 l &= ~(1 << gpio); 358 else 359 return -EINVAL; 360 361 __raw_writel(l, reg); 362 } else if (bank->regs->edgectrl1) { 363 if (gpio & 0x08) 364 reg += bank->regs->edgectrl2; 365 else 366 reg += bank->regs->edgectrl1; 367 368 gpio &= 0x07; 369 l = __raw_readl(reg); 370 l &= ~(3 << (gpio << 1)); 371 if (trigger & IRQ_TYPE_EDGE_RISING) 372 l |= 2 << (gpio << 1); 373 if (trigger & IRQ_TYPE_EDGE_FALLING) 374 l |= 1 << (gpio << 1); 375 376 /* Enable wake-up during idle for dynamic tick */ 377 _gpio_rmw(base, bank->regs->wkup_en, 1 << gpio, trigger); 378 bank->context.wake_en = 379 __raw_readl(bank->base + bank->regs->wkup_en); 380 
__raw_writel(l, reg); 381 } 382 return 0; 383 } 384 385 static int gpio_irq_type(struct irq_data *d, unsigned type) 386 { 387 struct gpio_bank *bank = irq_data_get_irq_chip_data(d); 388 unsigned gpio; 389 int retval; 390 unsigned long flags; 391 392 if (!cpu_class_is_omap2() && d->irq > IH_MPUIO_BASE) 393 gpio = OMAP_MPUIO(d->irq - IH_MPUIO_BASE); 394 else 395 gpio = irq_to_gpio(bank, d->irq); 396 397 if (type & ~IRQ_TYPE_SENSE_MASK) 398 return -EINVAL; 399 400 if (!bank->regs->leveldetect0 && 401 (type & (IRQ_TYPE_LEVEL_LOW|IRQ_TYPE_LEVEL_HIGH))) 402 return -EINVAL; 403 404 spin_lock_irqsave(&bank->lock, flags); 405 retval = _set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), type); 406 spin_unlock_irqrestore(&bank->lock, flags); 407 408 if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) 409 __irq_set_handler_locked(d->irq, handle_level_irq); 410 else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)) 411 __irq_set_handler_locked(d->irq, handle_edge_irq); 412 413 return retval; 414 } 415 416 static void _clear_gpio_irqbank(struct gpio_bank *bank, int gpio_mask) 417 { 418 void __iomem *reg = bank->base; 419 420 reg += bank->regs->irqstatus; 421 __raw_writel(gpio_mask, reg); 422 423 /* Workaround for clearing DSP GPIO interrupts to allow retention */ 424 if (bank->regs->irqstatus2) { 425 reg = bank->base + bank->regs->irqstatus2; 426 __raw_writel(gpio_mask, reg); 427 } 428 429 /* Flush posted write for the irq status to avoid spurious interrupts */ 430 __raw_readl(reg); 431 } 432 433 static inline void _clear_gpio_irqstatus(struct gpio_bank *bank, int gpio) 434 { 435 _clear_gpio_irqbank(bank, GPIO_BIT(bank, gpio)); 436 } 437 438 static u32 _get_gpio_irqbank_mask(struct gpio_bank *bank) 439 { 440 void __iomem *reg = bank->base; 441 u32 l; 442 u32 mask = (1 << bank->width) - 1; 443 444 reg += bank->regs->irqenable; 445 l = __raw_readl(reg); 446 if (bank->regs->irqenable_inv) 447 l = ~l; 448 l &= mask; 449 return l; 450 } 451 452 static void 
_enable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask) 453 { 454 void __iomem *reg = bank->base; 455 u32 l; 456 457 if (bank->regs->set_irqenable) { 458 reg += bank->regs->set_irqenable; 459 l = gpio_mask; 460 bank->context.irqenable1 |= gpio_mask; 461 } else { 462 reg += bank->regs->irqenable; 463 l = __raw_readl(reg); 464 if (bank->regs->irqenable_inv) 465 l &= ~gpio_mask; 466 else 467 l |= gpio_mask; 468 bank->context.irqenable1 = l; 469 } 470 471 __raw_writel(l, reg); 472 } 473 474 static void _disable_gpio_irqbank(struct gpio_bank *bank, int gpio_mask) 475 { 476 void __iomem *reg = bank->base; 477 u32 l; 478 479 if (bank->regs->clr_irqenable) { 480 reg += bank->regs->clr_irqenable; 481 l = gpio_mask; 482 bank->context.irqenable1 &= ~gpio_mask; 483 } else { 484 reg += bank->regs->irqenable; 485 l = __raw_readl(reg); 486 if (bank->regs->irqenable_inv) 487 l |= gpio_mask; 488 else 489 l &= ~gpio_mask; 490 bank->context.irqenable1 = l; 491 } 492 493 __raw_writel(l, reg); 494 } 495 496 static inline void _set_gpio_irqenable(struct gpio_bank *bank, int gpio, int enable) 497 { 498 if (enable) 499 _enable_gpio_irqbank(bank, GPIO_BIT(bank, gpio)); 500 else 501 _disable_gpio_irqbank(bank, GPIO_BIT(bank, gpio)); 502 } 503 504 /* 505 * Note that ENAWAKEUP needs to be enabled in GPIO_SYSCONFIG register. 506 * 1510 does not seem to have a wake-up register. If JTAG is connected 507 * to the target, system will wake up always on GPIO events. While 508 * system is running all registered GPIO interrupts need to have wake-up 509 * enabled. When system is suspended, only selected GPIO interrupts need 510 * to have wake-up enabled. 
511 */ 512 static int _set_gpio_wakeup(struct gpio_bank *bank, int gpio, int enable) 513 { 514 u32 gpio_bit = GPIO_BIT(bank, gpio); 515 unsigned long flags; 516 517 if (bank->non_wakeup_gpios & gpio_bit) { 518 dev_err(bank->dev, 519 "Unable to modify wakeup on non-wakeup GPIO%d\n", gpio); 520 return -EINVAL; 521 } 522 523 spin_lock_irqsave(&bank->lock, flags); 524 if (enable) 525 bank->context.wake_en |= gpio_bit; 526 else 527 bank->context.wake_en &= ~gpio_bit; 528 529 __raw_writel(bank->context.wake_en, bank->base + bank->regs->wkup_en); 530 spin_unlock_irqrestore(&bank->lock, flags); 531 532 return 0; 533 } 534 535 static void _reset_gpio(struct gpio_bank *bank, int gpio) 536 { 537 _set_gpio_direction(bank, GPIO_INDEX(bank, gpio), 1); 538 _set_gpio_irqenable(bank, gpio, 0); 539 _clear_gpio_irqstatus(bank, gpio); 540 _set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), IRQ_TYPE_NONE); 541 } 542 543 /* Use disable_irq_wake() and enable_irq_wake() functions from drivers */ 544 static int gpio_wake_enable(struct irq_data *d, unsigned int enable) 545 { 546 struct gpio_bank *bank = irq_data_get_irq_chip_data(d); 547 unsigned int gpio = irq_to_gpio(bank, d->irq); 548 549 return _set_gpio_wakeup(bank, gpio, enable); 550 } 551 552 static int omap_gpio_request(struct gpio_chip *chip, unsigned offset) 553 { 554 struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip); 555 unsigned long flags; 556 557 /* 558 * If this is the first gpio_request for the bank, 559 * enable the bank module. 560 */ 561 if (!bank->mod_usage) 562 pm_runtime_get_sync(bank->dev); 563 564 spin_lock_irqsave(&bank->lock, flags); 565 /* Set trigger to none. You need to enable the desired trigger with 566 * request_irq() or set_irq_type(). 
567 */ 568 _set_gpio_triggering(bank, offset, IRQ_TYPE_NONE); 569 570 if (bank->regs->pinctrl) { 571 void __iomem *reg = bank->base + bank->regs->pinctrl; 572 573 /* Claim the pin for MPU */ 574 __raw_writel(__raw_readl(reg) | (1 << offset), reg); 575 } 576 577 if (bank->regs->ctrl && !bank->mod_usage) { 578 void __iomem *reg = bank->base + bank->regs->ctrl; 579 u32 ctrl; 580 581 ctrl = __raw_readl(reg); 582 /* Module is enabled, clocks are not gated */ 583 ctrl &= ~GPIO_MOD_CTRL_BIT; 584 __raw_writel(ctrl, reg); 585 bank->context.ctrl = ctrl; 586 } 587 588 bank->mod_usage |= 1 << offset; 589 590 spin_unlock_irqrestore(&bank->lock, flags); 591 592 return 0; 593 } 594 595 static void omap_gpio_free(struct gpio_chip *chip, unsigned offset) 596 { 597 struct gpio_bank *bank = container_of(chip, struct gpio_bank, chip); 598 void __iomem *base = bank->base; 599 unsigned long flags; 600 601 spin_lock_irqsave(&bank->lock, flags); 602 603 if (bank->regs->wkup_en) { 604 /* Disable wake-up during idle for dynamic tick */ 605 _gpio_rmw(base, bank->regs->wkup_en, 1 << offset, 0); 606 bank->context.wake_en = 607 __raw_readl(bank->base + bank->regs->wkup_en); 608 } 609 610 bank->mod_usage &= ~(1 << offset); 611 612 if (bank->regs->ctrl && !bank->mod_usage) { 613 void __iomem *reg = bank->base + bank->regs->ctrl; 614 u32 ctrl; 615 616 ctrl = __raw_readl(reg); 617 /* Module is disabled, clocks are gated */ 618 ctrl |= GPIO_MOD_CTRL_BIT; 619 __raw_writel(ctrl, reg); 620 bank->context.ctrl = ctrl; 621 } 622 623 _reset_gpio(bank, bank->chip.base + offset); 624 spin_unlock_irqrestore(&bank->lock, flags); 625 626 /* 627 * If this is the last gpio to be freed in the bank, 628 * disable the bank module. 629 */ 630 if (!bank->mod_usage) 631 pm_runtime_put(bank->dev); 632 } 633 634 /* 635 * We need to unmask the GPIO bank interrupt as soon as possible to 636 * avoid missing GPIO interrupts for other lines in the bank. 
637 * Then we need to mask-read-clear-unmask the triggered GPIO lines 638 * in the bank to avoid missing nested interrupts for a GPIO line. 639 * If we wait to unmask individual GPIO lines in the bank after the 640 * line's interrupt handler has been run, we may miss some nested 641 * interrupts. 642 */ 643 static void gpio_irq_handler(unsigned int irq, struct irq_desc *desc) 644 { 645 void __iomem *isr_reg = NULL; 646 u32 isr; 647 unsigned int gpio_irq, gpio_index; 648 struct gpio_bank *bank; 649 int unmasked = 0; 650 struct irq_chip *chip = irq_desc_get_chip(desc); 651 652 chained_irq_enter(chip, desc); 653 654 bank = irq_get_handler_data(irq); 655 isr_reg = bank->base + bank->regs->irqstatus; 656 pm_runtime_get_sync(bank->dev); 657 658 if (WARN_ON(!isr_reg)) 659 goto exit; 660 661 while(1) { 662 u32 isr_saved, level_mask = 0; 663 u32 enabled; 664 665 enabled = _get_gpio_irqbank_mask(bank); 666 isr_saved = isr = __raw_readl(isr_reg) & enabled; 667 668 if (bank->level_mask) 669 level_mask = bank->level_mask & enabled; 670 671 /* clear edge sensitive interrupts before handler(s) are 672 called so that we don't miss any interrupt occurred while 673 executing them */ 674 _disable_gpio_irqbank(bank, isr_saved & ~level_mask); 675 _clear_gpio_irqbank(bank, isr_saved & ~level_mask); 676 _enable_gpio_irqbank(bank, isr_saved & ~level_mask); 677 678 /* if there is only edge sensitive GPIO pin interrupts 679 configured, we could unmask GPIO bank interrupt immediately */ 680 if (!level_mask && !unmasked) { 681 unmasked = 1; 682 chained_irq_exit(chip, desc); 683 } 684 685 if (!isr) 686 break; 687 688 gpio_irq = bank->irq_base; 689 for (; isr != 0; isr >>= 1, gpio_irq++) { 690 int gpio = irq_to_gpio(bank, gpio_irq); 691 692 if (!(isr & 1)) 693 continue; 694 695 gpio_index = GPIO_INDEX(bank, gpio); 696 697 /* 698 * Some chips can't respond to both rising and falling 699 * at the same time. 
If this irq was requested with 700 * both flags, we need to flip the ICR data for the IRQ 701 * to respond to the IRQ for the opposite direction. 702 * This will be indicated in the bank toggle_mask. 703 */ 704 if (bank->toggle_mask & (1 << gpio_index)) 705 _toggle_gpio_edge_triggering(bank, gpio_index); 706 707 generic_handle_irq(gpio_irq); 708 } 709 } 710 /* if bank has any level sensitive GPIO pin interrupt 711 configured, we must unmask the bank interrupt only after 712 handler(s) are executed in order to avoid spurious bank 713 interrupt */ 714 exit: 715 if (!unmasked) 716 chained_irq_exit(chip, desc); 717 pm_runtime_put(bank->dev); 718 } 719 720 static void gpio_irq_shutdown(struct irq_data *d) 721 { 722 struct gpio_bank *bank = irq_data_get_irq_chip_data(d); 723 unsigned int gpio = irq_to_gpio(bank, d->irq); 724 unsigned long flags; 725 726 spin_lock_irqsave(&bank->lock, flags); 727 _reset_gpio(bank, gpio); 728 spin_unlock_irqrestore(&bank->lock, flags); 729 } 730 731 static void gpio_ack_irq(struct irq_data *d) 732 { 733 struct gpio_bank *bank = irq_data_get_irq_chip_data(d); 734 unsigned int gpio = irq_to_gpio(bank, d->irq); 735 736 _clear_gpio_irqstatus(bank, gpio); 737 } 738 739 static void gpio_mask_irq(struct irq_data *d) 740 { 741 struct gpio_bank *bank = irq_data_get_irq_chip_data(d); 742 unsigned int gpio = irq_to_gpio(bank, d->irq); 743 unsigned long flags; 744 745 spin_lock_irqsave(&bank->lock, flags); 746 _set_gpio_irqenable(bank, gpio, 0); 747 _set_gpio_triggering(bank, GPIO_INDEX(bank, gpio), IRQ_TYPE_NONE); 748 spin_unlock_irqrestore(&bank->lock, flags); 749 } 750 751 static void gpio_unmask_irq(struct irq_data *d) 752 { 753 struct gpio_bank *bank = irq_data_get_irq_chip_data(d); 754 unsigned int gpio = irq_to_gpio(bank, d->irq); 755 unsigned int irq_mask = GPIO_BIT(bank, gpio); 756 u32 trigger = irqd_get_trigger_type(d); 757 unsigned long flags; 758 759 spin_lock_irqsave(&bank->lock, flags); 760 if (trigger) 761 _set_gpio_triggering(bank, 
GPIO_INDEX(bank, gpio), trigger); 762 763 /* For level-triggered GPIOs, the clearing must be done after 764 * the HW source is cleared, thus after the handler has run */ 765 if (bank->level_mask & irq_mask) { 766 _set_gpio_irqenable(bank, gpio, 0); 767 _clear_gpio_irqstatus(bank, gpio); 768 } 769 770 _set_gpio_irqenable(bank, gpio, 1); 771 spin_unlock_irqrestore(&bank->lock, flags); 772 } 773 774 static struct irq_chip gpio_irq_chip = { 775 .name = "GPIO", 776 .irq_shutdown = gpio_irq_shutdown, 777 .irq_ack = gpio_ack_irq, 778 .irq_mask = gpio_mask_irq, 779 .irq_unmask = gpio_unmask_irq, 780 .irq_set_type = gpio_irq_type, 781 .irq_set_wake = gpio_wake_enable, 782 }; 783 784 /*---------------------------------------------------------------------*/ 785 786 static int omap_mpuio_suspend_noirq(struct device *dev) 787 { 788 struct platform_device *pdev = to_platform_device(dev); 789 struct gpio_bank *bank = platform_get_drvdata(pdev); 790 void __iomem *mask_reg = bank->base + 791 OMAP_MPUIO_GPIO_MASKIT / bank->stride; 792 unsigned long flags; 793 794 spin_lock_irqsave(&bank->lock, flags); 795 __raw_writel(0xffff & ~bank->context.wake_en, mask_reg); 796 spin_unlock_irqrestore(&bank->lock, flags); 797 798 return 0; 799 } 800 801 static int omap_mpuio_resume_noirq(struct device *dev) 802 { 803 struct platform_device *pdev = to_platform_device(dev); 804 struct gpio_bank *bank = platform_get_drvdata(pdev); 805 void __iomem *mask_reg = bank->base + 806 OMAP_MPUIO_GPIO_MASKIT / bank->stride; 807 unsigned long flags; 808 809 spin_lock_irqsave(&bank->lock, flags); 810 __raw_writel(bank->context.wake_en, mask_reg); 811 spin_unlock_irqrestore(&bank->lock, flags); 812 813 return 0; 814 } 815 816 static const struct dev_pm_ops omap_mpuio_dev_pm_ops = { 817 .suspend_noirq = omap_mpuio_suspend_noirq, 818 .resume_noirq = omap_mpuio_resume_noirq, 819 }; 820 821 /* use platform_driver for this. 
*/ 822 static struct platform_driver omap_mpuio_driver = { 823 .driver = { 824 .name = "mpuio", 825 .pm = &omap_mpuio_dev_pm_ops, 826 }, 827 }; 828 829 static struct platform_device omap_mpuio_device = { 830 .name = "mpuio", 831 .id = -1, 832 .dev = { 833 .driver = &omap_mpuio_driver.driver, 834 } 835 /* could list the /proc/iomem resources */ 836 }; 837 838 static inline void mpuio_init(struct gpio_bank *bank) 839 { 840 platform_set_drvdata(&omap_mpuio_device, bank); 841 842 if (platform_driver_register(&omap_mpuio_driver) == 0) 843 (void) platform_device_register(&omap_mpuio_device); 844 } 845 846 /*---------------------------------------------------------------------*/ 847 848 static int gpio_input(struct gpio_chip *chip, unsigned offset) 849 { 850 struct gpio_bank *bank; 851 unsigned long flags; 852 853 bank = container_of(chip, struct gpio_bank, chip); 854 spin_lock_irqsave(&bank->lock, flags); 855 _set_gpio_direction(bank, offset, 1); 856 spin_unlock_irqrestore(&bank->lock, flags); 857 return 0; 858 } 859 860 static int gpio_is_input(struct gpio_bank *bank, int mask) 861 { 862 void __iomem *reg = bank->base + bank->regs->direction; 863 864 return __raw_readl(reg) & mask; 865 } 866 867 static int gpio_get(struct gpio_chip *chip, unsigned offset) 868 { 869 struct gpio_bank *bank; 870 u32 mask; 871 872 bank = container_of(chip, struct gpio_bank, chip); 873 mask = (1 << offset); 874 875 if (gpio_is_input(bank, mask)) 876 return _get_gpio_datain(bank, offset); 877 else 878 return _get_gpio_dataout(bank, offset); 879 } 880 881 static int gpio_output(struct gpio_chip *chip, unsigned offset, int value) 882 { 883 struct gpio_bank *bank; 884 unsigned long flags; 885 886 bank = container_of(chip, struct gpio_bank, chip); 887 spin_lock_irqsave(&bank->lock, flags); 888 bank->set_dataout(bank, offset, value); 889 _set_gpio_direction(bank, offset, 0); 890 spin_unlock_irqrestore(&bank->lock, flags); 891 return 0; 892 } 893 894 static int gpio_debounce(struct gpio_chip *chip, 
unsigned offset, 895 unsigned debounce) 896 { 897 struct gpio_bank *bank; 898 unsigned long flags; 899 900 bank = container_of(chip, struct gpio_bank, chip); 901 902 spin_lock_irqsave(&bank->lock, flags); 903 _set_gpio_debounce(bank, offset, debounce); 904 spin_unlock_irqrestore(&bank->lock, flags); 905 906 return 0; 907 } 908 909 static void gpio_set(struct gpio_chip *chip, unsigned offset, int value) 910 { 911 struct gpio_bank *bank; 912 unsigned long flags; 913 914 bank = container_of(chip, struct gpio_bank, chip); 915 spin_lock_irqsave(&bank->lock, flags); 916 bank->set_dataout(bank, offset, value); 917 spin_unlock_irqrestore(&bank->lock, flags); 918 } 919 920 static int gpio_2irq(struct gpio_chip *chip, unsigned offset) 921 { 922 struct gpio_bank *bank; 923 924 bank = container_of(chip, struct gpio_bank, chip); 925 return bank->irq_base + offset; 926 } 927 928 /*---------------------------------------------------------------------*/ 929 930 static void __init omap_gpio_show_rev(struct gpio_bank *bank) 931 { 932 static bool called; 933 u32 rev; 934 935 if (called || bank->regs->revision == USHRT_MAX) 936 return; 937 938 rev = __raw_readw(bank->base + bank->regs->revision); 939 pr_info("OMAP GPIO hardware version %d.%d\n", 940 (rev >> 4) & 0x0f, rev & 0x0f); 941 942 called = true; 943 } 944 945 /* This lock class tells lockdep that GPIO irqs are in a different 946 * category than their parents, so it won't report false recursion. 
947 */ 948 static struct lock_class_key gpio_lock_class; 949 950 static void omap_gpio_mod_init(struct gpio_bank *bank) 951 { 952 void __iomem *base = bank->base; 953 u32 l = 0xffffffff; 954 955 if (bank->width == 16) 956 l = 0xffff; 957 958 if (bank->is_mpuio) { 959 __raw_writel(l, bank->base + bank->regs->irqenable); 960 return; 961 } 962 963 _gpio_rmw(base, bank->regs->irqenable, l, bank->regs->irqenable_inv); 964 _gpio_rmw(base, bank->regs->irqstatus, l, !bank->regs->irqenable_inv); 965 if (bank->regs->debounce_en) 966 __raw_writel(0, base + bank->regs->debounce_en); 967 968 /* Save OE default value (0xffffffff) in the context */ 969 bank->context.oe = __raw_readl(bank->base + bank->regs->direction); 970 /* Initialize interface clk ungated, module enabled */ 971 if (bank->regs->ctrl) 972 __raw_writel(0, base + bank->regs->ctrl); 973 974 bank->dbck = clk_get(bank->dev, "dbclk"); 975 if (IS_ERR(bank->dbck)) 976 dev_err(bank->dev, "Could not get gpio dbck\n"); 977 } 978 979 static __devinit void 980 omap_mpuio_alloc_gc(struct gpio_bank *bank, unsigned int irq_start, 981 unsigned int num) 982 { 983 struct irq_chip_generic *gc; 984 struct irq_chip_type *ct; 985 986 gc = irq_alloc_generic_chip("MPUIO", 1, irq_start, bank->base, 987 handle_simple_irq); 988 if (!gc) { 989 dev_err(bank->dev, "Memory alloc failed for gc\n"); 990 return; 991 } 992 993 ct = gc->chip_types; 994 995 /* NOTE: No ack required, reading IRQ status clears it. 
*/ 996 ct->chip.irq_mask = irq_gc_mask_set_bit; 997 ct->chip.irq_unmask = irq_gc_mask_clr_bit; 998 ct->chip.irq_set_type = gpio_irq_type; 999 1000 if (bank->regs->wkup_en) 1001 ct->chip.irq_set_wake = gpio_wake_enable, 1002 1003 ct->regs.mask = OMAP_MPUIO_GPIO_INT / bank->stride; 1004 irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE, 1005 IRQ_NOREQUEST | IRQ_NOPROBE, 0); 1006 } 1007 1008 static void __devinit omap_gpio_chip_init(struct gpio_bank *bank) 1009 { 1010 int j; 1011 static int gpio; 1012 1013 /* 1014 * REVISIT eventually switch from OMAP-specific gpio structs 1015 * over to the generic ones 1016 */ 1017 bank->chip.request = omap_gpio_request; 1018 bank->chip.free = omap_gpio_free; 1019 bank->chip.direction_input = gpio_input; 1020 bank->chip.get = gpio_get; 1021 bank->chip.direction_output = gpio_output; 1022 bank->chip.set_debounce = gpio_debounce; 1023 bank->chip.set = gpio_set; 1024 bank->chip.to_irq = gpio_2irq; 1025 if (bank->is_mpuio) { 1026 bank->chip.label = "mpuio"; 1027 if (bank->regs->wkup_en) 1028 bank->chip.dev = &omap_mpuio_device.dev; 1029 bank->chip.base = OMAP_MPUIO(0); 1030 } else { 1031 bank->chip.label = "gpio"; 1032 bank->chip.base = gpio; 1033 gpio += bank->width; 1034 } 1035 bank->chip.ngpio = bank->width; 1036 1037 gpiochip_add(&bank->chip); 1038 1039 for (j = bank->irq_base; j < bank->irq_base + bank->width; j++) { 1040 irq_set_lockdep_class(j, &gpio_lock_class); 1041 irq_set_chip_data(j, bank); 1042 if (bank->is_mpuio) { 1043 omap_mpuio_alloc_gc(bank, j, bank->width); 1044 } else { 1045 irq_set_chip(j, &gpio_irq_chip); 1046 irq_set_handler(j, handle_simple_irq); 1047 set_irq_flags(j, IRQF_VALID); 1048 } 1049 } 1050 irq_set_chained_handler(bank->irq, gpio_irq_handler); 1051 irq_set_handler_data(bank->irq, bank); 1052 } 1053 1054 static const struct of_device_id omap_gpio_match[]; 1055 1056 static int __devinit omap_gpio_probe(struct platform_device *pdev) 1057 { 1058 struct device *dev = &pdev->dev; 1059 struct 
device_node *node = dev->of_node; 1060 const struct of_device_id *match; 1061 struct omap_gpio_platform_data *pdata; 1062 struct resource *res; 1063 struct gpio_bank *bank; 1064 int ret = 0; 1065 1066 match = of_match_device(of_match_ptr(omap_gpio_match), dev); 1067 1068 pdata = match ? match->data : dev->platform_data; 1069 if (!pdata) 1070 return -EINVAL; 1071 1072 bank = devm_kzalloc(&pdev->dev, sizeof(struct gpio_bank), GFP_KERNEL); 1073 if (!bank) { 1074 dev_err(dev, "Memory alloc failed\n"); 1075 return -ENOMEM; 1076 } 1077 1078 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 1079 if (unlikely(!res)) { 1080 dev_err(dev, "Invalid IRQ resource\n"); 1081 return -ENODEV; 1082 } 1083 1084 bank->irq = res->start; 1085 bank->dev = dev; 1086 bank->dbck_flag = pdata->dbck_flag; 1087 bank->stride = pdata->bank_stride; 1088 bank->width = pdata->bank_width; 1089 bank->is_mpuio = pdata->is_mpuio; 1090 bank->non_wakeup_gpios = pdata->non_wakeup_gpios; 1091 bank->loses_context = pdata->loses_context; 1092 bank->regs = pdata->regs; 1093 #ifdef CONFIG_OF_GPIO 1094 bank->chip.of_node = of_node_get(node); 1095 #endif 1096 1097 bank->irq_base = irq_alloc_descs(-1, 0, bank->width, 0); 1098 if (bank->irq_base < 0) { 1099 dev_err(dev, "Couldn't allocate IRQ numbers\n"); 1100 return -ENODEV; 1101 } 1102 1103 bank->domain = irq_domain_add_legacy(node, bank->width, bank->irq_base, 1104 0, &irq_domain_simple_ops, NULL); 1105 1106 if (bank->regs->set_dataout && bank->regs->clr_dataout) 1107 bank->set_dataout = _set_gpio_dataout_reg; 1108 else 1109 bank->set_dataout = _set_gpio_dataout_mask; 1110 1111 spin_lock_init(&bank->lock); 1112 1113 /* Static mapping, never released */ 1114 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1115 if (unlikely(!res)) { 1116 dev_err(dev, "Invalid mem resource\n"); 1117 return -ENODEV; 1118 } 1119 1120 if (!devm_request_mem_region(dev, res->start, resource_size(res), 1121 pdev->name)) { 1122 dev_err(dev, "Region already claimed\n"); 1123 
return -EBUSY; 1124 } 1125 1126 bank->base = devm_ioremap(dev, res->start, resource_size(res)); 1127 if (!bank->base) { 1128 dev_err(dev, "Could not ioremap\n"); 1129 return -ENOMEM; 1130 } 1131 1132 platform_set_drvdata(pdev, bank); 1133 1134 pm_runtime_enable(bank->dev); 1135 pm_runtime_irq_safe(bank->dev); 1136 pm_runtime_get_sync(bank->dev); 1137 1138 if (bank->is_mpuio) 1139 mpuio_init(bank); 1140 1141 omap_gpio_mod_init(bank); 1142 omap_gpio_chip_init(bank); 1143 omap_gpio_show_rev(bank); 1144 1145 if (bank->loses_context) 1146 bank->get_context_loss_count = pdata->get_context_loss_count; 1147 1148 pm_runtime_put(bank->dev); 1149 1150 list_add_tail(&bank->node, &omap_gpio_list); 1151 1152 return ret; 1153 } 1154 1155 #ifdef CONFIG_ARCH_OMAP2PLUS 1156 1157 #if defined(CONFIG_PM_RUNTIME) 1158 static void omap_gpio_restore_context(struct gpio_bank *bank); 1159 1160 static int omap_gpio_runtime_suspend(struct device *dev) 1161 { 1162 struct platform_device *pdev = to_platform_device(dev); 1163 struct gpio_bank *bank = platform_get_drvdata(pdev); 1164 u32 l1 = 0, l2 = 0; 1165 unsigned long flags; 1166 u32 wake_low, wake_hi; 1167 1168 spin_lock_irqsave(&bank->lock, flags); 1169 1170 /* 1171 * Only edges can generate a wakeup event to the PRCM. 1172 * 1173 * Therefore, ensure any wake-up capable GPIOs have 1174 * edge-detection enabled before going idle to ensure a wakeup 1175 * to the PRCM is generated on a GPIO transition. (c.f. 34xx 1176 * NDA TRM 25.5.3.1) 1177 * 1178 * The normal values will be restored upon ->runtime_resume() 1179 * by writing back the values saved in bank->context. 
 */
	/* Wakeup-enabled, level-low lines: also detect a falling edge. */
	wake_low = bank->context.leveldetect0 & bank->context.wake_en;
	if (wake_low)
		__raw_writel(wake_low | bank->context.fallingdetect,
			     bank->base + bank->regs->fallingdetect);
	/* Wakeup-enabled, level-high lines: also detect a rising edge. */
	wake_hi = bank->context.leveldetect1 & bank->context.wake_en;
	if (wake_hi)
		__raw_writel(wake_hi | bank->context.risingdetect,
			     bank->base + bank->regs->risingdetect);

	/* The errata workaround below only matters for non-wakeup GPIOs. */
	if (!bank->enabled_non_wakeup_gpios)
		goto update_gpio_context_count;

	/* Not heading into OFF mode: just clear the mode for next time. */
	if (bank->power_mode != OFF_MODE) {
		bank->power_mode = 0;
		goto update_gpio_context_count;
	}
	/*
	 * If going to OFF, remove triggering for all
	 * non-wakeup GPIOs.  Otherwise spurious IRQs will be
	 * generated.  See OMAP2420 Errata item 1.101.
	 */
	bank->saved_datain = __raw_readl(bank->base +
						bank->regs->datain);
	l1 = bank->context.fallingdetect;
	l2 = bank->context.risingdetect;

	/* Strip edge detection from every non-wakeup line. */
	l1 &= ~bank->enabled_non_wakeup_gpios;
	l2 &= ~bank->enabled_non_wakeup_gpios;

	__raw_writel(l1, bank->base + bank->regs->fallingdetect);
	__raw_writel(l2, bank->base + bank->regs->risingdetect);

	/* Tell ->runtime_resume() it must replay missed edges in software. */
	bank->workaround_enabled = true;

update_gpio_context_count:
	/* Snapshot the loss counter so resume can tell if context was lost. */
	if (bank->get_context_loss_count)
		bank->context_loss_count =
				bank->get_context_loss_count(bank->dev);

	_gpio_dbck_disable(bank);
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

/*
 * omap_gpio_runtime_resume - undo omap_gpio_runtime_suspend().
 *
 * Restores the saved edge-detect settings, replays the whole register
 * context if the power domain lost it while idle, and synthesizes IRQs
 * for any non-wakeup GPIO that changed state while its edge detection
 * was disabled (OMAP2420 Errata 1.101 workaround).
 */
static int omap_gpio_runtime_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct gpio_bank *bank = platform_get_drvdata(pdev);
	int context_lost_cnt_after;
	u32 l = 0, gen, gen0, gen1;
	unsigned long flags;

	spin_lock_irqsave(&bank->lock, flags);
	_gpio_dbck_enable(bank);

	/*
	 * In ->runtime_suspend(), level-triggered, wakeup-enabled
	 * GPIOs were set to edge trigger also in order to be able to
	 * generate a PRCM wakeup.  Here we restore the
	 * pre-runtime_suspend() values for edge triggering.
	 */
	__raw_writel(bank->context.fallingdetect,
		     bank->base + bank->regs->fallingdetect);
	__raw_writel(bank->context.risingdetect,
		     bank->base + bank->regs->risingdetect);

	if (bank->get_context_loss_count) {
		context_lost_cnt_after =
			bank->get_context_loss_count(bank->dev);
		if (context_lost_cnt_after != bank->context_loss_count) {
			omap_gpio_restore_context(bank);
		} else {
			/* Context survived; nothing else to fix up. */
			spin_unlock_irqrestore(&bank->lock, flags);
			return 0;
		}
	}

	if (!bank->workaround_enabled) {
		spin_unlock_irqrestore(&bank->lock, flags);
		return 0;
	}

	/*
	 * NOTE(review): these two writes repeat the restore done above;
	 * redundant but harmless on this path.
	 */
	__raw_writel(bank->context.fallingdetect,
			bank->base + bank->regs->fallingdetect);
	__raw_writel(bank->context.risingdetect,
			bank->base + bank->regs->risingdetect);
	l = __raw_readl(bank->base + bank->regs->datain);

	/*
	 * Check if any of the non-wakeup interrupt GPIOs have changed
	 * state.  If so, generate an IRQ by software.  This is
	 * horribly racy, but it's the best we can do to work around
	 * this silicon bug.
	 */
	l ^= bank->saved_datain;
	l &= bank->enabled_non_wakeup_gpios;

	/*
	 * No need to generate IRQs for the rising edge for gpio IRQs
	 * configured with falling edge only; and vice versa.
	 */
	/* gen0: lines that fell (were high, now changed) with falling-edge set */
	gen0 = l & bank->context.fallingdetect;
	gen0 &= bank->saved_datain;

	/* gen1: lines that rose (were low, now changed) with rising-edge set */
	gen1 = l & bank->context.risingdetect;
	gen1 &= ~(bank->saved_datain);

	/* FIXME: Consider GPIO IRQs with level detections properly! */
	gen = l & (~(bank->context.fallingdetect) &
					 ~(bank->context.risingdetect));
	/* Consider all GPIO IRQs needed to be updated */
	gen |= gen0 | gen1;

	if (gen) {
		u32 old0, old1;

		old0 = __raw_readl(bank->base + bank->regs->leveldetect0);
		old1 = __raw_readl(bank->base + bank->regs->leveldetect1);

		/*
		 * Pulse the leveldetect registers for the affected lines so
		 * the IRQ status latches, then restore the previous values
		 * below.  Banks with raw status registers (irqstatus_raw0)
		 * use the full changed mask 'l' instead of 'gen' —
		 * presumably a hardware difference in how the status
		 * latches; TODO(review): confirm against the TRM.
		 */
		if (!bank->regs->irqstatus_raw0) {
			__raw_writel(old0 | gen, bank->base +
					bank->regs->leveldetect0);
			__raw_writel(old1 | gen, bank->base +
					bank->regs->leveldetect1);
		}

		if (bank->regs->irqstatus_raw0) {
			__raw_writel(old0 | l, bank->base +
					bank->regs->leveldetect0);
			__raw_writel(old1 | l, bank->base +
					bank->regs->leveldetect1);
		}
		__raw_writel(old0, bank->base + bank->regs->leveldetect0);
		__raw_writel(old1, bank->base + bank->regs->leveldetect1);
	}

	bank->workaround_enabled = false;
	spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}
#endif /* CONFIG_PM_RUNTIME */

/*
 * omap2_gpio_prepare_for_idle - called from the OMAP2+ PM core before
 * entering a low-power state.  Pushes @pwr_mode into each in-use,
 * context-losing bank and drops its runtime-PM reference so
 * ->runtime_suspend() runs.
 */
void omap2_gpio_prepare_for_idle(int pwr_mode)
{
	struct gpio_bank *bank;

	list_for_each_entry(bank, &omap_gpio_list, node) {
		if (!bank->mod_usage || !bank->loses_context)
			continue;

		bank->power_mode = pwr_mode;

		pm_runtime_put_sync_suspend(bank->dev);
	}
}

/*
 * omap2_gpio_resume_after_idle - counterpart of the above; re-takes the
 * runtime-PM reference so ->runtime_resume() runs for each bank.
 */
void omap2_gpio_resume_after_idle(void)
{
	struct gpio_bank *bank;

	list_for_each_entry(bank, &omap_gpio_list, node) {
		if (!bank->mod_usage || !bank->loses_context)
			continue;

		pm_runtime_get_sync(bank->dev);
	}
}

#if defined(CONFIG_PM_RUNTIME)
/*
 * omap_gpio_restore_context - rewrite every bank register from the
 * software copy kept in bank->context, after the power domain lost
 * context while idle.
 */
static void omap_gpio_restore_context(struct gpio_bank *bank)
{
	__raw_writel(bank->context.wake_en,
				bank->base + bank->regs->wkup_en);
	__raw_writel(bank->context.ctrl, bank->base + bank->regs->ctrl);
	__raw_writel(bank->context.leveldetect0,
				bank->base + bank->regs->leveldetect0);
	__raw_writel(bank->context.leveldetect1,
				bank->base + bank->regs->leveldetect1);
	__raw_writel(bank->context.risingdetect,
				bank->base + bank->regs->risingdetect);
	__raw_writel(bank->context.fallingdetect,
				bank->base + bank->regs->fallingdetect);
	/* Use the dedicated set-dataout register when the bank has one. */
	if (bank->regs->set_dataout && bank->regs->clr_dataout)
		__raw_writel(bank->context.dataout,
				bank->base + bank->regs->set_dataout);
	else
		__raw_writel(bank->context.dataout,
				bank->base + bank->regs->dataout);
	__raw_writel(bank->context.oe, bank->base + bank->regs->direction);

	/* Debounce registers only if debounce was in use before idle. */
	if (bank->dbck_enable_mask) {
		__raw_writel(bank->context.debounce, bank->base +
					bank->regs->debounce);
		__raw_writel(bank->context.debounce_en,
				bank->base + bank->regs->debounce_en);
	}

	/* Interrupt enables written last, after all detect/direction state. */
	__raw_writel(bank->context.irqenable1,
				bank->base + bank->regs->irqenable);
	__raw_writel(bank->context.irqenable2,
				bank->base + bank->regs->irqenable2);
}
#endif /* CONFIG_PM_RUNTIME */
#else
/* OMAP1: no runtime PM callbacks. */
#define omap_gpio_runtime_suspend	NULL
#define omap_gpio_runtime_resume	NULL
#endif

static const struct dev_pm_ops gpio_pm_ops = {
	SET_RUNTIME_PM_OPS(omap_gpio_runtime_suspend, omap_gpio_runtime_resume,
									NULL)
};

#if defined(CONFIG_OF)
/* Register offsets for OMAP2/3-style banks (shared by omap2/omap3 pdata). */
static struct omap_gpio_reg_offs omap2_gpio_regs = {
	.revision =		OMAP24XX_GPIO_REVISION,
	.direction =		OMAP24XX_GPIO_OE,
	.datain =		OMAP24XX_GPIO_DATAIN,
	.dataout =		OMAP24XX_GPIO_DATAOUT,
	.set_dataout =		OMAP24XX_GPIO_SETDATAOUT,
	.clr_dataout =		OMAP24XX_GPIO_CLEARDATAOUT,
	.irqstatus =		OMAP24XX_GPIO_IRQSTATUS1,
	.irqstatus2 =		OMAP24XX_GPIO_IRQSTATUS2,
	.irqenable =		OMAP24XX_GPIO_IRQENABLE1,
	.irqenable2 =		OMAP24XX_GPIO_IRQENABLE2,
	.set_irqenable =	OMAP24XX_GPIO_SETIRQENABLE1,
	.clr_irqenable =	OMAP24XX_GPIO_CLEARIRQENABLE1,
	.debounce =		OMAP24XX_GPIO_DEBOUNCE_VAL,
	.debounce_en =		OMAP24XX_GPIO_DEBOUNCE_EN,
	.ctrl =			OMAP24XX_GPIO_CTRL,
	.wkup_en =		OMAP24XX_GPIO_WAKE_EN,
	.leveldetect0 =		OMAP24XX_GPIO_LEVELDETECT0,
	.leveldetect1 =		OMAP24XX_GPIO_LEVELDETECT1,
	.risingdetect =		OMAP24XX_GPIO_RISINGDETECT,
	.fallingdetect =	OMAP24XX_GPIO_FALLINGDETECT,
};

/*
 * Register offsets for OMAP4-style banks.  Note .irqenable/.irqenable2
 * point at the IRQSTATUSSET registers — OMAP4 appears to use
 * write-to-set/write-to-clear enables (see .set_irqenable /
 * .clr_irqenable); TODO(review): confirm against the OMAP4 TRM.
 */
static struct omap_gpio_reg_offs omap4_gpio_regs = {
	.revision =		OMAP4_GPIO_REVISION,
	.direction =		OMAP4_GPIO_OE,
	.datain =		OMAP4_GPIO_DATAIN,
	.dataout =		OMAP4_GPIO_DATAOUT,
	.set_dataout =		OMAP4_GPIO_SETDATAOUT,
	.clr_dataout =		OMAP4_GPIO_CLEARDATAOUT,
	.irqstatus =		OMAP4_GPIO_IRQSTATUS0,
	.irqstatus2 =		OMAP4_GPIO_IRQSTATUS1,
	.irqenable =		OMAP4_GPIO_IRQSTATUSSET0,
	.irqenable2 =		OMAP4_GPIO_IRQSTATUSSET1,
	.set_irqenable =	OMAP4_GPIO_IRQSTATUSSET0,
	.clr_irqenable =	OMAP4_GPIO_IRQSTATUSCLR0,
	.debounce =		OMAP4_GPIO_DEBOUNCINGTIME,
	.debounce_en =		OMAP4_GPIO_DEBOUNCENABLE,
	.ctrl =			OMAP4_GPIO_CTRL,
	.wkup_en =		OMAP4_GPIO_IRQWAKEN0,
	.leveldetect0 =		OMAP4_GPIO_LEVELDETECT0,
	.leveldetect1 =		OMAP4_GPIO_LEVELDETECT1,
	.risingdetect =		OMAP4_GPIO_RISINGDETECT,
	.fallingdetect =	OMAP4_GPIO_FALLINGDETECT,
};

static struct omap_gpio_platform_data omap2_pdata = {
	.regs = &omap2_gpio_regs,
	.bank_width = 32,
	.dbck_flag = false,	/* no per-bank debounce clock on OMAP2 */
};

static struct omap_gpio_platform_data omap3_pdata = {
	.regs = &omap2_gpio_regs,	/* OMAP3 reuses the OMAP2 layout */
	.bank_width = 32,
	.dbck_flag = true,
};

static struct omap_gpio_platform_data omap4_pdata = {
	.regs = &omap4_gpio_regs,
	.bank_width = 32,
	.dbck_flag = true,
};

/* DT match table; each entry carries its pdata via .data (see probe). */
static const struct of_device_id omap_gpio_match[] = {
	{
		.compatible = "ti,omap4-gpio",
		.data = &omap4_pdata,
	},
	{
		.compatible = "ti,omap3-gpio",
		.data = &omap3_pdata,
	},
	{
		.compatible = "ti,omap2-gpio",
		.data = &omap2_pdata,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, omap_gpio_match);
#endif

static struct platform_driver omap_gpio_driver = {
	.probe		= omap_gpio_probe,
	.driver		= {
		.name	= "omap_gpio",
		.pm	= &gpio_pm_ops,
		.of_match_table = of_match_ptr(omap_gpio_match),
	},
};

/*
 * gpio driver register needs to be done before
 * machine_init functions access gpio APIs.
 * Hence omap_gpio_drv_reg() is a postcore_initcall.
 */
static int __init omap_gpio_drv_reg(void)
{
	return platform_driver_register(&omap_gpio_driver);
}
postcore_initcall(omap_gpio_drv_reg);