// SPDX-License-Identifier: GPL-2.0-only
/*
 * Support functions for OMAP GPIO
 *
 * Copyright (C) 2003-2005 Nokia Corporation
 * Written by Juha Yrjölä <juha.yrjola@nokia.com>
 *
 * Copyright (C) 2009 Texas Instruments
 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/syscore_ops.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/cpu_pm.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pm.h>
#include <linux/of.h>
#include <linux/gpio/driver.h>
#include <linux/bitops.h>
#include <linux/platform_data/gpio-omap.h>

#define OMAP4_GPIO_DEBOUNCINGTIME_MASK 0xFF

struct gpio_regs {
	u32 sysconfig;
	u32 irqenable1;
	u32 irqenable2;
	u32 wake_en;
	u32 ctrl;
	u32 oe;
	u32 leveldetect0;
	u32 leveldetect1;
	u32 risingdetect;
	u32 fallingdetect;
	u32 dataout;
	u32 debounce;
	u32 debounce_en;
};

struct gpio_bank {
	void __iomem *base;
	const struct omap_gpio_reg_offs *regs;
	struct device *dev;

	int irq;
	u32 non_wakeup_gpios;
	u32 enabled_non_wakeup_gpios;
	struct gpio_regs context;
	u32 saved_datain;
	u32 level_mask;
	u32 toggle_mask;
	raw_spinlock_t lock;
	raw_spinlock_t wa_lock;
	struct gpio_chip chip;
	struct clk *dbck;
	struct notifier_block nb;
	unsigned int is_suspended:1;
	unsigned int needs_resume:1;
	u32 mod_usage;
	u32 irq_usage;
	u32 dbck_enable_mask;
	bool dbck_enabled;
	bool is_mpuio;
	bool dbck_flag;
	bool loses_context;
	bool context_valid;
	int stride;
	u32 width;
	int context_loss_count;

	void (*set_dataout)(struct gpio_bank *bank, unsigned gpio, int enable);
	int (*get_context_loss_count)(struct device *dev);
};

#define GPIO_MOD_CTRL_BIT	BIT(0)

#define BANK_USED(bank) (bank->mod_usage || bank->irq_usage)
#define LINE_USED(line, offset) (line & (BIT(offset)))

static void omap_gpio_unmask_irq(struct irq_data *d);

static inline struct gpio_bank *omap_irq_data_get_bank(struct irq_data *d)
{
	struct gpio_chip *chip = irq_data_get_irq_chip_data(d);
	return gpiochip_get_data(chip);
}

static inline u32 omap_gpio_rmw(void __iomem *reg, u32 mask, bool set)
{
	u32 val = readl_relaxed(reg);

	if (set)
		val |= mask;
	else
		val &= ~mask;

	writel_relaxed(val, reg);

	return val;
}

static void omap_set_gpio_direction(struct gpio_bank *bank, int gpio,
				    int is_input)
{
	bank->context.oe = omap_gpio_rmw(bank->base + bank->regs->direction,
					 BIT(gpio), is_input);
}

/* set data out value using the dedicated set/clear registers */
static void omap_set_gpio_dataout_reg(struct gpio_bank *bank, unsigned offset,
				      int enable)
{
	void __iomem *reg = bank->base;
	u32 l = BIT(offset);

	if (enable) {
		reg += bank->regs->set_dataout;
		bank->context.dataout |= l;
	} else {
		reg += bank->regs->clr_dataout;
		bank->context.dataout &= ~l;
	}

	writel_relaxed(l, reg);
}

/* set data out value using the mask register */
static void omap_set_gpio_dataout_mask(struct gpio_bank *bank, unsigned offset,
				       int enable)
{
	bank->context.dataout = omap_gpio_rmw(bank->base + bank->regs->dataout,
					      BIT(offset), enable);
}

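/*
 * Enable the debounce functional clock when at least one line in the bank
 * has debouncing enabled; omap_gpio_dbck_disable() is the counterpart used
 * when the bank idles.
 */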
static inline void omap_gpio_dbck_enable(struct gpio_bank *bank)
{
	if (bank->dbck_enable_mask && !bank->dbck_enabled) {
		clk_enable(bank->dbck);
		bank->dbck_enabled = true;

		writel_relaxed(bank->dbck_enable_mask,
			       bank->base + bank->regs->debounce_en);
	}
}

static inline void omap_gpio_dbck_disable(struct gpio_bank *bank)
{
	if (bank->dbck_enable_mask && bank->dbck_enabled) {
		/*
		 * Disable debounce before cutting its clock. If debounce is
		 * enabled but the clock is not, the GPIO module seems to be
		 * unable to detect events and generate interrupts, at least
		 * on OMAP3.
		 */
		writel_relaxed(0, bank->base + bank->regs->debounce_en);

		clk_disable(bank->dbck);
		bank->dbck_enabled = false;
	}
}

/**
 * omap2_set_gpio_debounce - low level gpio debounce time
 * @bank: the gpio bank we're acting upon
 * @offset: the gpio number on this @bank
 * @debounce: debounce time to use
 *
 * OMAP's debounce time is in 31us steps
 *   <debounce time> = (GPIO_DEBOUNCINGTIME[7:0].DEBOUNCETIME + 1) x 31
 * so we need to convert and round up to the closest unit.
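 * For example, a requested debounce of 100 us is programmed as
 * DEBOUNCETIME = DIV_ROUND_UP(100, 31) - 1 = 3, which gives an effective
 * debounce time of (3 + 1) x 31 = 124 us.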
 *
 * Return: 0 on success, negative error otherwise.
 */
static int omap2_set_gpio_debounce(struct gpio_bank *bank, unsigned offset,
				   unsigned debounce)
{
	u32 val;
	u32 l;
	bool enable = !!debounce;

	if (!bank->dbck_flag)
		return -ENOTSUPP;

	if (enable) {
		debounce = DIV_ROUND_UP(debounce, 31) - 1;
		if ((debounce & OMAP4_GPIO_DEBOUNCINGTIME_MASK) != debounce)
			return -EINVAL;
	}

	l = BIT(offset);

	clk_enable(bank->dbck);
	writel_relaxed(debounce, bank->base + bank->regs->debounce);

	val = omap_gpio_rmw(bank->base + bank->regs->debounce_en, l, enable);
	bank->dbck_enable_mask = val;

	clk_disable(bank->dbck);
	/*
	 * Enable the debounce clock per module.
	 * This call is mandatory because in omap_gpio_request(), when
	 * *_runtime_get_sync() is called, _gpio_dbck_enable() within the
	 * runtime callback fails to turn on dbck because dbck_enable_mask
	 * used within _gpio_dbck_enable() is still not initialized at
	 * that point. Therefore we have to enable dbck here.
	 */
	omap_gpio_dbck_enable(bank);
	if (bank->dbck_enable_mask) {
		bank->context.debounce = debounce;
		bank->context.debounce_en = val;
	}

	return 0;
}

/**
 * omap_clear_gpio_debounce - clear debounce settings for a gpio
 * @bank: the gpio bank we're acting upon
 * @offset: the gpio number on this @bank
 *
 * If a gpio is using debounce, then clear the debounce enable bit and if
 * this is the only gpio in this bank using debounce, then clear the debounce
 * time too. The debounce clock will also be disabled when calling this
 * function if this is the only gpio in the bank using debounce.
 */
static void omap_clear_gpio_debounce(struct gpio_bank *bank, unsigned offset)
{
	u32 gpio_bit = BIT(offset);

	if (!bank->dbck_flag)
		return;

	if (!(bank->dbck_enable_mask & gpio_bit))
		return;

	bank->dbck_enable_mask &= ~gpio_bit;
	bank->context.debounce_en &= ~gpio_bit;
	writel_relaxed(bank->context.debounce_en,
		       bank->base + bank->regs->debounce_en);

	if (!bank->dbck_enable_mask) {
		bank->context.debounce = 0;
		writel_relaxed(bank->context.debounce, bank->base +
			       bank->regs->debounce);
		clk_disable(bank->dbck);
		bank->dbck_enabled = false;
	}
}

/*
 * Off mode wake-up capable GPIOs in bank(s) that are in the wakeup domain.
 * See the TRM section on GPIO "Wake-Up Generation" for the list of GPIOs
 * in the wakeup domain. If bank->non_wakeup_gpios is not configured, assume
 * none are capable of waking up the system from off mode.
 */
static bool omap_gpio_is_off_wakeup_capable(struct gpio_bank *bank, u32 gpio_mask)
{
	u32 no_wake = bank->non_wakeup_gpios;

	if (no_wake)
		return !!(~no_wake & gpio_mask);

	return false;
}

static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio,
					 unsigned trigger)
{
	void __iomem *base = bank->base;
	u32 gpio_bit = BIT(gpio);

	omap_gpio_rmw(base + bank->regs->leveldetect0, gpio_bit,
		      trigger & IRQ_TYPE_LEVEL_LOW);
	omap_gpio_rmw(base + bank->regs->leveldetect1, gpio_bit,
		      trigger & IRQ_TYPE_LEVEL_HIGH);

	/*
	 * We need the edge detection enabled to allow the GPIO block
	 * to be woken from idle state. Set the appropriate edge detection
	 * in addition to the level detection.
	 */
	omap_gpio_rmw(base + bank->regs->risingdetect, gpio_bit,
		      trigger & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_LEVEL_HIGH));
	omap_gpio_rmw(base + bank->regs->fallingdetect, gpio_bit,
		      trigger & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW));

	bank->context.leveldetect0 =
			readl_relaxed(bank->base + bank->regs->leveldetect0);
	bank->context.leveldetect1 =
			readl_relaxed(bank->base + bank->regs->leveldetect1);
	bank->context.risingdetect =
			readl_relaxed(bank->base + bank->regs->risingdetect);
	bank->context.fallingdetect =
			readl_relaxed(bank->base + bank->regs->fallingdetect);

	bank->level_mask = bank->context.leveldetect0 |
			   bank->context.leveldetect1;

	/* This part always needs to be executed for OMAP{34xx, 44xx} */
	if (!bank->regs->irqctrl && !omap_gpio_is_off_wakeup_capable(bank, gpio)) {
		/*
		 * Log the edge gpio and manually trigger the IRQ
		 * after resume if the input level changes,
		 * to avoid IRQs getting lost during PER RET/OFF mode.
		 * This applies to omap2 non-wakeup gpios and all omap3 gpios.
		 */
		if (trigger & IRQ_TYPE_EDGE_BOTH)
			bank->enabled_non_wakeup_gpios |= gpio_bit;
		else
			bank->enabled_non_wakeup_gpios &= ~gpio_bit;
	}
}

/*
 * This only applies to chips that can't do both rising and falling edge
 * detection at once. For all other chips, this function is a noop.
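 * On such chips (OMAP1 banks that have an irqctrl register) the interrupt
 * handler flips the edge polarity for the line after each interrupt, so
 * that both edges are eventually caught.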
 */
static void omap_toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio)
{
	if (IS_ENABLED(CONFIG_ARCH_OMAP1) && bank->regs->irqctrl) {
		void __iomem *reg = bank->base + bank->regs->irqctrl;

		writel_relaxed(readl_relaxed(reg) ^ BIT(gpio), reg);
	}
}

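/*
 * Program the trigger using whichever register layout the bank provides:
 * separate level/edge detect registers, a single irqctrl register, or the
 * split edgectrl1/edgectrl2 registers.
 */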
static int omap_set_gpio_triggering(struct gpio_bank *bank, int gpio,
				    unsigned trigger)
{
	void __iomem *reg = bank->base;
	u32 l = 0;

	if (bank->regs->leveldetect0 && bank->regs->wkup_en) {
		omap_set_gpio_trigger(bank, gpio, trigger);
	} else if (bank->regs->irqctrl) {
		reg += bank->regs->irqctrl;

		l = readl_relaxed(reg);
		if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
			bank->toggle_mask |= BIT(gpio);
		if (trigger & IRQ_TYPE_EDGE_RISING)
			l |= BIT(gpio);
		else if (trigger & IRQ_TYPE_EDGE_FALLING)
			l &= ~(BIT(gpio));
		else
			return -EINVAL;

		writel_relaxed(l, reg);
	} else if (bank->regs->edgectrl1) {
		if (gpio & 0x08)
			reg += bank->regs->edgectrl2;
		else
			reg += bank->regs->edgectrl1;

		gpio &= 0x07;
		l = readl_relaxed(reg);
		l &= ~(3 << (gpio << 1));
		if (trigger & IRQ_TYPE_EDGE_RISING)
			l |= 2 << (gpio << 1);
		if (trigger & IRQ_TYPE_EDGE_FALLING)
			l |= BIT(gpio << 1);
		writel_relaxed(l, reg);
	}
	return 0;
}

static void omap_enable_gpio_module(struct gpio_bank *bank, unsigned offset)
{
	if (bank->regs->pinctrl) {
		void __iomem *reg = bank->base + bank->regs->pinctrl;

		/* Claim the pin for MPU */
		writel_relaxed(readl_relaxed(reg) | (BIT(offset)), reg);
	}

	if (bank->regs->ctrl && !BANK_USED(bank)) {
		void __iomem *reg = bank->base + bank->regs->ctrl;
		u32 ctrl;

		ctrl = readl_relaxed(reg);
		/* Module is enabled, clocks are not gated */
		ctrl &= ~GPIO_MOD_CTRL_BIT;
		writel_relaxed(ctrl, reg);
		bank->context.ctrl = ctrl;
	}
}

static void omap_disable_gpio_module(struct gpio_bank *bank, unsigned offset)
{
	if (bank->regs->ctrl && !BANK_USED(bank)) {
		void __iomem *reg = bank->base + bank->regs->ctrl;
		u32 ctrl;

		ctrl = readl_relaxed(reg);
		/* Module is disabled, clocks are gated */
		ctrl |= GPIO_MOD_CTRL_BIT;
		writel_relaxed(ctrl, reg);
		bank->context.ctrl = ctrl;
	}
}

static int omap_gpio_is_input(struct gpio_bank *bank, unsigned offset)
{
	void __iomem *reg = bank->base + bank->regs->direction;

	return readl_relaxed(reg) & BIT(offset);
}

static void omap_gpio_init_irq(struct gpio_bank *bank, unsigned offset)
{
	if (!LINE_USED(bank->mod_usage, offset)) {
		omap_enable_gpio_module(bank, offset);
		omap_set_gpio_direction(bank, offset, 1);
	}
	bank->irq_usage |= BIT(offset);
}

static int omap_gpio_irq_type(struct irq_data *d, unsigned type)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	int retval;
	unsigned long flags;
	unsigned offset = d->hwirq;

	if (type & ~IRQ_TYPE_SENSE_MASK)
		return -EINVAL;

	if (!bank->regs->leveldetect0 &&
	    (type & (IRQ_TYPE_LEVEL_LOW|IRQ_TYPE_LEVEL_HIGH)))
		return -EINVAL;

	raw_spin_lock_irqsave(&bank->lock, flags);
	retval = omap_set_gpio_triggering(bank, offset, type);
	if (retval) {
		raw_spin_unlock_irqrestore(&bank->lock, flags);
		goto error;
	}
	omap_gpio_init_irq(bank, offset);
	if (!omap_gpio_is_input(bank, offset)) {
		raw_spin_unlock_irqrestore(&bank->lock, flags);
		retval = -EINVAL;
		goto error;
	}
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
		irq_set_handler_locked(d, handle_level_irq);
	else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
		/*
		 * Edge IRQs are already cleared/acked in the irq_handler and
		 * need not be masked; as a result the handle_edge_irq()
		 * logic is excessive here and may cause interrupts to be
		 * lost. So just use handle_simple_irq.
		 */
		irq_set_handler_locked(d, handle_simple_irq);

	return 0;

error:
	return retval;
}

static void omap_clear_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
{
	void __iomem *reg = bank->base;

	reg += bank->regs->irqstatus;
	writel_relaxed(gpio_mask, reg);

	/* Workaround for clearing DSP GPIO interrupts to allow retention */
	if (bank->regs->irqstatus2) {
		reg = bank->base + bank->regs->irqstatus2;
		writel_relaxed(gpio_mask, reg);
	}

	/* Flush posted write for the irq status to avoid spurious interrupts */
	readl_relaxed(reg);
}

static inline void omap_clear_gpio_irqstatus(struct gpio_bank *bank,
					     unsigned offset)
{
	omap_clear_gpio_irqbank(bank, BIT(offset));
}

static u32 omap_get_gpio_irqbank_mask(struct gpio_bank *bank)
{
	void __iomem *reg = bank->base;
	u32 l;
	u32 mask = (BIT(bank->width)) - 1;

	reg += bank->regs->irqenable;
	l = readl_relaxed(reg);
	if (bank->regs->irqenable_inv)
		l = ~l;
	l &= mask;
	return l;
}

static inline void omap_set_gpio_irqenable(struct gpio_bank *bank,
					   unsigned offset, int enable)
{
	void __iomem *reg = bank->base;
	u32 gpio_mask = BIT(offset);

	if (bank->regs->set_irqenable && bank->regs->clr_irqenable) {
		if (enable) {
			reg += bank->regs->set_irqenable;
			bank->context.irqenable1 |= gpio_mask;
		} else {
			reg += bank->regs->clr_irqenable;
			bank->context.irqenable1 &= ~gpio_mask;
		}
		writel_relaxed(gpio_mask, reg);
	} else {
		bank->context.irqenable1 =
			omap_gpio_rmw(reg + bank->regs->irqenable, gpio_mask,
				      enable ^ bank->regs->irqenable_inv);
	}

	/*
	 * Program GPIO wakeup along with IRQ enable to satisfy OMAP4430 TRM
	 * note requiring correlation between the IRQ enable registers and
	 * the wakeup registers. In any case, we want wakeup from idle
	 * enabled for the GPIOs which support this feature.
	 */
	if (bank->regs->wkup_en &&
	    (bank->regs->edgectrl1 || !(bank->non_wakeup_gpios & gpio_mask))) {
		bank->context.wake_en =
			omap_gpio_rmw(bank->base + bank->regs->wkup_en,
				      gpio_mask, enable);
	}
}

/* Use disable_irq_wake() and enable_irq_wake() functions from drivers */
static int omap_gpio_wake_enable(struct irq_data *d, unsigned int enable)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);

	return irq_set_irq_wake(bank->irq, enable);
}

/*
 * We need to unmask the GPIO bank interrupt as soon as possible to
 * avoid missing GPIO interrupts for other lines in the bank.
 * Then we need to mask-read-clear-unmask the triggered GPIO lines
 * in the bank to avoid missing nested interrupts for a GPIO line.
 * If we wait to unmask individual GPIO lines in the bank after the
 * line's interrupt handler has been run, we may miss some nested
 * interrupts.
 */
static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank)
{
	void __iomem *isr_reg = NULL;
	u32 enabled, isr, edge;
	unsigned int bit;
	struct gpio_bank *bank = gpiobank;
	unsigned long wa_lock_flags;
	unsigned long lock_flags;

	isr_reg = bank->base + bank->regs->irqstatus;
	if (WARN_ON(!isr_reg))
		goto exit;

	if (WARN_ONCE(!pm_runtime_active(bank->chip.parent),
		      "gpio irq%i while runtime suspended?\n", irq))
		return IRQ_NONE;

	while (1) {
		raw_spin_lock_irqsave(&bank->lock, lock_flags);

		enabled = omap_get_gpio_irqbank_mask(bank);
		isr = readl_relaxed(isr_reg) & enabled;

		/*
		 * Clear edge sensitive interrupts before calling handler(s)
		 * so subsequent edge transitions are not missed while the
		 * handlers are running.
		 */
		edge = isr & ~bank->level_mask;
		if (edge)
			omap_clear_gpio_irqbank(bank, edge);

		raw_spin_unlock_irqrestore(&bank->lock, lock_flags);

		if (!isr)
			break;

		while (isr) {
			bit = __ffs(isr);
			isr &= ~(BIT(bit));

			raw_spin_lock_irqsave(&bank->lock, lock_flags);
			/*
			 * Some chips can't respond to both rising and falling
			 * at the same time. If this irq was requested with
			 * both flags, we need to flip the ICR data for the IRQ
			 * to respond to the IRQ for the opposite direction.
			 * This will be indicated in the bank toggle_mask.
			 */
			if (bank->toggle_mask & (BIT(bit)))
				omap_toggle_gpio_edge_triggering(bank, bit);

			raw_spin_unlock_irqrestore(&bank->lock, lock_flags);

			raw_spin_lock_irqsave(&bank->wa_lock, wa_lock_flags);

			generic_handle_domain_irq(bank->chip.irq.domain, bit);

			raw_spin_unlock_irqrestore(&bank->wa_lock,
						   wa_lock_flags);
		}
	}
exit:
	return IRQ_HANDLED;
}

static unsigned int omap_gpio_irq_startup(struct irq_data *d)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	unsigned long flags;
	unsigned offset = d->hwirq;

	raw_spin_lock_irqsave(&bank->lock, flags);

	if (!LINE_USED(bank->mod_usage, offset))
		omap_set_gpio_direction(bank, offset, 1);
	omap_enable_gpio_module(bank, offset);
	bank->irq_usage |= BIT(offset);

	raw_spin_unlock_irqrestore(&bank->lock, flags);
	omap_gpio_unmask_irq(d);

	return 0;
}

static void omap_gpio_irq_shutdown(struct irq_data *d)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	unsigned long flags;
	unsigned offset = d->hwirq;

	raw_spin_lock_irqsave(&bank->lock, flags);
	bank->irq_usage &= ~(BIT(offset));
	omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
	omap_clear_gpio_irqstatus(bank, offset);
	omap_set_gpio_irqenable(bank, offset, 0);
	if (!LINE_USED(bank->mod_usage, offset))
		omap_clear_gpio_debounce(bank, offset);
	omap_disable_gpio_module(bank, offset);
	raw_spin_unlock_irqrestore(&bank->lock, flags);
}

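/*
 * Keep the bank powered via runtime PM for the duration of the irqchip
 * operations bracketed by irq_bus_lock() and irq_bus_sync_unlock().
 */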
static void omap_gpio_irq_bus_lock(struct irq_data *data)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(data);

	pm_runtime_get_sync(bank->chip.parent);
}

static void gpio_irq_bus_sync_unlock(struct irq_data *data)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(data);

	pm_runtime_put(bank->chip.parent);
}

static void omap_gpio_mask_irq(struct irq_data *d)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	unsigned offset = d->hwirq;
	unsigned long flags;

	raw_spin_lock_irqsave(&bank->lock, flags);
	omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
	omap_set_gpio_irqenable(bank, offset, 0);
	raw_spin_unlock_irqrestore(&bank->lock, flags);
	gpiochip_disable_irq(&bank->chip, offset);
}

static void omap_gpio_unmask_irq(struct irq_data *d)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	unsigned offset = d->hwirq;
	u32 trigger = irqd_get_trigger_type(d);
	unsigned long flags;

	gpiochip_enable_irq(&bank->chip, offset);
	raw_spin_lock_irqsave(&bank->lock, flags);
	omap_set_gpio_irqenable(bank, offset, 1);

	/*
	 * For level-triggered GPIOs, clearing must be done after the source
	 * is cleared, thus after the handler has run. OMAP4 needs this done
	 * after enabling the interrupt to clear the wakeup status.
	 */
	if (bank->regs->leveldetect0 && bank->regs->wkup_en &&
	    trigger & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
		omap_clear_gpio_irqstatus(bank, offset);

	if (trigger)
		omap_set_gpio_triggering(bank, offset, trigger);

	raw_spin_unlock_irqrestore(&bank->lock, flags);
}

static void omap_gpio_irq_print_chip(struct irq_data *d, struct seq_file *p)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);

	seq_puts(p, dev_name(bank->dev));
}

static const struct irq_chip omap_gpio_irq_chip = {
	.irq_startup = omap_gpio_irq_startup,
	.irq_shutdown = omap_gpio_irq_shutdown,
	.irq_mask = omap_gpio_mask_irq,
	.irq_unmask = omap_gpio_unmask_irq,
	.irq_set_type = omap_gpio_irq_type,
	.irq_set_wake = omap_gpio_wake_enable,
	.irq_bus_lock = omap_gpio_irq_bus_lock,
	.irq_bus_sync_unlock = gpio_irq_bus_sync_unlock,
	.irq_print_chip = omap_gpio_irq_print_chip,
	.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_IMMUTABLE,
	GPIOCHIP_IRQ_RESOURCE_HELPERS,
};

static const struct irq_chip omap_gpio_irq_chip_nowake = {
	.irq_startup = omap_gpio_irq_startup,
	.irq_shutdown = omap_gpio_irq_shutdown,
	.irq_mask = omap_gpio_mask_irq,
	.irq_unmask = omap_gpio_unmask_irq,
	.irq_set_type = omap_gpio_irq_type,
	.irq_bus_lock = omap_gpio_irq_bus_lock,
	.irq_bus_sync_unlock = gpio_irq_bus_sync_unlock,
	.irq_print_chip = omap_gpio_irq_print_chip,
	.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_IMMUTABLE,
	GPIOCHIP_IRQ_RESOURCE_HELPERS,
};

/*---------------------------------------------------------------------*/

static int omap_mpuio_suspend_noirq(struct device *dev)
{
	struct gpio_bank *bank = dev_get_drvdata(dev);
	void __iomem *mask_reg = bank->base +
				 OMAP_MPUIO_GPIO_MASKIT / bank->stride;
	unsigned long flags;

	raw_spin_lock_irqsave(&bank->lock, flags);
	writel_relaxed(0xffff & ~bank->context.wake_en, mask_reg);
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static int omap_mpuio_resume_noirq(struct device *dev)
{
	struct gpio_bank *bank = dev_get_drvdata(dev);
	void __iomem *mask_reg = bank->base +
				 OMAP_MPUIO_GPIO_MASKIT / bank->stride;
	unsigned long flags;

	raw_spin_lock_irqsave(&bank->lock, flags);
	writel_relaxed(bank->context.wake_en, mask_reg);
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static const struct dev_pm_ops omap_mpuio_dev_pm_ops = {
	.suspend_noirq = omap_mpuio_suspend_noirq,
	.resume_noirq = omap_mpuio_resume_noirq,
};

/*
 * Use a platform_driver for this,
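 * mainly so the MPUIO bank gets the noirq suspend/resume callbacks above,
 * which program the GPIO_MASKIT wake mask around system sleep.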
 */
static struct platform_driver omap_mpuio_driver = {
	.driver = {
		.name = "mpuio",
		.pm = &omap_mpuio_dev_pm_ops,
	},
};

static struct platform_device omap_mpuio_device = {
	.name = "mpuio",
	.id = -1,
	.dev = {
		.driver = &omap_mpuio_driver.driver,
	}
	/* could list the /proc/iomem resources */
};

static inline void omap_mpuio_init(struct gpio_bank *bank)
{
	platform_set_drvdata(&omap_mpuio_device, bank);

	if (platform_driver_register(&omap_mpuio_driver) == 0)
		(void) platform_device_register(&omap_mpuio_device);
}

/*---------------------------------------------------------------------*/

static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank = gpiochip_get_data(chip);
	unsigned long flags;

	pm_runtime_get_sync(chip->parent);

	raw_spin_lock_irqsave(&bank->lock, flags);
	omap_enable_gpio_module(bank, offset);
	bank->mod_usage |= BIT(offset);
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank = gpiochip_get_data(chip);
	unsigned long flags;

	raw_spin_lock_irqsave(&bank->lock, flags);
	bank->mod_usage &= ~(BIT(offset));
	if (!LINE_USED(bank->irq_usage, offset)) {
		omap_set_gpio_direction(bank, offset, 1);
		omap_clear_gpio_debounce(bank, offset);
	}
	omap_disable_gpio_module(bank, offset);
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	pm_runtime_put(chip->parent);
}

static int omap_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank = gpiochip_get_data(chip);

	if (readl_relaxed(bank->base + bank->regs->direction) & BIT(offset))
		return GPIO_LINE_DIRECTION_IN;

	return GPIO_LINE_DIRECTION_OUT;
}

static int omap_gpio_input(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = gpiochip_get_data(chip);
	raw_spin_lock_irqsave(&bank->lock, flags);
	omap_set_gpio_direction(bank, offset, 1);
	raw_spin_unlock_irqrestore(&bank->lock, flags);
	return 0;
}

static int omap_gpio_get(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank = gpiochip_get_data(chip);
	void __iomem *reg;

	if (omap_gpio_is_input(bank, offset))
		reg = bank->base + bank->regs->datain;
	else
		reg = bank->base + bank->regs->dataout;

	return (readl_relaxed(reg) & BIT(offset)) != 0;
}

static int omap_gpio_output(struct gpio_chip *chip, unsigned offset, int value)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = gpiochip_get_data(chip);
	raw_spin_lock_irqsave(&bank->lock, flags);
	bank->set_dataout(bank, offset, value);
	omap_set_gpio_direction(bank, offset, 0);
	raw_spin_unlock_irqrestore(&bank->lock, flags);
	return 0;
}

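/*
 * Lines configured as input are read back from the datain register,
 * lines configured as output from the dataout register, so the result
 * covers both directions in a single call.
 */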
static int omap_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask,
				  unsigned long *bits)
{
	struct gpio_bank *bank = gpiochip_get_data(chip);
	void __iomem *base = bank->base;
	u32 direction, m, val = 0;

	direction = readl_relaxed(base + bank->regs->direction);

	m = direction & *mask;
	if (m)
		val |= readl_relaxed(base + bank->regs->datain) & m;

	m = ~direction & *mask;
	if (m)
		val |= readl_relaxed(base + bank->regs->dataout) & m;

	*bits = val;

	return 0;
}

static int omap_gpio_debounce(struct gpio_chip *chip, unsigned offset,
			      unsigned debounce)
{
	struct gpio_bank *bank;
	unsigned long flags;
	int ret;

	bank = gpiochip_get_data(chip);

	raw_spin_lock_irqsave(&bank->lock, flags);
	ret = omap2_set_gpio_debounce(bank, offset, debounce);
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	if (ret)
		dev_info(chip->parent,
			 "Could not set line %u debounce to %u microseconds (%d)\n",
			 offset, debounce, ret);

	return ret;
}

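/*
 * Bias (pull) settings are passed through to the pinctrl backend via
 * gpiochip_generic_config(); only debounce is handled by the GPIO
 * module itself.
 */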
static int omap_gpio_set_config(struct gpio_chip *chip, unsigned offset,
				unsigned long config)
{
	u32 debounce;
	int ret = -ENOTSUPP;

	switch (pinconf_to_config_param(config)) {
	case PIN_CONFIG_BIAS_DISABLE:
	case PIN_CONFIG_BIAS_PULL_UP:
	case PIN_CONFIG_BIAS_PULL_DOWN:
		ret = gpiochip_generic_config(chip, offset, config);
		break;
	case PIN_CONFIG_INPUT_DEBOUNCE:
		debounce = pinconf_to_config_argument(config);
		ret = omap_gpio_debounce(chip, offset, debounce);
		break;
	default:
		break;
	}

	return ret;
}

static void omap_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = gpiochip_get_data(chip);
	raw_spin_lock_irqsave(&bank->lock, flags);
	bank->set_dataout(bank, offset, value);
	raw_spin_unlock_irqrestore(&bank->lock, flags);
}

static void omap_gpio_set_multiple(struct gpio_chip *chip, unsigned long *mask,
				   unsigned long *bits)
{
	struct gpio_bank *bank = gpiochip_get_data(chip);
	void __iomem *reg = bank->base + bank->regs->dataout;
	unsigned long flags;
	u32 l;

	raw_spin_lock_irqsave(&bank->lock, flags);
	l = (readl_relaxed(reg) & ~*mask) | (*bits & *mask);
	writel_relaxed(l, reg);
	bank->context.dataout = l;
	raw_spin_unlock_irqrestore(&bank->lock, flags);
}

/*---------------------------------------------------------------------*/

static void omap_gpio_show_rev(struct gpio_bank *bank)
{
	static bool called;
	u32 rev;

	if (called || bank->regs->revision == USHRT_MAX)
		return;

	rev = readw_relaxed(bank->base + bank->regs->revision);
	pr_info("OMAP GPIO hardware version %d.%d\n",
		(rev >> 4) & 0x0f, rev & 0x0f);

	called = true;
}

static void omap_gpio_mod_init(struct gpio_bank *bank)
{
	void __iomem *base = bank->base;
	u32 l = 0xffffffff;

	if (bank->width == 16)
		l = 0xffff;

	if (bank->is_mpuio) {
		writel_relaxed(l, bank->base + bank->regs->irqenable);
		return;
	}

	omap_gpio_rmw(base + bank->regs->irqenable, l,
		      bank->regs->irqenable_inv);
	omap_gpio_rmw(base + bank->regs->irqstatus, l,
		      !bank->regs->irqenable_inv);
	if (bank->regs->debounce_en)
		writel_relaxed(0, base + bank->regs->debounce_en);

	/* Save OE default value (0xffffffff) in the context */
	bank->context.oe = readl_relaxed(bank->base + bank->regs->direction);
	/* Initialize interface clk ungated, module enabled */
	if (bank->regs->ctrl)
		writel_relaxed(0, base + bank->regs->ctrl);
}

static int omap_gpio_chip_init(struct gpio_bank *bank, struct device *pm_dev)
{
	struct gpio_irq_chip *irq;
	static int gpio;
	const char *label;
	int ret;

	/*
	 * REVISIT eventually switch from OMAP-specific gpio structs
	 * over to the generic ones
	 */
	bank->chip.request = omap_gpio_request;
	bank->chip.free = omap_gpio_free;
	bank->chip.get_direction = omap_gpio_get_direction;
	bank->chip.direction_input = omap_gpio_input;
	bank->chip.get = omap_gpio_get;
	bank->chip.get_multiple = omap_gpio_get_multiple;
	bank->chip.direction_output = omap_gpio_output;
	bank->chip.set_config = omap_gpio_set_config;
	bank->chip.set = omap_gpio_set;
	bank->chip.set_multiple = omap_gpio_set_multiple;
	if (bank->is_mpuio) {
		bank->chip.label = "mpuio";
		if (bank->regs->wkup_en)
			bank->chip.parent = &omap_mpuio_device.dev;
	} else {
		label = devm_kasprintf(bank->chip.parent, GFP_KERNEL, "gpio-%d-%d",
				       gpio, gpio + bank->width - 1);
		if (!label)
			return -ENOMEM;
		bank->chip.label = label;
	}
	bank->chip.base = -1;
	bank->chip.ngpio = bank->width;

	irq = &bank->chip.irq;
	/* MPUIO is a bit different, reading IRQ status clears it */
	if (bank->is_mpuio && !bank->regs->wkup_en)
		gpio_irq_chip_set_chip(irq, &omap_gpio_irq_chip_nowake);
	else
		gpio_irq_chip_set_chip(irq, &omap_gpio_irq_chip);
	irq->handler = handle_bad_irq;
	irq->default_type = IRQ_TYPE_NONE;
	irq->num_parents = 1;
	irq->parents = &bank->irq;

	ret = gpiochip_add_data(&bank->chip, bank);
	if (ret)
		return dev_err_probe(bank->chip.parent, ret, "Could not register gpio chip\n");

	irq_domain_set_pm_device(bank->chip.irq.domain, pm_dev);
	ret = devm_request_irq(bank->chip.parent, bank->irq,
			       omap_gpio_irq_handler,
			       0, dev_name(bank->chip.parent), bank);
	if (ret)
		gpiochip_remove(&bank->chip);

	if (!bank->is_mpuio)
		gpio += bank->width;

	return ret;
}

static void omap_gpio_init_context(struct gpio_bank *p)
{
	const struct omap_gpio_reg_offs *regs = p->regs;
	void __iomem *base = p->base;

	p->context.sysconfig = readl_relaxed(base + regs->sysconfig);
	p->context.ctrl = readl_relaxed(base + regs->ctrl);
	p->context.oe = readl_relaxed(base + regs->direction);
	p->context.wake_en = readl_relaxed(base + regs->wkup_en);
	p->context.leveldetect0 = readl_relaxed(base + regs->leveldetect0);
	p->context.leveldetect1 = readl_relaxed(base + regs->leveldetect1);
	p->context.risingdetect = readl_relaxed(base + regs->risingdetect);
	p->context.fallingdetect = readl_relaxed(base + regs->fallingdetect);
	p->context.irqenable1 = readl_relaxed(base + regs->irqenable);
	p->context.irqenable2 = readl_relaxed(base + regs->irqenable2);
	p->context.dataout = readl_relaxed(base + regs->dataout);

	p->context_valid = true;
}

static void omap_gpio_restore_context(struct gpio_bank *bank)
{
	const struct omap_gpio_reg_offs *regs = bank->regs;
	void __iomem *base = bank->base;

	writel_relaxed(bank->context.sysconfig, base + regs->sysconfig);
	writel_relaxed(bank->context.wake_en, base + regs->wkup_en);
	writel_relaxed(bank->context.ctrl, base + regs->ctrl);
	writel_relaxed(bank->context.leveldetect0, base + regs->leveldetect0);
	writel_relaxed(bank->context.leveldetect1, base + regs->leveldetect1);
	writel_relaxed(bank->context.risingdetect, base + regs->risingdetect);
	writel_relaxed(bank->context.fallingdetect, base + regs->fallingdetect);
	writel_relaxed(bank->context.dataout, base + regs->dataout);
	writel_relaxed(bank->context.oe, base + regs->direction);

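	/* Restore the debounce registers only if debouncing is in use */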
	if (bank->dbck_enable_mask) {
		writel_relaxed(bank->context.debounce, base + regs->debounce);
		writel_relaxed(bank->context.debounce_en,
			       base + regs->debounce_en);
	}

	writel_relaxed(bank->context.irqenable1, base + regs->irqenable);
	writel_relaxed(bank->context.irqenable2, base + regs->irqenable2);
}

static void omap_gpio_idle(struct gpio_bank *bank, bool may_lose_context)
{
	struct device *dev = bank->chip.parent;
	void __iomem *base = bank->base;
	u32 mask, nowake;

	bank->saved_datain = readl_relaxed(base + bank->regs->datain);

	/* Save sysconfig, its runtime value can differ from the init value */
	if (bank->loses_context)
		bank->context.sysconfig = readl_relaxed(base + bank->regs->sysconfig);

	if (!bank->enabled_non_wakeup_gpios)
		goto update_gpio_context_count;

	/* Check for pending EDGE_FALLING, ignore EDGE_BOTH */
	mask = bank->enabled_non_wakeup_gpios & bank->context.fallingdetect;
	mask &= ~bank->context.risingdetect;
	bank->saved_datain |= mask;

	/* Check for pending EDGE_RISING, ignore EDGE_BOTH */
	mask = bank->enabled_non_wakeup_gpios & bank->context.risingdetect;
	mask &= ~bank->context.fallingdetect;
	bank->saved_datain &= ~mask;

	if (!may_lose_context)
		goto update_gpio_context_count;

	/*
	 * If going to OFF, remove triggering for all wkup domain
	 * non-wakeup GPIOs. Otherwise spurious IRQs will be
	 * generated. See OMAP2420 Errata item 1.101.
	 */
	if (!bank->loses_context && bank->enabled_non_wakeup_gpios) {
		nowake = bank->enabled_non_wakeup_gpios;
		omap_gpio_rmw(base + bank->regs->fallingdetect, nowake, ~nowake);
		omap_gpio_rmw(base + bank->regs->risingdetect, nowake, ~nowake);
	}

update_gpio_context_count:
	if (bank->get_context_loss_count)
		bank->context_loss_count =
				bank->get_context_loss_count(dev);

	omap_gpio_dbck_disable(bank);
}

static void omap_gpio_unidle(struct gpio_bank *bank)
{
	struct device *dev = bank->chip.parent;
	u32 l = 0, gen, gen0, gen1;
	int c;

	/*
	 * On the first resume during the probe, the context has not
	 * been initialised and so initialise it now. Also initialise
	 * the context loss count.
	 */
	if (bank->loses_context && !bank->context_valid) {
		omap_gpio_init_context(bank);

		if (bank->get_context_loss_count)
			bank->context_loss_count =
				bank->get_context_loss_count(dev);
	}

	omap_gpio_dbck_enable(bank);

	if (bank->loses_context) {
		if (!bank->get_context_loss_count) {
			omap_gpio_restore_context(bank);
		} else {
			c = bank->get_context_loss_count(dev);
			if (c != bank->context_loss_count) {
				omap_gpio_restore_context(bank);
			} else {
				return;
			}
		}
	} else {
		/* Restore changes done for OMAP2420 errata 1.101 */
		writel_relaxed(bank->context.fallingdetect,
			       bank->base + bank->regs->fallingdetect);
		writel_relaxed(bank->context.risingdetect,
			       bank->base + bank->regs->risingdetect);
	}

	l = readl_relaxed(bank->base + bank->regs->datain);

	/*
	 * Check if any of the non-wakeup interrupt GPIOs have changed
	 * state. If so, generate an IRQ by software. This is
	 * horribly racy, but it's the best we can do to work around
	 * this silicon bug.
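	 * This is done by briefly enabling level detection for the affected
	 * lines below and then restoring the previous leveldetect settings.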
	 */
	l ^= bank->saved_datain;
	l &= bank->enabled_non_wakeup_gpios;

	/*
	 * No need to generate IRQs for the rising edge for gpio IRQs
	 * configured with falling edge only; and vice versa.
	 */
	gen0 = l & bank->context.fallingdetect;
	gen0 &= bank->saved_datain;

	gen1 = l & bank->context.risingdetect;
	gen1 &= ~(bank->saved_datain);

	/* FIXME: Consider GPIO IRQs with level detections properly! */
	gen = l & (~(bank->context.fallingdetect) &
		   ~(bank->context.risingdetect));
	/* Consider all GPIO IRQs needed to be updated */
	gen |= gen0 | gen1;

	if (gen) {
		u32 old0, old1;

		old0 = readl_relaxed(bank->base + bank->regs->leveldetect0);
		old1 = readl_relaxed(bank->base + bank->regs->leveldetect1);

		if (!bank->regs->irqstatus_raw0) {
			writel_relaxed(old0 | gen, bank->base +
				       bank->regs->leveldetect0);
			writel_relaxed(old1 | gen, bank->base +
				       bank->regs->leveldetect1);
		}

		if (bank->regs->irqstatus_raw0) {
			writel_relaxed(old0 | l, bank->base +
				       bank->regs->leveldetect0);
			writel_relaxed(old1 | l, bank->base +
				       bank->regs->leveldetect1);
		}
		writel_relaxed(old0, bank->base + bank->regs->leveldetect0);
		writel_relaxed(old1, bank->base + bank->regs->leveldetect1);
	}
}

static int gpio_omap_cpu_notifier(struct notifier_block *nb,
				  unsigned long cmd, void *v)
{
	struct gpio_bank *bank;
	unsigned long flags;
	int ret = NOTIFY_OK;
	u32 isr, mask;

	bank = container_of(nb, struct gpio_bank, nb);

	raw_spin_lock_irqsave(&bank->lock, flags);
	if (bank->is_suspended)
		goto out_unlock;

	switch (cmd) {
	case CPU_CLUSTER_PM_ENTER:
		mask = omap_get_gpio_irqbank_mask(bank);
		isr = readl_relaxed(bank->base + bank->regs->irqstatus) & mask;
		if (isr) {
			ret = NOTIFY_BAD;
			break;
		}
		omap_gpio_idle(bank, true);
		break;
	case CPU_CLUSTER_PM_ENTER_FAILED:
	case CPU_CLUSTER_PM_EXIT:
		omap_gpio_unidle(bank);
		break;
	}

out_unlock:
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	return ret;
}

static const struct omap_gpio_reg_offs omap2_gpio_regs = {
	.revision = OMAP24XX_GPIO_REVISION,
	.sysconfig = OMAP24XX_GPIO_SYSCONFIG,
	.direction = OMAP24XX_GPIO_OE,
	.datain = OMAP24XX_GPIO_DATAIN,
	.dataout = OMAP24XX_GPIO_DATAOUT,
	.set_dataout = OMAP24XX_GPIO_SETDATAOUT,
	.clr_dataout = OMAP24XX_GPIO_CLEARDATAOUT,
	.irqstatus = OMAP24XX_GPIO_IRQSTATUS1,
	.irqstatus2 = OMAP24XX_GPIO_IRQSTATUS2,
	.irqenable = OMAP24XX_GPIO_IRQENABLE1,
	.irqenable2 = OMAP24XX_GPIO_IRQENABLE2,
	.set_irqenable = OMAP24XX_GPIO_SETIRQENABLE1,
	.clr_irqenable = OMAP24XX_GPIO_CLEARIRQENABLE1,
	.debounce = OMAP24XX_GPIO_DEBOUNCE_VAL,
	.debounce_en = OMAP24XX_GPIO_DEBOUNCE_EN,
	.ctrl = OMAP24XX_GPIO_CTRL,
	.wkup_en = OMAP24XX_GPIO_WAKE_EN,
	.leveldetect0 = OMAP24XX_GPIO_LEVELDETECT0,
	.leveldetect1 = OMAP24XX_GPIO_LEVELDETECT1,
	.risingdetect = OMAP24XX_GPIO_RISINGDETECT,
	.fallingdetect = OMAP24XX_GPIO_FALLINGDETECT,
};

static const struct omap_gpio_reg_offs omap4_gpio_regs = {
	.revision = OMAP4_GPIO_REVISION,
	.sysconfig = OMAP4_GPIO_SYSCONFIG,
	.direction = OMAP4_GPIO_OE,
	.datain = OMAP4_GPIO_DATAIN,
	.dataout = OMAP4_GPIO_DATAOUT,
	.set_dataout = OMAP4_GPIO_SETDATAOUT,
	.clr_dataout = OMAP4_GPIO_CLEARDATAOUT,
	.irqstatus = OMAP4_GPIO_IRQSTATUS0,
	.irqstatus2 = OMAP4_GPIO_IRQSTATUS1,
	.irqstatus_raw0 = OMAP4_GPIO_IRQSTATUSRAW0,
	.irqstatus_raw1 = OMAP4_GPIO_IRQSTATUSRAW1,
	.irqenable = OMAP4_GPIO_IRQSTATUSSET0,
	.irqenable2 = OMAP4_GPIO_IRQSTATUSSET1,
	.set_irqenable = OMAP4_GPIO_IRQSTATUSSET0,
	.clr_irqenable = OMAP4_GPIO_IRQSTATUSCLR0,
	.debounce = OMAP4_GPIO_DEBOUNCINGTIME,
	.debounce_en = OMAP4_GPIO_DEBOUNCENABLE,
	.ctrl = OMAP4_GPIO_CTRL,
	.wkup_en = OMAP4_GPIO_IRQWAKEN0,
	.leveldetect0 = OMAP4_GPIO_LEVELDETECT0,
	.leveldetect1 = OMAP4_GPIO_LEVELDETECT1,
	.risingdetect = OMAP4_GPIO_RISINGDETECT,
	.fallingdetect = OMAP4_GPIO_FALLINGDETECT,
};

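/*
 * The OMAP2 variant has no separate debounce functional clock;
 * the OMAP3 and OMAP4 variants do (dbck_flag).
 */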
static const struct omap_gpio_platform_data omap2_pdata = {
	.regs = &omap2_gpio_regs,
	.bank_width = 32,
	.dbck_flag = false,
};

static const struct omap_gpio_platform_data omap3_pdata = {
	.regs = &omap2_gpio_regs,
	.bank_width = 32,
	.dbck_flag = true,
};

static const struct omap_gpio_platform_data omap4_pdata = {
	.regs = &omap4_gpio_regs,
	.bank_width = 32,
	.dbck_flag = true,
};

static const struct of_device_id omap_gpio_match[] = {
	{
		.compatible = "ti,omap4-gpio",
		.data = &omap4_pdata,
	},
	{
		.compatible = "ti,omap3-gpio",
		.data = &omap3_pdata,
	},
	{
		.compatible = "ti,omap2-gpio",
		.data = &omap2_pdata,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, omap_gpio_match);

static int omap_gpio_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	const struct omap_gpio_platform_data *pdata;
	struct gpio_bank *bank;
	int ret;

	pdata = device_get_match_data(dev);

	pdata = pdata ?: dev_get_platdata(dev);
	if (!pdata)
		return -EINVAL;

	bank = devm_kzalloc(dev, sizeof(*bank), GFP_KERNEL);
	if (!bank)
		return -ENOMEM;

	bank->dev = dev;

	bank->irq = platform_get_irq(pdev, 0);
	if (bank->irq < 0)
		return bank->irq;

	bank->chip.parent = dev;
	bank->chip.owner = THIS_MODULE;
	bank->dbck_flag = pdata->dbck_flag;
	bank->stride = pdata->bank_stride;
	bank->width = pdata->bank_width;
	bank->is_mpuio = pdata->is_mpuio;
	bank->non_wakeup_gpios = pdata->non_wakeup_gpios;
	bank->regs = pdata->regs;

	if (node) {
		if (!of_property_read_bool(node, "ti,gpio-always-on"))
			bank->loses_context = true;
	} else {
		bank->loses_context = pdata->loses_context;

		if (bank->loses_context)
			bank->get_context_loss_count =
				pdata->get_context_loss_count;
	}

	if (bank->regs->set_dataout && bank->regs->clr_dataout)
		bank->set_dataout = omap_set_gpio_dataout_reg;
	else
		bank->set_dataout = omap_set_gpio_dataout_mask;

	raw_spin_lock_init(&bank->lock);
	raw_spin_lock_init(&bank->wa_lock);

	/* Static mapping, never released */
	bank->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(bank->base)) {
		return PTR_ERR(bank->base);
	}

	if (bank->dbck_flag) {
		bank->dbck = devm_clk_get(dev, "dbclk");
		if (IS_ERR(bank->dbck)) {
			dev_err(dev,
				"Could not get gpio dbck. Disable debounce\n");
			bank->dbck_flag = false;
		} else {
			clk_prepare(bank->dbck);
		}
	}

	platform_set_drvdata(pdev, bank);

	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);

	if (bank->is_mpuio)
		omap_mpuio_init(bank);

	omap_gpio_mod_init(bank);

	ret = omap_gpio_chip_init(bank, dev);
	if (ret) {
		pm_runtime_put_sync(dev);
		pm_runtime_disable(dev);
		if (bank->dbck_flag)
			clk_unprepare(bank->dbck);
		return ret;
	}

	omap_gpio_show_rev(bank);

	bank->nb.notifier_call = gpio_omap_cpu_notifier;
	cpu_pm_register_notifier(&bank->nb);

	pm_runtime_put(dev);

	return 0;
}

static void omap_gpio_remove(struct platform_device *pdev)
{
	struct gpio_bank *bank = platform_get_drvdata(pdev);

	cpu_pm_unregister_notifier(&bank->nb);
	gpiochip_remove(&bank->chip);
	pm_runtime_disable(&pdev->dev);
	if (bank->dbck_flag)
		clk_unprepare(bank->dbck);
}

static int __maybe_unused omap_gpio_runtime_suspend(struct device *dev)
{
	struct gpio_bank *bank = dev_get_drvdata(dev);
	unsigned long flags;

	raw_spin_lock_irqsave(&bank->lock, flags);
	omap_gpio_idle(bank, true);
	bank->is_suspended = true;
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static int __maybe_unused omap_gpio_runtime_resume(struct device *dev)
{
	struct gpio_bank *bank = dev_get_drvdata(dev);
	unsigned long flags;

	raw_spin_lock_irqsave(&bank->lock, flags);
	omap_gpio_unidle(bank);
	bank->is_suspended = false;
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static int __maybe_unused omap_gpio_suspend(struct device *dev)
{
	struct gpio_bank *bank = dev_get_drvdata(dev);

	if (bank->is_suspended)
		return 0;

	bank->needs_resume = 1;

	return omap_gpio_runtime_suspend(dev);
}

static int __maybe_unused omap_gpio_resume(struct device *dev)
{
	struct gpio_bank *bank = dev_get_drvdata(dev);

	if (!bank->needs_resume)
		return 0;

	bank->needs_resume = 0;

	return omap_gpio_runtime_resume(dev);
}

static const struct dev_pm_ops gpio_pm_ops = {
	SET_RUNTIME_PM_OPS(omap_gpio_runtime_suspend, omap_gpio_runtime_resume,
			   NULL)
	SET_LATE_SYSTEM_SLEEP_PM_OPS(omap_gpio_suspend, omap_gpio_resume)
};

static struct platform_driver omap_gpio_driver = {
	.probe = omap_gpio_probe,
	.remove_new = omap_gpio_remove,
	.driver = {
		.name = "omap_gpio",
		.pm = &gpio_pm_ops,
		.of_match_table = omap_gpio_match,
	},
};

/*
 * GPIO driver registration needs to be done before machine_init functions
 * access the GPIO APIs. Hence omap_gpio_drv_reg() is a postcore_initcall.
 */
static int __init omap_gpio_drv_reg(void)
{
	return platform_driver_register(&omap_gpio_driver);
}
postcore_initcall(omap_gpio_drv_reg);

static void __exit omap_gpio_exit(void)
{
	platform_driver_unregister(&omap_gpio_driver);
}
module_exit(omap_gpio_exit);

MODULE_DESCRIPTION("omap gpio driver");
MODULE_ALIAS("platform:gpio-omap");
MODULE_LICENSE("GPL v2");