// SPDX-License-Identifier: GPL-2.0-only
/*
 * Support functions for OMAP GPIO
 *
 * Copyright (C) 2003-2005 Nokia Corporation
 * Written by Juha Yrjölä <juha.yrjola@nokia.com>
 *
 * Copyright (C) 2009 Texas Instruments
 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/syscore_ops.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/cpu_pm.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pm.h>
#include <linux/of.h>
#include <linux/gpio/driver.h>
#include <linux/bitops.h>
#include <linux/platform_data/gpio-omap.h>

#define OMAP4_GPIO_DEBOUNCINGTIME_MASK 0xFF

struct gpio_regs {
	u32 sysconfig;
	u32 irqenable1;
	u32 irqenable2;
	u32 wake_en;
	u32 ctrl;
	u32 oe;
	u32 leveldetect0;
	u32 leveldetect1;
	u32 risingdetect;
	u32 fallingdetect;
	u32 dataout;
	u32 debounce;
	u32 debounce_en;
};

struct gpio_bank {
	void __iomem *base;
	const struct omap_gpio_reg_offs *regs;
	struct device *dev;

	int irq;
	u32 non_wakeup_gpios;
	u32 enabled_non_wakeup_gpios;
	struct gpio_regs context;
	u32 saved_datain;
	u32 level_mask;
	u32 toggle_mask;
	raw_spinlock_t lock;
	raw_spinlock_t wa_lock;
	struct gpio_chip chip;
	struct clk *dbck;
	struct notifier_block nb;
	unsigned int is_suspended:1;
	unsigned int needs_resume:1;
	u32 mod_usage;
	u32 irq_usage;
	u32 dbck_enable_mask;
	bool dbck_enabled;
	bool is_mpuio;
	bool dbck_flag;
	bool loses_context;
	bool context_valid;
	int stride;
	u32 width;
	int context_loss_count;

	void (*set_dataout)(struct gpio_bank *bank, unsigned gpio, int enable);
	int (*get_context_loss_count)(struct device *dev);
};

#define GPIO_MOD_CTRL_BIT	BIT(0)

#define BANK_USED(bank) (bank->mod_usage || bank->irq_usage)
#define LINE_USED(line, offset) (line & (BIT(offset)))

static void omap_gpio_unmask_irq(struct irq_data *d);

static inline struct gpio_bank *omap_irq_data_get_bank(struct irq_data *d)
{
	struct gpio_chip *chip = irq_data_get_irq_chip_data(d);

	return gpiochip_get_data(chip);
}

static inline u32 omap_gpio_rmw(void __iomem *reg, u32 mask, bool set)
{
	u32 val = readl_relaxed(reg);

	if (set)
		val |= mask;
	else
		val &= ~mask;

	writel_relaxed(val, reg);

	return val;
}

static void omap_set_gpio_direction(struct gpio_bank *bank, int gpio,
				    int is_input)
{
	bank->context.oe = omap_gpio_rmw(bank->base + bank->regs->direction,
					 BIT(gpio), is_input);
}

/* set data out value using dedicated set/clear register */
static void omap_set_gpio_dataout_reg(struct gpio_bank *bank, unsigned offset,
				      int enable)
{
	void __iomem *reg = bank->base;
	u32 l = BIT(offset);

	if (enable) {
		reg += bank->regs->set_dataout;
		bank->context.dataout |= l;
	} else {
		reg += bank->regs->clr_dataout;
		bank->context.dataout &= ~l;
	}

	writel_relaxed(l, reg);
}

/* set data out value using mask register */
static void omap_set_gpio_dataout_mask(struct gpio_bank *bank, unsigned offset,
				       int enable)
{
	bank->context.dataout = omap_gpio_rmw(bank->base + bank->regs->dataout,
					      BIT(offset), enable);
}

static inline void omap_gpio_dbck_enable(struct gpio_bank *bank)
{
	if (bank->dbck_enable_mask && !bank->dbck_enabled) {
		clk_enable(bank->dbck);
		bank->dbck_enabled = true;

		writel_relaxed(bank->dbck_enable_mask,
			       bank->base + bank->regs->debounce_en);
	}
}

static inline void omap_gpio_dbck_disable(struct gpio_bank *bank)
{
	if (bank->dbck_enable_mask && bank->dbck_enabled) {
		/*
		 * Disable debounce before cutting its clock. If debounce is
		 * enabled but the clock is not, the GPIO module seems to be
		 * unable to detect events and generate interrupts, at least
		 * on OMAP3.
		 */
		writel_relaxed(0, bank->base + bank->regs->debounce_en);

		clk_disable(bank->dbck);
		bank->dbck_enabled = false;
	}
}

/**
 * omap2_set_gpio_debounce - low level gpio debounce time
 * @bank: the gpio bank we're acting upon
 * @offset: the gpio number on this @bank
 * @debounce: debounce time to use
 *
 * OMAP's debounce time is in 31us steps
 *   <debounce time> = (GPIO_DEBOUNCINGTIME[7:0].DEBOUNCETIME + 1) x 31
 * so we need to convert and round up to the closest unit.
 *
 * Return: 0 on success, negative error otherwise.
 */
static int omap2_set_gpio_debounce(struct gpio_bank *bank, unsigned offset,
				   unsigned debounce)
{
	u32 val;
	u32 l;
	bool enable = !!debounce;

	if (!bank->dbck_flag)
		return -ENOTSUPP;

	if (enable) {
		debounce = DIV_ROUND_UP(debounce, 31) - 1;
		if ((debounce & OMAP4_GPIO_DEBOUNCINGTIME_MASK) != debounce)
			return -EINVAL;
	}

	l = BIT(offset);

	clk_enable(bank->dbck);
	writel_relaxed(debounce, bank->base + bank->regs->debounce);

	val = omap_gpio_rmw(bank->base + bank->regs->debounce_en, l, enable);
	bank->dbck_enable_mask = val;

	clk_disable(bank->dbck);
	/*
	 * Enable debounce clock per module.
	 * This call is mandatory because in omap_gpio_request() when
	 * *_runtime_get_sync() is called, _gpio_dbck_enable() within
	 * the runtime callback fails to turn on dbck because dbck_enable_mask
	 * used within _gpio_dbck_enable() is still not initialized at
	 * that point. Therefore we have to enable dbck here.
	 */
	omap_gpio_dbck_enable(bank);
	if (bank->dbck_enable_mask) {
		bank->context.debounce = debounce;
		bank->context.debounce_en = val;
	}

	return 0;
}
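
/*
 * Worked example for the conversion above (editor's illustration, not part
 * of the original driver): a request for 100 us gives
 * DIV_ROUND_UP(100, 31) - 1 = 3, and the hardware then debounces for
 * (3 + 1) x 31 = 124 us. The largest request that fits in the 8-bit
 * DEBOUNCETIME field is (0xFF + 1) x 31 = 7936 us; anything larger fails
 * the OMAP4_GPIO_DEBOUNCINGTIME_MASK check and returns -EINVAL.
 */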

/**
 * omap_clear_gpio_debounce - clear debounce settings for a gpio
 * @bank: the gpio bank we're acting upon
 * @offset: the gpio number on this @bank
 *
 * If a gpio is using debounce, then clear the debounce enable bit and if
 * this is the only gpio in this bank using debounce, then clear the debounce
 * time too. The debounce clock will also be disabled when calling this
 * function if this is the only gpio in the bank using debounce.
 */
static void omap_clear_gpio_debounce(struct gpio_bank *bank, unsigned offset)
{
	u32 gpio_bit = BIT(offset);

	if (!bank->dbck_flag)
		return;

	if (!(bank->dbck_enable_mask & gpio_bit))
		return;

	bank->dbck_enable_mask &= ~gpio_bit;
	bank->context.debounce_en &= ~gpio_bit;
	writel_relaxed(bank->context.debounce_en,
		       bank->base + bank->regs->debounce_en);

	if (!bank->dbck_enable_mask) {
		bank->context.debounce = 0;
		writel_relaxed(bank->context.debounce, bank->base +
			       bank->regs->debounce);
		clk_disable(bank->dbck);
		bank->dbck_enabled = false;
	}
}

/*
 * Off mode wake-up capable GPIOs in bank(s) that are in the wakeup domain.
 * See the TRM section on GPIO "Wake-Up Generation" for the list of GPIOs
 * in the wakeup domain. If bank->non_wakeup_gpios is not configured, assume
 * none are capable of waking up the system from off mode.
 */
static bool omap_gpio_is_off_wakeup_capable(struct gpio_bank *bank, u32 gpio_mask)
{
	u32 no_wake = bank->non_wakeup_gpios;

	if (no_wake)
		return !!(~no_wake & gpio_mask);

	return false;
}

static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio,
					 unsigned trigger)
{
	void __iomem *base = bank->base;
	u32 gpio_bit = BIT(gpio);

	omap_gpio_rmw(base + bank->regs->leveldetect0, gpio_bit,
		      trigger & IRQ_TYPE_LEVEL_LOW);
	omap_gpio_rmw(base + bank->regs->leveldetect1, gpio_bit,
		      trigger & IRQ_TYPE_LEVEL_HIGH);

	/*
	 * We need the edge detection enabled to allow the GPIO block
	 * to be woken from idle state. Set the appropriate edge detection
	 * in addition to the level detection.
	 */
	omap_gpio_rmw(base + bank->regs->risingdetect, gpio_bit,
		      trigger & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_LEVEL_HIGH));
	omap_gpio_rmw(base + bank->regs->fallingdetect, gpio_bit,
		      trigger & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW));

	bank->context.leveldetect0 =
		readl_relaxed(bank->base + bank->regs->leveldetect0);
	bank->context.leveldetect1 =
		readl_relaxed(bank->base + bank->regs->leveldetect1);
	bank->context.risingdetect =
		readl_relaxed(bank->base + bank->regs->risingdetect);
	bank->context.fallingdetect =
		readl_relaxed(bank->base + bank->regs->fallingdetect);

	bank->level_mask = bank->context.leveldetect0 |
			   bank->context.leveldetect1;

	/* This part needs to be executed always for OMAP{34xx, 44xx} */
	if (!bank->regs->irqctrl && !omap_gpio_is_off_wakeup_capable(bank, gpio)) {
		/*
		 * Log the edge gpio and manually trigger the IRQ
		 * after resume if the input level changes,
		 * to avoid losing the IRQ during PER RET/OFF mode.
		 * Applies to omap2 non-wakeup gpios and all omap3 gpios.
		 */
		if (trigger & IRQ_TYPE_EDGE_BOTH)
			bank->enabled_non_wakeup_gpios |= gpio_bit;
		else
			bank->enabled_non_wakeup_gpios &= ~gpio_bit;
	}
}
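
/*
 * Editor's note (illustrative summary, not from the original source): for a
 * line configured as IRQ_TYPE_LEVEL_LOW, omap_set_gpio_trigger() above sets
 * the bit in both leveldetect0 and fallingdetect so the block can still wake
 * from idle on the falling edge; IRQ_TYPE_LEVEL_HIGH likewise pairs
 * leveldetect1 with risingdetect, while plain edge types only program the
 * matching risingdetect/fallingdetect register.
 */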

/*
 * This only applies to chips that can't do both rising and falling edge
 * detection at once. For all other chips, this function is a noop.
 */
static void omap_toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio)
{
	if (IS_ENABLED(CONFIG_ARCH_OMAP1) && bank->regs->irqctrl) {
		void __iomem *reg = bank->base + bank->regs->irqctrl;

		writel_relaxed(readl_relaxed(reg) ^ BIT(gpio), reg);
	}
}

static int omap_set_gpio_triggering(struct gpio_bank *bank, int gpio,
				    unsigned trigger)
{
	void __iomem *reg = bank->base;
	u32 l = 0;

	if (bank->regs->leveldetect0 && bank->regs->wkup_en) {
		omap_set_gpio_trigger(bank, gpio, trigger);
	} else if (bank->regs->irqctrl) {
		reg += bank->regs->irqctrl;

		l = readl_relaxed(reg);
		if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
			bank->toggle_mask |= BIT(gpio);
		if (trigger & IRQ_TYPE_EDGE_RISING)
			l |= BIT(gpio);
		else if (trigger & IRQ_TYPE_EDGE_FALLING)
			l &= ~(BIT(gpio));
		else
			return -EINVAL;

		writel_relaxed(l, reg);
	} else if (bank->regs->edgectrl1) {
		if (gpio & 0x08)
			reg += bank->regs->edgectrl2;
		else
			reg += bank->regs->edgectrl1;

		gpio &= 0x07;
		l = readl_relaxed(reg);
		l &= ~(3 << (gpio << 1));
		if (trigger & IRQ_TYPE_EDGE_RISING)
			l |= 2 << (gpio << 1);
		if (trigger & IRQ_TYPE_EDGE_FALLING)
			l |= BIT(gpio << 1);
		writel_relaxed(l, reg);
	}
	return 0;
}

static void omap_enable_gpio_module(struct gpio_bank *bank, unsigned offset)
{
	if (bank->regs->pinctrl) {
		void __iomem *reg = bank->base + bank->regs->pinctrl;

		/* Claim the pin for MPU */
		writel_relaxed(readl_relaxed(reg) | (BIT(offset)), reg);
	}

	if (bank->regs->ctrl && !BANK_USED(bank)) {
		void __iomem *reg = bank->base + bank->regs->ctrl;
		u32 ctrl;

		ctrl = readl_relaxed(reg);
		/* Module is enabled, clocks are not gated */
		ctrl &= ~GPIO_MOD_CTRL_BIT;
		writel_relaxed(ctrl, reg);
		bank->context.ctrl = ctrl;
	}
}

static void omap_disable_gpio_module(struct gpio_bank *bank, unsigned offset)
{
	if (bank->regs->ctrl && !BANK_USED(bank)) {
		void __iomem *reg = bank->base + bank->regs->ctrl;
		u32 ctrl;

		ctrl = readl_relaxed(reg);
		/* Module is disabled, clocks are gated */
		ctrl |= GPIO_MOD_CTRL_BIT;
		writel_relaxed(ctrl, reg);
		bank->context.ctrl = ctrl;
	}
}

static int omap_gpio_is_input(struct gpio_bank *bank, unsigned offset)
{
	void __iomem *reg = bank->base + bank->regs->direction;

	return readl_relaxed(reg) & BIT(offset);
}

static void omap_gpio_init_irq(struct gpio_bank *bank, unsigned offset)
{
	if (!LINE_USED(bank->mod_usage, offset)) {
		omap_enable_gpio_module(bank, offset);
		omap_set_gpio_direction(bank, offset, 1);
	}
	bank->irq_usage |= BIT(offset);
}

static int omap_gpio_irq_type(struct irq_data *d, unsigned type)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	int retval;
	unsigned long flags;
	unsigned offset = d->hwirq;

	if (type & ~IRQ_TYPE_SENSE_MASK)
		return -EINVAL;

	if (!bank->regs->leveldetect0 &&
	    (type & (IRQ_TYPE_LEVEL_LOW|IRQ_TYPE_LEVEL_HIGH)))
		return -EINVAL;

	raw_spin_lock_irqsave(&bank->lock, flags);
	retval = omap_set_gpio_triggering(bank, offset, type);
	if (retval) {
		raw_spin_unlock_irqrestore(&bank->lock, flags);
		goto error;
	}
	omap_gpio_init_irq(bank, offset);
	if (!omap_gpio_is_input(bank, offset)) {
		raw_spin_unlock_irqrestore(&bank->lock, flags);
		retval = -EINVAL;
		goto error;
	}
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
		irq_set_handler_locked(d, handle_level_irq);
	else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
		/*
		 * Edge IRQs are already cleared/acked in the irq_handler and
		 * do not need to be masked. As a result, handle_edge_irq()
		 * logic would be excessive here and may cause interrupts to
		 * be lost, so just use handle_simple_irq.
		 */
		irq_set_handler_locked(d, handle_simple_irq);

	return 0;

error:
	return retval;
}

static void omap_clear_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
{
	void __iomem *reg = bank->base;

	reg += bank->regs->irqstatus;
	writel_relaxed(gpio_mask, reg);

	/* Workaround for clearing DSP GPIO interrupts to allow retention */
	if (bank->regs->irqstatus2) {
		reg = bank->base + bank->regs->irqstatus2;
		writel_relaxed(gpio_mask, reg);
	}

	/* Flush posted write for the irq status to avoid spurious interrupts */
	readl_relaxed(reg);
}

static inline void omap_clear_gpio_irqstatus(struct gpio_bank *bank,
					     unsigned offset)
{
	omap_clear_gpio_irqbank(bank, BIT(offset));
}

static u32 omap_get_gpio_irqbank_mask(struct gpio_bank *bank)
{
	void __iomem *reg = bank->base;
	u32 l;
	u32 mask = (BIT(bank->width)) - 1;

	reg += bank->regs->irqenable;
	l = readl_relaxed(reg);
	if (bank->regs->irqenable_inv)
		l = ~l;
	l &= mask;
	return l;
}

static inline void omap_set_gpio_irqenable(struct gpio_bank *bank,
					   unsigned offset, int enable)
{
	void __iomem *reg = bank->base;
	u32 gpio_mask = BIT(offset);

	if (bank->regs->set_irqenable && bank->regs->clr_irqenable) {
		if (enable) {
			reg += bank->regs->set_irqenable;
			bank->context.irqenable1 |= gpio_mask;
		} else {
			reg += bank->regs->clr_irqenable;
			bank->context.irqenable1 &= ~gpio_mask;
		}
		writel_relaxed(gpio_mask, reg);
	} else {
		bank->context.irqenable1 =
			omap_gpio_rmw(reg + bank->regs->irqenable, gpio_mask,
				      enable ^ bank->regs->irqenable_inv);
	}

	/*
	 * Program GPIO wakeup along with IRQ enable to satisfy OMAP4430 TRM
	 * note requiring correlation between the IRQ enable registers and
	 * the wakeup registers. In any case, we want wakeup from idle
	 * enabled for the GPIOs which support this feature.
	 */
	if (bank->regs->wkup_en &&
	    (bank->regs->edgectrl1 || !(bank->non_wakeup_gpios & gpio_mask))) {
		bank->context.wake_en =
			omap_gpio_rmw(bank->base + bank->regs->wkup_en,
				      gpio_mask, enable);
	}
}

/* Use disable_irq_wake() and enable_irq_wake() functions from drivers */
static int omap_gpio_wake_enable(struct irq_data *d, unsigned int enable)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);

	return irq_set_irq_wake(bank->irq, enable);
}
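
/*
 * Illustrative consumer-side sketch (editor's addition, not part of this
 * driver; handler, names and trigger flags are placeholders): a client
 * driver that wants one of these GPIO interrupts to wake the system would
 * typically do something like
 *
 *	irq = gpiod_to_irq(gpiod);
 *	ret = request_irq(irq, my_handler, IRQF_TRIGGER_FALLING,
 *			  "my-dev", dev);
 *	if (!ret)
 *		enable_irq_wake(irq);
 *
 * enable_irq_wake() ends up in omap_gpio_wake_enable() above, which forwards
 * the request to the bank's parent interrupt via irq_set_irq_wake().
 */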

/*
 * We need to unmask the GPIO bank interrupt as soon as possible to
 * avoid missing GPIO interrupts for other lines in the bank.
 * Then we need to mask-read-clear-unmask the triggered GPIO lines
 * in the bank to avoid missing nested interrupts for a GPIO line.
 * If we wait to unmask individual GPIO lines in the bank after the
 * line's interrupt handler has been run, we may miss some nested
 * interrupts.
 */
static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank)
{
	void __iomem *isr_reg = NULL;
	u32 enabled, isr, edge;
	unsigned int bit;
	struct gpio_bank *bank = gpiobank;
	unsigned long wa_lock_flags;
	unsigned long lock_flags;

	isr_reg = bank->base + bank->regs->irqstatus;
	if (WARN_ON(!isr_reg))
		goto exit;

	if (WARN_ONCE(!pm_runtime_active(bank->chip.parent),
		      "gpio irq%i while runtime suspended?\n", irq))
		return IRQ_NONE;

	while (1) {
		raw_spin_lock_irqsave(&bank->lock, lock_flags);

		enabled = omap_get_gpio_irqbank_mask(bank);
		isr = readl_relaxed(isr_reg) & enabled;

		/*
		 * Clear edge sensitive interrupts before calling handler(s)
		 * so subsequent edge transitions are not missed while the
		 * handlers are running.
		 */
		edge = isr & ~bank->level_mask;
		if (edge)
			omap_clear_gpio_irqbank(bank, edge);

		raw_spin_unlock_irqrestore(&bank->lock, lock_flags);

		if (!isr)
			break;

		while (isr) {
			bit = __ffs(isr);
			isr &= ~(BIT(bit));

			raw_spin_lock_irqsave(&bank->lock, lock_flags);
			/*
			 * Some chips can't respond to both rising and falling
			 * at the same time. If this irq was requested with
			 * both flags, we need to flip the ICR data for the IRQ
			 * to respond to the IRQ for the opposite direction.
			 * This will be indicated in the bank toggle_mask.
			 */
			if (bank->toggle_mask & (BIT(bit)))
				omap_toggle_gpio_edge_triggering(bank, bit);

			raw_spin_unlock_irqrestore(&bank->lock, lock_flags);

			raw_spin_lock_irqsave(&bank->wa_lock, wa_lock_flags);

			generic_handle_domain_irq(bank->chip.irq.domain, bit);

			raw_spin_unlock_irqrestore(&bank->wa_lock,
						   wa_lock_flags);
		}
	}
exit:
	return IRQ_HANDLED;
}

static unsigned int omap_gpio_irq_startup(struct irq_data *d)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	unsigned long flags;
	unsigned offset = d->hwirq;

	raw_spin_lock_irqsave(&bank->lock, flags);

	if (!LINE_USED(bank->mod_usage, offset))
		omap_set_gpio_direction(bank, offset, 1);
	omap_enable_gpio_module(bank, offset);
	bank->irq_usage |= BIT(offset);

	raw_spin_unlock_irqrestore(&bank->lock, flags);
	omap_gpio_unmask_irq(d);

	return 0;
}

static void omap_gpio_irq_shutdown(struct irq_data *d)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	unsigned long flags;
	unsigned offset = d->hwirq;

	raw_spin_lock_irqsave(&bank->lock, flags);
	bank->irq_usage &= ~(BIT(offset));
	omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
	omap_clear_gpio_irqstatus(bank, offset);
	omap_set_gpio_irqenable(bank, offset, 0);
	if (!LINE_USED(bank->mod_usage, offset))
		omap_clear_gpio_debounce(bank, offset);
	omap_disable_gpio_module(bank, offset);
	raw_spin_unlock_irqrestore(&bank->lock, flags);
}

static void omap_gpio_irq_bus_lock(struct irq_data *data)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(data);

	pm_runtime_get_sync(bank->chip.parent);
}

static void gpio_irq_bus_sync_unlock(struct irq_data *data)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(data);

	pm_runtime_put(bank->chip.parent);
}

static void omap_gpio_mask_irq(struct irq_data *d)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	unsigned offset = d->hwirq;
	unsigned long flags;

	raw_spin_lock_irqsave(&bank->lock, flags);
	omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
	omap_set_gpio_irqenable(bank, offset, 0);
	raw_spin_unlock_irqrestore(&bank->lock, flags);
	gpiochip_disable_irq(&bank->chip, offset);
}

static void omap_gpio_unmask_irq(struct irq_data *d)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	unsigned offset = d->hwirq;
	u32 trigger = irqd_get_trigger_type(d);
	unsigned long flags;

	gpiochip_enable_irq(&bank->chip, offset);
	raw_spin_lock_irqsave(&bank->lock, flags);
	omap_set_gpio_irqenable(bank, offset, 1);

	/*
	 * For level-triggered GPIOs, clearing must be done after the source
	 * is cleared, thus after the handler has run. OMAP4 needs this done
	 * after enabling the interrupt to clear the wakeup status.
	 */
	if (bank->regs->leveldetect0 && bank->regs->wkup_en &&
	    trigger & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
		omap_clear_gpio_irqstatus(bank, offset);

	if (trigger)
		omap_set_gpio_triggering(bank, offset, trigger);

	raw_spin_unlock_irqrestore(&bank->lock, flags);
}

static void omap_gpio_irq_print_chip(struct irq_data *d, struct seq_file *p)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);

	seq_puts(p, dev_name(bank->dev));
}

static const struct irq_chip omap_gpio_irq_chip = {
	.irq_startup = omap_gpio_irq_startup,
	.irq_shutdown = omap_gpio_irq_shutdown,
	.irq_mask = omap_gpio_mask_irq,
	.irq_unmask = omap_gpio_unmask_irq,
	.irq_set_type = omap_gpio_irq_type,
	.irq_set_wake = omap_gpio_wake_enable,
	.irq_bus_lock = omap_gpio_irq_bus_lock,
	.irq_bus_sync_unlock = gpio_irq_bus_sync_unlock,
	.irq_print_chip = omap_gpio_irq_print_chip,
	.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_IMMUTABLE,
	GPIOCHIP_IRQ_RESOURCE_HELPERS,
};

static const struct irq_chip omap_gpio_irq_chip_nowake = {
	.irq_startup = omap_gpio_irq_startup,
	.irq_shutdown = omap_gpio_irq_shutdown,
	.irq_mask = omap_gpio_mask_irq,
	.irq_unmask = omap_gpio_unmask_irq,
	.irq_set_type = omap_gpio_irq_type,
	.irq_bus_lock = omap_gpio_irq_bus_lock,
	.irq_bus_sync_unlock = gpio_irq_bus_sync_unlock,
	.irq_print_chip = omap_gpio_irq_print_chip,
	.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_IMMUTABLE,
	GPIOCHIP_IRQ_RESOURCE_HELPERS,
};

/*---------------------------------------------------------------------*/

static int omap_mpuio_suspend_noirq(struct device *dev)
{
	struct gpio_bank *bank = dev_get_drvdata(dev);
	void __iomem *mask_reg = bank->base +
				 OMAP_MPUIO_GPIO_MASKIT / bank->stride;
	unsigned long flags;

	raw_spin_lock_irqsave(&bank->lock, flags);
	writel_relaxed(0xffff & ~bank->context.wake_en, mask_reg);
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static int omap_mpuio_resume_noirq(struct device *dev)
{
	struct gpio_bank *bank = dev_get_drvdata(dev);
	void __iomem *mask_reg = bank->base +
				 OMAP_MPUIO_GPIO_MASKIT / bank->stride;
	unsigned long flags;

	raw_spin_lock_irqsave(&bank->lock, flags);
	writel_relaxed(bank->context.wake_en, mask_reg);
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static const struct dev_pm_ops omap_mpuio_dev_pm_ops = {
	.suspend_noirq = omap_mpuio_suspend_noirq,
	.resume_noirq = omap_mpuio_resume_noirq,
};

/* use platform_driver for this. */
static struct platform_driver omap_mpuio_driver = {
	.driver = {
		.name = "mpuio",
		.pm = &omap_mpuio_dev_pm_ops,
	},
};

static struct platform_device omap_mpuio_device = {
	.name = "mpuio",
	.id = -1,
	.dev = {
		.driver = &omap_mpuio_driver.driver,
	}
	/* could list the /proc/iomem resources */
};

static inline void omap_mpuio_init(struct gpio_bank *bank)
{
	platform_set_drvdata(&omap_mpuio_device, bank);

	if (platform_driver_register(&omap_mpuio_driver) == 0)
		(void) platform_device_register(&omap_mpuio_device);
}

/*---------------------------------------------------------------------*/

static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank = gpiochip_get_data(chip);
	unsigned long flags;

	pm_runtime_get_sync(chip->parent);

	raw_spin_lock_irqsave(&bank->lock, flags);
	omap_enable_gpio_module(bank, offset);
	bank->mod_usage |= BIT(offset);
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank = gpiochip_get_data(chip);
	unsigned long flags;

	raw_spin_lock_irqsave(&bank->lock, flags);
	bank->mod_usage &= ~(BIT(offset));
	if (!LINE_USED(bank->irq_usage, offset)) {
		omap_set_gpio_direction(bank, offset, 1);
		omap_clear_gpio_debounce(bank, offset);
	}
	omap_disable_gpio_module(bank, offset);
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	pm_runtime_put(chip->parent);
}

static int omap_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank = gpiochip_get_data(chip);

	if (readl_relaxed(bank->base + bank->regs->direction) & BIT(offset))
		return GPIO_LINE_DIRECTION_IN;

	return GPIO_LINE_DIRECTION_OUT;
}

static int omap_gpio_input(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = gpiochip_get_data(chip);
	raw_spin_lock_irqsave(&bank->lock, flags);
	omap_set_gpio_direction(bank, offset, 1);
	raw_spin_unlock_irqrestore(&bank->lock, flags);
	return 0;
}

static int omap_gpio_get(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank = gpiochip_get_data(chip);
	void __iomem *reg;

	if (omap_gpio_is_input(bank, offset))
		reg = bank->base + bank->regs->datain;
	else
		reg = bank->base + bank->regs->dataout;

	return (readl_relaxed(reg) & BIT(offset)) != 0;
}

static int omap_gpio_output(struct gpio_chip *chip, unsigned offset, int value)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = gpiochip_get_data(chip);
	raw_spin_lock_irqsave(&bank->lock, flags);
	bank->set_dataout(bank, offset, value);
	omap_set_gpio_direction(bank, offset, 0);
	raw_spin_unlock_irqrestore(&bank->lock, flags);
	return 0;
}

static int omap_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask,
				  unsigned long *bits)
{
	struct gpio_bank *bank = gpiochip_get_data(chip);
	void __iomem *base = bank->base;
	u32 direction, m, val = 0;

	direction = readl_relaxed(base + bank->regs->direction);

	m = direction & *mask;
	if (m)
		val |= readl_relaxed(base + bank->regs->datain) & m;

	m = ~direction & *mask;
	if (m)
		val |= readl_relaxed(base + bank->regs->dataout) & m;

	*bits = val;

	return 0;
}

static int omap_gpio_debounce(struct gpio_chip *chip, unsigned offset,
			      unsigned debounce)
{
	struct gpio_bank *bank;
	unsigned long flags;
	int ret;

	bank = gpiochip_get_data(chip);

	raw_spin_lock_irqsave(&bank->lock, flags);
	ret = omap2_set_gpio_debounce(bank, offset, debounce);
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	if (ret)
		dev_info(chip->parent,
			 "Could not set line %u debounce to %u microseconds (%d)",
			 offset, debounce, ret);

	return ret;
}

static int omap_gpio_set_config(struct gpio_chip *chip, unsigned offset,
				unsigned long config)
{
	u32 debounce;
	int ret = -ENOTSUPP;

	switch (pinconf_to_config_param(config)) {
	case PIN_CONFIG_BIAS_DISABLE:
	case PIN_CONFIG_BIAS_PULL_UP:
	case PIN_CONFIG_BIAS_PULL_DOWN:
		ret = gpiochip_generic_config(chip, offset, config);
		break;
	case PIN_CONFIG_INPUT_DEBOUNCE:
		debounce = pinconf_to_config_argument(config);
		ret = omap_gpio_debounce(chip, offset, debounce);
		break;
	default:
		break;
	}

	return ret;
}

static int omap_gpio_set(struct gpio_chip *chip, unsigned int offset, int value)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = gpiochip_get_data(chip);
	raw_spin_lock_irqsave(&bank->lock, flags);
	bank->set_dataout(bank, offset, value);
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static int omap_gpio_set_multiple(struct gpio_chip *chip, unsigned long *mask,
				  unsigned long *bits)
{
	struct gpio_bank *bank = gpiochip_get_data(chip);
	void __iomem *reg = bank->base + bank->regs->dataout;
	unsigned long flags;
	u32 l;

	raw_spin_lock_irqsave(&bank->lock, flags);
	l = (readl_relaxed(reg) & ~*mask) | (*bits & *mask);
	writel_relaxed(l, reg);
	bank->context.dataout = l;
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

/*---------------------------------------------------------------------*/

static void omap_gpio_show_rev(struct gpio_bank *bank)
{
	static bool called;
	u32 rev;

	if (called || bank->regs->revision == USHRT_MAX)
		return;

	rev = readw_relaxed(bank->base + bank->regs->revision);
	pr_info("OMAP GPIO hardware version %d.%d\n",
		(rev >> 4) & 0x0f, rev & 0x0f);

	called = true;
}

static void omap_gpio_mod_init(struct gpio_bank *bank)
{
	void __iomem *base = bank->base;
	u32 l = 0xffffffff;

	if (bank->width == 16)
		l = 0xffff;

	if (bank->is_mpuio) {
		writel_relaxed(l, bank->base + bank->regs->irqenable);
		return;
	}

	omap_gpio_rmw(base + bank->regs->irqenable, l,
		      bank->regs->irqenable_inv);
	omap_gpio_rmw(base + bank->regs->irqstatus, l,
		      !bank->regs->irqenable_inv);
	if (bank->regs->debounce_en)
		writel_relaxed(0, base + bank->regs->debounce_en);

	/* Save OE default value (0xffffffff) in the context */
	bank->context.oe = readl_relaxed(bank->base + bank->regs->direction);
	/* Initialize interface clk ungated, module enabled */
	if (bank->regs->ctrl)
		writel_relaxed(0, base + bank->regs->ctrl);
}

static int omap_gpio_chip_init(struct gpio_bank *bank, struct device *pm_dev)
{
	struct gpio_irq_chip *irq;
	static int gpio;
	const char *label;
	int ret;

	/*
	 * REVISIT eventually switch from OMAP-specific gpio structs
	 * over to the generic ones
	 */
	bank->chip.request = omap_gpio_request;
	bank->chip.free = omap_gpio_free;
	bank->chip.get_direction = omap_gpio_get_direction;
	bank->chip.direction_input = omap_gpio_input;
	bank->chip.get = omap_gpio_get;
	bank->chip.get_multiple = omap_gpio_get_multiple;
	bank->chip.direction_output = omap_gpio_output;
	bank->chip.set_config = omap_gpio_set_config;
	bank->chip.set_rv = omap_gpio_set;
	bank->chip.set_multiple_rv = omap_gpio_set_multiple;
	if (bank->is_mpuio) {
		bank->chip.label = "mpuio";
		if (bank->regs->wkup_en)
			bank->chip.parent = &omap_mpuio_device.dev;
	} else {
		label = devm_kasprintf(bank->chip.parent, GFP_KERNEL, "gpio-%d-%d",
				       gpio, gpio + bank->width - 1);
		if (!label)
			return -ENOMEM;
		bank->chip.label = label;
	}
	bank->chip.base = -1;
	bank->chip.ngpio = bank->width;

	irq = &bank->chip.irq;
	/* MPUIO is a bit different, reading IRQ status clears it */
	if (bank->is_mpuio && !bank->regs->wkup_en)
		gpio_irq_chip_set_chip(irq, &omap_gpio_irq_chip_nowake);
	else
		gpio_irq_chip_set_chip(irq, &omap_gpio_irq_chip);
	irq->handler = handle_bad_irq;
	irq->default_type = IRQ_TYPE_NONE;
	irq->num_parents = 1;
	irq->parents = &bank->irq;

	ret = gpiochip_add_data(&bank->chip, bank);
	if (ret)
		return dev_err_probe(bank->chip.parent, ret, "Could not register gpio chip\n");

	irq_domain_set_pm_device(bank->chip.irq.domain, pm_dev);
	ret = devm_request_irq(bank->chip.parent, bank->irq,
			       omap_gpio_irq_handler,
			       0, dev_name(bank->chip.parent), bank);
	if (ret)
		gpiochip_remove(&bank->chip);

	if (!bank->is_mpuio)
		gpio += bank->width;

	return ret;
}

static void omap_gpio_init_context(struct gpio_bank *p)
{
	const struct omap_gpio_reg_offs *regs = p->regs;
	void __iomem *base = p->base;

	p->context.sysconfig = readl_relaxed(base + regs->sysconfig);
	p->context.ctrl = readl_relaxed(base + regs->ctrl);
	p->context.oe = readl_relaxed(base + regs->direction);
	p->context.wake_en = readl_relaxed(base + regs->wkup_en);
	p->context.leveldetect0 = readl_relaxed(base + regs->leveldetect0);
	p->context.leveldetect1 = readl_relaxed(base + regs->leveldetect1);
	p->context.risingdetect = readl_relaxed(base + regs->risingdetect);
	p->context.fallingdetect = readl_relaxed(base + regs->fallingdetect);
	p->context.irqenable1 = readl_relaxed(base + regs->irqenable);
	p->context.irqenable2 = readl_relaxed(base + regs->irqenable2);
	p->context.dataout = readl_relaxed(base + regs->dataout);

	p->context_valid = true;
}

static void omap_gpio_restore_context(struct gpio_bank *bank)
{
	const struct omap_gpio_reg_offs *regs = bank->regs;
	void __iomem *base = bank->base;

	writel_relaxed(bank->context.sysconfig, base + regs->sysconfig);
	writel_relaxed(bank->context.wake_en, base + regs->wkup_en);
	writel_relaxed(bank->context.ctrl, base + regs->ctrl);
	writel_relaxed(bank->context.leveldetect0, base + regs->leveldetect0);
	writel_relaxed(bank->context.leveldetect1, base + regs->leveldetect1);
	writel_relaxed(bank->context.risingdetect, base + regs->risingdetect);
	writel_relaxed(bank->context.fallingdetect, base + regs->fallingdetect);
	writel_relaxed(bank->context.dataout, base + regs->dataout);
	writel_relaxed(bank->context.oe, base + regs->direction);

	if (bank->dbck_enable_mask) {
		writel_relaxed(bank->context.debounce, base + regs->debounce);
		writel_relaxed(bank->context.debounce_en,
			       base + regs->debounce_en);
	}

	writel_relaxed(bank->context.irqenable1, base + regs->irqenable);
	writel_relaxed(bank->context.irqenable2, base + regs->irqenable2);
}

static void omap_gpio_idle(struct gpio_bank *bank, bool may_lose_context)
{
	struct device *dev = bank->chip.parent;
	void __iomem *base = bank->base;
	u32 mask, nowake;

	bank->saved_datain = readl_relaxed(base + bank->regs->datain);

	/* Save sysconfig; its runtime value can differ from the init value */
	if (bank->loses_context)
		bank->context.sysconfig = readl_relaxed(base + bank->regs->sysconfig);

	if (!bank->enabled_non_wakeup_gpios)
		goto update_gpio_context_count;

	/* Check for pending EDGE_FALLING, ignore EDGE_BOTH */
	mask = bank->enabled_non_wakeup_gpios & bank->context.fallingdetect;
	mask &= ~bank->context.risingdetect;
	bank->saved_datain |= mask;

	/* Check for pending EDGE_RISING, ignore EDGE_BOTH */
	mask = bank->enabled_non_wakeup_gpios & bank->context.risingdetect;
	mask &= ~bank->context.fallingdetect;
	bank->saved_datain &= ~mask;

	if (!may_lose_context)
		goto update_gpio_context_count;

	/*
	 * If going to OFF, remove triggering for all wkup domain
	 * non-wakeup GPIOs. Otherwise spurious IRQs will be
	 * generated. See OMAP2420 Errata item 1.101.
	 */
	if (!bank->loses_context && bank->enabled_non_wakeup_gpios) {
		nowake = bank->enabled_non_wakeup_gpios;
		omap_gpio_rmw(base + bank->regs->fallingdetect, nowake, ~nowake);
		omap_gpio_rmw(base + bank->regs->risingdetect, nowake, ~nowake);
	}

update_gpio_context_count:
	if (bank->get_context_loss_count)
		bank->context_loss_count =
			bank->get_context_loss_count(dev);

	omap_gpio_dbck_disable(bank);
}

static void omap_gpio_unidle(struct gpio_bank *bank)
{
	struct device *dev = bank->chip.parent;
	u32 l = 0, gen, gen0, gen1;
	int c;

	/*
	 * On the first resume during the probe, the context has not
	 * been initialised and so initialise it now. Also initialise
	 * the context loss count.
	 */
	if (bank->loses_context && !bank->context_valid) {
		omap_gpio_init_context(bank);

		if (bank->get_context_loss_count)
			bank->context_loss_count =
				bank->get_context_loss_count(dev);
	}

	omap_gpio_dbck_enable(bank);

	if (bank->loses_context) {
		if (!bank->get_context_loss_count) {
			omap_gpio_restore_context(bank);
		} else {
			c = bank->get_context_loss_count(dev);
			if (c != bank->context_loss_count) {
				omap_gpio_restore_context(bank);
			} else {
				return;
			}
		}
	} else {
		/* Restore changes done for OMAP2420 errata 1.101 */
		writel_relaxed(bank->context.fallingdetect,
			       bank->base + bank->regs->fallingdetect);
		writel_relaxed(bank->context.risingdetect,
			       bank->base + bank->regs->risingdetect);
	}

	l = readl_relaxed(bank->base + bank->regs->datain);

	/*
	 * Check if any of the non-wakeup interrupt GPIOs have changed
	 * state. If so, generate an IRQ by software. This is
	 * horribly racy, but it's the best we can do to work around
	 * this silicon bug.
	 */
	l ^= bank->saved_datain;
	l &= bank->enabled_non_wakeup_gpios;

	/*
	 * No need to generate IRQs for the rising edge for gpio IRQs
	 * configured with falling edge only; and vice versa.
	 */
	gen0 = l & bank->context.fallingdetect;
	gen0 &= bank->saved_datain;

	gen1 = l & bank->context.risingdetect;
	gen1 &= ~(bank->saved_datain);

	/* FIXME: Consider GPIO IRQs with level detections properly! */
	gen = l & (~(bank->context.fallingdetect) &
		   ~(bank->context.risingdetect));
	/* Consider all GPIO IRQs needed to be updated */
	gen |= gen0 | gen1;

	if (gen) {
		u32 old0, old1;

		old0 = readl_relaxed(bank->base + bank->regs->leveldetect0);
		old1 = readl_relaxed(bank->base + bank->regs->leveldetect1);

		if (!bank->regs->irqstatus_raw0) {
			writel_relaxed(old0 | gen, bank->base +
				       bank->regs->leveldetect0);
			writel_relaxed(old1 | gen, bank->base +
				       bank->regs->leveldetect1);
		}

		if (bank->regs->irqstatus_raw0) {
			writel_relaxed(old0 | l, bank->base +
				       bank->regs->leveldetect0);
			writel_relaxed(old1 | l, bank->base +
				       bank->regs->leveldetect1);
		}
		writel_relaxed(old0, bank->base + bank->regs->leveldetect0);
		writel_relaxed(old1, bank->base + bank->regs->leveldetect1);
	}
}

static int gpio_omap_cpu_notifier(struct notifier_block *nb,
				  unsigned long cmd, void *v)
{
	struct gpio_bank *bank;
	unsigned long flags;
	int ret = NOTIFY_OK;
	u32 isr, mask;

	bank = container_of(nb, struct gpio_bank, nb);

	raw_spin_lock_irqsave(&bank->lock, flags);
	if (bank->is_suspended)
		goto out_unlock;

	switch (cmd) {
	case CPU_CLUSTER_PM_ENTER:
		mask = omap_get_gpio_irqbank_mask(bank);
		isr = readl_relaxed(bank->base + bank->regs->irqstatus) & mask;
		if (isr) {
			ret = NOTIFY_BAD;
			break;
		}
		omap_gpio_idle(bank, true);
		break;
	case CPU_CLUSTER_PM_ENTER_FAILED:
	case CPU_CLUSTER_PM_EXIT:
		omap_gpio_unidle(bank);
		break;
	}

out_unlock:
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	return ret;
}

static const struct omap_gpio_reg_offs omap2_gpio_regs = {
	.revision = OMAP24XX_GPIO_REVISION,
	.sysconfig = OMAP24XX_GPIO_SYSCONFIG,
	.direction = OMAP24XX_GPIO_OE,
	.datain = OMAP24XX_GPIO_DATAIN,
	.dataout = OMAP24XX_GPIO_DATAOUT,
	.set_dataout = OMAP24XX_GPIO_SETDATAOUT,
	.clr_dataout = OMAP24XX_GPIO_CLEARDATAOUT,
	.irqstatus = OMAP24XX_GPIO_IRQSTATUS1,
	.irqstatus2 = OMAP24XX_GPIO_IRQSTATUS2,
	.irqenable = OMAP24XX_GPIO_IRQENABLE1,
	.irqenable2 = OMAP24XX_GPIO_IRQENABLE2,
	.set_irqenable = OMAP24XX_GPIO_SETIRQENABLE1,
	.clr_irqenable = OMAP24XX_GPIO_CLEARIRQENABLE1,
	.debounce = OMAP24XX_GPIO_DEBOUNCE_VAL,
	.debounce_en = OMAP24XX_GPIO_DEBOUNCE_EN,
	.ctrl = OMAP24XX_GPIO_CTRL,
	.wkup_en = OMAP24XX_GPIO_WAKE_EN,
	.leveldetect0 = OMAP24XX_GPIO_LEVELDETECT0,
	.leveldetect1 = OMAP24XX_GPIO_LEVELDETECT1,
	.risingdetect = OMAP24XX_GPIO_RISINGDETECT,
	.fallingdetect = OMAP24XX_GPIO_FALLINGDETECT,
};

static const struct omap_gpio_reg_offs omap4_gpio_regs = {
	.revision = OMAP4_GPIO_REVISION,
	.sysconfig = OMAP4_GPIO_SYSCONFIG,
	.direction = OMAP4_GPIO_OE,
	.datain = OMAP4_GPIO_DATAIN,
	.dataout = OMAP4_GPIO_DATAOUT,
	.set_dataout = OMAP4_GPIO_SETDATAOUT,
	.clr_dataout = OMAP4_GPIO_CLEARDATAOUT,
	.irqstatus = OMAP4_GPIO_IRQSTATUS0,
	.irqstatus2 = OMAP4_GPIO_IRQSTATUS1,
	.irqstatus_raw0 = OMAP4_GPIO_IRQSTATUSRAW0,
	.irqstatus_raw1 = OMAP4_GPIO_IRQSTATUSRAW1,
	.irqenable = OMAP4_GPIO_IRQSTATUSSET0,
	.irqenable2 = OMAP4_GPIO_IRQSTATUSSET1,
	.set_irqenable = OMAP4_GPIO_IRQSTATUSSET0,
	.clr_irqenable = OMAP4_GPIO_IRQSTATUSCLR0,
	.debounce = OMAP4_GPIO_DEBOUNCINGTIME,
	.debounce_en = OMAP4_GPIO_DEBOUNCENABLE,
	.ctrl = OMAP4_GPIO_CTRL,
	.wkup_en = OMAP4_GPIO_IRQWAKEN0,
	.leveldetect0 = OMAP4_GPIO_LEVELDETECT0,
	.leveldetect1 = OMAP4_GPIO_LEVELDETECT1,
	.risingdetect = OMAP4_GPIO_RISINGDETECT,
	.fallingdetect = OMAP4_GPIO_FALLINGDETECT,
};

static const struct omap_gpio_platform_data omap2_pdata = {
	.regs = &omap2_gpio_regs,
	.bank_width = 32,
	.dbck_flag = false,
};

static const struct omap_gpio_platform_data omap3_pdata = {
	.regs = &omap2_gpio_regs,
	.bank_width = 32,
	.dbck_flag = true,
};

static const struct omap_gpio_platform_data omap4_pdata = {
	.regs = &omap4_gpio_regs,
	.bank_width = 32,
	.dbck_flag = true,
};

static const struct of_device_id omap_gpio_match[] = {
	{
		.compatible = "ti,omap4-gpio",
		.data = &omap4_pdata,
	},
	{
		.compatible = "ti,omap3-gpio",
		.data = &omap3_pdata,
	},
	{
		.compatible = "ti,omap2-gpio",
		.data = &omap2_pdata,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, omap_gpio_match);

static int omap_gpio_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	const struct omap_gpio_platform_data *pdata;
	struct gpio_bank *bank;
	int ret;

	pdata = device_get_match_data(dev);

	pdata = pdata ?: dev_get_platdata(dev);
	if (!pdata)
		return -EINVAL;

	bank = devm_kzalloc(dev, sizeof(*bank), GFP_KERNEL);
	if (!bank)
		return -ENOMEM;

	bank->dev = dev;

	bank->irq = platform_get_irq(pdev, 0);
	if (bank->irq < 0)
		return bank->irq;

	bank->chip.parent = dev;
	bank->chip.owner = THIS_MODULE;
	bank->dbck_flag = pdata->dbck_flag;
	bank->stride = pdata->bank_stride;
	bank->width = pdata->bank_width;
	bank->is_mpuio = pdata->is_mpuio;
	bank->non_wakeup_gpios = pdata->non_wakeup_gpios;
	bank->regs = pdata->regs;

	if (node) {
		if (!of_property_read_bool(node, "ti,gpio-always-on"))
			bank->loses_context = true;
	} else {
		bank->loses_context = pdata->loses_context;

		if (bank->loses_context)
			bank->get_context_loss_count =
				pdata->get_context_loss_count;
	}

	if (bank->regs->set_dataout && bank->regs->clr_dataout)
		bank->set_dataout = omap_set_gpio_dataout_reg;
	else
		bank->set_dataout = omap_set_gpio_dataout_mask;

	raw_spin_lock_init(&bank->lock);
	raw_spin_lock_init(&bank->wa_lock);

	/* Static mapping, never released */
	bank->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(bank->base))
		return PTR_ERR(bank->base);

	if (bank->dbck_flag) {
		bank->dbck = devm_clk_get(dev, "dbclk");
		if (IS_ERR(bank->dbck)) {
			dev_err(dev,
				"Could not get gpio dbck. Disable debounce\n");
			bank->dbck_flag = false;
		} else {
			clk_prepare(bank->dbck);
		}
	}

	platform_set_drvdata(pdev, bank);

	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);

	if (bank->is_mpuio)
		omap_mpuio_init(bank);

	omap_gpio_mod_init(bank);

	ret = omap_gpio_chip_init(bank, dev);
	if (ret) {
		pm_runtime_put_sync(dev);
		pm_runtime_disable(dev);
		if (bank->dbck_flag)
			clk_unprepare(bank->dbck);
		return ret;
	}

	omap_gpio_show_rev(bank);

	bank->nb.notifier_call = gpio_omap_cpu_notifier;
	cpu_pm_register_notifier(&bank->nb);

	pm_runtime_put(dev);

	return 0;
}

static void omap_gpio_remove(struct platform_device *pdev)
{
	struct gpio_bank *bank = platform_get_drvdata(pdev);

	cpu_pm_unregister_notifier(&bank->nb);
	gpiochip_remove(&bank->chip);
	pm_runtime_disable(&pdev->dev);
	if (bank->dbck_flag)
		clk_unprepare(bank->dbck);
}

static int __maybe_unused omap_gpio_runtime_suspend(struct device *dev)
{
	struct gpio_bank *bank = dev_get_drvdata(dev);
	unsigned long flags;

	raw_spin_lock_irqsave(&bank->lock, flags);
	omap_gpio_idle(bank, true);
	bank->is_suspended = true;
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static int __maybe_unused omap_gpio_runtime_resume(struct device *dev)
{
	struct gpio_bank *bank = dev_get_drvdata(dev);
	unsigned long flags;

	raw_spin_lock_irqsave(&bank->lock, flags);
	omap_gpio_unidle(bank);
	bank->is_suspended = false;
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static int __maybe_unused omap_gpio_suspend(struct device *dev)
{
	struct gpio_bank *bank = dev_get_drvdata(dev);

	if (bank->is_suspended)
		return 0;

	bank->needs_resume = 1;

	return omap_gpio_runtime_suspend(dev);
}

static int __maybe_unused omap_gpio_resume(struct device *dev)
{
	struct gpio_bank *bank = dev_get_drvdata(dev);

	if (!bank->needs_resume)
		return 0;

	bank->needs_resume = 0;

	return omap_gpio_runtime_resume(dev);
}

static const struct dev_pm_ops gpio_pm_ops = {
	SET_RUNTIME_PM_OPS(omap_gpio_runtime_suspend, omap_gpio_runtime_resume,
			   NULL)
	SET_LATE_SYSTEM_SLEEP_PM_OPS(omap_gpio_suspend, omap_gpio_resume)
};

static struct platform_driver omap_gpio_driver = {
	.probe = omap_gpio_probe,
	.remove = omap_gpio_remove,
	.driver = {
		.name = "omap_gpio",
		.pm = &gpio_pm_ops,
		.of_match_table = omap_gpio_match,
	},
};

/*
 * gpio driver register needs to be done before
 * machine_init functions access gpio APIs.
 * Hence omap_gpio_drv_reg() is a postcore_initcall.
 */
static int __init omap_gpio_drv_reg(void)
{
	return platform_driver_register(&omap_gpio_driver);
}
postcore_initcall(omap_gpio_drv_reg);

static void __exit omap_gpio_exit(void)
{
	platform_driver_unregister(&omap_gpio_driver);
}
module_exit(omap_gpio_exit);

MODULE_DESCRIPTION("omap gpio driver");
MODULE_ALIAS("platform:gpio-omap");
MODULE_LICENSE("GPL v2");