// SPDX-License-Identifier: GPL-2.0-only
/*
 * Support functions for OMAP GPIO
 *
 * Copyright (C) 2003-2005 Nokia Corporation
 * Written by Juha Yrjölä <juha.yrjola@nokia.com>
 *
 * Copyright (C) 2009 Texas Instruments
 * Added OMAP4 support - Santosh Shilimkar <santosh.shilimkar@ti.com>
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/syscore_ops.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/cpu_pm.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <linux/pm.h>
#include <linux/of.h>
#include <linux/gpio/driver.h>
#include <linux/bitops.h>
#include <linux/platform_data/gpio-omap.h>

#define OMAP4_GPIO_DEBOUNCINGTIME_MASK 0xFF

struct gpio_regs {
	u32 sysconfig;
	u32 irqenable1;
	u32 irqenable2;
	u32 wake_en;
	u32 ctrl;
	u32 oe;
	u32 leveldetect0;
	u32 leveldetect1;
	u32 risingdetect;
	u32 fallingdetect;
	u32 dataout;
	u32 debounce;
	u32 debounce_en;
};

struct gpio_bank {
	void __iomem *base;
	const struct omap_gpio_reg_offs *regs;
	struct device *dev;

	int irq;
	u32 non_wakeup_gpios;
	u32 enabled_non_wakeup_gpios;
	struct gpio_regs context;
	u32 saved_datain;
	u32 level_mask;
	u32 toggle_mask;
	raw_spinlock_t lock;
	raw_spinlock_t wa_lock;
	struct gpio_chip chip;
	struct clk *dbck;
	struct notifier_block nb;
	unsigned int is_suspended:1;
	unsigned int needs_resume:1;
	u32 mod_usage;
	u32 irq_usage;
	u32 dbck_enable_mask;
	bool dbck_enabled;
	bool is_mpuio;
	bool dbck_flag;
	bool loses_context;
	bool context_valid;
	int stride;
	u32 width;
	int context_loss_count;

	void (*set_dataout)(struct gpio_bank *bank, unsigned gpio, int enable);
	int (*get_context_loss_count)(struct device *dev);
};

#define GPIO_MOD_CTRL_BIT	BIT(0)

#define BANK_USED(bank) (bank->mod_usage || bank->irq_usage)
#define LINE_USED(line, offset) (line & (BIT(offset)))

static void omap_gpio_unmask_irq(struct irq_data *d);

static inline struct gpio_bank *omap_irq_data_get_bank(struct irq_data *d)
{
	struct gpio_chip *chip = irq_data_get_irq_chip_data(d);

	return gpiochip_get_data(chip);
}

static inline u32 omap_gpio_rmw(void __iomem *reg, u32 mask, bool set)
{
	u32 val = readl_relaxed(reg);

	if (set)
		val |= mask;
	else
		val &= ~mask;

	writel_relaxed(val, reg);

	return val;
}

static void omap_set_gpio_direction(struct gpio_bank *bank, int gpio,
				    int is_input)
{
	bank->context.oe = omap_gpio_rmw(bank->base + bank->regs->direction,
					 BIT(gpio), is_input);
}

/* set data out value using dedicated set/clear register */
static void omap_set_gpio_dataout_reg(struct gpio_bank *bank, unsigned offset,
				      int enable)
{
	void __iomem *reg = bank->base;
	u32 l = BIT(offset);

	if (enable) {
		reg += bank->regs->set_dataout;
		bank->context.dataout |= l;
	} else {
		reg += bank->regs->clr_dataout;
		bank->context.dataout &= ~l;
	}

	writel_relaxed(l, reg);
}

/* set data out value using mask register */
static void omap_set_gpio_dataout_mask(struct gpio_bank *bank, unsigned offset,
				       int enable)
{
	bank->context.dataout = omap_gpio_rmw(bank->base + bank->regs->dataout,
					      BIT(offset), enable);
}
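/*
 * The debounce functional clock (dbck) only needs to run while at least
 * one GPIO in the bank has debounce enabled; the two helpers below gate
 * it accordingly based on dbck_enable_mask and dbck_enabled.
 */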
static inline void omap_gpio_dbck_enable(struct gpio_bank *bank)
{
	if (bank->dbck_enable_mask && !bank->dbck_enabled) {
		clk_enable(bank->dbck);
		bank->dbck_enabled = true;

		writel_relaxed(bank->dbck_enable_mask,
			       bank->base + bank->regs->debounce_en);
	}
}

static inline void omap_gpio_dbck_disable(struct gpio_bank *bank)
{
	if (bank->dbck_enable_mask && bank->dbck_enabled) {
		/*
		 * Disable debounce before cutting its clock. If debounce is
		 * enabled but the clock is not, the GPIO module seems to be
		 * unable to detect events and generate interrupts, at least
		 * on OMAP3.
		 */
		writel_relaxed(0, bank->base + bank->regs->debounce_en);

		clk_disable(bank->dbck);
		bank->dbck_enabled = false;
	}
}

/**
 * omap2_set_gpio_debounce - low level gpio debounce time
 * @bank: the gpio bank we're acting upon
 * @offset: the gpio number on this @bank
 * @debounce: debounce time to use
 *
 * OMAP's debounce time is in 31us steps
 *   <debounce time> = (GPIO_DEBOUNCINGTIME[7:0].DEBOUNCETIME + 1) x 31
 * so we need to convert and round up to the closest unit. For example,
 * a requested 100 us gives DEBOUNCETIME = 3, i.e. an effective debounce
 * time of (3 + 1) x 31 = 124 us.
 *
 * Return: 0 on success, negative error otherwise.
 */
static int omap2_set_gpio_debounce(struct gpio_bank *bank, unsigned offset,
				   unsigned debounce)
{
	u32 val;
	u32 l;
	bool enable = !!debounce;

	if (!bank->dbck_flag)
		return -ENOTSUPP;

	if (enable) {
		debounce = DIV_ROUND_UP(debounce, 31) - 1;
		if ((debounce & OMAP4_GPIO_DEBOUNCINGTIME_MASK) != debounce)
			return -EINVAL;
	}

	l = BIT(offset);

	clk_enable(bank->dbck);
	writel_relaxed(debounce, bank->base + bank->regs->debounce);

	val = omap_gpio_rmw(bank->base + bank->regs->debounce_en, l, enable);
	bank->dbck_enable_mask = val;

	clk_disable(bank->dbck);
	/*
	 * Enable the debounce clock per module. This call is mandatory
	 * because in omap_gpio_request(), when *_runtime_get_sync() is
	 * called, _gpio_dbck_enable() within the runtime callback fails to
	 * turn on dbck because dbck_enable_mask used within
	 * _gpio_dbck_enable() is not yet initialized at that point.
	 * Therefore we have to enable dbck here.
	 */
	omap_gpio_dbck_enable(bank);
	if (bank->dbck_enable_mask) {
		bank->context.debounce = debounce;
		bank->context.debounce_en = val;
	}

	return 0;
}

/**
 * omap_clear_gpio_debounce - clear debounce settings for a gpio
 * @bank: the gpio bank we're acting upon
 * @offset: the gpio number on this @bank
 *
 * If a gpio is using debounce, then clear the debounce enable bit and if
 * this is the only gpio in this bank using debounce, then clear the debounce
 * time too. The debounce clock will also be disabled when calling this function
 * if this is the only gpio in the bank using debounce.
 */
static void omap_clear_gpio_debounce(struct gpio_bank *bank, unsigned offset)
{
	u32 gpio_bit = BIT(offset);

	if (!bank->dbck_flag)
		return;

	if (!(bank->dbck_enable_mask & gpio_bit))
		return;

	bank->dbck_enable_mask &= ~gpio_bit;
	bank->context.debounce_en &= ~gpio_bit;
	writel_relaxed(bank->context.debounce_en,
		       bank->base + bank->regs->debounce_en);

	if (!bank->dbck_enable_mask) {
		bank->context.debounce = 0;
		writel_relaxed(bank->context.debounce, bank->base +
			       bank->regs->debounce);
		clk_disable(bank->dbck);
		bank->dbck_enabled = false;
	}
}

/*
 * Off mode wake-up capable GPIOs in bank(s) that are in the wakeup domain.
 * See the TRM section on GPIO "Wake-Up Generation" for the list of GPIOs
 * in the wakeup domain. If bank->non_wakeup_gpios is not configured, assume
 * none are capable of waking up the system from off mode.
 */
static bool omap_gpio_is_off_wakeup_capable(struct gpio_bank *bank, u32 gpio_mask)
{
	u32 no_wake = bank->non_wakeup_gpios;

	if (no_wake)
		return !!(~no_wake & gpio_mask);

	return false;
}

static inline void omap_set_gpio_trigger(struct gpio_bank *bank, int gpio,
					 unsigned trigger)
{
	void __iomem *base = bank->base;
	u32 gpio_bit = BIT(gpio);

	omap_gpio_rmw(base + bank->regs->leveldetect0, gpio_bit,
		      trigger & IRQ_TYPE_LEVEL_LOW);
	omap_gpio_rmw(base + bank->regs->leveldetect1, gpio_bit,
		      trigger & IRQ_TYPE_LEVEL_HIGH);

	/*
	 * We need edge detection enabled to allow the GPIO block
	 * to be woken from idle state. Set the appropriate edge detection
	 * in addition to the level detection.
	 */
	omap_gpio_rmw(base + bank->regs->risingdetect, gpio_bit,
		      trigger & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_LEVEL_HIGH));
	omap_gpio_rmw(base + bank->regs->fallingdetect, gpio_bit,
		      trigger & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW));

	bank->context.leveldetect0 =
		readl_relaxed(bank->base + bank->regs->leveldetect0);
	bank->context.leveldetect1 =
		readl_relaxed(bank->base + bank->regs->leveldetect1);
	bank->context.risingdetect =
		readl_relaxed(bank->base + bank->regs->risingdetect);
	bank->context.fallingdetect =
		readl_relaxed(bank->base + bank->regs->fallingdetect);

	bank->level_mask = bank->context.leveldetect0 |
			   bank->context.leveldetect1;

	/* This part needs to be executed always for OMAP{34xx, 44xx} */
	if (!bank->regs->irqctrl && !omap_gpio_is_off_wakeup_capable(bank, gpio)) {
		/*
		 * Log the edge gpio and manually trigger the IRQ
		 * after resume if the input level changes,
		 * to avoid losing the irq during PER RET/OFF mode.
		 * Applies to omap2 non-wakeup gpios and all omap3 gpios.
		 */
		if (trigger & IRQ_TYPE_EDGE_BOTH)
			bank->enabled_non_wakeup_gpios |= gpio_bit;
		else
			bank->enabled_non_wakeup_gpios &= ~gpio_bit;
	}
}

/*
 * This only applies to chips that can't do both rising and falling edge
 * detection at once. For all other chips, this function is a noop.
 */
static void omap_toggle_gpio_edge_triggering(struct gpio_bank *bank, int gpio)
{
	if (IS_ENABLED(CONFIG_ARCH_OMAP1) && bank->regs->irqctrl) {
		void __iomem *reg = bank->base + bank->regs->irqctrl;

		writel_relaxed(readl_relaxed(reg) ^ BIT(gpio), reg);
	}
}

static int omap_set_gpio_triggering(struct gpio_bank *bank, int gpio,
				    unsigned trigger)
{
	void __iomem *reg = bank->base;
	u32 l = 0;

	if (bank->regs->leveldetect0 && bank->regs->wkup_en) {
		omap_set_gpio_trigger(bank, gpio, trigger);
	} else if (bank->regs->irqctrl) {
		reg += bank->regs->irqctrl;

		l = readl_relaxed(reg);
		if ((trigger & IRQ_TYPE_SENSE_MASK) == IRQ_TYPE_EDGE_BOTH)
			bank->toggle_mask |= BIT(gpio);
		if (trigger & IRQ_TYPE_EDGE_RISING)
			l |= BIT(gpio);
		else if (trigger & IRQ_TYPE_EDGE_FALLING)
			l &= ~(BIT(gpio));
		else
			return -EINVAL;

		writel_relaxed(l, reg);
	} else if (bank->regs->edgectrl1) {
		if (gpio & 0x08)
			reg += bank->regs->edgectrl2;
		else
			reg += bank->regs->edgectrl1;

		gpio &= 0x07;
		l = readl_relaxed(reg);
		l &= ~(3 << (gpio << 1));
		if (trigger & IRQ_TYPE_EDGE_RISING)
			l |= 2 << (gpio << 1);
		if (trigger & IRQ_TYPE_EDGE_FALLING)
			l |= BIT(gpio << 1);
		writel_relaxed(l, reg);
	}
	return 0;
}

static void omap_enable_gpio_module(struct gpio_bank *bank, unsigned offset)
{
	if (bank->regs->pinctrl) {
		void __iomem *reg = bank->base + bank->regs->pinctrl;

		/* Claim the pin for MPU */
		writel_relaxed(readl_relaxed(reg) | (BIT(offset)), reg);
	}

	if (bank->regs->ctrl && !BANK_USED(bank)) {
		void __iomem *reg = bank->base + bank->regs->ctrl;
		u32 ctrl;

		ctrl = readl_relaxed(reg);
		/* Module is enabled, clocks are not gated */
		ctrl &= ~GPIO_MOD_CTRL_BIT;
		writel_relaxed(ctrl, reg);
		bank->context.ctrl = ctrl;
	}
}

static void omap_disable_gpio_module(struct gpio_bank *bank, unsigned offset)
{
	if (bank->regs->ctrl && !BANK_USED(bank)) {
		void __iomem *reg = bank->base + bank->regs->ctrl;
		u32 ctrl;

		ctrl = readl_relaxed(reg);
		/* Module is disabled, clocks are gated */
		ctrl |= GPIO_MOD_CTRL_BIT;
		writel_relaxed(ctrl, reg);
		bank->context.ctrl = ctrl;
	}
}

static int omap_gpio_is_input(struct gpio_bank *bank, unsigned offset)
{
	void __iomem *reg = bank->base + bank->regs->direction;

	return readl_relaxed(reg) & BIT(offset);
}

static void omap_gpio_init_irq(struct gpio_bank *bank, unsigned offset)
{
	if (!LINE_USED(bank->mod_usage, offset)) {
		omap_enable_gpio_module(bank, offset);
		omap_set_gpio_direction(bank, offset, 1);
	}
	bank->irq_usage |= BIT(offset);
}

static int omap_gpio_irq_type(struct irq_data *d, unsigned type)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	int retval;
	unsigned long flags;
	unsigned offset = d->hwirq;

	if (type & ~IRQ_TYPE_SENSE_MASK)
		return -EINVAL;

	if (!bank->regs->leveldetect0 &&
	    (type & (IRQ_TYPE_LEVEL_LOW|IRQ_TYPE_LEVEL_HIGH)))
		return -EINVAL;

	raw_spin_lock_irqsave(&bank->lock, flags);
	retval = omap_set_gpio_triggering(bank, offset, type);
	if (retval) {
		raw_spin_unlock_irqrestore(&bank->lock, flags);
		goto error;
	}
	omap_gpio_init_irq(bank, offset);
	if (!omap_gpio_is_input(bank, offset)) {
		raw_spin_unlock_irqrestore(&bank->lock, flags);
		retval = -EINVAL;
		goto error;
	}
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH))
		irq_set_handler_locked(d, handle_level_irq);
	else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING))
		/*
		 * Edge IRQs are already cleared/acked in the irq handler and
		 * do not need to be masked, so the handle_edge_irq() logic
		 * would be redundant here and could cause interrupts to be
		 * lost. Just use handle_simple_irq.
		 */
		irq_set_handler_locked(d, handle_simple_irq);

	return 0;

error:
	return retval;
}

static void omap_clear_gpio_irqbank(struct gpio_bank *bank, int gpio_mask)
{
	void __iomem *reg = bank->base;

	reg += bank->regs->irqstatus;
	writel_relaxed(gpio_mask, reg);

	/* Workaround for clearing DSP GPIO interrupts to allow retention */
	if (bank->regs->irqstatus2) {
		reg = bank->base + bank->regs->irqstatus2;
		writel_relaxed(gpio_mask, reg);
	}

	/* Flush posted write for the irq status to avoid spurious interrupts */
	readl_relaxed(reg);
}

static inline void omap_clear_gpio_irqstatus(struct gpio_bank *bank,
					     unsigned offset)
{
	omap_clear_gpio_irqbank(bank, BIT(offset));
}

static u32 omap_get_gpio_irqbank_mask(struct gpio_bank *bank)
{
	void __iomem *reg = bank->base;
	u32 l;
	u32 mask = (BIT(bank->width)) - 1;

	reg += bank->regs->irqenable;
	l = readl_relaxed(reg);
	if (bank->regs->irqenable_inv)
		l = ~l;
	l &= mask;
	return l;
}

static inline void omap_set_gpio_irqenable(struct gpio_bank *bank,
					   unsigned offset, int enable)
{
	void __iomem *reg = bank->base;
	u32 gpio_mask = BIT(offset);

	if (bank->regs->set_irqenable && bank->regs->clr_irqenable) {
		if (enable) {
			reg += bank->regs->set_irqenable;
			bank->context.irqenable1 |= gpio_mask;
		} else {
			reg += bank->regs->clr_irqenable;
			bank->context.irqenable1 &= ~gpio_mask;
		}
		writel_relaxed(gpio_mask, reg);
	} else {
		bank->context.irqenable1 =
			omap_gpio_rmw(reg + bank->regs->irqenable, gpio_mask,
				      enable ^ bank->regs->irqenable_inv);
	}

	/*
	 * Program GPIO wakeup along with IRQ enable to satisfy OMAP4430 TRM
	 * note requiring correlation between the IRQ enable registers and
	 * the wakeup registers. In any case, we want wakeup from idle
	 * enabled for the GPIOs which support this feature.
	 */
	if (bank->regs->wkup_en &&
	    (bank->regs->edgectrl1 || !(bank->non_wakeup_gpios & gpio_mask))) {
		bank->context.wake_en =
			omap_gpio_rmw(bank->base + bank->regs->wkup_en,
				      gpio_mask, enable);
	}
}

/* Use disable_irq_wake() and enable_irq_wake() functions from drivers */
static int omap_gpio_wake_enable(struct irq_data *d, unsigned int enable)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);

	return irq_set_irq_wake(bank->irq, enable);
}

/*
 * We need to unmask the GPIO bank interrupt as soon as possible to
 * avoid missing GPIO interrupts for other lines in the bank.
 * Then we need to mask-read-clear-unmask the triggered GPIO lines
 * in the bank to avoid missing nested interrupts for a GPIO line.
 * If we wait to unmask individual GPIO lines in the bank after the
 * line's interrupt handler has been run, we may miss some nested
 * interrupts.
 */
static irqreturn_t omap_gpio_irq_handler(int irq, void *gpiobank)
{
	void __iomem *isr_reg = NULL;
	u32 enabled, isr, edge;
	unsigned int bit;
	struct gpio_bank *bank = gpiobank;
	unsigned long wa_lock_flags;
	unsigned long lock_flags;

	isr_reg = bank->base + bank->regs->irqstatus;
	if (WARN_ON(!isr_reg))
		goto exit;

	if (WARN_ONCE(!pm_runtime_active(bank->chip.parent),
		      "gpio irq%i while runtime suspended?\n", irq))
		return IRQ_NONE;

	while (1) {
		raw_spin_lock_irqsave(&bank->lock, lock_flags);

		enabled = omap_get_gpio_irqbank_mask(bank);
		isr = readl_relaxed(isr_reg) & enabled;

		/*
		 * Clear edge sensitive interrupts before calling handler(s)
		 * so subsequent edge transitions are not missed while the
		 * handlers are running.
		 */
		edge = isr & ~bank->level_mask;
		if (edge)
			omap_clear_gpio_irqbank(bank, edge);

		raw_spin_unlock_irqrestore(&bank->lock, lock_flags);

		if (!isr)
			break;

		while (isr) {
			bit = __ffs(isr);
			isr &= ~(BIT(bit));

			raw_spin_lock_irqsave(&bank->lock, lock_flags);
			/*
			 * Some chips can't respond to both rising and falling
			 * edges at the same time. If this irq was requested
			 * with both flags, we need to flip the ICR data for
			 * the IRQ so it responds to the opposite edge. This
			 * is indicated in the bank toggle_mask.
			 */
			if (bank->toggle_mask & (BIT(bit)))
				omap_toggle_gpio_edge_triggering(bank, bit);

			raw_spin_unlock_irqrestore(&bank->lock, lock_flags);

			raw_spin_lock_irqsave(&bank->wa_lock, wa_lock_flags);

			generic_handle_domain_irq(bank->chip.irq.domain, bit);

			raw_spin_unlock_irqrestore(&bank->wa_lock,
						   wa_lock_flags);
		}
	}
exit:
	return IRQ_HANDLED;
}

static unsigned int omap_gpio_irq_startup(struct irq_data *d)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	unsigned long flags;
	unsigned offset = d->hwirq;

	raw_spin_lock_irqsave(&bank->lock, flags);

	if (!LINE_USED(bank->mod_usage, offset))
		omap_set_gpio_direction(bank, offset, 1);
	omap_enable_gpio_module(bank, offset);
	bank->irq_usage |= BIT(offset);

	raw_spin_unlock_irqrestore(&bank->lock, flags);
	omap_gpio_unmask_irq(d);

	return 0;
}

static void omap_gpio_irq_shutdown(struct irq_data *d)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	unsigned long flags;
	unsigned offset = d->hwirq;

	raw_spin_lock_irqsave(&bank->lock, flags);
	bank->irq_usage &= ~(BIT(offset));
	omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
	omap_clear_gpio_irqstatus(bank, offset);
	omap_set_gpio_irqenable(bank, offset, 0);
	if (!LINE_USED(bank->mod_usage, offset))
		omap_clear_gpio_debounce(bank, offset);
	omap_disable_gpio_module(bank, offset);
	raw_spin_unlock_irqrestore(&bank->lock, flags);
}
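/*
 * irq_bus_lock()/irq_bus_sync_unlock() are used here only to keep the
 * GPIO bank powered via runtime PM while the irqchip callbacks that may
 * access bank registers run in between them.
 */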
static void omap_gpio_irq_bus_lock(struct irq_data *data)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(data);

	pm_runtime_get_sync(bank->chip.parent);
}

static void gpio_irq_bus_sync_unlock(struct irq_data *data)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(data);

	pm_runtime_put(bank->chip.parent);
}

static void omap_gpio_mask_irq(struct irq_data *d)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	unsigned offset = d->hwirq;
	unsigned long flags;

	raw_spin_lock_irqsave(&bank->lock, flags);
	omap_set_gpio_triggering(bank, offset, IRQ_TYPE_NONE);
	omap_set_gpio_irqenable(bank, offset, 0);
	raw_spin_unlock_irqrestore(&bank->lock, flags);
	gpiochip_disable_irq(&bank->chip, offset);
}

static void omap_gpio_unmask_irq(struct irq_data *d)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);
	unsigned offset = d->hwirq;
	u32 trigger = irqd_get_trigger_type(d);
	unsigned long flags;

	gpiochip_enable_irq(&bank->chip, offset);
	raw_spin_lock_irqsave(&bank->lock, flags);
	omap_set_gpio_irqenable(bank, offset, 1);

	/*
	 * For level-triggered GPIOs, clearing must be done after the source
	 * is cleared, thus after the handler has run. OMAP4 needs this done
	 * after enabling the interrupt to clear the wakeup status.
	 */
	if (bank->regs->leveldetect0 && bank->regs->wkup_en &&
	    trigger & (IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW))
		omap_clear_gpio_irqstatus(bank, offset);

	if (trigger)
		omap_set_gpio_triggering(bank, offset, trigger);

	raw_spin_unlock_irqrestore(&bank->lock, flags);
}

static void omap_gpio_irq_print_chip(struct irq_data *d, struct seq_file *p)
{
	struct gpio_bank *bank = omap_irq_data_get_bank(d);

	seq_puts(p, dev_name(bank->dev));
}

static const struct irq_chip omap_gpio_irq_chip = {
	.irq_startup = omap_gpio_irq_startup,
	.irq_shutdown = omap_gpio_irq_shutdown,
	.irq_mask = omap_gpio_mask_irq,
	.irq_unmask = omap_gpio_unmask_irq,
	.irq_set_type = omap_gpio_irq_type,
	.irq_set_wake = omap_gpio_wake_enable,
	.irq_bus_lock = omap_gpio_irq_bus_lock,
	.irq_bus_sync_unlock = gpio_irq_bus_sync_unlock,
	.irq_print_chip = omap_gpio_irq_print_chip,
	.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_IMMUTABLE,
	GPIOCHIP_IRQ_RESOURCE_HELPERS,
};

static const struct irq_chip omap_gpio_irq_chip_nowake = {
	.irq_startup = omap_gpio_irq_startup,
	.irq_shutdown = omap_gpio_irq_shutdown,
	.irq_mask = omap_gpio_mask_irq,
	.irq_unmask = omap_gpio_unmask_irq,
	.irq_set_type = omap_gpio_irq_type,
	.irq_bus_lock = omap_gpio_irq_bus_lock,
	.irq_bus_sync_unlock = gpio_irq_bus_sync_unlock,
	.irq_print_chip = omap_gpio_irq_print_chip,
	.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_IMMUTABLE,
	GPIOCHIP_IRQ_RESOURCE_HELPERS,
};

/*---------------------------------------------------------------------*/

static int omap_mpuio_suspend_noirq(struct device *dev)
{
	struct gpio_bank *bank = dev_get_drvdata(dev);
	void __iomem *mask_reg = bank->base +
				 OMAP_MPUIO_GPIO_MASKIT / bank->stride;
	unsigned long flags;

	raw_spin_lock_irqsave(&bank->lock, flags);
	writel_relaxed(0xffff & ~bank->context.wake_en, mask_reg);
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static int omap_mpuio_resume_noirq(struct device *dev)
{
	struct gpio_bank *bank = dev_get_drvdata(dev);
	void __iomem *mask_reg = bank->base +
				 OMAP_MPUIO_GPIO_MASKIT / bank->stride;
	unsigned long flags;

	raw_spin_lock_irqsave(&bank->lock, flags);
	writel_relaxed(bank->context.wake_en, mask_reg);
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static const struct dev_pm_ops omap_mpuio_dev_pm_ops = {
	.suspend_noirq = omap_mpuio_suspend_noirq,
	.resume_noirq = omap_mpuio_resume_noirq,
};
/* use platform_driver for this. */
static struct platform_driver omap_mpuio_driver = {
	.driver = {
		.name = "mpuio",
		.pm = &omap_mpuio_dev_pm_ops,
	},
};

static struct platform_device omap_mpuio_device = {
	.name = "mpuio",
	.id = -1,
	.dev = {
		.driver = &omap_mpuio_driver.driver,
	}
	/* could list the /proc/iomem resources */
};

static inline void omap_mpuio_init(struct gpio_bank *bank)
{
	platform_set_drvdata(&omap_mpuio_device, bank);

	if (platform_driver_register(&omap_mpuio_driver) == 0)
		(void) platform_device_register(&omap_mpuio_device);
}

/*---------------------------------------------------------------------*/

static int omap_gpio_request(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank = gpiochip_get_data(chip);
	unsigned long flags;

	pm_runtime_get_sync(chip->parent);

	raw_spin_lock_irqsave(&bank->lock, flags);
	omap_enable_gpio_module(bank, offset);
	bank->mod_usage |= BIT(offset);
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static void omap_gpio_free(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank = gpiochip_get_data(chip);
	unsigned long flags;

	raw_spin_lock_irqsave(&bank->lock, flags);
	bank->mod_usage &= ~(BIT(offset));
	if (!LINE_USED(bank->irq_usage, offset)) {
		omap_set_gpio_direction(bank, offset, 1);
		omap_clear_gpio_debounce(bank, offset);
	}
	omap_disable_gpio_module(bank, offset);
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	pm_runtime_put(chip->parent);
}

static int omap_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank = gpiochip_get_data(chip);

	if (readl_relaxed(bank->base + bank->regs->direction) & BIT(offset))
		return GPIO_LINE_DIRECTION_IN;

	return GPIO_LINE_DIRECTION_OUT;
}

static int omap_gpio_input(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = gpiochip_get_data(chip);
	raw_spin_lock_irqsave(&bank->lock, flags);
	omap_set_gpio_direction(bank, offset, 1);
	raw_spin_unlock_irqrestore(&bank->lock, flags);
	return 0;
}

static int omap_gpio_get(struct gpio_chip *chip, unsigned offset)
{
	struct gpio_bank *bank = gpiochip_get_data(chip);
	void __iomem *reg;

	if (omap_gpio_is_input(bank, offset))
		reg = bank->base + bank->regs->datain;
	else
		reg = bank->base + bank->regs->dataout;

	return (readl_relaxed(reg) & BIT(offset)) != 0;
}

static int omap_gpio_output(struct gpio_chip *chip, unsigned offset, int value)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = gpiochip_get_data(chip);
	raw_spin_lock_irqsave(&bank->lock, flags);
	bank->set_dataout(bank, offset, value);
	omap_set_gpio_direction(bank, offset, 0);
	raw_spin_unlock_irqrestore(&bank->lock, flags);
	return 0;
}

static int omap_gpio_get_multiple(struct gpio_chip *chip, unsigned long *mask,
				  unsigned long *bits)
{
	struct gpio_bank *bank = gpiochip_get_data(chip);
	void __iomem *base = bank->base;
	u32 direction, m, val = 0;

	direction = readl_relaxed(base + bank->regs->direction);

	m = direction & *mask;
	if (m)
		val |= readl_relaxed(base + bank->regs->datain) & m;

	m = ~direction & *mask;
	if (m)
		val |= readl_relaxed(base + bank->regs->dataout) & m;
	*bits = val;

	return 0;
}

static int omap_gpio_debounce(struct gpio_chip *chip, unsigned offset,
			      unsigned debounce)
{
	struct gpio_bank *bank;
	unsigned long flags;
	int ret;

	bank = gpiochip_get_data(chip);

	raw_spin_lock_irqsave(&bank->lock, flags);
	ret = omap2_set_gpio_debounce(bank, offset, debounce);
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	if (ret)
		dev_info(chip->parent,
			 "Could not set line %u debounce to %u microseconds (%d)",
			 offset, debounce, ret);

	return ret;
}

static int omap_gpio_set_config(struct gpio_chip *chip, unsigned offset,
				unsigned long config)
{
	u32 debounce;
	int ret = -ENOTSUPP;

	switch (pinconf_to_config_param(config)) {
	case PIN_CONFIG_BIAS_DISABLE:
	case PIN_CONFIG_BIAS_PULL_UP:
	case PIN_CONFIG_BIAS_PULL_DOWN:
		ret = gpiochip_generic_config(chip, offset, config);
		break;
	case PIN_CONFIG_INPUT_DEBOUNCE:
		debounce = pinconf_to_config_argument(config);
		ret = omap_gpio_debounce(chip, offset, debounce);
		break;
	default:
		break;
	}

	return ret;
}

static void omap_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
	struct gpio_bank *bank;
	unsigned long flags;

	bank = gpiochip_get_data(chip);
	raw_spin_lock_irqsave(&bank->lock, flags);
	bank->set_dataout(bank, offset, value);
	raw_spin_unlock_irqrestore(&bank->lock, flags);
}

static void omap_gpio_set_multiple(struct gpio_chip *chip, unsigned long *mask,
				   unsigned long *bits)
{
	struct gpio_bank *bank = gpiochip_get_data(chip);
	void __iomem *reg = bank->base + bank->regs->dataout;
	unsigned long flags;
	u32 l;

	raw_spin_lock_irqsave(&bank->lock, flags);
	l = (readl_relaxed(reg) & ~*mask) | (*bits & *mask);
	writel_relaxed(l, reg);
	bank->context.dataout = l;
	raw_spin_unlock_irqrestore(&bank->lock, flags);
}

/*---------------------------------------------------------------------*/

static void omap_gpio_show_rev(struct gpio_bank *bank)
{
	static bool called;
	u32 rev;

	if (called || bank->regs->revision == USHRT_MAX)
		return;

	rev = readw_relaxed(bank->base + bank->regs->revision);
	pr_info("OMAP GPIO hardware version %d.%d\n",
		(rev >> 4) & 0x0f, rev & 0x0f);

	called = true;
}

static void omap_gpio_mod_init(struct gpio_bank *bank)
{
	void __iomem *base = bank->base;
	u32 l = 0xffffffff;

	if (bank->width == 16)
		l = 0xffff;

	if (bank->is_mpuio) {
		writel_relaxed(l, bank->base + bank->regs->irqenable);
		return;
	}

	omap_gpio_rmw(base + bank->regs->irqenable, l,
		      bank->regs->irqenable_inv);
	omap_gpio_rmw(base + bank->regs->irqstatus, l,
		      !bank->regs->irqenable_inv);
	if (bank->regs->debounce_en)
		writel_relaxed(0, base + bank->regs->debounce_en);

	/* Save OE default value (0xffffffff) in the context */
	bank->context.oe = readl_relaxed(bank->base + bank->regs->direction);
	/* Initialize interface clk ungated, module enabled */
	if (bank->regs->ctrl)
		writel_relaxed(0, base + bank->regs->ctrl);
}

static int omap_gpio_chip_init(struct gpio_bank *bank, struct device *pm_dev)
{
	struct gpio_irq_chip *irq;
	static int gpio;
	const char *label;
	int ret;

	/*
	 * REVISIT eventually switch from OMAP-specific gpio structs
	 * over to the generic ones
	 */
	bank->chip.request = omap_gpio_request;
	bank->chip.free = omap_gpio_free;
	bank->chip.get_direction = omap_gpio_get_direction;
	bank->chip.direction_input = omap_gpio_input;
	bank->chip.get = omap_gpio_get;
	bank->chip.get_multiple = omap_gpio_get_multiple;
	bank->chip.direction_output = omap_gpio_output;
	bank->chip.set_config = omap_gpio_set_config;
	bank->chip.set = omap_gpio_set;
	bank->chip.set_multiple = omap_gpio_set_multiple;
	if (bank->is_mpuio) {
		bank->chip.label = "mpuio";
		if (bank->regs->wkup_en)
			bank->chip.parent = &omap_mpuio_device.dev;
		bank->chip.base = OMAP_MPUIO(0);
	} else {
		label = devm_kasprintf(bank->chip.parent, GFP_KERNEL, "gpio-%d-%d",
				       gpio, gpio + bank->width - 1);
		if (!label)
			return -ENOMEM;
		bank->chip.label = label;
		bank->chip.base = -1;
	}
	bank->chip.ngpio = bank->width;

	irq = &bank->chip.irq;
	/* MPUIO is a bit different, reading IRQ status clears it */
	if (bank->is_mpuio && !bank->regs->wkup_en)
		gpio_irq_chip_set_chip(irq, &omap_gpio_irq_chip_nowake);
	else
		gpio_irq_chip_set_chip(irq, &omap_gpio_irq_chip);
	irq->handler = handle_bad_irq;
	irq->default_type = IRQ_TYPE_NONE;
	irq->num_parents = 1;
	irq->parents = &bank->irq;

	ret = gpiochip_add_data(&bank->chip, bank);
	if (ret)
		return dev_err_probe(bank->chip.parent, ret, "Could not register gpio chip\n");

	irq_domain_set_pm_device(bank->chip.irq.domain, pm_dev);
	ret = devm_request_irq(bank->chip.parent, bank->irq,
			       omap_gpio_irq_handler,
			       0, dev_name(bank->chip.parent), bank);
	if (ret)
		gpiochip_remove(&bank->chip);

	if (!bank->is_mpuio)
		gpio += bank->width;

	return ret;
}

static void omap_gpio_init_context(struct gpio_bank *p)
{
	const struct omap_gpio_reg_offs *regs = p->regs;
	void __iomem *base = p->base;

	p->context.sysconfig = readl_relaxed(base + regs->sysconfig);
	p->context.ctrl = readl_relaxed(base + regs->ctrl);
	p->context.oe = readl_relaxed(base + regs->direction);
	p->context.wake_en = readl_relaxed(base + regs->wkup_en);
	p->context.leveldetect0 = readl_relaxed(base + regs->leveldetect0);
	p->context.leveldetect1 = readl_relaxed(base + regs->leveldetect1);
	p->context.risingdetect = readl_relaxed(base + regs->risingdetect);
	p->context.fallingdetect = readl_relaxed(base + regs->fallingdetect);
	p->context.irqenable1 = readl_relaxed(base + regs->irqenable);
	p->context.irqenable2 = readl_relaxed(base + regs->irqenable2);
	p->context.dataout = readl_relaxed(base + regs->dataout);

	p->context_valid = true;
}
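/*
 * Note that dataout is restored before the direction (OE) register below,
 * so lines switching back to output already drive their saved value rather
 * than the reset value of the data register.
 */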
static void omap_gpio_restore_context(struct gpio_bank *bank)
{
	const struct omap_gpio_reg_offs *regs = bank->regs;
	void __iomem *base = bank->base;

	writel_relaxed(bank->context.sysconfig, base + regs->sysconfig);
	writel_relaxed(bank->context.wake_en, base + regs->wkup_en);
	writel_relaxed(bank->context.ctrl, base + regs->ctrl);
	writel_relaxed(bank->context.leveldetect0, base + regs->leveldetect0);
	writel_relaxed(bank->context.leveldetect1, base + regs->leveldetect1);
	writel_relaxed(bank->context.risingdetect, base + regs->risingdetect);
	writel_relaxed(bank->context.fallingdetect, base + regs->fallingdetect);
	writel_relaxed(bank->context.dataout, base + regs->dataout);
	writel_relaxed(bank->context.oe, base + regs->direction);

	if (bank->dbck_enable_mask) {
		writel_relaxed(bank->context.debounce, base + regs->debounce);
		writel_relaxed(bank->context.debounce_en,
			       base + regs->debounce_en);
	}

	writel_relaxed(bank->context.irqenable1, base + regs->irqenable);
	writel_relaxed(bank->context.irqenable2, base + regs->irqenable2);
}

static void omap_gpio_idle(struct gpio_bank *bank, bool may_lose_context)
{
	struct device *dev = bank->chip.parent;
	void __iomem *base = bank->base;
	u32 mask, nowake;

	bank->saved_datain = readl_relaxed(base + bank->regs->datain);

	/* Save sysconfig; its runtime value can differ from the init value */
	if (bank->loses_context)
		bank->context.sysconfig = readl_relaxed(base + bank->regs->sysconfig);

	if (!bank->enabled_non_wakeup_gpios)
		goto update_gpio_context_count;

	/* Check for pending EDGE_FALLING, ignore EDGE_BOTH */
	mask = bank->enabled_non_wakeup_gpios & bank->context.fallingdetect;
	mask &= ~bank->context.risingdetect;
	bank->saved_datain |= mask;

	/* Check for pending EDGE_RISING, ignore EDGE_BOTH */
	mask = bank->enabled_non_wakeup_gpios & bank->context.risingdetect;
	mask &= ~bank->context.fallingdetect;
	bank->saved_datain &= ~mask;

	if (!may_lose_context)
		goto update_gpio_context_count;

	/*
	 * If going to OFF, remove triggering for all wkup domain
	 * non-wakeup GPIOs. Otherwise spurious IRQs will be
	 * generated. See OMAP2420 Errata item 1.101.
	 */
	if (!bank->loses_context && bank->enabled_non_wakeup_gpios) {
		nowake = bank->enabled_non_wakeup_gpios;
		omap_gpio_rmw(base + bank->regs->fallingdetect, nowake, ~nowake);
		omap_gpio_rmw(base + bank->regs->risingdetect, nowake, ~nowake);
	}

update_gpio_context_count:
	if (bank->get_context_loss_count)
		bank->context_loss_count =
			bank->get_context_loss_count(dev);

	omap_gpio_dbck_disable(bank);
}

static void omap_gpio_unidle(struct gpio_bank *bank)
{
	struct device *dev = bank->chip.parent;
	u32 l = 0, gen, gen0, gen1;
	int c;

	/*
	 * On the first resume during the probe, the context has not
	 * been initialised and so initialise it now. Also initialise
	 * the context loss count.
	 */
	if (bank->loses_context && !bank->context_valid) {
		omap_gpio_init_context(bank);

		if (bank->get_context_loss_count)
			bank->context_loss_count =
				bank->get_context_loss_count(dev);
	}

	omap_gpio_dbck_enable(bank);

	if (bank->loses_context) {
		if (!bank->get_context_loss_count) {
			omap_gpio_restore_context(bank);
		} else {
			c = bank->get_context_loss_count(dev);
			if (c != bank->context_loss_count) {
				omap_gpio_restore_context(bank);
			} else {
				return;
			}
		}
	} else {
		/* Restore changes done for OMAP2420 errata 1.101 */
		writel_relaxed(bank->context.fallingdetect,
			       bank->base + bank->regs->fallingdetect);
		writel_relaxed(bank->context.risingdetect,
			       bank->base + bank->regs->risingdetect);
	}

	l = readl_relaxed(bank->base + bank->regs->datain);

	/*
	 * Check if any of the non-wakeup interrupt GPIOs have changed
	 * state. If so, generate an IRQ by software. This is
	 * horribly racy, but it's the best we can do to work around
	 * this silicon bug.
	 */
	l ^= bank->saved_datain;
	l &= bank->enabled_non_wakeup_gpios;

	/*
	 * No need to generate IRQs for the rising edge for gpio IRQs
	 * configured with falling edge only; and vice versa.
	 */
	gen0 = l & bank->context.fallingdetect;
	gen0 &= bank->saved_datain;

	gen1 = l & bank->context.risingdetect;
	gen1 &= ~(bank->saved_datain);

	/* FIXME: Consider GPIO IRQs with level detections properly! */
	gen = l & (~(bank->context.fallingdetect) &
		   ~(bank->context.risingdetect));
	/* Consider all GPIO IRQs needed to be updated */
	gen |= gen0 | gen1;

	if (gen) {
		u32 old0, old1;

		old0 = readl_relaxed(bank->base + bank->regs->leveldetect0);
		old1 = readl_relaxed(bank->base + bank->regs->leveldetect1);

		if (!bank->regs->irqstatus_raw0) {
			writel_relaxed(old0 | gen, bank->base +
				       bank->regs->leveldetect0);
			writel_relaxed(old1 | gen, bank->base +
				       bank->regs->leveldetect1);
		}

		if (bank->regs->irqstatus_raw0) {
			writel_relaxed(old0 | l, bank->base +
				       bank->regs->leveldetect0);
			writel_relaxed(old1 | l, bank->base +
				       bank->regs->leveldetect1);
		}
		writel_relaxed(old0, bank->base + bank->regs->leveldetect0);
		writel_relaxed(old1, bank->base + bank->regs->leveldetect1);
	}
}

static int gpio_omap_cpu_notifier(struct notifier_block *nb,
				  unsigned long cmd, void *v)
{
	struct gpio_bank *bank;
	unsigned long flags;
	int ret = NOTIFY_OK;
	u32 isr, mask;

	bank = container_of(nb, struct gpio_bank, nb);

	raw_spin_lock_irqsave(&bank->lock, flags);
	if (bank->is_suspended)
		goto out_unlock;

	switch (cmd) {
	case CPU_CLUSTER_PM_ENTER:
		mask = omap_get_gpio_irqbank_mask(bank);
		isr = readl_relaxed(bank->base + bank->regs->irqstatus) & mask;
		if (isr) {
			ret = NOTIFY_BAD;
			break;
		}
		omap_gpio_idle(bank, true);
		break;
	case CPU_CLUSTER_PM_ENTER_FAILED:
	case CPU_CLUSTER_PM_EXIT:
		omap_gpio_unidle(bank);
		break;
	}

out_unlock:
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	return ret;
}

static const struct omap_gpio_reg_offs omap2_gpio_regs = {
	.revision = OMAP24XX_GPIO_REVISION,
	.sysconfig = OMAP24XX_GPIO_SYSCONFIG,
	.direction = OMAP24XX_GPIO_OE,
	.datain = OMAP24XX_GPIO_DATAIN,
	.dataout = OMAP24XX_GPIO_DATAOUT,
	.set_dataout = OMAP24XX_GPIO_SETDATAOUT,
	.clr_dataout = OMAP24XX_GPIO_CLEARDATAOUT,
	.irqstatus = OMAP24XX_GPIO_IRQSTATUS1,
	.irqstatus2 = OMAP24XX_GPIO_IRQSTATUS2,
	.irqenable = OMAP24XX_GPIO_IRQENABLE1,
	.irqenable2 = OMAP24XX_GPIO_IRQENABLE2,
	.set_irqenable = OMAP24XX_GPIO_SETIRQENABLE1,
	.clr_irqenable = OMAP24XX_GPIO_CLEARIRQENABLE1,
	.debounce = OMAP24XX_GPIO_DEBOUNCE_VAL,
	.debounce_en = OMAP24XX_GPIO_DEBOUNCE_EN,
	.ctrl = OMAP24XX_GPIO_CTRL,
	.wkup_en = OMAP24XX_GPIO_WAKE_EN,
	.leveldetect0 = OMAP24XX_GPIO_LEVELDETECT0,
	.leveldetect1 = OMAP24XX_GPIO_LEVELDETECT1,
	.risingdetect = OMAP24XX_GPIO_RISINGDETECT,
	.fallingdetect = OMAP24XX_GPIO_FALLINGDETECT,
};

static const struct omap_gpio_reg_offs omap4_gpio_regs = {
	.revision = OMAP4_GPIO_REVISION,
	.sysconfig = OMAP4_GPIO_SYSCONFIG,
	.direction = OMAP4_GPIO_OE,
	.datain = OMAP4_GPIO_DATAIN,
	.dataout = OMAP4_GPIO_DATAOUT,
	.set_dataout = OMAP4_GPIO_SETDATAOUT,
	.clr_dataout = OMAP4_GPIO_CLEARDATAOUT,
	.irqstatus = OMAP4_GPIO_IRQSTATUS0,
	.irqstatus2 = OMAP4_GPIO_IRQSTATUS1,
	.irqstatus_raw0 = OMAP4_GPIO_IRQSTATUSRAW0,
	.irqstatus_raw1 = OMAP4_GPIO_IRQSTATUSRAW1,
	.irqenable = OMAP4_GPIO_IRQSTATUSSET0,
	.irqenable2 = OMAP4_GPIO_IRQSTATUSSET1,
	.set_irqenable = OMAP4_GPIO_IRQSTATUSSET0,
	.clr_irqenable = OMAP4_GPIO_IRQSTATUSCLR0,
	.debounce = OMAP4_GPIO_DEBOUNCINGTIME,
	.debounce_en = OMAP4_GPIO_DEBOUNCENABLE,
	.ctrl = OMAP4_GPIO_CTRL,
	.wkup_en = OMAP4_GPIO_IRQWAKEN0,
	.leveldetect0 = OMAP4_GPIO_LEVELDETECT0,
	.leveldetect1 = OMAP4_GPIO_LEVELDETECT1,
	.risingdetect = OMAP4_GPIO_RISINGDETECT,
	.fallingdetect = OMAP4_GPIO_FALLINGDETECT,
};

static const struct omap_gpio_platform_data omap2_pdata = {
	.regs = &omap2_gpio_regs,
	.bank_width = 32,
	.dbck_flag = false,
};

static const struct omap_gpio_platform_data omap3_pdata = {
	.regs = &omap2_gpio_regs,
	.bank_width = 32,
	.dbck_flag = true,
};

static const struct omap_gpio_platform_data omap4_pdata = {
	.regs = &omap4_gpio_regs,
	.bank_width = 32,
	.dbck_flag = true,
};

static const struct of_device_id omap_gpio_match[] = {
	{
		.compatible = "ti,omap4-gpio",
		.data = &omap4_pdata,
	},
	{
		.compatible = "ti,omap3-gpio",
		.data = &omap3_pdata,
	},
	{
		.compatible = "ti,omap2-gpio",
		.data = &omap2_pdata,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, omap_gpio_match);

static int omap_gpio_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *node = dev->of_node;
	const struct omap_gpio_platform_data *pdata;
	struct gpio_bank *bank;
	int ret;

	pdata = device_get_match_data(dev);

	pdata = pdata ?: dev_get_platdata(dev);
	if (!pdata)
		return -EINVAL;

	bank = devm_kzalloc(dev, sizeof(*bank), GFP_KERNEL);
	if (!bank)
		return -ENOMEM;

	bank->dev = dev;

	bank->irq = platform_get_irq(pdev, 0);
	if (bank->irq < 0)
		return bank->irq;

	bank->chip.parent = dev;
	bank->chip.owner = THIS_MODULE;
	bank->dbck_flag = pdata->dbck_flag;
	bank->stride = pdata->bank_stride;
	bank->width = pdata->bank_width;
	bank->is_mpuio = pdata->is_mpuio;
	bank->non_wakeup_gpios = pdata->non_wakeup_gpios;
	bank->regs = pdata->regs;

	if (node) {
		if (!of_property_read_bool(node, "ti,gpio-always-on"))
			bank->loses_context = true;
	} else {
		bank->loses_context = pdata->loses_context;

		if (bank->loses_context)
			bank->get_context_loss_count =
				pdata->get_context_loss_count;
	}

	if (bank->regs->set_dataout && bank->regs->clr_dataout)
		bank->set_dataout = omap_set_gpio_dataout_reg;
	else
		bank->set_dataout = omap_set_gpio_dataout_mask;

	raw_spin_lock_init(&bank->lock);
	raw_spin_lock_init(&bank->wa_lock);

	/* Static mapping, never released */
	bank->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(bank->base))
		return PTR_ERR(bank->base);

	if (bank->dbck_flag) {
		bank->dbck = devm_clk_get(dev, "dbclk");
		if (IS_ERR(bank->dbck)) {
			dev_err(dev,
				"Could not get gpio dbck. Disable debounce\n");
			bank->dbck_flag = false;
		} else {
			clk_prepare(bank->dbck);
		}
	}

	platform_set_drvdata(pdev, bank);

	pm_runtime_enable(dev);
	pm_runtime_get_sync(dev);

	if (bank->is_mpuio)
		omap_mpuio_init(bank);

	omap_gpio_mod_init(bank);

	ret = omap_gpio_chip_init(bank, dev);
	if (ret) {
		pm_runtime_put_sync(dev);
		pm_runtime_disable(dev);
		if (bank->dbck_flag)
			clk_unprepare(bank->dbck);
		return ret;
	}

	omap_gpio_show_rev(bank);

	bank->nb.notifier_call = gpio_omap_cpu_notifier;
	cpu_pm_register_notifier(&bank->nb);

	pm_runtime_put(dev);

	return 0;
}

static int omap_gpio_remove(struct platform_device *pdev)
{
	struct gpio_bank *bank = platform_get_drvdata(pdev);

	cpu_pm_unregister_notifier(&bank->nb);
	gpiochip_remove(&bank->chip);
	pm_runtime_disable(&pdev->dev);
	if (bank->dbck_flag)
		clk_unprepare(bank->dbck);

	return 0;
}

static int __maybe_unused omap_gpio_runtime_suspend(struct device *dev)
{
	struct gpio_bank *bank = dev_get_drvdata(dev);
	unsigned long flags;

	raw_spin_lock_irqsave(&bank->lock, flags);
	omap_gpio_idle(bank, true);
	bank->is_suspended = true;
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static int __maybe_unused omap_gpio_runtime_resume(struct device *dev)
{
	struct gpio_bank *bank = dev_get_drvdata(dev);
	unsigned long flags;

	raw_spin_lock_irqsave(&bank->lock, flags);
	omap_gpio_unidle(bank);
	bank->is_suspended = false;
	raw_spin_unlock_irqrestore(&bank->lock, flags);

	return 0;
}

static int __maybe_unused omap_gpio_suspend(struct device *dev)
{
	struct gpio_bank *bank = dev_get_drvdata(dev);

	if (bank->is_suspended)
		return 0;

	bank->needs_resume = 1;

	return omap_gpio_runtime_suspend(dev);
}

static int __maybe_unused omap_gpio_resume(struct device *dev)
{
	struct gpio_bank *bank = dev_get_drvdata(dev);

	if (!bank->needs_resume)
		return 0;

	bank->needs_resume = 0;

	return omap_gpio_runtime_resume(dev);
}

static const struct dev_pm_ops gpio_pm_ops = {
	SET_RUNTIME_PM_OPS(omap_gpio_runtime_suspend, omap_gpio_runtime_resume,
			   NULL)
	SET_LATE_SYSTEM_SLEEP_PM_OPS(omap_gpio_suspend, omap_gpio_resume)
};

static struct platform_driver omap_gpio_driver = {
	.probe = omap_gpio_probe,
	.remove = omap_gpio_remove,
	.driver = {
		.name = "omap_gpio",
		.pm = &gpio_pm_ops,
		.of_match_table = omap_gpio_match,
	},
};

/*
 * The gpio driver needs to be registered before machine_init functions
 * access the gpio APIs. Hence omap_gpio_drv_reg() is a postcore_initcall.
 */
static int __init omap_gpio_drv_reg(void)
{
	return platform_driver_register(&omap_gpio_driver);
}
postcore_initcall(omap_gpio_drv_reg);

static void __exit omap_gpio_exit(void)
{
	platform_driver_unregister(&omap_gpio_driver);
}
module_exit(omap_gpio_exit);

MODULE_DESCRIPTION("omap gpio driver");
MODULE_ALIAS("platform:gpio-omap");
MODULE_LICENSE("GPL v2");