/*
 * linux/arch/arm/plat-pxa/gpio.c
 *
 * Generic PXA GPIO handling
 *
 * Author:	Nicolas Pitre
 * Created:	Jun 15, 2001
 * Copyright:	MontaVista Software Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/syscore_ops.h>
#include <linux/slab.h>

#include <mach/gpio-pxa.h>

/* Highest valid GPIO number on this SoC; set from pxa_gpio_nums(). */
int pxa_last_gpio;

/*
 * Per-bank state: one pxa_gpio_chip covers one 32-bit GPIO bank and
 * embeds the gpiolib gpio_chip registered for that bank.
 */
struct pxa_gpio_chip {
	struct gpio_chip chip;
	void __iomem	*regbase;	/* base of this bank's register block */
	char		label[10];	/* "gpio-%d", backing store for chip.label */

	unsigned long	irq_mask;	/* bits whose GPIO IRQ is currently unmasked */
	unsigned long	irq_edge_rise;	/* rising-edge trigger requested per bit */
	unsigned long	irq_edge_fall;	/* falling-edge trigger requested per bit */

#ifdef CONFIG_PM
	/* register snapshots taken in pxa_gpio_suspend() */
	unsigned long	saved_gplr;
	unsigned long	saved_gpdr;
	unsigned long	saved_grer;
	unsigned long	saved_gfer;
#endif
};

/*
 * SoC variants.  Bit 4 (MMP_GPIO) distinguishes the MMP family from
 * the PXA family -- see gpio_is_pxa_type()/gpio_is_mmp_type().
 */
enum {
	PXA25X_GPIO = 0,
	PXA26X_GPIO,
	PXA27X_GPIO,
	PXA3XX_GPIO,
	PXA93X_GPIO,
	MMP_GPIO = 0x10,
	MMP2_GPIO,
};

/* Serializes read-modify-write cycles on the shared GPDR registers. */
static DEFINE_SPINLOCK(gpio_lock);
static struct pxa_gpio_chip *pxa_gpio_chips;
static int gpio_type;		/* one of the enum values above */

/* Walk every 32-GPIO bank: i is the bank's first GPIO, c the bank state. */
#define for_each_gpio_chip(i, c)			\
	for (i = 0, c = &pxa_gpio_chips[0]; i <= pxa_last_gpio; i += 32, c++)

/* Register base of the bank that owns this gpio_chip. */
static inline void __iomem *gpio_chip_base(struct gpio_chip *c)
{
	return container_of(c, struct pxa_gpio_chip, chip)->regbase;
}

/* Bank state for a global GPIO number. */
static inline struct pxa_gpio_chip *gpio_to_pxachip(unsigned gpio)
{
	return &pxa_gpio_chips[gpio_to_bank(gpio)];
}

static inline int gpio_is_pxa_type(int type)
{
	return (type & MMP_GPIO) == 0;
}

static inline int gpio_is_mmp_type(int type)
{
	return (type & MMP_GPIO) != 0;
}

#ifdef CONFIG_ARCH_PXA
/* GPIO -> IRQ for PXA-family SoCs; -1 when this SoC is not PXA-family. */
static inline int __pxa_gpio_to_irq(int gpio)
{
	if (gpio_is_pxa_type(gpio_type))
		return PXA_GPIO_TO_IRQ(gpio);
	return -1;
}

/* IRQ -> GPIO for PXA-family SoCs; -1 when this SoC is not PXA-family. */
static inline int __pxa_irq_to_gpio(int irq)
{
	if (gpio_is_pxa_type(gpio_type))
		return irq - PXA_GPIO_TO_IRQ(0);
	return -1;
}
#else
static inline int __pxa_gpio_to_irq(int gpio) { return -1; }
static inline int __pxa_irq_to_gpio(int irq) { return -1; }
#endif

#ifdef CONFIG_ARCH_MMP
/* GPIO -> IRQ for MMP-family SoCs; -1 when this SoC is not MMP-family. */
static inline int __mmp_gpio_to_irq(int gpio)
{
	if (gpio_is_mmp_type(gpio_type))
		return MMP_GPIO_TO_IRQ(gpio);
	return -1;
}

/* IRQ -> GPIO for MMP-family SoCs; -1 when this SoC is not MMP-family. */
static inline int __mmp_irq_to_gpio(int irq)
{
	if (gpio_is_mmp_type(gpio_type))
		return irq - MMP_GPIO_TO_IRQ(0);
	return -1;
}
#else
static inline int __mmp_gpio_to_irq(int gpio) { return -1; }
static inline int __mmp_irq_to_gpio(int irq) { return -1; }
#endif

/* gpio_chip .to_irq hook: try the PXA mapping first, then fall back to MMP. */
static int pxa_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
	int gpio, ret;

	gpio = chip->base + offset;
	ret = __pxa_gpio_to_irq(gpio);
	if (ret >= 0)
		return ret;
	return __mmp_gpio_to_irq(gpio);
}

/* Inverse mapping, used by the muxed irq_chip callbacks below. */
int pxa_irq_to_gpio(int irq)
{
	int ret;

	ret = __pxa_irq_to_gpio(irq);
	if (ret >= 0)
		return ret;
	return __mmp_irq_to_gpio(irq);
}

/*
 * gpio_chip .direction_input hook: clear (or, for pins with inverted
 * GPDR sense, set) the pin's direction bit under gpio_lock.
 */
static int pxa_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
	void __iomem *base = gpio_chip_base(chip);
	uint32_t value, mask = 1 << offset;
	unsigned long flags;

	spin_lock_irqsave(&gpio_lock, flags);

	value = readl_relaxed(base + GPDR_OFFSET);
	/* some pins use an inverted direction-bit sense */
	if (__gpio_is_inverted(chip->base + offset))
		value |= mask;
	else
		value &= ~mask;
	writel_relaxed(value, base + GPDR_OFFSET);

	spin_unlock_irqrestore(&gpio_lock, flags);
	return 0;
}

/*
 * gpio_chip .direction_output hook: program the level first (via the
 * write-only GPSR/GPCR set/clear registers), then flip the direction
 * bit, so the pin drives the requested value as soon as it becomes an
 * output.
 */
static int pxa_gpio_direction_output(struct gpio_chip *chip,
				     unsigned offset, int value)
{
	void __iomem *base = gpio_chip_base(chip);
	uint32_t tmp, mask = 1 << offset;
	unsigned long flags;

	writel_relaxed(mask, base + (value ? GPSR_OFFSET : GPCR_OFFSET));

	spin_lock_irqsave(&gpio_lock, flags);

	tmp = readl_relaxed(base + GPDR_OFFSET);
	if (__gpio_is_inverted(chip->base + offset))
		tmp &= ~mask;
	else
		tmp |= mask;
	writel_relaxed(tmp, base + GPDR_OFFSET);

	spin_unlock_irqrestore(&gpio_lock, flags);
	return 0;
}

/* gpio_chip .get hook: read the pin state from GPLR (nonzero == high). */
static int pxa_gpio_get(struct gpio_chip *chip, unsigned offset)
{
	return readl_relaxed(gpio_chip_base(chip) + GPLR_OFFSET) & (1 << offset);
}

/* gpio_chip .set hook: drive the pin via the GPSR/GPCR set/clear registers. */
static void pxa_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
	writel_relaxed(1 << offset, gpio_chip_base(chip) +
		       (value ? GPSR_OFFSET : GPCR_OFFSET));
}

/*
 * Allocate and register one gpio_chip per 32-GPIO bank, covering GPIOs
 * 0..gpio_end.  On success the bank array is published through
 * pxa_gpio_chips.  Returns 0 or -ENOMEM.
 */
static int __init pxa_init_gpio_chip(int gpio_end)
{
	int i, gpio, nbanks = gpio_to_bank(gpio_end) + 1;
	struct pxa_gpio_chip *chips;

	chips = kzalloc(nbanks * sizeof(struct pxa_gpio_chip), GFP_KERNEL);
	if (chips == NULL) {
		pr_err("%s: failed to allocate GPIO chips\n", __func__);
		return -ENOMEM;
	}

	for (i = 0, gpio = 0; i < nbanks; i++, gpio += 32) {
		struct gpio_chip *c = &chips[i].chip;

		sprintf(chips[i].label, "gpio-%d", i);
		chips[i].regbase = GPIO_BANK(i);

		c->base = gpio;
		c->label = chips[i].label;

		c->direction_input = pxa_gpio_direction_input;
		c->direction_output = pxa_gpio_direction_output;
		c->get = pxa_gpio_get;
		c->set = pxa_gpio_set;
		c->to_irq = pxa_gpio_to_irq;

		/* number of GPIOs on last bank may be less than 32 */
		c->ngpio = (gpio + 31 > gpio_end) ? (gpio_end - gpio + 1) : 32;
		gpiochip_add(c);
	}
	pxa_gpio_chips = chips;
	return 0;
}

/* Update only those GRERx and GFERx edge detection register bits if those
 * bits are set in c->irq_mask
 */
static inline void update_edge_detect(struct pxa_gpio_chip *c)
{
	uint32_t grer, gfer;

	grer = readl_relaxed(c->regbase + GRER_OFFSET) & ~c->irq_mask;
	gfer = readl_relaxed(c->regbase + GFER_OFFSET) & ~c->irq_mask;
	grer |= c->irq_edge_rise & c->irq_mask;
	gfer |= c->irq_edge_fall & c->irq_mask;
	writel_relaxed(grer, c->regbase + GRER_OFFSET);
	writel_relaxed(gfer, c->regbase + GFER_OFFSET);
}

/*
 * irq_chip .irq_set_type hook: record the requested edge(s), force the
 * pin to be an input, and reprogram GRER/GFER via update_edge_detect().
 */
static int pxa_gpio_irq_type(struct irq_data *d, unsigned int type)
{
	struct pxa_gpio_chip *c;
	int gpio = pxa_irq_to_gpio(d->irq);
	unsigned long gpdr, mask = GPIO_bit(gpio);

	c = gpio_to_pxachip(gpio);

	if (type == IRQ_TYPE_PROBE) {
		/* Don't mess with enabled GPIOs using preconfigured edges or
		 * GPIOs set to alternate function or to output during probe
		 */
		if ((c->irq_edge_rise | c->irq_edge_fall) & GPIO_bit(gpio))
			return 0;

		if (__gpio_is_occupied(gpio))
			return 0;

		/* free pin being probed: detect both edges */
		type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
	}

	gpdr = readl_relaxed(c->regbase + GPDR_OFFSET);

	/* switch to input; some pins have inverted GPDR sense */
	if (__gpio_is_inverted(gpio))
		writel_relaxed(gpdr | mask, c->regbase + GPDR_OFFSET);
	else
		writel_relaxed(gpdr & ~mask, c->regbase + GPDR_OFFSET);

	if (type & IRQ_TYPE_EDGE_RISING)
		c->irq_edge_rise |= mask;
	else
		c->irq_edge_rise &= ~mask;

	if (type & IRQ_TYPE_EDGE_FALLING)
		c->irq_edge_fall |= mask;
	else
		c->irq_edge_fall &= ~mask;

	update_edge_detect(c);

	pr_debug("%s: IRQ%d (GPIO%d) - edge%s%s\n", __func__, d->irq, gpio,
		((type & IRQ_TYPE_EDGE_RISING) ? " rising" : ""),
		((type & IRQ_TYPE_EDGE_FALLING) ? " falling" : ""));
	return 0;
}

/*
 * Chained handler for the muxed GPIO edge interrupt: scan every bank's
 * GEDR status, ack the pending bits (GEDR is write-to-clear) and
 * dispatch each GPIO's virtual IRQ.  Repeat until a full scan over all
 * banks finds nothing pending.
 */
static void pxa_gpio_demux_handler(unsigned int irq, struct irq_desc *desc)
{
	struct pxa_gpio_chip *c;
	int loop, gpio, gpio_base, n;
	unsigned long gedr;

	do {
		loop = 0;
		for_each_gpio_chip(gpio, c) {
			gpio_base = c->chip.base;

			gedr = readl_relaxed(c->regbase + GEDR_OFFSET);
			gedr = gedr & c->irq_mask;	/* ignore masked lines */
			writel_relaxed(gedr, c->regbase + GEDR_OFFSET);

			n = find_first_bit(&gedr, BITS_PER_LONG);
			while (n < BITS_PER_LONG) {
				loop = 1;

				generic_handle_irq(gpio_to_irq(gpio_base + n));
				n = find_next_bit(&gedr, BITS_PER_LONG, n + 1);
			}
		}
	} while (loop);
}

/* irq_chip .irq_ack hook: clear this GPIO's latched edge-detect status. */
static void pxa_ack_muxed_gpio(struct irq_data *d)
{
	int gpio = pxa_irq_to_gpio(d->irq);
	struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);

	writel_relaxed(GPIO_bit(gpio), c->regbase + GEDR_OFFSET);
}

/*
 * irq_chip .irq_mask hook: disable both edge-detect enables for the
 * GPIO in hardware and drop it from irq_mask so the demux handler and
 * update_edge_detect() leave it alone until it is unmasked again.
 */
static void pxa_mask_muxed_gpio(struct irq_data *d)
{
	int gpio = pxa_irq_to_gpio(d->irq);
	struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);
	uint32_t grer, gfer;

	c->irq_mask &= ~GPIO_bit(gpio);

	grer = readl_relaxed(c->regbase + GRER_OFFSET) & ~GPIO_bit(gpio);
	gfer = readl_relaxed(c->regbase + GFER_OFFSET) & ~GPIO_bit(gpio);
	writel_relaxed(grer, c->regbase + GRER_OFFSET);
	writel_relaxed(gfer, c->regbase + GFER_OFFSET);
}

/*
 * irq_chip .irq_unmask hook: re-enable the edge(s) previously requested
 * through pxa_gpio_irq_type().
 */
static void pxa_unmask_muxed_gpio(struct irq_data *d)
{
	int gpio = pxa_irq_to_gpio(d->irq);
	struct pxa_gpio_chip *c = gpio_to_pxachip(gpio);

	c->irq_mask |= GPIO_bit(gpio);
	update_edge_detect(c);
}

/* irq_chip shared by all muxed GPIO interrupts. */
static struct irq_chip pxa_muxed_gpio_chip = {
	.name		= "GPIO",
	.irq_ack	= pxa_ack_muxed_gpio,
	.irq_mask	= pxa_mask_muxed_gpio,
	.irq_unmask	= pxa_unmask_muxed_gpio,
	.irq_set_type	= pxa_gpio_irq_type,
};

/*
 * Detect the SoC at runtime: record its family in gpio_type and return
 * the highest GPIO number, or 0 if no supported SoC was recognized.
 */
static int pxa_gpio_nums(void)
{
	int count = 0;

#ifdef CONFIG_ARCH_PXA
	if (cpu_is_pxa25x()) {
#ifdef CONFIG_CPU_PXA26x
		count = 89;
		gpio_type = PXA26X_GPIO;
#elif defined(CONFIG_PXA25x)
		count = 84;
		/* NOTE(review): PXA26X_GPIO in the PXA25x branch looks like
		 * a typo for PXA25X_GPIO.  Harmless today -- both values
		 * satisfy gpio_is_pxa_type() and gpio_type is not otherwise
		 * compared -- but worth confirming the intent. */
		gpio_type = PXA26X_GPIO;
#endif /* CONFIG_CPU_PXA26x */
	} else if (cpu_is_pxa27x()) {
		count = 120;
		gpio_type = PXA27X_GPIO;
	} else if (cpu_is_pxa93x() || cpu_is_pxa95x()) {
		count = 191;
		gpio_type = PXA93X_GPIO;
	} else if (cpu_is_pxa3xx()) {
		count = 127;
		gpio_type = PXA3XX_GPIO;
	}
#endif /* CONFIG_ARCH_PXA */

#ifdef CONFIG_ARCH_MMP
	if (cpu_is_pxa168() || cpu_is_pxa910()) {
		count = 127;
		gpio_type = MMP_GPIO;
	} else if (cpu_is_mmp2()) {
		count = 191;
		gpio_type = MMP2_GPIO;
	}
#endif /* CONFIG_ARCH_MMP */
	return count;
}

/*
 * pxa_init_gpio - platform entry point: set up the GPIO banks and the
 * muxed GPIO interrupt handling.
 * @mux_irq: parent IRQ that the GPIO>=2 edge-detect lines are muxed onto
 * @start:   first GPIO whose virtual IRQ should be configured
 * @end:     last GPIO (also sizes the bank array)
 * @fn:      irq_set_wake callback installed into the shared irq_chip
 */
void __init pxa_init_gpio(int mux_irq, int start, int end, set_wake_t fn)
{
	struct pxa_gpio_chip *c;
	int gpio, irq;

	pxa_last_gpio = pxa_gpio_nums();
	if (!pxa_last_gpio)
		return;

	/* Initialize GPIO chips */
	/* NOTE(review): return value ignored -- if the kzalloc inside
	 * fails, pxa_gpio_chips stays NULL and the loops below would
	 * dereference it; verify whether early-return is wanted here. */
	pxa_init_gpio_chip(end);

	/* clear all GPIO edge detects */
	for_each_gpio_chip(gpio, c) {
		writel_relaxed(0, c->regbase + GFER_OFFSET);
		writel_relaxed(0, c->regbase + GRER_OFFSET);
		writel_relaxed(~0, c->regbase + GEDR_OFFSET);
	}

#ifdef CONFIG_ARCH_PXA
	/* GPIO 0 and 1 have dedicated (non-muxed) parent interrupts */
	irq = gpio_to_irq(0);
	irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip,
				 handle_edge_irq);
	set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
	irq_set_chained_handler(IRQ_GPIO0, pxa_gpio_demux_handler);

	irq = gpio_to_irq(1);
	irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip,
				 handle_edge_irq);
	set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
	irq_set_chained_handler(IRQ_GPIO1, pxa_gpio_demux_handler);
#endif

	for (irq = gpio_to_irq(start); irq <= gpio_to_irq(end); irq++) {
		irq_set_chip_and_handler(irq, &pxa_muxed_gpio_chip,
					 handle_edge_irq);
		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
	}

	/* Install handler for GPIO>=2 edge detect interrupts */
	irq_set_chained_handler(mux_irq, pxa_gpio_demux_handler);
	pxa_muxed_gpio_chip.irq_set_wake = fn;
}

#ifdef CONFIG_PM
/*
 * Syscore suspend: snapshot level, direction and edge-enable registers
 * for every bank, then clear any latched edge status.
 */
static int pxa_gpio_suspend(void)
{
	struct pxa_gpio_chip *c;
	int gpio;

	for_each_gpio_chip(gpio, c) {
		c->saved_gplr = readl_relaxed(c->regbase + GPLR_OFFSET);
		c->saved_gpdr = readl_relaxed(c->regbase + GPDR_OFFSET);
		c->saved_grer = readl_relaxed(c->regbase + GRER_OFFSET);
		c->saved_gfer = readl_relaxed(c->regbase + GFER_OFFSET);

		/* Clear GPIO transition detect bits */
		writel_relaxed(0xffffffff, c->regbase + GEDR_OFFSET);
	}
	return 0;
}

/*
 * Syscore resume: replay the snapshots.  Output levels are restored
 * through GPSR/GPCR before GPDR so pins drive the saved value the
 * moment they become outputs again.
 */
static void pxa_gpio_resume(void)
{
	struct pxa_gpio_chip *c;
	int gpio;

	for_each_gpio_chip(gpio, c) {
		/* restore level with set/clear */
		writel_relaxed(c->saved_gplr, c->regbase + GPSR_OFFSET);
		writel_relaxed(~c->saved_gplr, c->regbase + GPCR_OFFSET);

		writel_relaxed(c->saved_grer, c->regbase + GRER_OFFSET);
		writel_relaxed(c->saved_gfer, c->regbase + GFER_OFFSET);
		writel_relaxed(c->saved_gpdr, c->regbase + GPDR_OFFSET);
	}
}
#else
#define pxa_gpio_suspend	NULL
#define pxa_gpio_resume		NULL
#endif

/* Registered by platform code so GPIO state survives suspend/resume. */
struct syscore_ops pxa_gpio_syscore_ops = {
	.suspend	= pxa_gpio_suspend,
	.resume		= pxa_gpio_resume,
};