// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/drivers/mfd/ucb1x00-core.c
 *
 *  Copyright (C) 2001 Russell King, All Rights Reserved.
 *
 *  The UCB1x00 core driver provides basic services for handling IO,
 *  the ADC, interrupts, and accessing registers.  It is designed
 *  such that everything goes through this layer, thereby providing
 *  a consistent locking methodology, as well as allowing the drivers
 *  to be used on other non-MCP-enabled hardware platforms.
 *
 *  Note that all locks are private to this file.  Nothing else may
 *  touch them.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/device.h>
#include <linux/mutex.h>
#include <linux/mfd/ucb1x00.h>
#include <linux/pm.h>
#include <linux/gpio/driver.h>

static DEFINE_MUTEX(ucb1x00_mutex);
static LIST_HEAD(ucb1x00_drivers);
static LIST_HEAD(ucb1x00_devices);

/**
 * ucb1x00_io_set_dir - set IO direction
 * @ucb: UCB1x00 structure describing chip
 * @in:  bitfield of IO pins to be set as inputs
 * @out: bitfield of IO pins to be set as outputs
 *
 * Set the IO direction of the ten general purpose IO pins on
 * the UCB1x00 chip.  The @in bitfield has priority over the
 * @out bitfield, in that if you specify a pin as both input
 * and output, it will end up as an input.
 *
 * ucb1x00_enable must have been called to enable the comms
 * before using this function.
 *
 * This function takes a spinlock, disabling interrupts.
 */
void ucb1x00_io_set_dir(struct ucb1x00 *ucb, unsigned int in, unsigned int out)
{
	unsigned long flags;

	spin_lock_irqsave(&ucb->io_lock, flags);
	ucb->io_dir |= out;
	ucb->io_dir &= ~in;

	ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir);
	spin_unlock_irqrestore(&ucb->io_lock, flags);
}

/**
 * ucb1x00_io_write - set or clear IO outputs
 * @ucb: UCB1x00 structure describing chip
 * @set: bitfield of IO pins to set to logic '1'
 * @clear: bitfield of IO pins to set to logic '0'
 *
 * Set the IO output state of the specified IO pins.  The value
 * is retained if the pins are subsequently configured as inputs.
 * The @clear bitfield has priority over the @set bitfield -
 * outputs will be cleared.
 *
 * ucb1x00_enable must have been called to enable the comms
 * before using this function.
 *
 * This function takes a spinlock, disabling interrupts.
 */
void ucb1x00_io_write(struct ucb1x00 *ucb, unsigned int set, unsigned int clear)
{
	unsigned long flags;

	spin_lock_irqsave(&ucb->io_lock, flags);
	ucb->io_out |= set;
	ucb->io_out &= ~clear;

	ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out);
	spin_unlock_irqrestore(&ucb->io_lock, flags);
}

/**
 * ucb1x00_io_read - read the current state of the IO pins
 * @ucb: UCB1x00 structure describing chip
 *
 * Return a bitfield describing the logic state of the ten
 * general purpose IO pins.
 *
 * ucb1x00_enable must have been called to enable the comms
 * before using this function.
 *
 * This function does not take any mutexes or spinlocks.
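 *
 * A minimal usage sketch (illustrative only; "ucb" and the hypothetical
 * do_something() are assumed to come from the caller, and UCB_IO_0 is the
 * bit mask for pin IO0):
 *
 *	ucb1x00_enable(ucb);
 *	if (ucb1x00_io_read(ucb) & UCB_IO_0)
 *		do_something();
 *	ucb1x00_disable(ucb);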
 */
unsigned int ucb1x00_io_read(struct ucb1x00 *ucb)
{
	return ucb1x00_reg_read(ucb, UCB_IO_DATA);
}

static int ucb1x00_gpio_set(struct gpio_chip *chip, unsigned int offset,
			    int value)
{
	struct ucb1x00 *ucb = gpiochip_get_data(chip);
	unsigned long flags;

	spin_lock_irqsave(&ucb->io_lock, flags);
	if (value)
		ucb->io_out |= 1 << offset;
	else
		ucb->io_out &= ~(1 << offset);

	ucb1x00_enable(ucb);
	ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out);
	ucb1x00_disable(ucb);
	spin_unlock_irqrestore(&ucb->io_lock, flags);

	return 0;
}

static int ucb1x00_gpio_get(struct gpio_chip *chip, unsigned offset)
{
	struct ucb1x00 *ucb = gpiochip_get_data(chip);
	unsigned val;

	ucb1x00_enable(ucb);
	val = ucb1x00_reg_read(ucb, UCB_IO_DATA);
	ucb1x00_disable(ucb);

	return !!(val & (1 << offset));
}

static int ucb1x00_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
	struct ucb1x00 *ucb = gpiochip_get_data(chip);
	unsigned long flags;

	spin_lock_irqsave(&ucb->io_lock, flags);
	ucb->io_dir &= ~(1 << offset);
	ucb1x00_enable(ucb);
	ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir);
	ucb1x00_disable(ucb);
	spin_unlock_irqrestore(&ucb->io_lock, flags);

	return 0;
}

static int ucb1x00_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
					 int value)
{
	struct ucb1x00 *ucb = gpiochip_get_data(chip);
	unsigned long flags;
	unsigned old, mask = 1 << offset;

	spin_lock_irqsave(&ucb->io_lock, flags);
	old = ucb->io_out;
	if (value)
		ucb->io_out |= mask;
	else
		ucb->io_out &= ~mask;

	ucb1x00_enable(ucb);
	if (old != ucb->io_out)
		ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out);

	if (!(ucb->io_dir & mask)) {
		ucb->io_dir |= mask;
		ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir);
	}
	ucb1x00_disable(ucb);
	spin_unlock_irqrestore(&ucb->io_lock, flags);

	return 0;
}

static int ucb1x00_to_irq(struct gpio_chip *chip, unsigned offset)
{
	struct ucb1x00 *ucb = gpiochip_get_data(chip);

	return ucb->irq_base > 0 ? ucb->irq_base + offset : -ENXIO;
}

/*
 * UCB1300 data sheet says we must:
 * 1. enable ADC		=> 5us (including reference startup time)
 * 2. select input		=> 51*tsibclk  => 4.3us
 * 3. start conversion		=> 102*tsibclk => 8.5us
 * (tsibclk = 1/11981000)
 * Period between SIB 128-bit frames = 10.7us
 */

/**
 * ucb1x00_adc_enable - enable the ADC converter
 * @ucb: UCB1x00 structure describing chip
 *
 * Enable the ucb1x00 and ADC converter on the UCB1x00 for use.
 * Any code wishing to use the ADC converter must call this
 * function prior to using it.
 *
 * This function takes the ADC mutex to prevent two or more
 * concurrent uses, and therefore may sleep.  As a result, it
 * can only be called from process context, not interrupt
 * context.
 *
 * You should release the ADC as soon as possible using
 * ucb1x00_adc_disable.
 */
void ucb1x00_adc_enable(struct ucb1x00 *ucb)
{
	mutex_lock(&ucb->adc_mutex);

	ucb->adc_cr |= UCB_ADC_ENA;

	ucb1x00_enable(ucb);
	ucb1x00_reg_write(ucb, UCB_ADC_CR, ucb->adc_cr);
}

/**
 * ucb1x00_adc_read - read the specified ADC channel
 * @ucb: UCB1x00 structure describing chip
 * @adc_channel: ADC channel mask
 * @sync: wait for synchronisation pulse.
 *
 * Start an ADC conversion and wait for the result.  Note that
 * synchronised ADC conversions (via the ADCSYNC pin) must wait
 * until the trigger is asserted and the conversion is finished.
 *
 * This function currently spins waiting for the conversion to
 * complete (2 frames max without sync).
 *
 * If called for a synchronised ADC conversion, it may sleep
 * with the ADC mutex held.
 */
unsigned int ucb1x00_adc_read(struct ucb1x00 *ucb, int adc_channel, int sync)
{
	unsigned int val;

	if (sync)
		adc_channel |= UCB_ADC_SYNC_ENA;

	ucb1x00_reg_write(ucb, UCB_ADC_CR, ucb->adc_cr | adc_channel);
	ucb1x00_reg_write(ucb, UCB_ADC_CR, ucb->adc_cr | adc_channel | UCB_ADC_START);

	for (;;) {
		val = ucb1x00_reg_read(ucb, UCB_ADC_DATA);
		if (val & UCB_ADC_DAT_VAL)
			break;
		/* yield to other processes */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(1);
	}

	return UCB_ADC_DAT(val);
}

/**
 * ucb1x00_adc_disable - disable the ADC converter
 * @ucb: UCB1x00 structure describing chip
 *
 * Disable the ADC converter and release the ADC mutex.
 */
void ucb1x00_adc_disable(struct ucb1x00 *ucb)
{
	ucb->adc_cr &= ~UCB_ADC_ENA;
	ucb1x00_reg_write(ucb, UCB_ADC_CR, ucb->adc_cr);
	ucb1x00_disable(ucb);

	mutex_unlock(&ucb->adc_mutex);
}

/*
 * UCB1x00 Interrupt handling.
 *
 * The UCB1x00 can generate interrupts when the SIBCLK is stopped.
 * Since we need to read an internal register, we must re-enable
 * SIBCLK to talk to the chip.  We leave the clock running until
 * we have finished processing all interrupts from the chip.
 */
static void ucb1x00_irq(struct irq_desc *desc)
{
	struct ucb1x00 *ucb = irq_desc_get_handler_data(desc);
	unsigned int isr, i;

	ucb1x00_enable(ucb);
	isr = ucb1x00_reg_read(ucb, UCB_IE_STATUS);
	ucb1x00_reg_write(ucb, UCB_IE_CLEAR, isr);
	ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0);

	for (i = 0; i < 16 && isr; i++, isr >>= 1)
		if (isr & 1)
			generic_handle_irq(ucb->irq_base + i);
	ucb1x00_disable(ucb);
}

static void ucb1x00_irq_update(struct ucb1x00 *ucb, unsigned mask)
{
	ucb1x00_enable(ucb);
	if (ucb->irq_ris_enbl & mask)
		ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl &
				  ucb->irq_mask);
	if (ucb->irq_fal_enbl & mask)
		ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl &
				  ucb->irq_mask);
	ucb1x00_disable(ucb);
}

static void ucb1x00_irq_noop(struct irq_data *data)
{
}

static void ucb1x00_irq_mask(struct irq_data *data)
{
	struct ucb1x00 *ucb = irq_data_get_irq_chip_data(data);
	unsigned mask = 1 << (data->irq - ucb->irq_base);

	raw_spin_lock(&ucb->irq_lock);
	ucb->irq_mask &= ~mask;
	ucb1x00_irq_update(ucb, mask);
	raw_spin_unlock(&ucb->irq_lock);
}

static void ucb1x00_irq_unmask(struct irq_data *data)
{
	struct ucb1x00 *ucb = irq_data_get_irq_chip_data(data);
	unsigned mask = 1 << (data->irq - ucb->irq_base);

	raw_spin_lock(&ucb->irq_lock);
	ucb->irq_mask |= mask;
	ucb1x00_irq_update(ucb, mask);
	raw_spin_unlock(&ucb->irq_lock);
}

static int ucb1x00_irq_set_type(struct irq_data *data, unsigned int type)
{
	struct ucb1x00 *ucb = irq_data_get_irq_chip_data(data);
	unsigned mask = 1 << (data->irq - ucb->irq_base);

	raw_spin_lock(&ucb->irq_lock);
	if (type & IRQ_TYPE_EDGE_RISING)
		ucb->irq_ris_enbl |= mask;
	else
		ucb->irq_ris_enbl &= ~mask;

	if (type & IRQ_TYPE_EDGE_FALLING)
		ucb->irq_fal_enbl |= mask;
	else
		ucb->irq_fal_enbl &= ~mask;
	if (ucb->irq_mask & mask) {
		ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl &
				  ucb->irq_mask);
		ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl &
				  ucb->irq_mask);
	}
	raw_spin_unlock(&ucb->irq_lock);

	return 0;
}

static int ucb1x00_irq_set_wake(struct irq_data *data, unsigned int on)
{
	struct ucb1x00 *ucb = irq_data_get_irq_chip_data(data);
	struct ucb1x00_plat_data *pdata = ucb->mcp->attached_device.platform_data;
	unsigned mask = 1 << (data->irq - ucb->irq_base);

	if (!pdata || !pdata->can_wakeup)
		return -EINVAL;

	raw_spin_lock(&ucb->irq_lock);
	if (on)
		ucb->irq_wake |= mask;
	else
		ucb->irq_wake &= ~mask;
	raw_spin_unlock(&ucb->irq_lock);

	return 0;
}

static struct irq_chip ucb1x00_irqchip = {
	.name = "ucb1x00",
	.irq_ack = ucb1x00_irq_noop,
	.irq_mask = ucb1x00_irq_mask,
	.irq_unmask = ucb1x00_irq_unmask,
	.irq_set_type = ucb1x00_irq_set_type,
	.irq_set_wake = ucb1x00_irq_set_wake,
};

static int ucb1x00_add_dev(struct ucb1x00 *ucb, struct ucb1x00_driver *drv)
{
	struct ucb1x00_dev *dev;
	int ret;

	dev = kmalloc(sizeof(struct ucb1x00_dev), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	dev->ucb = ucb;
	dev->drv = drv;

	ret = drv->add(dev);
	if (ret) {
		kfree(dev);
		return ret;
	}

	list_add_tail(&dev->dev_node, &ucb->devs);
	list_add_tail(&dev->drv_node, &drv->devs);

	return ret;
}

static void ucb1x00_remove_dev(struct ucb1x00_dev *dev)
{
	dev->drv->remove(dev);
	list_del(&dev->dev_node);
	list_del(&dev->drv_node);
	kfree(dev);
}

/*
 * Try to probe our interrupt, rather than relying on lots of
 * hard-coded machine dependencies.  For reference, the expected
 * IRQ mappings are:
 *
 * Machine		Default IRQ
 *	adsbitsy	IRQ_GPCIN4
 *	cerf		IRQ_GPIO_UCB1200_IRQ
 *	flexanet	IRQ_GPIO_GUI
 *	freebird	IRQ_GPIO_FREEBIRD_UCB1300_IRQ
 *	graphicsclient	ADS_EXT_IRQ(8)
 *	graphicsmaster	ADS_EXT_IRQ(8)
 *	lart		LART_IRQ_UCB1200
 *	omnimeter	IRQ_GPIO23
 *	pfs168		IRQ_GPIO_UCB1300_IRQ
 *	simpad		IRQ_GPIO_UCB1300_IRQ
 *	shannon		SHANNON_IRQ_GPIO_IRQ_CODEC
 *	yopy		IRQ_GPIO_UCB1200_IRQ
 */
static int ucb1x00_detect_irq(struct ucb1x00 *ucb)
{
	unsigned long mask;

	mask = probe_irq_on();

	/*
	 * Enable the ADC interrupt.
	 */
	ucb1x00_reg_write(ucb, UCB_IE_RIS, UCB_IE_ADC);
	ucb1x00_reg_write(ucb, UCB_IE_FAL, UCB_IE_ADC);
	ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0xffff);
	ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0);

	/*
	 * Cause an ADC interrupt.
	 */
	ucb1x00_reg_write(ucb, UCB_ADC_CR, UCB_ADC_ENA);
	ucb1x00_reg_write(ucb, UCB_ADC_CR, UCB_ADC_ENA | UCB_ADC_START);

	/*
	 * Wait for the conversion to complete.
	 */
	while ((ucb1x00_reg_read(ucb, UCB_ADC_DATA) & UCB_ADC_DAT_VAL) == 0);
	ucb1x00_reg_write(ucb, UCB_ADC_CR, 0);

	/*
	 * Disable and clear interrupt.
	 */
	ucb1x00_reg_write(ucb, UCB_IE_RIS, 0);
	ucb1x00_reg_write(ucb, UCB_IE_FAL, 0);
	ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0xffff);
	ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0);

	/*
	 * Read triggered interrupt.
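	 * probe_irq_off() reports the line that fired: it returns the IRQ
	 * number, 0 if no interrupt was seen, or a negative value if more
	 * than one line triggered.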
	 */
	return probe_irq_off(mask);
}

static void ucb1x00_release(struct device *dev)
{
	struct ucb1x00 *ucb = classdev_to_ucb1x00(dev);
	kfree(ucb);
}

static struct class ucb1x00_class = {
	.name = "ucb1x00",
	.dev_release = ucb1x00_release,
};

static int ucb1x00_probe(struct mcp *mcp)
{
	struct ucb1x00_plat_data *pdata = mcp->attached_device.platform_data;
	struct ucb1x00_driver *drv;
	struct ucb1x00 *ucb;
	unsigned id, i, irq_base;
	int ret = -ENODEV;

	/* Tell the platform to deassert the UCB1x00 reset */
	if (pdata && pdata->reset)
		pdata->reset(UCB_RST_PROBE);

	mcp_enable(mcp);
	id = mcp_reg_read(mcp, UCB_ID);
	mcp_disable(mcp);

	if (id != UCB_ID_1200 && id != UCB_ID_1300 && id != UCB_ID_TC35143) {
		printk(KERN_WARNING "UCB1x00 ID not found: %04x\n", id);
		goto out;
	}

	ucb = kzalloc(sizeof(struct ucb1x00), GFP_KERNEL);
	ret = -ENOMEM;
	if (!ucb)
		goto out;

	device_initialize(&ucb->dev);
	ucb->dev.class = &ucb1x00_class;
	ucb->dev.parent = &mcp->attached_device;
	dev_set_name(&ucb->dev, "ucb1x00");

	raw_spin_lock_init(&ucb->irq_lock);
	spin_lock_init(&ucb->io_lock);
	mutex_init(&ucb->adc_mutex);

	ucb->id = id;
	ucb->mcp = mcp;

	ret = device_add(&ucb->dev);
	if (ret)
		goto err_dev_add;

	ucb1x00_enable(ucb);
	ucb->irq = ucb1x00_detect_irq(ucb);
	ucb1x00_disable(ucb);
	if (!ucb->irq) {
		dev_err(&ucb->dev, "IRQ probe failed\n");
		ret = -ENODEV;
		goto err_no_irq;
	}

	ucb->gpio.base = -1;
	irq_base = pdata ? pdata->irq_base : 0;
	ucb->irq_base = irq_alloc_descs(-1, irq_base, 16, -1);
	if (ucb->irq_base < 0) {
		dev_err(&ucb->dev, "unable to allocate 16 irqs: %d\n",
			ucb->irq_base);
		ret = ucb->irq_base;
		goto err_irq_alloc;
	}

	for (i = 0; i < 16; i++) {
		unsigned irq = ucb->irq_base + i;

		irq_set_chip_and_handler(irq, &ucb1x00_irqchip, handle_edge_irq);
		irq_set_chip_data(irq, ucb);
		irq_clear_status_flags(irq, IRQ_NOREQUEST);
	}

	irq_set_irq_type(ucb->irq, IRQ_TYPE_EDGE_RISING);
	irq_set_chained_handler_and_data(ucb->irq, ucb1x00_irq, ucb);

	if (pdata && pdata->gpio_base) {
		ucb->gpio.label = dev_name(&ucb->dev);
		ucb->gpio.parent = &ucb->dev;
		ucb->gpio.owner = THIS_MODULE;
		ucb->gpio.base = pdata->gpio_base;
		ucb->gpio.ngpio = 10;
		ucb->gpio.set_rv = ucb1x00_gpio_set;
		ucb->gpio.get = ucb1x00_gpio_get;
		ucb->gpio.direction_input = ucb1x00_gpio_direction_input;
		ucb->gpio.direction_output = ucb1x00_gpio_direction_output;
		ucb->gpio.to_irq = ucb1x00_to_irq;
		ret = gpiochip_add_data(&ucb->gpio, ucb);
		if (ret)
			goto err_gpio_add;
	} else
		dev_info(&ucb->dev, "gpio_base not set so no gpiolib support");

	mcp_set_drvdata(mcp, ucb);

	if (pdata)
		device_set_wakeup_capable(&ucb->dev, pdata->can_wakeup);

	INIT_LIST_HEAD(&ucb->devs);
	mutex_lock(&ucb1x00_mutex);
	list_add_tail(&ucb->node, &ucb1x00_devices);
	list_for_each_entry(drv, &ucb1x00_drivers, node) {
		ucb1x00_add_dev(ucb, drv);
	}
	mutex_unlock(&ucb1x00_mutex);

	return ret;

 err_gpio_add:
	irq_set_chained_handler(ucb->irq, NULL);
 err_irq_alloc:
	if (ucb->irq_base > 0)
		irq_free_descs(ucb->irq_base, 16);
 err_no_irq:
	device_del(&ucb->dev);
 err_dev_add:
	put_device(&ucb->dev);
 out:
	if (pdata && pdata->reset)
		pdata->reset(UCB_RST_PROBE_FAIL);
	return ret;
}

static void ucb1x00_remove(struct mcp *mcp)
{
	struct ucb1x00_plat_data *pdata = mcp->attached_device.platform_data;
	struct ucb1x00 *ucb = mcp_get_drvdata(mcp);
	struct list_head *l, *n;

	mutex_lock(&ucb1x00_mutex);
	list_del(&ucb->node);
	list_for_each_safe(l, n, &ucb->devs) {
		struct ucb1x00_dev *dev = list_entry(l, struct ucb1x00_dev, dev_node);
		ucb1x00_remove_dev(dev);
	}
	mutex_unlock(&ucb1x00_mutex);

	if (ucb->gpio.base != -1)
		gpiochip_remove(&ucb->gpio);

	irq_set_chained_handler(ucb->irq, NULL);
	irq_free_descs(ucb->irq_base, 16);
	device_unregister(&ucb->dev);

	if (pdata && pdata->reset)
		pdata->reset(UCB_RST_REMOVE);
}

int ucb1x00_register_driver(struct ucb1x00_driver *drv)
{
	struct ucb1x00 *ucb;

	INIT_LIST_HEAD(&drv->devs);
	mutex_lock(&ucb1x00_mutex);
	list_add_tail(&drv->node, &ucb1x00_drivers);
	list_for_each_entry(ucb, &ucb1x00_devices, node) {
		ucb1x00_add_dev(ucb, drv);
	}
	mutex_unlock(&ucb1x00_mutex);
	return 0;
}

void ucb1x00_unregister_driver(struct ucb1x00_driver *drv)
{
	struct list_head *n, *l;

	mutex_lock(&ucb1x00_mutex);
	list_del(&drv->node);
	list_for_each_safe(l, n, &drv->devs) {
		struct ucb1x00_dev *dev = list_entry(l, struct ucb1x00_dev, drv_node);
		ucb1x00_remove_dev(dev);
	}
	mutex_unlock(&ucb1x00_mutex);
}

static int ucb1x00_suspend(struct device *dev)
{
	struct ucb1x00_plat_data *pdata = dev_get_platdata(dev);
	struct ucb1x00 *ucb = dev_get_drvdata(dev);
	struct ucb1x00_dev *udev;

	mutex_lock(&ucb1x00_mutex);
	list_for_each_entry(udev, &ucb->devs, dev_node) {
		if (udev->drv->suspend)
			udev->drv->suspend(udev);
	}
	mutex_unlock(&ucb1x00_mutex);

	if (ucb->irq_wake) {
		unsigned long flags;

		raw_spin_lock_irqsave(&ucb->irq_lock, flags);
		ucb1x00_enable(ucb);
		ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl &
				  ucb->irq_wake);
		ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl &
				  ucb->irq_wake);
		ucb1x00_disable(ucb);
		raw_spin_unlock_irqrestore(&ucb->irq_lock, flags);

		enable_irq_wake(ucb->irq);
	} else if (pdata && pdata->reset)
		pdata->reset(UCB_RST_SUSPEND);

	return 0;
}

static int ucb1x00_resume(struct device *dev)
{
	struct ucb1x00_plat_data *pdata = dev_get_platdata(dev);
	struct ucb1x00 *ucb = dev_get_drvdata(dev);
	struct ucb1x00_dev *udev;

	if (!ucb->irq_wake && pdata && pdata->reset)
		pdata->reset(UCB_RST_RESUME);

	ucb1x00_enable(ucb);
	ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out);
	ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir);

	if (ucb->irq_wake) {
		unsigned long flags;

		raw_spin_lock_irqsave(&ucb->irq_lock, flags);
		ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl &
				  ucb->irq_mask);
		ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl &
				  ucb->irq_mask);
		raw_spin_unlock_irqrestore(&ucb->irq_lock, flags);

		disable_irq_wake(ucb->irq);
	}
	ucb1x00_disable(ucb);

	mutex_lock(&ucb1x00_mutex);
	list_for_each_entry(udev, &ucb->devs, dev_node) {
		if (udev->drv->resume)
			udev->drv->resume(udev);
	}
	mutex_unlock(&ucb1x00_mutex);
	return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(ucb1x00_pm_ops,
				ucb1x00_suspend, ucb1x00_resume);

static struct mcp_driver ucb1x00_driver = {
	.drv		= {
		.name	= "ucb1x00",
		.owner	= THIS_MODULE,
		.pm	= pm_sleep_ptr(&ucb1x00_pm_ops),
	},
	.probe		= ucb1x00_probe,
	.remove		= ucb1x00_remove,
};

static int __init ucb1x00_init(void)
{
	int ret = class_register(&ucb1x00_class);
	if (ret == 0) {
		ret = mcp_driver_register(&ucb1x00_driver);
		if (ret)
			class_unregister(&ucb1x00_class);
	}
	return ret;
}

static void __exit ucb1x00_exit(void)
{
	mcp_driver_unregister(&ucb1x00_driver);
	class_unregister(&ucb1x00_class);
}

module_init(ucb1x00_init);
module_exit(ucb1x00_exit);

EXPORT_SYMBOL(ucb1x00_io_set_dir);
EXPORT_SYMBOL(ucb1x00_io_write);
EXPORT_SYMBOL(ucb1x00_io_read);

EXPORT_SYMBOL(ucb1x00_adc_enable);
EXPORT_SYMBOL(ucb1x00_adc_read);
EXPORT_SYMBOL(ucb1x00_adc_disable);

EXPORT_SYMBOL(ucb1x00_register_driver);
EXPORT_SYMBOL(ucb1x00_unregister_driver);

MODULE_ALIAS("mcp:ucb1x00");
MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
MODULE_DESCRIPTION("UCB1x00 core driver");
MODULE_LICENSE("GPL");
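
/*
 * Illustrative sketch only, not part of this driver: a client of the core
 * (such as a touchscreen or audio driver) attaches by registering a
 * struct ucb1x00_driver.  The my_* names below are hypothetical.
 *
 *	static int my_add(struct ucb1x00_dev *dev)
 *	{
 *		return 0;			// dev->ucb gives access to the chip
 *	}
 *
 *	static void my_remove(struct ucb1x00_dev *dev)
 *	{
 *	}
 *
 *	static struct ucb1x00_driver my_driver = {
 *		.add	= my_add,
 *		.remove	= my_remove,
 *	};
 *
 *	ucb1x00_register_driver(&my_driver);	// typically from module init
 *	ucb1x00_unregister_driver(&my_driver);	// typically from module exit
 */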