Lines Matching +full:sync +full:- +full:pins

1 // SPDX-License-Identifier: GPL-2.0-only
3 * linux/drivers/mfd/ucb1x00-core.c
11 * to be used on other non-MCP-enabled hardware platforms.
35 * ucb1x00_io_set_dir - set IO direction
37 * @in: bitfield of IO pins to be set as inputs
38 * @out: bitfield of IO pins to be set as outputs
40 * Set the IO direction of the ten general purpose IO pins on
54 spin_lock_irqsave(&ucb->io_lock, flags); in ucb1x00_io_set_dir()
55 ucb->io_dir |= out; in ucb1x00_io_set_dir()
56 ucb->io_dir &= ~in; in ucb1x00_io_set_dir()
58 ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir); in ucb1x00_io_set_dir()
59 spin_unlock_irqrestore(&ucb->io_lock, flags); in ucb1x00_io_set_dir()
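
A minimal usage sketch for ucb1x00_io_set_dir(), assuming the prototype implied by the kerneldoc above (the ucb instance followed by the @in and @out bitfields); the pin masks below are purely illustrative:

	/* make pin 0 an input and pin 1 an output (bits 0..9 select the ten pins) */
	ucb1x00_io_set_dir(ucb, 1 << 0, 1 << 1);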
63 * ucb1x00_io_write - set or clear IO outputs
65 * @set: bitfield of IO pins to set to logic '1'
66 * @clear: bitfield of IO pins to set to logic '0'
68 * Set the IO output state of the specified IO pins. The value
69 * is retained if the pins are subsequently configured as inputs.
70 * The @clear bitfield has priority over the @set bitfield -
82 spin_lock_irqsave(&ucb->io_lock, flags); in ucb1x00_io_write()
83 ucb->io_out |= set; in ucb1x00_io_write()
84 ucb->io_out &= ~clear; in ucb1x00_io_write()
86 ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out); in ucb1x00_io_write()
87 spin_unlock_irqrestore(&ucb->io_lock, flags); in ucb1x00_io_write()
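
A similar sketch for driving a pin with ucb1x00_io_write(), again assuming the argument order from the kerneldoc (@set, then @clear):

	ucb1x00_io_write(ucb, 1 << 3, 0);	/* latch pin 3 to logic '1' */
	ucb1x00_io_write(ucb, 0, 1 << 3);	/* latch pin 3 to logic '0'; @clear wins if both bits are set */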
91 * ucb1x00_io_read - read the current state of the IO pins
95 * general purpose IO pins.
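
Reading the pins back could then look like the following sketch, assuming ucb1x00_io_read() returns the ten pin states as a bitfield in the low bits:

	unsigned int pins = ucb1x00_io_read(ucb);
	bool pin2_high = pins & (1 << 2);	/* sample the current level of pin 2 */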
113 spin_lock_irqsave(&ucb->io_lock, flags); in ucb1x00_gpio_set()
115 ucb->io_out |= 1 << offset; in ucb1x00_gpio_set()
117 ucb->io_out &= ~(1 << offset); in ucb1x00_gpio_set()
120 ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out); in ucb1x00_gpio_set()
122 spin_unlock_irqrestore(&ucb->io_lock, flags); in ucb1x00_gpio_set()
144 spin_lock_irqsave(&ucb->io_lock, flags); in ucb1x00_gpio_direction_input()
145 ucb->io_dir &= ~(1 << offset); in ucb1x00_gpio_direction_input()
147 ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir); in ucb1x00_gpio_direction_input()
149 spin_unlock_irqrestore(&ucb->io_lock, flags); in ucb1x00_gpio_direction_input()
161 spin_lock_irqsave(&ucb->io_lock, flags); in ucb1x00_gpio_direction_output()
162 old = ucb->io_out; in ucb1x00_gpio_direction_output()
164 ucb->io_out |= mask; in ucb1x00_gpio_direction_output()
166 ucb->io_out &= ~mask; in ucb1x00_gpio_direction_output()
169 if (old != ucb->io_out) in ucb1x00_gpio_direction_output()
170 ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out); in ucb1x00_gpio_direction_output()
172 if (!(ucb->io_dir & mask)) { in ucb1x00_gpio_direction_output()
173 ucb->io_dir |= mask; in ucb1x00_gpio_direction_output()
174 ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir); in ucb1x00_gpio_direction_output()
177 spin_unlock_irqrestore(&ucb->io_lock, flags); in ucb1x00_gpio_direction_output()
186 return ucb->irq_base > 0 ? ucb->irq_base + offset : -ENXIO; in ucb1x00_to_irq()
195 * Period between SIB 128-bit frames = 10.7us
199 * ucb1x00_adc_enable - enable the ADC converter
216 mutex_lock(&ucb->adc_mutex); in ucb1x00_adc_enable()
218 ucb->adc_cr |= UCB_ADC_ENA; in ucb1x00_adc_enable()
221 ucb1x00_reg_write(ucb, UCB_ADC_CR, ucb->adc_cr); in ucb1x00_adc_enable()
225 * ucb1x00_adc_read - read the specified ADC channel
228 * @sync: wait for synchronisation pulse.
235 * complete (2 frames max without sync).
240 unsigned int ucb1x00_adc_read(struct ucb1x00 *ucb, int adc_channel, int sync) in ucb1x00_adc_read() argument
244 if (sync) in ucb1x00_adc_read()
247 ucb1x00_reg_write(ucb, UCB_ADC_CR, ucb->adc_cr | adc_channel); in ucb1x00_adc_read()
248 ucb1x00_reg_write(ucb, UCB_ADC_CR, ucb->adc_cr | adc_channel | UCB_ADC_START); in ucb1x00_adc_read()
263 * ucb1x00_adc_disable - disable the ADC converter
270 ucb->adc_cr &= ~UCB_ADC_ENA; in ucb1x00_adc_disable()
271 ucb1x00_reg_write(ucb, UCB_ADC_CR, ucb->adc_cr); in ucb1x00_adc_disable()
274 mutex_unlock(&ucb->adc_mutex); in ucb1x00_adc_disable()
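
Putting the three ADC helpers together, a hedged sketch of a single conversion; UCB_ADC_INP_AD0 and UCB_NOSYNC are assumed to be the channel and sync selectors defined in ucb1x00.h:

	unsigned int val;

	ucb1x00_adc_enable(ucb);
	val = ucb1x00_adc_read(ucb, UCB_ADC_INP_AD0, UCB_NOSYNC);	/* blocks until the conversion completes */
	ucb1x00_adc_disable(ucb);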
281 * Since we need to read an internal register, we must re-enable
297 generic_handle_irq(ucb->irq_base + i); in ucb1x00_irq()
304 if (ucb->irq_ris_enbl & mask) in ucb1x00_irq_update()
305 ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl & in ucb1x00_irq_update()
306 ucb->irq_mask); in ucb1x00_irq_update()
307 if (ucb->irq_fal_enbl & mask) in ucb1x00_irq_update()
308 ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl & in ucb1x00_irq_update()
309 ucb->irq_mask); in ucb1x00_irq_update()
320 unsigned mask = 1 << (data->irq - ucb->irq_base); in ucb1x00_irq_mask()
322 raw_spin_lock(&ucb->irq_lock); in ucb1x00_irq_mask()
323 ucb->irq_mask &= ~mask; in ucb1x00_irq_mask()
325 raw_spin_unlock(&ucb->irq_lock); in ucb1x00_irq_mask()
331 unsigned mask = 1 << (data->irq - ucb->irq_base); in ucb1x00_irq_unmask()
333 raw_spin_lock(&ucb->irq_lock); in ucb1x00_irq_unmask()
334 ucb->irq_mask |= mask; in ucb1x00_irq_unmask()
336 raw_spin_unlock(&ucb->irq_lock); in ucb1x00_irq_unmask()
342 unsigned mask = 1 << (data->irq - ucb->irq_base); in ucb1x00_irq_set_type()
344 raw_spin_lock(&ucb->irq_lock); in ucb1x00_irq_set_type()
346 ucb->irq_ris_enbl |= mask; in ucb1x00_irq_set_type()
348 ucb->irq_ris_enbl &= ~mask; in ucb1x00_irq_set_type()
351 ucb->irq_fal_enbl |= mask; in ucb1x00_irq_set_type()
353 ucb->irq_fal_enbl &= ~mask; in ucb1x00_irq_set_type()
354 if (ucb->irq_mask & mask) { in ucb1x00_irq_set_type()
355 ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl & in ucb1x00_irq_set_type()
356 ucb->irq_mask); in ucb1x00_irq_set_type()
357 ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl & in ucb1x00_irq_set_type()
358 ucb->irq_mask); in ucb1x00_irq_set_type()
360 raw_spin_unlock(&ucb->irq_lock); in ucb1x00_irq_set_type()
368 struct ucb1x00_plat_data *pdata = ucb->mcp->attached_device.platform_data; in ucb1x00_irq_set_wake()
369 unsigned mask = 1 << (data->irq - ucb->irq_base); in ucb1x00_irq_set_wake()
371 if (!pdata || !pdata->can_wakeup) in ucb1x00_irq_set_wake()
372 return -EINVAL; in ucb1x00_irq_set_wake()
374 raw_spin_lock(&ucb->irq_lock); in ucb1x00_irq_set_wake()
376 ucb->irq_wake |= mask; in ucb1x00_irq_set_wake()
378 ucb->irq_wake &= ~mask; in ucb1x00_irq_set_wake()
379 raw_spin_unlock(&ucb->irq_lock); in ucb1x00_irq_set_wake()
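
The mask/unmask/set_type/set_wake handlers above are the kind of callbacks normally gathered into a struct irq_chip; a sketch of that wiring (the real file may install further hooks, e.g. an ack handler, which this excerpt does not show):

static struct irq_chip ucb1x00_irqchip = {
	.name		= "ucb1x00",
	.irq_mask	= ucb1x00_irq_mask,
	.irq_unmask	= ucb1x00_irq_unmask,
	.irq_set_type	= ucb1x00_irq_set_type,
	.irq_set_wake	= ucb1x00_irq_set_wake,
};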
400 return -ENOMEM; in ucb1x00_add_dev()
402 dev->ucb = ucb; in ucb1x00_add_dev()
403 dev->drv = drv; in ucb1x00_add_dev()
405 ret = drv->add(dev); in ucb1x00_add_dev()
411 list_add_tail(&dev->dev_node, &ucb->devs); in ucb1x00_add_dev()
412 list_add_tail(&dev->drv_node, &drv->devs); in ucb1x00_add_dev()
419 dev->drv->remove(dev); in ucb1x00_remove_dev()
420 list_del(&dev->dev_node); in ucb1x00_remove_dev()
421 list_del(&dev->drv_node); in ucb1x00_remove_dev()
427 * hard-coded machine dependencies. For reference, the expected
497 struct ucb1x00_plat_data *pdata = mcp->attached_device.platform_data; in ucb1x00_probe()
501 int ret = -ENODEV; in ucb1x00_probe()
504 if (pdata && pdata->reset) in ucb1x00_probe()
505 pdata->reset(UCB_RST_PROBE); in ucb1x00_probe()
517 ret = -ENOMEM; in ucb1x00_probe()
521 device_initialize(&ucb->dev); in ucb1x00_probe()
522 ucb->dev.class = &ucb1x00_class; in ucb1x00_probe()
523 ucb->dev.parent = &mcp->attached_device; in ucb1x00_probe()
524 dev_set_name(&ucb->dev, "ucb1x00"); in ucb1x00_probe()
526 raw_spin_lock_init(&ucb->irq_lock); in ucb1x00_probe()
527 spin_lock_init(&ucb->io_lock); in ucb1x00_probe()
528 mutex_init(&ucb->adc_mutex); in ucb1x00_probe()
530 ucb->id = id; in ucb1x00_probe()
531 ucb->mcp = mcp; in ucb1x00_probe()
533 ret = device_add(&ucb->dev); in ucb1x00_probe()
538 ucb->irq = ucb1x00_detect_irq(ucb); in ucb1x00_probe()
540 if (!ucb->irq) { in ucb1x00_probe()
541 dev_err(&ucb->dev, "IRQ probe failed\n"); in ucb1x00_probe()
542 ret = -ENODEV; in ucb1x00_probe()
546 ucb->gpio.base = -1; in ucb1x00_probe()
547 irq_base = pdata ? pdata->irq_base : 0; in ucb1x00_probe()
548 ucb->irq_base = irq_alloc_descs(-1, irq_base, 16, -1); in ucb1x00_probe()
549 if (ucb->irq_base < 0) { in ucb1x00_probe()
550 dev_err(&ucb->dev, "unable to allocate 16 irqs: %d\n", in ucb1x00_probe()
551 ucb->irq_base); in ucb1x00_probe()
552 ret = ucb->irq_base; in ucb1x00_probe()
557 unsigned irq = ucb->irq_base + i; in ucb1x00_probe()
564 irq_set_irq_type(ucb->irq, IRQ_TYPE_EDGE_RISING); in ucb1x00_probe()
565 irq_set_chained_handler_and_data(ucb->irq, ucb1x00_irq, ucb); in ucb1x00_probe()
567 if (pdata && pdata->gpio_base) { in ucb1x00_probe()
568 ucb->gpio.label = dev_name(&ucb->dev); in ucb1x00_probe()
569 ucb->gpio.parent = &ucb->dev; in ucb1x00_probe()
570 ucb->gpio.owner = THIS_MODULE; in ucb1x00_probe()
571 ucb->gpio.base = pdata->gpio_base; in ucb1x00_probe()
572 ucb->gpio.ngpio = 10; in ucb1x00_probe()
573 ucb->gpio.set = ucb1x00_gpio_set; in ucb1x00_probe()
574 ucb->gpio.get = ucb1x00_gpio_get; in ucb1x00_probe()
575 ucb->gpio.direction_input = ucb1x00_gpio_direction_input; in ucb1x00_probe()
576 ucb->gpio.direction_output = ucb1x00_gpio_direction_output; in ucb1x00_probe()
577 ucb->gpio.to_irq = ucb1x00_to_irq; in ucb1x00_probe()
578 ret = gpiochip_add_data(&ucb->gpio, ucb); in ucb1x00_probe()
582 dev_info(&ucb->dev, "gpio_base not set so no gpiolib support\n"); in ucb1x00_probe()
587 device_set_wakeup_capable(&ucb->dev, pdata->can_wakeup); in ucb1x00_probe()
589 INIT_LIST_HEAD(&ucb->devs); in ucb1x00_probe()
591 list_add_tail(&ucb->node, &ucb1x00_devices); in ucb1x00_probe()
600 irq_set_chained_handler(ucb->irq, NULL); in ucb1x00_probe()
602 if (ucb->irq_base > 0) in ucb1x00_probe()
603 irq_free_descs(ucb->irq_base, 16); in ucb1x00_probe()
605 device_del(&ucb->dev); in ucb1x00_probe()
607 put_device(&ucb->dev); in ucb1x00_probe()
609 if (pdata && pdata->reset) in ucb1x00_probe()
610 pdata->reset(UCB_RST_PROBE_FAIL); in ucb1x00_probe()
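
For reference, a board file could supply the platform data fields the probe, suspend and remove paths consult (reset, irq_base, gpio_base, can_wakeup). A sketch assuming the struct ucb1x00_plat_data layout those accesses imply; the enum name for the reset argument is inferred from the UCB_RST_* values used above, and the board constants are hypothetical:

static void my_board_ucb_reset(enum ucb1x00_reset state)
{
	/* toggle the board's UCB1x00 reset line as required for this state */
}

static struct ucb1x00_plat_data my_board_ucb_data = {
	.reset		= my_board_ucb_reset,
	.irq_base	= MY_BOARD_UCB_IRQ_BASE,	/* hypothetical */
	.gpio_base	= MY_BOARD_UCB_GPIO_BASE,	/* hypothetical */
	.can_wakeup	= 1,
};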
616 struct ucb1x00_plat_data *pdata = mcp->attached_device.platform_data; in ucb1x00_remove()
621 list_del(&ucb->node); in ucb1x00_remove()
622 list_for_each_safe(l, n, &ucb->devs) { in ucb1x00_remove()
628 if (ucb->gpio.base != -1) in ucb1x00_remove()
629 gpiochip_remove(&ucb->gpio); in ucb1x00_remove()
631 irq_set_chained_handler(ucb->irq, NULL); in ucb1x00_remove()
632 irq_free_descs(ucb->irq_base, 16); in ucb1x00_remove()
633 device_unregister(&ucb->dev); in ucb1x00_remove()
635 if (pdata && pdata->reset) in ucb1x00_remove()
636 pdata->reset(UCB_RST_REMOVE); in ucb1x00_remove()
643 INIT_LIST_HEAD(&drv->devs); in ucb1x00_register_driver()
645 list_add_tail(&drv->node, &ucb1x00_drivers); in ucb1x00_register_driver()
658 list_del(&drv->node); in ucb1x00_unregister_driver()
659 list_for_each_safe(l, n, &drv->devs) { in ucb1x00_unregister_driver()
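
A minimal function-driver sketch against the add()/remove() hooks the core invokes above; the names here are hypothetical, and optional .suspend/.resume callbacks (used in the suspend/resume paths below) could be wired up the same way:

static int my_add(struct ucb1x00_dev *dev)
{
	/* dev->ucb is the core instance: claim pins, ADC channels, IRQs here */
	return 0;
}

static void my_remove(struct ucb1x00_dev *dev)
{
	/* undo whatever my_add() set up */
}

static struct ucb1x00_driver my_driver = {
	.add	= my_add,
	.remove	= my_remove,
};

static int __init my_init(void)
{
	return ucb1x00_register_driver(&my_driver);	/* assumed to return 0 on success */
}

static void __exit my_exit(void)
{
	ucb1x00_unregister_driver(&my_driver);
}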
673 list_for_each_entry(udev, &ucb->devs, dev_node) { in ucb1x00_suspend()
674 if (udev->drv->suspend) in ucb1x00_suspend()
675 udev->drv->suspend(udev); in ucb1x00_suspend()
679 if (ucb->irq_wake) { in ucb1x00_suspend()
682 raw_spin_lock_irqsave(&ucb->irq_lock, flags); in ucb1x00_suspend()
684 ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl & in ucb1x00_suspend()
685 ucb->irq_wake); in ucb1x00_suspend()
686 ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl & in ucb1x00_suspend()
687 ucb->irq_wake); in ucb1x00_suspend()
689 raw_spin_unlock_irqrestore(&ucb->irq_lock, flags); in ucb1x00_suspend()
691 enable_irq_wake(ucb->irq); in ucb1x00_suspend()
692 } else if (pdata && pdata->reset) in ucb1x00_suspend()
693 pdata->reset(UCB_RST_SUSPEND); in ucb1x00_suspend()
704 if (!ucb->irq_wake && pdata && pdata->reset) in ucb1x00_resume()
705 pdata->reset(UCB_RST_RESUME); in ucb1x00_resume()
708 ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out); in ucb1x00_resume()
709 ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir); in ucb1x00_resume()
711 if (ucb->irq_wake) { in ucb1x00_resume()
714 raw_spin_lock_irqsave(&ucb->irq_lock, flags); in ucb1x00_resume()
715 ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl & in ucb1x00_resume()
716 ucb->irq_mask); in ucb1x00_resume()
717 ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl & in ucb1x00_resume()
718 ucb->irq_mask); in ucb1x00_resume()
719 raw_spin_unlock_irqrestore(&ucb->irq_lock, flags); in ucb1x00_resume()
721 disable_irq_wake(ucb->irq); in ucb1x00_resume()
726 list_for_each_entry(udev, &ucb->devs, dev_node) { in ucb1x00_resume()
727 if (udev->drv->resume) in ucb1x00_resume()
728 udev->drv->resume(udev); in ucb1x00_resume()