// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for Motorola PCAP2 as present in EZX phones
 *
 * Copyright (C) 2006 Harald Welte <laforge@openezx.org>
 * Copyright (C) 2009 Daniel Ribeiro <drwyrm@gmail.com>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/mfd/ezx-pcap.h>
#include <linux/spi/spi.h>
#include <linux/gpio.h>
#include <linux/slab.h>

#define PCAP_ADC_MAXQ		8
struct pcap_adc_request {
	u8 bank;
	u8 ch[2];
	u32 flags;
	void (*callback)(void *, u16[]);
	void *data;
};

struct pcap_chip {
	struct spi_device *spi;

	/* IO */
	u32 buf;
	spinlock_t io_lock;

	/* IRQ */
	unsigned int irq_base;
	u32 msr;
	struct work_struct isr_work;
	struct work_struct msr_work;
	struct workqueue_struct *workqueue;

	/* ADC */
	struct pcap_adc_request *adc_queue[PCAP_ADC_MAXQ];
	u8 adc_head;
	u8 adc_tail;
	spinlock_t adc_lock;
};

/* IO */
static int ezx_pcap_putget(struct pcap_chip *pcap, u32 *data)
{
	struct spi_transfer t;
	struct spi_message m;
	int status;

	memset(&t, 0, sizeof(t));
	spi_message_init(&m);
	t.len = sizeof(u32);
	spi_message_add_tail(&t, &m);

	pcap->buf = *data;
	t.tx_buf = (u8 *) &pcap->buf;
	t.rx_buf = (u8 *) &pcap->buf;
	status = spi_sync(pcap->spi, &m);

	if (status == 0)
		*data = pcap->buf;

	return status;
}

int ezx_pcap_write(struct pcap_chip *pcap, u8 reg_num, u32 value)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pcap->io_lock, flags);
	value &= PCAP_REGISTER_VALUE_MASK;
	value |= PCAP_REGISTER_WRITE_OP_BIT
		| (reg_num << PCAP_REGISTER_ADDRESS_SHIFT);
	ret = ezx_pcap_putget(pcap, &value);
	spin_unlock_irqrestore(&pcap->io_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(ezx_pcap_write);

int ezx_pcap_read(struct pcap_chip *pcap, u8 reg_num, u32 *value)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pcap->io_lock, flags);
	*value = PCAP_REGISTER_READ_OP_BIT
		| (reg_num << PCAP_REGISTER_ADDRESS_SHIFT);

	ret = ezx_pcap_putget(pcap, value);
	spin_unlock_irqrestore(&pcap->io_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(ezx_pcap_read);

int ezx_pcap_set_bits(struct pcap_chip *pcap, u8 reg_num, u32 mask, u32 val)
{
	unsigned long flags;
	int ret;
	u32 tmp = PCAP_REGISTER_READ_OP_BIT |
		(reg_num << PCAP_REGISTER_ADDRESS_SHIFT);

	spin_lock_irqsave(&pcap->io_lock, flags);
	ret = ezx_pcap_putget(pcap, &tmp);
	if (ret)
		goto out_unlock;

	tmp &= (PCAP_REGISTER_VALUE_MASK & ~mask);
	tmp |= (val & mask) | PCAP_REGISTER_WRITE_OP_BIT |
		(reg_num << PCAP_REGISTER_ADDRESS_SHIFT);

	ret = ezx_pcap_putget(pcap, &tmp);
out_unlock:
	spin_unlock_irqrestore(&pcap->io_lock, flags);

	return ret;
}
EXPORT_SYMBOL_GPL(ezx_pcap_set_bits);

/* IRQ */
int irq_to_pcap(struct pcap_chip *pcap, int irq)
{
	return irq - pcap->irq_base;
}
EXPORT_SYMBOL_GPL(irq_to_pcap);

int pcap_to_irq(struct pcap_chip *pcap, int irq)
{
	return pcap->irq_base + irq;
}
EXPORT_SYMBOL_GPL(pcap_to_irq);

static void pcap_mask_irq(struct irq_data *d)
{
	struct pcap_chip *pcap = irq_data_get_irq_chip_data(d);

	pcap->msr |= 1 << irq_to_pcap(pcap, d->irq);
	queue_work(pcap->workqueue, &pcap->msr_work);
}

static void pcap_unmask_irq(struct irq_data *d)
{
	struct pcap_chip *pcap = irq_data_get_irq_chip_data(d);

	pcap->msr &= ~(1 << irq_to_pcap(pcap, d->irq));
	queue_work(pcap->workqueue, &pcap->msr_work);
}

static struct irq_chip pcap_irq_chip = {
	.name		= "pcap",
	.irq_disable	= pcap_mask_irq,
	.irq_mask	= pcap_mask_irq,
	.irq_unmask	= pcap_unmask_irq,
};

static void pcap_msr_work(struct work_struct *work)
{
	struct pcap_chip *pcap = container_of(work, struct pcap_chip, msr_work);

	ezx_pcap_write(pcap, PCAP_REG_MSR, pcap->msr);
}

static void pcap_isr_work(struct work_struct *work)
{
	struct pcap_chip *pcap = container_of(work, struct pcap_chip, isr_work);
	struct pcap_platform_data *pdata = dev_get_platdata(&pcap->spi->dev);
	u32 msr, isr, int_sel, service;
	int irq;

	do {
		ezx_pcap_read(pcap, PCAP_REG_MSR, &msr);
		ezx_pcap_read(pcap, PCAP_REG_ISR, &isr);

		/* We can't service/ack irqs that are assigned to port 2 */
		if (!(pdata->config & PCAP_SECOND_PORT)) {
			ezx_pcap_read(pcap, PCAP_REG_INT_SEL, &int_sel);
			isr &= ~int_sel;
		}

		ezx_pcap_write(pcap, PCAP_REG_MSR, isr | msr);
		ezx_pcap_write(pcap, PCAP_REG_ISR, isr);

		service = isr & ~msr;
		for (irq = pcap->irq_base; service; service >>= 1, irq++) {
			if (service & 1)
				generic_handle_irq_safe(irq);
		}
		ezx_pcap_write(pcap, PCAP_REG_MSR, pcap->msr);
	} while (gpio_get_value(pdata->gpio));
}

static void pcap_irq_handler(struct irq_desc *desc)
{
	struct pcap_chip *pcap = irq_desc_get_handler_data(desc);

	desc->irq_data.chip->irq_ack(&desc->irq_data);
	queue_work(pcap->workqueue, &pcap->isr_work);
}

/* ADC */
void pcap_set_ts_bits(struct pcap_chip *pcap, u32 bits)
{
	unsigned long flags;
	u32 tmp;

	spin_lock_irqsave(&pcap->adc_lock, flags);
	ezx_pcap_read(pcap, PCAP_REG_ADC, &tmp);
	tmp &= ~(PCAP_ADC_TS_M_MASK | PCAP_ADC_TS_REF_LOWPWR);
	tmp |= bits & (PCAP_ADC_TS_M_MASK | PCAP_ADC_TS_REF_LOWPWR);
	ezx_pcap_write(pcap, PCAP_REG_ADC, tmp);
	spin_unlock_irqrestore(&pcap->adc_lock, flags);
}
EXPORT_SYMBOL_GPL(pcap_set_ts_bits);

static void pcap_disable_adc(struct pcap_chip *pcap)
{
	u32 tmp;

	ezx_pcap_read(pcap, PCAP_REG_ADC, &tmp);
	tmp &= ~(PCAP_ADC_ADEN|PCAP_ADC_BATT_I_ADC|PCAP_ADC_BATT_I_POLARITY);
	ezx_pcap_write(pcap, PCAP_REG_ADC, tmp);
}

static void pcap_adc_trigger(struct pcap_chip *pcap)
{
	unsigned long flags;
	u32 tmp;
	u8 head;

	spin_lock_irqsave(&pcap->adc_lock, flags);
	head = pcap->adc_head;
	if (!pcap->adc_queue[head]) {
		/* queue is empty, save power */
		pcap_disable_adc(pcap);
		spin_unlock_irqrestore(&pcap->adc_lock, flags);
		return;
	}
	/* start conversion on requested bank, save TS_M bits */
	ezx_pcap_read(pcap, PCAP_REG_ADC, &tmp);
	tmp &= (PCAP_ADC_TS_M_MASK | PCAP_ADC_TS_REF_LOWPWR);
	tmp |= pcap->adc_queue[head]->flags | PCAP_ADC_ADEN;

	if (pcap->adc_queue[head]->bank == PCAP_ADC_BANK_1)
		tmp |= PCAP_ADC_AD_SEL1;

	ezx_pcap_write(pcap, PCAP_REG_ADC, tmp);
	spin_unlock_irqrestore(&pcap->adc_lock, flags);
	ezx_pcap_write(pcap, PCAP_REG_ADR, PCAP_ADR_ASC);
}

static irqreturn_t pcap_adc_irq(int irq, void *_pcap)
{
	struct pcap_chip *pcap = _pcap;
	struct pcap_adc_request *req;
	u16 res[2];
	u32 tmp;

	spin_lock(&pcap->adc_lock);
	req = pcap->adc_queue[pcap->adc_head];

	if (WARN(!req, "adc irq without pending request\n")) {
		spin_unlock(&pcap->adc_lock);
		return IRQ_HANDLED;
	}

	/* read requested channels results */
	ezx_pcap_read(pcap, PCAP_REG_ADC, &tmp);
	tmp &= ~(PCAP_ADC_ADA1_MASK | PCAP_ADC_ADA2_MASK);
	tmp |= (req->ch[0] << PCAP_ADC_ADA1_SHIFT);
	tmp |= (req->ch[1] << PCAP_ADC_ADA2_SHIFT);
	ezx_pcap_write(pcap, PCAP_REG_ADC, tmp);
	ezx_pcap_read(pcap, PCAP_REG_ADR, &tmp);
	res[0] = (tmp & PCAP_ADR_ADD1_MASK) >> PCAP_ADR_ADD1_SHIFT;
	res[1] = (tmp & PCAP_ADR_ADD2_MASK) >> PCAP_ADR_ADD2_SHIFT;

	pcap->adc_queue[pcap->adc_head] = NULL;
	pcap->adc_head = (pcap->adc_head + 1) & (PCAP_ADC_MAXQ - 1);
	spin_unlock(&pcap->adc_lock);

	/* pass the results and release memory */
	req->callback(req->data, res);
	kfree(req);

	/* trigger next conversion (if any) on queue */
	pcap_adc_trigger(pcap);

	return IRQ_HANDLED;
}

int pcap_adc_async(struct pcap_chip *pcap, u8 bank, u32 flags, u8 ch[],
		   void *callback, void *data)
{
	struct pcap_adc_request *req;
	unsigned long irq_flags;

	/* This will be freed after we have a result */
	req = kmalloc(sizeof(struct pcap_adc_request), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->bank = bank;
	req->flags = flags;
	req->ch[0] = ch[0];
	req->ch[1] = ch[1];
	req->callback = callback;
	req->data = data;

	spin_lock_irqsave(&pcap->adc_lock, irq_flags);
	if (pcap->adc_queue[pcap->adc_tail]) {
		spin_unlock_irqrestore(&pcap->adc_lock, irq_flags);
		kfree(req);
		return -EBUSY;
	}
	pcap->adc_queue[pcap->adc_tail] = req;
	pcap->adc_tail = (pcap->adc_tail + 1) & (PCAP_ADC_MAXQ - 1);
	spin_unlock_irqrestore(&pcap->adc_lock, irq_flags);

	/* start conversion */
	pcap_adc_trigger(pcap);

	return 0;
}
EXPORT_SYMBOL_GPL(pcap_adc_async);

/* subdevs */
static int pcap_remove_subdev(struct device *dev, void *unused)
{
	platform_device_unregister(to_platform_device(dev));
	return 0;
}

static int pcap_add_subdev(struct pcap_chip *pcap,
			   struct pcap_subdev *subdev)
{
	struct platform_device *pdev;
	int ret;

	pdev = platform_device_alloc(subdev->name, subdev->id);
	if (!pdev)
		return -ENOMEM;

	pdev->dev.parent = &pcap->spi->dev;
	pdev->dev.platform_data = subdev->platform_data;

	ret = platform_device_add(pdev);
	if (ret)
		platform_device_put(pdev);

	return ret;
}

static void ezx_pcap_remove(struct spi_device *spi)
{
	struct pcap_chip *pcap = spi_get_drvdata(spi);
	unsigned long flags;
	int i;

	/* remove all registered subdevs */
	device_for_each_child(&spi->dev, NULL, pcap_remove_subdev);

	/* cleanup ADC */
	spin_lock_irqsave(&pcap->adc_lock, flags);
	for (i = 0; i < PCAP_ADC_MAXQ; i++)
		kfree(pcap->adc_queue[i]);
	spin_unlock_irqrestore(&pcap->adc_lock, flags);

	/* cleanup irqchip */
	for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++)
		irq_set_chip_and_handler(i, NULL, NULL);

	destroy_workqueue(pcap->workqueue);
}

static int ezx_pcap_probe(struct spi_device *spi)
{
	struct pcap_platform_data *pdata = dev_get_platdata(&spi->dev);
	struct pcap_chip *pcap;
	int i, adc_irq;
	int ret = -ENODEV;

	/* platform data is required */
	if (!pdata)
		goto ret;

	pcap = devm_kzalloc(&spi->dev, sizeof(*pcap), GFP_KERNEL);
	if (!pcap) {
		ret = -ENOMEM;
		goto ret;
	}

	spin_lock_init(&pcap->io_lock);
	spin_lock_init(&pcap->adc_lock);
	INIT_WORK(&pcap->isr_work, pcap_isr_work);
	INIT_WORK(&pcap->msr_work, pcap_msr_work);
	spi_set_drvdata(spi, pcap);

	/* setup spi */
	spi->bits_per_word = 32;
	spi->mode = SPI_MODE_0 | (pdata->config & PCAP_CS_AH ? SPI_CS_HIGH : 0);
	ret = spi_setup(spi);
	if (ret)
		goto ret;

	pcap->spi = spi;

	/* setup irq */
	pcap->irq_base = pdata->irq_base;
	pcap->workqueue = create_singlethread_workqueue("pcapd");
	if (!pcap->workqueue) {
		ret = -ENOMEM;
		dev_err(&spi->dev, "can't create pcap thread\n");
		goto ret;
	}

	/* redirect interrupts to AP, except adcdone2 */
	if (!(pdata->config & PCAP_SECOND_PORT))
		ezx_pcap_write(pcap, PCAP_REG_INT_SEL,
					(1 << PCAP_IRQ_ADCDONE2));

	/* setup irq chip */
	for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++) {
		irq_set_chip_and_handler(i, &pcap_irq_chip, handle_simple_irq);
		irq_set_chip_data(i, pcap);
		irq_clear_status_flags(i, IRQ_NOREQUEST | IRQ_NOPROBE);
	}

	/* mask/ack all PCAP interrupts */
	ezx_pcap_write(pcap, PCAP_REG_MSR, PCAP_MASK_ALL_INTERRUPT);
	ezx_pcap_write(pcap, PCAP_REG_ISR, PCAP_CLEAR_INTERRUPT_REGISTER);
	pcap->msr = PCAP_MASK_ALL_INTERRUPT;

	irq_set_irq_type(spi->irq, IRQ_TYPE_EDGE_RISING);
	irq_set_chained_handler_and_data(spi->irq, pcap_irq_handler, pcap);
	irq_set_irq_wake(spi->irq, 1);

	/* ADC */
	adc_irq = pcap_to_irq(pcap, (pdata->config & PCAP_SECOND_PORT) ?
					PCAP_IRQ_ADCDONE2 : PCAP_IRQ_ADCDONE);

	ret = devm_request_irq(&spi->dev, adc_irq, pcap_adc_irq, 0, "ADC",
			       pcap);
	if (ret)
		goto free_irqchip;

	/* setup subdevs */
	for (i = 0; i < pdata->num_subdevs; i++) {
		ret = pcap_add_subdev(pcap, &pdata->subdevs[i]);
		if (ret)
			goto remove_subdevs;
	}

	/* board specific quirks */
	if (pdata->init)
		pdata->init(pcap);

	return 0;

remove_subdevs:
	device_for_each_child(&spi->dev, NULL, pcap_remove_subdev);
free_irqchip:
	for (i = pcap->irq_base; i < (pcap->irq_base + PCAP_NIRQS); i++)
		irq_set_chip_and_handler(i, NULL, NULL);
/* destroy_workqueue: */
	destroy_workqueue(pcap->workqueue);
ret:
	return ret;
}

static struct spi_driver ezxpcap_driver = {
	.probe	= ezx_pcap_probe,
	.remove = ezx_pcap_remove,
	.driver = {
		.name	= "ezx-pcap",
	},
};

static int __init ezx_pcap_init(void)
{
	return spi_register_driver(&ezxpcap_driver);
}

static void __exit ezx_pcap_exit(void)
{
	spi_unregister_driver(&ezxpcap_driver);
}

subsys_initcall(ezx_pcap_init);
module_exit(ezx_pcap_exit);

MODULE_AUTHOR("Daniel Ribeiro / Harald Welte");
MODULE_DESCRIPTION("Motorola PCAP2 ASIC Driver");
MODULE_ALIAS("spi:ezx-pcap");