// SPDX-License-Identifier: GPL-2.0-only
/*
 * TI ADC MFD driver
 *
 * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/
 */

#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/iio/iio.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/iio/machine.h>
#include <linux/iio/driver.h>
#include <linux/iopoll.h>

#include <linux/mfd/ti_am335x_tscadc.h>
#include <linux/iio/buffer.h>
#include <linux/iio/kfifo_buf.h>

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>

#define DMA_BUFFER_SIZE		SZ_2K

struct tiadc_dma {
	struct dma_slave_config conf;
	struct dma_chan *chan;
	dma_addr_t addr;
	dma_cookie_t cookie;
	u8 *buf;
	int current_period;
	int period_size;
	u8 fifo_thresh;
};

struct tiadc_device {
	struct ti_tscadc_dev *mfd_tscadc;
	struct tiadc_dma dma;
	struct mutex fifo1_lock; /* to protect fifo access */
	int channels;
	int total_ch_enabled;
	u8 channel_line[8];
	u8 channel_step[8];
	int buffer_en_ch_steps;
	u16 data[8];
	u32 open_delay[8], sample_delay[8], step_avg[8];
};

static unsigned int tiadc_readl(struct tiadc_device *adc, unsigned int reg)
{
	return readl(adc->mfd_tscadc->tscadc_base + reg);
}

static void tiadc_writel(struct tiadc_device *adc, unsigned int reg,
			 unsigned int val)
{
	writel(val, adc->mfd_tscadc->tscadc_base + reg);
}

static u32 get_adc_step_mask(struct tiadc_device *adc_dev)
{
	u32 step_en;

	step_en = ((1 << adc_dev->channels) - 1);
	step_en <<= TOTAL_STEPS - adc_dev->channels + 1;
	return step_en;
}

static u32 get_adc_chan_step_mask(struct tiadc_device *adc_dev,
				  struct iio_chan_spec const *chan)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(adc_dev->channel_step); i++) {
		if (chan->channel == adc_dev->channel_line[i]) {
			u32 step;

			step = adc_dev->channel_step[i];
			/* +1 for the charger */
			return 1 << (step + 1);
		}
	}
	WARN_ON(1);
	return 0;
}

static u32 get_adc_step_bit(struct tiadc_device *adc_dev, int chan)
{
	return 1 << adc_dev->channel_step[chan];
}

static int tiadc_wait_idle(struct tiadc_device *adc_dev)
{
	u32 val;

	return readl_poll_timeout(adc_dev->mfd_tscadc->tscadc_base + REG_ADCFSM,
				  val, !(val & SEQ_STATUS), 10,
				  IDLE_TIMEOUT_MS * 1000 * adc_dev->channels);
}

static void tiadc_step_config(struct iio_dev *indio_dev)
{
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	unsigned int stepconfig;
	int i, steps = 0;

	/*
	 * There are 16 configurable steps and 8 analog input
	 * lines available, shared between the touchscreen and the ADC.
	 *
	 * Steps are assigned to the ADC in ascending order, from 0
	 * towards 16, depending on how many input lines are needed.
	 * The channel number selects which analog input is routed to
	 * the ADC for conversion.
	 */
	for (i = 0; i < adc_dev->channels; i++) {
		int chan;

		chan = adc_dev->channel_line[i];

		if (adc_dev->step_avg[i])
			stepconfig = STEPCONFIG_AVG(ffs(adc_dev->step_avg[i]) - 1) |
				     STEPCONFIG_FIFO1;
		else
			stepconfig = STEPCONFIG_FIFO1;

		if (iio_buffer_enabled(indio_dev))
			stepconfig |= STEPCONFIG_MODE_SWCNT;

		tiadc_writel(adc_dev, REG_STEPCONFIG(steps),
			     stepconfig | STEPCONFIG_INP(chan) |
			     STEPCONFIG_INM_ADCREFM | STEPCONFIG_RFP_VREFP |
			     STEPCONFIG_RFM_VREFN);

		tiadc_writel(adc_dev, REG_STEPDELAY(steps),
			     STEPDELAY_OPEN(adc_dev->open_delay[i]) |
			     STEPDELAY_SAMPLE(adc_dev->sample_delay[i]));

		adc_dev->channel_step[i] = steps;
		steps++;
	}
}

static irqreturn_t tiadc_irq_h(int irq, void *private)
{
	struct iio_dev *indio_dev = private;
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	unsigned int status, config, adc_fsm;
	unsigned short count = 0;

	status = tiadc_readl(adc_dev, REG_IRQSTATUS);

	/*
	 * ADC and touchscreen share the IRQ line.
	 * FIFO0 interrupts are used by TSC. Handle FIFO1 IRQs here only.
	 */
	if (status & IRQENB_FIFO1OVRRUN) {
		/* FIFO Overrun. Clear flag. Disable/Enable ADC to recover */
		config = tiadc_readl(adc_dev, REG_CTRL);
		config &= ~(CNTRLREG_SSENB);
		tiadc_writel(adc_dev, REG_CTRL, config);
		tiadc_writel(adc_dev, REG_IRQSTATUS,
			     IRQENB_FIFO1OVRRUN | IRQENB_FIFO1UNDRFLW |
			     IRQENB_FIFO1THRES);

		/*
		 * Wait for the idle state.
		 * ADC needs to finish the current conversion
		 * before disabling the module.
		 */
		do {
			adc_fsm = tiadc_readl(adc_dev, REG_ADCFSM);
		} while (adc_fsm != 0x10 && count++ < 100);

		tiadc_writel(adc_dev, REG_CTRL, (config | CNTRLREG_SSENB));
		return IRQ_HANDLED;
	} else if (status & IRQENB_FIFO1THRES) {
		/* Disable irq and wake worker thread */
		tiadc_writel(adc_dev, REG_IRQCLR, IRQENB_FIFO1THRES);
		return IRQ_WAKE_THREAD;
	}

	return IRQ_NONE;
}

static irqreturn_t tiadc_worker_h(int irq, void *private)
{
	struct iio_dev *indio_dev = private;
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	int i, k, fifo1count, read;
	u16 *data = adc_dev->data;

	fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT);
	for (k = 0; k < fifo1count; k = k + i) {
		for (i = 0; i < indio_dev->scan_bytes / 2; i++) {
			read = tiadc_readl(adc_dev, REG_FIFO1);
			data[i] = read & FIFOREAD_DATA_MASK;
		}
		iio_push_to_buffers(indio_dev, (u8 *)data);
	}

	tiadc_writel(adc_dev, REG_IRQSTATUS, IRQENB_FIFO1THRES);
	tiadc_writel(adc_dev, REG_IRQENABLE, IRQENB_FIFO1THRES);

	return IRQ_HANDLED;
}

static void tiadc_dma_rx_complete(void *param)
{
	struct iio_dev *indio_dev = param;
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	struct tiadc_dma *dma = &adc_dev->dma;
	u8 *data;
	int i;

	data = dma->buf + dma->current_period * dma->period_size;
	dma->current_period = 1 - dma->current_period; /* swap the buffer ID */

	for (i = 0; i < dma->period_size; i += indio_dev->scan_bytes) {
		iio_push_to_buffers(indio_dev, data);
		data += indio_dev->scan_bytes;
	}
}
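/*
 * A worked example of the sizing done in tiadc_start_dma() below, assuming
 * FIFO1_THRESHOLD is 19 (the value provided by the ti_am335x_tscadc MFD
 * header) and three channels enabled for the buffer:
 *
 *	fifo_thresh = rounddown(19 + 1, 3) - 1          = 17
 *	period_size = rounddown(2048 / 2, (17 + 1) * 2) = 1008 bytes
 *
 * Each cyclic DMA period then holds 504 samples, i.e. 168 complete
 * three-channel scans, so every period boundary falls on a whole scan and
 * iio_push_to_buffers() never sees a partial sample set.
 */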
static int tiadc_start_dma(struct iio_dev *indio_dev)
{
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	struct tiadc_dma *dma = &adc_dev->dma;
	struct dma_async_tx_descriptor *desc;

	dma->current_period = 0; /* We start to fill period 0 */

	/*
	 * Make the FIFO threshold a multiple of the number of enabled
	 * channels, and keep the cyclic DMA period length a multiple of
	 * it as well. This ensures that no partial scan is ever reported
	 * to the stack via iio_push_to_buffers().
	 */
	dma->fifo_thresh = rounddown(FIFO1_THRESHOLD + 1,
				     adc_dev->total_ch_enabled) - 1;

	/* Make sure that period length is multiple of fifo thresh level */
	dma->period_size = rounddown(DMA_BUFFER_SIZE / 2,
				     (dma->fifo_thresh + 1) * sizeof(u16));

	dma->conf.src_maxburst = dma->fifo_thresh + 1;
	dmaengine_slave_config(dma->chan, &dma->conf);

	desc = dmaengine_prep_dma_cyclic(dma->chan, dma->addr,
					 dma->period_size * 2,
					 dma->period_size, DMA_DEV_TO_MEM,
					 DMA_PREP_INTERRUPT);
	if (!desc)
		return -EBUSY;

	desc->callback = tiadc_dma_rx_complete;
	desc->callback_param = indio_dev;

	dma->cookie = dmaengine_submit(desc);

	dma_async_issue_pending(dma->chan);

	tiadc_writel(adc_dev, REG_FIFO1THR, dma->fifo_thresh);
	tiadc_writel(adc_dev, REG_DMA1REQ, dma->fifo_thresh);
	tiadc_writel(adc_dev, REG_DMAENABLE_SET, DMA_FIFO1);

	return 0;
}

static int tiadc_buffer_preenable(struct iio_dev *indio_dev)
{
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	int i, fifo1count;
	int ret;

	ret = tiadc_wait_idle(adc_dev);
	if (ret)
		return ret;

	tiadc_writel(adc_dev, REG_IRQCLR,
		     IRQENB_FIFO1THRES | IRQENB_FIFO1OVRRUN |
		     IRQENB_FIFO1UNDRFLW);

	/* Flush FIFO. Needed in corner cases in simultaneous tsc/adc use */
	fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT);
	for (i = 0; i < fifo1count; i++)
		tiadc_readl(adc_dev, REG_FIFO1);

	return 0;
}

static int tiadc_buffer_postenable(struct iio_dev *indio_dev)
{
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	struct tiadc_dma *dma = &adc_dev->dma;
	unsigned int irq_enable;
	unsigned int enb = 0;
	u8 bit;

	tiadc_step_config(indio_dev);
	for_each_set_bit(bit, indio_dev->active_scan_mask, adc_dev->channels) {
		enb |= (get_adc_step_bit(adc_dev, bit) << 1);
		adc_dev->total_ch_enabled++;
	}
	adc_dev->buffer_en_ch_steps = enb;

	if (dma->chan)
		tiadc_start_dma(indio_dev);

	am335x_tsc_se_set_cache(adc_dev->mfd_tscadc, enb);

	tiadc_writel(adc_dev, REG_IRQSTATUS,
		     IRQENB_FIFO1THRES | IRQENB_FIFO1OVRRUN |
		     IRQENB_FIFO1UNDRFLW);

	irq_enable = IRQENB_FIFO1OVRRUN;
	if (!dma->chan)
		irq_enable |= IRQENB_FIFO1THRES;
	tiadc_writel(adc_dev, REG_IRQENABLE, irq_enable);

	return 0;
}

static int tiadc_buffer_predisable(struct iio_dev *indio_dev)
{
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	struct tiadc_dma *dma = &adc_dev->dma;
	int fifo1count, i;

	tiadc_writel(adc_dev, REG_IRQCLR,
		     IRQENB_FIFO1THRES | IRQENB_FIFO1OVRRUN |
		     IRQENB_FIFO1UNDRFLW);
	am335x_tsc_se_clr(adc_dev->mfd_tscadc, adc_dev->buffer_en_ch_steps);
	adc_dev->buffer_en_ch_steps = 0;
	adc_dev->total_ch_enabled = 0;
	if (dma->chan) {
		tiadc_writel(adc_dev, REG_DMAENABLE_CLEAR, 0x2);
		dmaengine_terminate_async(dma->chan);
	}

	/* Flush FIFO of leftover data in the time it takes to disable adc */
	fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT);
	for (i = 0; i < fifo1count; i++)
		tiadc_readl(adc_dev, REG_FIFO1);

	return 0;
}
static int tiadc_buffer_postdisable(struct iio_dev *indio_dev)
{
	tiadc_step_config(indio_dev);

	return 0;
}

static const struct iio_buffer_setup_ops tiadc_buffer_setup_ops = {
	.preenable = &tiadc_buffer_preenable,
	.postenable = &tiadc_buffer_postenable,
	.predisable = &tiadc_buffer_predisable,
	.postdisable = &tiadc_buffer_postdisable,
};

static int tiadc_iio_buffered_hardware_setup(struct device *dev,
					     struct iio_dev *indio_dev,
					     irqreturn_t (*pollfunc_bh)(int irq, void *p),
					     irqreturn_t (*pollfunc_th)(int irq, void *p),
					     int irq, unsigned long flags,
					     const struct iio_buffer_setup_ops *setup_ops)
{
	int ret;

	ret = devm_iio_kfifo_buffer_setup(dev, indio_dev, setup_ops);
	if (ret)
		return ret;

	return devm_request_threaded_irq(dev, irq, pollfunc_th, pollfunc_bh,
					 flags, indio_dev->name, indio_dev);
}

static const char * const chan_name_ain[] = {
	"AIN0",
	"AIN1",
	"AIN2",
	"AIN3",
	"AIN4",
	"AIN5",
	"AIN6",
	"AIN7",
};

static int tiadc_channel_init(struct device *dev, struct iio_dev *indio_dev,
			      int channels)
{
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	struct iio_chan_spec *chan_array;
	struct iio_chan_spec *chan;
	int i;

	indio_dev->num_channels = channels;
	chan_array = devm_kcalloc(dev, channels, sizeof(*chan_array),
				  GFP_KERNEL);
	if (!chan_array)
		return -ENOMEM;

	chan = chan_array;
	for (i = 0; i < channels; i++, chan++) {
		chan->type = IIO_VOLTAGE;
		chan->indexed = 1;
		chan->channel = adc_dev->channel_line[i];
		chan->info_mask_separate = BIT(IIO_CHAN_INFO_RAW);
		chan->info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE);
		chan->datasheet_name = chan_name_ain[chan->channel];
		chan->scan_index = i;
		chan->scan_type.sign = 'u';
		chan->scan_type.realbits = 12;
		chan->scan_type.storagebits = 16;
	}

	indio_dev->channels = chan_array;

	return 0;
}

static int tiadc_read_raw(struct iio_dev *indio_dev,
			  struct iio_chan_spec const *chan, int *val, int *val2,
			  long mask)
{
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	int i, map_val;
	unsigned int fifo1count, read, stepid;
	bool found = false;
	u32 step_en;
	unsigned long timeout;
	int ret;

	switch (mask) {
	case IIO_CHAN_INFO_RAW:
		break;
	case IIO_CHAN_INFO_SCALE:
		switch (chan->type) {
		case IIO_VOLTAGE:
			*val = 1800;
			*val2 = chan->scan_type.realbits;
			return IIO_VAL_FRACTIONAL_LOG2;
		default:
			return -EINVAL;
		}
		break;
	default:
		return -EINVAL;
	}

	if (iio_buffer_enabled(indio_dev))
		return -EBUSY;

	step_en = get_adc_chan_step_mask(adc_dev, chan);
	if (!step_en)
		return -EINVAL;

	mutex_lock(&adc_dev->fifo1_lock);

	ret = tiadc_wait_idle(adc_dev);
	if (ret)
		goto err_unlock;

	fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT);
	while (fifo1count--)
		tiadc_readl(adc_dev, REG_FIFO1);

	am335x_tsc_se_set_once(adc_dev->mfd_tscadc, step_en);

	/* Wait for Fifo threshold interrupt */
	timeout = jiffies + msecs_to_jiffies(IDLE_TIMEOUT_MS * adc_dev->channels);
	while (1) {
		fifo1count = tiadc_readl(adc_dev, REG_FIFO1CNT);
		if (fifo1count)
			break;

		if (time_after(jiffies, timeout)) {
			am335x_tsc_se_adc_done(adc_dev->mfd_tscadc);
			ret = -EAGAIN;
			goto err_unlock;
		}
	}

	map_val = adc_dev->channel_step[chan->scan_index];

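	/*
	 * FIFO word layout, as implied by the masks used below: the low
	 * bits selected by FIFOREAD_DATA_MASK carry the 12-bit sample,
	 * while the ID of the step that produced it lives in the field
	 * selected by FIFOREAD_CHNLID_MASK, hence the shift by 16 (0x10)
	 * before comparing against map_val.
	 */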
	/*
	 * We check the complete FIFO. We programmed just one entry, but in
	 * case something went wrong and we left empty-handed (-EAGAIN
	 * previously) and the value then appeared somehow in the FIFO, we
	 * would have two entries. Therefore we read every item and keep
	 * only the latest version of the requested channel.
	 */
	for (i = 0; i < fifo1count; i++) {
		read = tiadc_readl(adc_dev, REG_FIFO1);
		stepid = read & FIFOREAD_CHNLID_MASK;
		stepid = stepid >> 0x10;

		if (stepid == map_val) {
			read = read & FIFOREAD_DATA_MASK;
			found = true;
			*val = (u16)read;
		}
	}

	am335x_tsc_se_adc_done(adc_dev->mfd_tscadc);

	if (!found)
		ret = -EBUSY;

err_unlock:
	mutex_unlock(&adc_dev->fifo1_lock);
	return ret ? ret : IIO_VAL_INT;
}

static const struct iio_info tiadc_info = {
	.read_raw = &tiadc_read_raw,
};

static int tiadc_request_dma(struct platform_device *pdev,
			     struct tiadc_device *adc_dev)
{
	struct tiadc_dma *dma = &adc_dev->dma;
	dma_cap_mask_t mask;

	/* Default slave configuration parameters */
	dma->conf.direction = DMA_DEV_TO_MEM;
	dma->conf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
	dma->conf.src_addr = adc_dev->mfd_tscadc->tscadc_phys_base + REG_FIFO1;

	dma_cap_zero(mask);
	dma_cap_set(DMA_CYCLIC, mask);

	/* Get a channel for RX */
	dma->chan = dma_request_chan(adc_dev->mfd_tscadc->dev, "fifo1");
	if (IS_ERR(dma->chan)) {
		int ret = PTR_ERR(dma->chan);

		dma->chan = NULL;
		return ret;
	}

	/* RX buffer */
	dma->buf = dma_alloc_coherent(dma->chan->device->dev, DMA_BUFFER_SIZE,
				      &dma->addr, GFP_KERNEL);
	if (!dma->buf)
		goto err;

	return 0;

err:
	dma_release_channel(dma->chan);
	return -ENOMEM;
}

static int tiadc_parse_dt(struct platform_device *pdev,
			  struct tiadc_device *adc_dev)
{
	struct device_node *node = pdev->dev.of_node;
	struct property *prop;
	const __be32 *cur;
	int channels = 0;
	u32 val;
	int i;

	of_property_for_each_u32(node, "ti,adc-channels", prop, cur, val) {
		adc_dev->channel_line[channels] = val;

		/* Set default values for optional DT parameters */
		adc_dev->open_delay[channels] = STEPCONFIG_OPENDLY;
		adc_dev->sample_delay[channels] = STEPCONFIG_SAMPLEDLY;
		adc_dev->step_avg[channels] = 16;

		channels++;
	}

	adc_dev->channels = channels;

	of_property_read_u32_array(node, "ti,chan-step-avg",
				   adc_dev->step_avg, channels);
	of_property_read_u32_array(node, "ti,chan-step-opendelay",
				   adc_dev->open_delay, channels);
	of_property_read_u32_array(node, "ti,chan-step-sampledelay",
				   adc_dev->sample_delay, channels);

	for (i = 0; i < adc_dev->channels; i++) {
		int chan;

		chan = adc_dev->channel_line[i];

		if (adc_dev->step_avg[i] > STEPCONFIG_AVG_16) {
			dev_warn(&pdev->dev,
				 "chan %d: wrong step avg, truncated to %ld\n",
				 chan, STEPCONFIG_AVG_16);
			adc_dev->step_avg[i] = STEPCONFIG_AVG_16;
		}

		if (adc_dev->open_delay[i] > STEPCONFIG_MAX_OPENDLY) {
			dev_warn(&pdev->dev,
				 "chan %d: wrong open delay, truncated to 0x%lX\n",
				 chan, STEPCONFIG_MAX_OPENDLY);
			adc_dev->open_delay[i] = STEPCONFIG_MAX_OPENDLY;
		}

		if (adc_dev->sample_delay[i] > STEPCONFIG_MAX_SAMPLE) {
			dev_warn(&pdev->dev,
				 "chan %d: wrong sample delay, truncated to 0x%lX\n",
				 chan, STEPCONFIG_MAX_SAMPLE);
			adc_dev->sample_delay[i] = STEPCONFIG_MAX_SAMPLE;
		}
	}

	return 0;
}

static int tiadc_probe(struct platform_device *pdev)
{
	struct iio_dev *indio_dev;
	struct tiadc_device *adc_dev;
	struct device_node *node = pdev->dev.of_node;
	int err;

	if (!node) {
		dev_err(&pdev->dev, "Could not find valid DT data.\n");
		return -EINVAL;
	}

	indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*adc_dev));
	if (!indio_dev) {
		dev_err(&pdev->dev, "failed to allocate iio device\n");
		return -ENOMEM;
	}
	adc_dev = iio_priv(indio_dev);

	adc_dev->mfd_tscadc = ti_tscadc_dev_get(pdev);
	tiadc_parse_dt(pdev, adc_dev);

	indio_dev->name = dev_name(&pdev->dev);
	indio_dev->modes = INDIO_DIRECT_MODE;
	indio_dev->info = &tiadc_info;

	tiadc_step_config(indio_dev);
	tiadc_writel(adc_dev, REG_FIFO1THR, FIFO1_THRESHOLD);
	mutex_init(&adc_dev->fifo1_lock);

	err = tiadc_channel_init(&pdev->dev, indio_dev, adc_dev->channels);
	if (err < 0)
		return err;

	err = tiadc_iio_buffered_hardware_setup(&pdev->dev, indio_dev,
						&tiadc_worker_h,
						&tiadc_irq_h,
						adc_dev->mfd_tscadc->irq,
						IRQF_SHARED,
						&tiadc_buffer_setup_ops);
	if (err)
		return err;

	err = iio_device_register(indio_dev);
	if (err)
		return err;

	platform_set_drvdata(pdev, indio_dev);

	err = tiadc_request_dma(pdev, adc_dev);
	if (err && err == -EPROBE_DEFER)
		goto err_dma;

	return 0;

err_dma:
	iio_device_unregister(indio_dev);

	return err;
}

static int tiadc_remove(struct platform_device *pdev)
{
	struct iio_dev *indio_dev = platform_get_drvdata(pdev);
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	struct tiadc_dma *dma = &adc_dev->dma;
	u32 step_en;

	if (dma->chan) {
		dma_free_coherent(dma->chan->device->dev, DMA_BUFFER_SIZE,
				  dma->buf, dma->addr);
		dma_release_channel(dma->chan);
	}
	iio_device_unregister(indio_dev);

	step_en = get_adc_step_mask(adc_dev);
	am335x_tsc_se_clr(adc_dev->mfd_tscadc, step_en);

	return 0;
}

static int __maybe_unused tiadc_suspend(struct device *dev)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	unsigned int idle;

	idle = tiadc_readl(adc_dev, REG_CTRL);
	idle &= ~(CNTRLREG_SSENB);
	tiadc_writel(adc_dev, REG_CTRL, idle | CNTRLREG_POWERDOWN);

	return 0;
}

static int __maybe_unused tiadc_resume(struct device *dev)
{
	struct iio_dev *indio_dev = dev_get_drvdata(dev);
	struct tiadc_device *adc_dev = iio_priv(indio_dev);
	unsigned int restore;

	/* Make sure ADC is powered up */
	restore = tiadc_readl(adc_dev, REG_CTRL);
	restore &= ~CNTRLREG_POWERDOWN;
	tiadc_writel(adc_dev, REG_CTRL, restore);

	tiadc_step_config(indio_dev);
	am335x_tsc_se_set_cache(adc_dev->mfd_tscadc,
				adc_dev->buffer_en_ch_steps);
	return 0;
}

static SIMPLE_DEV_PM_OPS(tiadc_pm_ops, tiadc_suspend, tiadc_resume);

static const struct of_device_id ti_adc_dt_ids[] = {
	{ .compatible = "ti,am3359-adc", },
	{ .compatible = "ti,am4372-adc", },
	{ }
};
MODULE_DEVICE_TABLE(of, ti_adc_dt_ids);

static struct platform_driver tiadc_driver = {
	.driver = {
		.name = "TI-am335x-adc",
		.pm = &tiadc_pm_ops,
		.of_match_table = ti_adc_dt_ids,
	},
	.probe = tiadc_probe,
	.remove = tiadc_remove,
};
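/*
 * module_platform_driver() below is the standard helper macro; it expands to
 * roughly the following registration boilerplate (sketch only):
 *
 *	static int __init tiadc_driver_init(void)
 *	{
 *		return platform_driver_register(&tiadc_driver);
 *	}
 *	module_init(tiadc_driver_init);
 *
 *	static void __exit tiadc_driver_exit(void)
 *	{
 *		platform_driver_unregister(&tiadc_driver);
 *	}
 *	module_exit(tiadc_driver_exit);
 */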
module_platform_driver(tiadc_driver);

MODULE_DESCRIPTION("TI ADC controller driver");
MODULE_AUTHOR("Rachna Patil <rachna@ti.com>");
MODULE_LICENSE("GPL");
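/*
 * Minimal userspace sketch (not part of the driver itself): once this driver
 * has probed, each ADC line declared in the device tree is exposed as an
 * in_voltageX_raw attribute under /sys/bus/iio/devices/iio:deviceN, where N
 * depends on probe order. Assuming AIN0 is among the declared channels, a
 * single-shot read could look like:
 *
 *	#include <stdio.h>
 *
 *	int main(void)
 *	{
 *		FILE *f = fopen("/sys/bus/iio/devices/iio:device0/in_voltage0_raw", "r");
 *		int raw;
 *
 *		if (!f)
 *			return 1;
 *		if (fscanf(f, "%d", &raw) == 1)
 *			printf("AIN0 raw: %d (12-bit, 1.8 V full scale)\n", raw);
 *		fclose(f);
 *		return 0;
 *	}
 */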