// SPDX-License-Identifier: GPL-2.0-only
/*
 * NXP SAR-ADC driver (adapted from Freescale Vybrid vf610 ADC driver
 * by Fugang Duan <B38611@freescale.com>)
 *
 * Copyright 2013 Freescale Semiconductor, Inc.
 * Copyright 2017, 2020-2025 NXP
 * Copyright 2025, Linaro Ltd
 */
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/circ_buf.h>
#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/math64.h>
#include <linux/minmax.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/property.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/time.h>
#include <linux/types.h>
#include <linux/units.h>

#include <linux/iio/iio.h>
#include <linux/iio/triggered_buffer.h>
#include <linux/iio/trigger_consumer.h>

/* SAR ADC registers. */
#define NXP_SAR_ADC_CDR(__base, __channel)	(((__base) + 0x100) + ((__channel) * 0x4))

#define NXP_SAR_ADC_CDR_CDATA_MASK	GENMASK(11, 0)
#define NXP_SAR_ADC_CDR_VALID		BIT(19)

/* Main Configuration Register */
#define NXP_SAR_ADC_MCR(__base)		((__base) + 0x00)

#define NXP_SAR_ADC_MCR_PWDN		BIT(0)
#define NXP_SAR_ADC_MCR_ACKO		BIT(5)
#define NXP_SAR_ADC_MCR_ADCLKSEL	BIT(8)
#define NXP_SAR_ADC_MCR_TSAMP_MASK	GENMASK(10, 9)
#define NXP_SAR_ADC_MCR_NRSMPL_MASK	GENMASK(12, 11)
#define NXP_SAR_ADC_MCR_AVGEN		BIT(13)
#define NXP_SAR_ADC_MCR_CALSTART	BIT(14)
#define NXP_SAR_ADC_MCR_NSTART		BIT(24)
#define NXP_SAR_ADC_MCR_MODE		BIT(29)
#define NXP_SAR_ADC_MCR_OWREN		BIT(31)

/* Main Status Register */
#define NXP_SAR_ADC_MSR(__base)		((__base) + 0x04)

#define NXP_SAR_ADC_MSR_CALBUSY		BIT(29)
#define NXP_SAR_ADC_MSR_CALFAIL		BIT(30)

/* Interrupt Status Register */
#define NXP_SAR_ADC_ISR(__base)		((__base) + 0x10)

#define NXP_SAR_ADC_ISR_ECH		BIT(0)

/* Channel Pending Register */
#define NXP_SAR_ADC_CEOCFR0(__base)	((__base) + 0x14)
#define NXP_SAR_ADC_CEOCFR1(__base)	((__base) + 0x18)

#define NXP_SAR_ADC_EOC_CH(c)		BIT(c)

/* Interrupt Mask Register */
#define NXP_SAR_ADC_IMR(__base)		((__base) + 0x20)

/* Channel Interrupt Mask Register */
#define NXP_SAR_ADC_CIMR0(__base)	((__base) + 0x24)
#define NXP_SAR_ADC_CIMR1(__base)	((__base) + 0x28)

/* DMA Setting Register */
#define NXP_SAR_ADC_DMAE(__base)	((__base) + 0x40)

#define NXP_SAR_ADC_DMAE_DMAEN		BIT(0)
#define NXP_SAR_ADC_DMAE_DCLR		BIT(1)

/* DMA Control register */
#define NXP_SAR_ADC_DMAR0(__base)	((__base) + 0x44)
#define NXP_SAR_ADC_DMAR1(__base)	((__base) + 0x48)

/* Conversion Timing Register */
#define NXP_SAR_ADC_CTR0(__base)	((__base) + 0x94)
#define NXP_SAR_ADC_CTR1(__base)	((__base) + 0x98)

#define NXP_SAR_ADC_CTR_INPSAMP_MIN	0x08
#define NXP_SAR_ADC_CTR_INPSAMP_MAX	0xff

/* Normal Conversion Mask Register */
#define NXP_SAR_ADC_NCMR0(__base)	((__base) + 0xa4)
#define NXP_SAR_ADC_NCMR1(__base)	((__base) + 0xa8)

/* Normal Conversion Mask Register field define */
#define NXP_SAR_ADC_CH_MASK		GENMASK(7, 0)

/* Other field define */
#define NXP_SAR_ADC_CONV_TIMEOUT	(msecs_to_jiffies(100))
#define NXP_SAR_ADC_CAL_TIMEOUT_US	(100 * USEC_PER_MSEC)
#define NXP_SAR_ADC_WAIT_US		(2 * USEC_PER_MSEC)
#define NXP_SAR_ADC_RESOLUTION		12

/* Duration of conversion phases */
#define NXP_SAR_ADC_TPT			2
#define NXP_SAR_ADC_DP			2
#define NXP_SAR_ADC_CT			((NXP_SAR_ADC_RESOLUTION + 2) * 4)
#define NXP_SAR_ADC_CONV_TIME		(NXP_SAR_ADC_TPT + NXP_SAR_ADC_CT + NXP_SAR_ADC_DP)

#define NXP_SAR_ADC_NR_CHANNELS		8

#define NXP_PAGE_SIZE			SZ_4K
#define NXP_SAR_ADC_DMA_SAMPLE_SZ	DMA_SLAVE_BUSWIDTH_4_BYTES
#define NXP_SAR_ADC_DMA_BUFF_SZ		(NXP_PAGE_SIZE * NXP_SAR_ADC_DMA_SAMPLE_SZ)
#define NXP_SAR_ADC_DMA_SAMPLE_CNT	(NXP_SAR_ADC_DMA_BUFF_SZ / NXP_SAR_ADC_DMA_SAMPLE_SZ)

struct nxp_sar_adc {
	void __iomem *regs;
	phys_addr_t regs_phys;
	u8 current_channel;
	u8 channels_used;
	u16 value;
	u32 vref_mV;

	/* Save and restore context. */
	u32 inpsamp;
	u32 pwdn;

	struct clk *clk;
	struct dma_chan *dma_chan;
	struct completion completion;
	struct circ_buf dma_buf;

	dma_addr_t rx_dma_buf;
	dma_cookie_t cookie;

	/* Protect circular buffers access. */
	spinlock_t lock;

	/* Array of enabled channels. */
	u16 buffered_chan[NXP_SAR_ADC_NR_CHANNELS];

	/* Buffer to be filled by the DMA. */
	IIO_DECLARE_BUFFER_WITH_TS(u16, buffer, NXP_SAR_ADC_NR_CHANNELS);
};

struct nxp_sar_adc_data {
	u32 vref_mV;
	const char *model;
};

#define ADC_CHAN(_idx, _chan_type) {				\
	.type = (_chan_type),					\
	.indexed = 1,						\
	.channel = (_idx),					\
	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),		\
	.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |	\
				BIT(IIO_CHAN_INFO_SAMP_FREQ),	\
	.scan_index = (_idx),					\
	.scan_type = {						\
		.sign = 'u',					\
		.realbits = 12,					\
		.storagebits = 16,				\
	},							\
}

static const struct iio_chan_spec nxp_sar_adc_iio_channels[] = {
	ADC_CHAN(0, IIO_VOLTAGE),
	ADC_CHAN(1, IIO_VOLTAGE),
	ADC_CHAN(2, IIO_VOLTAGE),
	ADC_CHAN(3, IIO_VOLTAGE),
	ADC_CHAN(4, IIO_VOLTAGE),
	ADC_CHAN(5, IIO_VOLTAGE),
	ADC_CHAN(6, IIO_VOLTAGE),
	ADC_CHAN(7, IIO_VOLTAGE),
	/*
	 * The NXP SAR ADC documentation marks channels 8 to 31 as
	 * "Reserved". Reflect the same in the driver in case new ADC
	 * variants come with more channels.
	 */
	IIO_CHAN_SOFT_TIMESTAMP(32),
};

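/* Enable or disable the end-of-chain (ECH) interrupt in the interrupt mask register. */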
static void nxp_sar_adc_irq_cfg(struct nxp_sar_adc *info, bool enable)
{
	if (enable)
		writel(NXP_SAR_ADC_ISR_ECH, NXP_SAR_ADC_IMR(info->regs));
	else
		writel(0, NXP_SAR_ADC_IMR(info->regs));
}

static bool nxp_sar_adc_set_enabled(struct nxp_sar_adc *info, bool enable)
{
	u32 mcr;
	bool pwdn;

	mcr = readl(NXP_SAR_ADC_MCR(info->regs));

	/*
	 * Get the current state and return it later. This is used for
	 * suspend/resume to get the power state.
	 */
	pwdn = FIELD_GET(NXP_SAR_ADC_MCR_PWDN, mcr);

	/* When the enabled flag is not set, we set the power down bit. */
	FIELD_MODIFY(NXP_SAR_ADC_MCR_PWDN, &mcr, !enable);

	writel(mcr, NXP_SAR_ADC_MCR(info->regs));

	/*
	 * Ensure there are at least three cycles between the
	 * configuration of NCMR and the setting of NSTART.
	 */
	if (enable)
		ndelay(div64_u64(NSEC_PER_SEC, clk_get_rate(info->clk)) * 3);

	return pwdn;
}

static inline bool nxp_sar_adc_enable(struct nxp_sar_adc *info)
{
	return nxp_sar_adc_set_enabled(info, true);
}

static inline bool nxp_sar_adc_disable(struct nxp_sar_adc *info)
{
	return nxp_sar_adc_set_enabled(info, false);
}

static inline void nxp_sar_adc_calibration_start(void __iomem *base)
{
	u32 mcr = readl(NXP_SAR_ADC_MCR(base));

	FIELD_MODIFY(NXP_SAR_ADC_MCR_CALSTART, &mcr, 0x1);

	writel(mcr, NXP_SAR_ADC_MCR(base));
}

static inline int nxp_sar_adc_calibration_wait(void __iomem *base)
{
	u32 msr;
	int ret;

	ret = readl_poll_timeout(NXP_SAR_ADC_MSR(base), msr,
				 !FIELD_GET(NXP_SAR_ADC_MSR_CALBUSY, msr),
				 NXP_SAR_ADC_WAIT_US,
				 NXP_SAR_ADC_CAL_TIMEOUT_US);
	if (ret)
		return ret;

	if (FIELD_GET(NXP_SAR_ADC_MSR_CALFAIL, msr)) {
		/*
		 * If the calibration fails, the status register bit must be
		 * cleared.
		 */
		FIELD_MODIFY(NXP_SAR_ADC_MSR_CALFAIL, &msr, 0x0);
		writel(msr, NXP_SAR_ADC_MSR(base));

		return -EAGAIN;
	}

	return 0;
}

static int nxp_sar_adc_calibration(struct nxp_sar_adc *info)
{
	int ret;

	/* Calibration works only if the ADC is powered up. */
	nxp_sar_adc_enable(info);

	/* The calibration operation starts. */
	nxp_sar_adc_calibration_start(info->regs);

	ret = nxp_sar_adc_calibration_wait(info->regs);

	/*
	 * Calibration works only if the ADC is powered up. However the
	 * calibration is called from the probe function where the IIO
	 * device is not enabled, so disable the ADC again afterwards.
	 */
	nxp_sar_adc_disable(info);

	return ret;
}

static void nxp_sar_adc_conversion_timing_set(struct nxp_sar_adc *info, u32 inpsamp)
{
	inpsamp = clamp(inpsamp, NXP_SAR_ADC_CTR_INPSAMP_MIN, NXP_SAR_ADC_CTR_INPSAMP_MAX);

	writel(inpsamp, NXP_SAR_ADC_CTR0(info->regs));
}

static u32 nxp_sar_adc_conversion_timing_get(struct nxp_sar_adc *info)
{
	return readl(NXP_SAR_ADC_CTR0(info->regs));
}

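/*
 * Signal that the conversion data has been consumed by writing the channel
 * mask to the per-channel end-of-conversion flag registers.
 */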
static void nxp_sar_adc_read_notify(struct nxp_sar_adc *info)
{
	writel(NXP_SAR_ADC_CH_MASK, NXP_SAR_ADC_CEOCFR0(info->regs));
	writel(NXP_SAR_ADC_CH_MASK, NXP_SAR_ADC_CEOCFR1(info->regs));
}

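/*
 * Return the converted sample for @chan, or -EIO if no end-of-conversion is
 * pending for that channel or the conversion data is not valid.
 */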
static int nxp_sar_adc_read_data(struct nxp_sar_adc *info, unsigned int chan)
{
	u32 ceocfr, cdr;

	ceocfr = readl(NXP_SAR_ADC_CEOCFR0(info->regs));

	/*
	 * FIELD_GET() cannot be used here because EOC_CH is not constant.
	 * TODO: Switch to field_get() when it becomes available.
	 */
	if (!(NXP_SAR_ADC_EOC_CH(chan) & ceocfr))
		return -EIO;

	cdr = readl(NXP_SAR_ADC_CDR(info->regs, chan));
	if (!(FIELD_GET(NXP_SAR_ADC_CDR_VALID, cdr)))
		return -EIO;

	return FIELD_GET(NXP_SAR_ADC_CDR_CDATA_MASK, cdr);
}

static void nxp_sar_adc_isr_buffer(struct iio_dev *indio_dev)
{
	struct nxp_sar_adc *info = iio_priv(indio_dev);
	unsigned int i;
	int ret;

	for (i = 0; i < info->channels_used; i++) {
		ret = nxp_sar_adc_read_data(info, info->buffered_chan[i]);
		if (ret < 0) {
			nxp_sar_adc_read_notify(info);
			return;
		}

		info->buffer[i] = ret;
	}

	nxp_sar_adc_read_notify(info);

	iio_push_to_buffers_with_ts(indio_dev, info->buffer, sizeof(info->buffer),
				    iio_get_time_ns(indio_dev));

	iio_trigger_notify_done(indio_dev->trig);
}

static void nxp_sar_adc_isr_read_raw(struct iio_dev *indio_dev)
{
	struct nxp_sar_adc *info = iio_priv(indio_dev);
	int ret;

	ret = nxp_sar_adc_read_data(info, info->current_channel);
	nxp_sar_adc_read_notify(info);
	if (ret < 0)
		return;

	info->value = ret;
	complete(&info->completion);
}

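/*
 * End-of-chain interrupt handler: collect the samples for either a buffered
 * capture or a single raw read, then acknowledge the ECH flag.
 */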
static irqreturn_t nxp_sar_adc_isr(int irq, void *dev_id)
{
	struct iio_dev *indio_dev = dev_id;
	struct nxp_sar_adc *info = iio_priv(indio_dev);
	u32 isr;

	isr = readl(NXP_SAR_ADC_ISR(info->regs));
	if (!(FIELD_GET(NXP_SAR_ADC_ISR_ECH, isr)))
		return IRQ_NONE;

	if (iio_buffer_enabled(indio_dev))
		nxp_sar_adc_isr_buffer(indio_dev);
	else
		nxp_sar_adc_isr_read_raw(indio_dev);

	writel(NXP_SAR_ADC_ISR_ECH, NXP_SAR_ADC_ISR(info->regs));

	return IRQ_HANDLED;
}

static void nxp_sar_adc_channels_disable(struct nxp_sar_adc *info, u32 mask)
{
	u32 ncmr, cimr;

	ncmr = readl(NXP_SAR_ADC_NCMR0(info->regs));
	cimr = readl(NXP_SAR_ADC_CIMR0(info->regs));

	/* FIELD_MODIFY() cannot be used because the mask is not constant. */
	ncmr &= ~mask;
	cimr &= ~mask;

	writel(ncmr, NXP_SAR_ADC_NCMR0(info->regs));
	writel(cimr, NXP_SAR_ADC_CIMR0(info->regs));
}

static void nxp_sar_adc_channels_enable(struct nxp_sar_adc *info, u32 mask)
{
	u32 ncmr, cimr;

	ncmr = readl(NXP_SAR_ADC_NCMR0(info->regs));
	cimr = readl(NXP_SAR_ADC_CIMR0(info->regs));

	ncmr |= mask;
	cimr |= mask;

	writel(ncmr, NXP_SAR_ADC_NCMR0(info->regs));
	writel(cimr, NXP_SAR_ADC_CIMR0(info->regs));
}

static void nxp_sar_adc_dma_channels_enable(struct nxp_sar_adc *info, u32 mask)
{
	u32 dmar;

	dmar = readl(NXP_SAR_ADC_DMAR0(info->regs));

	dmar |= mask;

	writel(dmar, NXP_SAR_ADC_DMAR0(info->regs));
}

static void nxp_sar_adc_dma_channels_disable(struct nxp_sar_adc *info, u32 mask)
{
	u32 dmar;

	dmar = readl(NXP_SAR_ADC_DMAR0(info->regs));

	dmar &= ~mask;

	writel(dmar, NXP_SAR_ADC_DMAR0(info->regs));
}

static void nxp_sar_adc_dma_cfg(struct nxp_sar_adc *info, bool enable)
{
	u32 dmae;

	dmae = readl(NXP_SAR_ADC_DMAE(info->regs));

	FIELD_MODIFY(NXP_SAR_ADC_DMAE_DMAEN, &dmae, enable);

	writel(dmae, NXP_SAR_ADC_DMAE(info->regs));
}

static void nxp_sar_adc_stop_conversion(struct nxp_sar_adc *info)
{
	u32 mcr;

	mcr = readl(NXP_SAR_ADC_MCR(info->regs));

	FIELD_MODIFY(NXP_SAR_ADC_MCR_NSTART, &mcr, 0x0);

	writel(mcr, NXP_SAR_ADC_MCR(info->regs));

	/*
	 * On disable, we have to wait for the ongoing transaction to
	 * finish: the ADC does not abort it if a chain conversion is in
	 * progress. Wait for the worst case scenario of 80 ADC clock
	 * cycles. The clock rate is 80 MHz and this routine is only
	 * called once the capture finishes, so the delay is very short
	 * (around a microsecond), which is acceptable in atomic context.
	 */
	ndelay(div64_u64(NSEC_PER_SEC, clk_get_rate(info->clk)) * 80);
}

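/*
 * Kick a normal conversion chain by setting NSTART. @raw clears MCR_MODE for
 * a single conversion chain; otherwise MODE is set so the ADC converts
 * continuously, which is what the DMA-backed buffer relies on.
 */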
static int nxp_sar_adc_start_conversion(struct nxp_sar_adc *info, bool raw)
{
	u32 mcr;

	mcr = readl(NXP_SAR_ADC_MCR(info->regs));

	FIELD_MODIFY(NXP_SAR_ADC_MCR_NSTART, &mcr, 0x1);
	FIELD_MODIFY(NXP_SAR_ADC_MCR_MODE, &mcr, raw ? 0 : 1);

	writel(mcr, NXP_SAR_ADC_MCR(info->regs));

	return 0;
}

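/*
 * Perform a single interrupt-driven conversion on @channel: enable the
 * channel and the ECH interrupt, start a one-shot chain and wait for the
 * completion signalled from the interrupt handler.
 */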
static int nxp_sar_adc_read_channel(struct nxp_sar_adc *info, int channel)
{
	int ret;

	info->current_channel = channel;
	nxp_sar_adc_channels_enable(info, BIT(channel));
	nxp_sar_adc_irq_cfg(info, true);
	nxp_sar_adc_enable(info);

	reinit_completion(&info->completion);
	ret = nxp_sar_adc_start_conversion(info, true);
	if (ret < 0)
		goto out_disable;

	if (!wait_for_completion_interruptible_timeout(&info->completion,
						       NXP_SAR_ADC_CONV_TIMEOUT))
		ret = -ETIMEDOUT;

	nxp_sar_adc_stop_conversion(info);

out_disable:
	nxp_sar_adc_channels_disable(info, BIT(channel));
	nxp_sar_adc_irq_cfg(info, false);
	nxp_sar_adc_disable(info);

	return ret;
}

static int nxp_sar_adc_read_raw(struct iio_dev *indio_dev,
				struct iio_chan_spec const *chan, int *val,
				int *val2, long mask)
{
	struct nxp_sar_adc *info = iio_priv(indio_dev);
	u32 inpsamp;
	int ret;

	switch (mask) {
	case IIO_CHAN_INFO_RAW:
		if (!iio_device_claim_direct(indio_dev))
			return -EBUSY;

		ret = nxp_sar_adc_read_channel(info, chan->channel);

		iio_device_release_direct(indio_dev);

		if (ret)
			return ret;

		*val = info->value;
		return IIO_VAL_INT;

	case IIO_CHAN_INFO_SCALE:
		*val = info->vref_mV;
		*val2 = NXP_SAR_ADC_RESOLUTION;
		return IIO_VAL_FRACTIONAL_LOG2;

	case IIO_CHAN_INFO_SAMP_FREQ:
		inpsamp = nxp_sar_adc_conversion_timing_get(info);
		*val = clk_get_rate(info->clk) / (inpsamp + NXP_SAR_ADC_CONV_TIME);
		return IIO_VAL_INT;

	default:
		return -EINVAL;
	}
}

static int nxp_sar_adc_write_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan,
				 int val, int val2, long mask)
{
	struct nxp_sar_adc *info = iio_priv(indio_dev);
	u32 inpsamp;

	switch (mask) {
	case IIO_CHAN_INFO_SAMP_FREQ:
		/*
		 * Configure the sample phase duration in SAR controller
		 * clock cycles. The minimum acceptable value is 8;
		 * configuring a lower value sets the sample period to 8
		 * cycles. Dividing the clock rate by the requested
		 * sampling frequency gives the total number of cycles per
		 * conversion; subtracting the fixed conversion time leaves
		 * the sample phase. The field is 8-bit wide, so the
		 * maximum value is 0xFF.
		 */
		if (val <= 0)
			return -EINVAL;

		inpsamp = clk_get_rate(info->clk) / val - NXP_SAR_ADC_CONV_TIME;
		nxp_sar_adc_conversion_timing_set(info, inpsamp);
		return 0;

	default:
		return -EINVAL;
	}
}

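/*
 * Cyclic DMA completion callback: compute how much new data the DMA engine
 * wrote, copy the fresh samples out of the circular buffer and push complete
 * scans to the IIO buffer.
 */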
static void nxp_sar_adc_dma_cb(void *data)
{
	struct iio_dev *indio_dev = data;
	struct nxp_sar_adc *info = iio_priv(indio_dev);
	struct dma_tx_state state;
	struct circ_buf *dma_buf;
	struct device *dev_dma;
	u32 *dma_samples;
	s64 timestamp;
	int idx, ret;

	guard(spinlock_irqsave)(&info->lock);

	dma_buf = &info->dma_buf;
	dma_samples = (u32 *)dma_buf->buf;
	dev_dma = info->dma_chan->device->dev;

	/*
	 * In some corner cases the DMA may already have been set up for the
	 * next transfer. There is potentially a race where the residue
	 * changes while the DMA engine updates the buffer. That could be
	 * handled by using callback_result() instead of callback(), because
	 * the residue would then be passed as a parameter. However, that
	 * callback is relatively new and the backend does not update the
	 * residue, so stick to the approach other drivers use, which has
	 * been running well in production for several years.
	 */
	dmaengine_tx_status(info->dma_chan, info->cookie, &state);

	dma_sync_single_for_cpu(dev_dma, info->rx_dma_buf,
				NXP_SAR_ADC_DMA_BUFF_SZ, DMA_FROM_DEVICE);

	/* Current head position. */
	dma_buf->head = (NXP_SAR_ADC_DMA_BUFF_SZ - state.residue) /
			NXP_SAR_ADC_DMA_SAMPLE_SZ;

	/* If everything was transferred, avoid an off by one error. */
	if (!state.residue)
		dma_buf->head--;

	/* Skip processing if nothing was transferred at all. */
	if (state.residue != NXP_SAR_ADC_DMA_BUFF_SZ) {
		/* Make sure that head is a multiple of info->channels_used. */
		dma_buf->head -= dma_buf->head % info->channels_used;

		/*
		 * The dma_buf->tail != dma_buf->head condition eventually
		 * becomes false because dma_buf->tail is incremented by one
		 * on each iteration.
		 */
		while (dma_buf->tail != dma_buf->head) {
			idx = dma_buf->tail % info->channels_used;
			info->buffer[idx] = dma_samples[dma_buf->tail];
			dma_buf->tail = (dma_buf->tail + 1) % NXP_SAR_ADC_DMA_SAMPLE_CNT;
			if (idx != info->channels_used - 1)
				continue;

			/*
			 * iio_push_to_buffers_with_ts() must not be called
			 * with dma_samples directly: the samples would be
			 * overwritten if the timestamp is enabled.
			 */
			timestamp = iio_get_time_ns(indio_dev);
			ret = iio_push_to_buffers_with_ts(indio_dev, info->buffer,
							  sizeof(info->buffer),
							  timestamp);
			if (ret < 0 && ret != -EBUSY)
				dev_err_ratelimited(&indio_dev->dev,
						    "failed to push iio buffer: %d",
						    ret);
		}

		dma_buf->tail = dma_buf->head;
	}

	dma_sync_single_for_device(dev_dma, info->rx_dma_buf,
				   NXP_SAR_ADC_DMA_BUFF_SZ, DMA_FROM_DEVICE);
}

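/*
 * Configure the slave channel to read the conversion data registers of the
 * enabled channels and start a cyclic transfer into the coherent DMA buffer.
 */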
static int nxp_sar_adc_start_cyclic_dma(struct iio_dev *indio_dev)
{
	struct nxp_sar_adc *info = iio_priv(indio_dev);
	struct dma_slave_config config = { };
	struct dma_async_tx_descriptor *desc;
	int ret;

	info->dma_buf.head = 0;
	info->dma_buf.tail = 0;

	config.direction = DMA_DEV_TO_MEM;
	config.src_addr_width = NXP_SAR_ADC_DMA_SAMPLE_SZ;
	config.src_addr = NXP_SAR_ADC_CDR(info->regs_phys, info->buffered_chan[0]);
	config.src_port_window_size = info->channels_used;
	config.src_maxburst = info->channels_used;
	ret = dmaengine_slave_config(info->dma_chan, &config);
	if (ret < 0)
		return ret;

	desc = dmaengine_prep_dma_cyclic(info->dma_chan,
					 info->rx_dma_buf,
					 NXP_SAR_ADC_DMA_BUFF_SZ,
					 NXP_SAR_ADC_DMA_BUFF_SZ / 2,
					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!desc)
		return -EINVAL;

	desc->callback = nxp_sar_adc_dma_cb;
	desc->callback_param = indio_dev;
	info->cookie = dmaengine_submit(desc);
	ret = dma_submit_error(info->cookie);
	if (ret) {
		dmaengine_terminate_async(info->dma_chan);
		return ret;
	}

	dma_async_issue_pending(info->dma_chan);

	return 0;
}

static void nxp_sar_adc_buffer_software_do_predisable(struct iio_dev *indio_dev)
{
	struct nxp_sar_adc *info = iio_priv(indio_dev);

	/*
	 * The ADC DMAEN bit should be cleared before the DMA transaction
	 * is canceled.
	 */
	nxp_sar_adc_stop_conversion(info);
	dmaengine_terminate_sync(info->dma_chan);
	nxp_sar_adc_dma_cfg(info, false);
	nxp_sar_adc_dma_channels_disable(info, *indio_dev->active_scan_mask);

	dma_release_channel(info->dma_chan);
}

static int nxp_sar_adc_buffer_software_do_postenable(struct iio_dev *indio_dev)
{
	struct nxp_sar_adc *info = iio_priv(indio_dev);
	int ret;

	info->dma_chan = dma_request_chan(indio_dev->dev.parent, "rx");
	if (IS_ERR(info->dma_chan))
		return PTR_ERR(info->dma_chan);

	nxp_sar_adc_dma_channels_enable(info, *indio_dev->active_scan_mask);

	nxp_sar_adc_dma_cfg(info, true);

	ret = nxp_sar_adc_start_cyclic_dma(indio_dev);
	if (ret)
		goto out_dma_channels_disable;

	ret = nxp_sar_adc_start_conversion(info, false);
	if (ret)
		goto out_stop_cyclic_dma;

	return 0;

out_stop_cyclic_dma:
	dmaengine_terminate_sync(info->dma_chan);

out_dma_channels_disable:
	nxp_sar_adc_dma_cfg(info, false);
	nxp_sar_adc_dma_channels_disable(info, *indio_dev->active_scan_mask);
	dma_release_channel(info->dma_chan);

	return ret;
}

static void nxp_sar_adc_buffer_trigger_do_predisable(struct iio_dev *indio_dev)
{
	struct nxp_sar_adc *info = iio_priv(indio_dev);

	nxp_sar_adc_irq_cfg(info, false);
}

static int nxp_sar_adc_buffer_trigger_do_postenable(struct iio_dev *indio_dev)
{
	struct nxp_sar_adc *info = iio_priv(indio_dev);

	nxp_sar_adc_irq_cfg(info, true);

	return 0;
}

static int nxp_sar_adc_buffer_postenable(struct iio_dev *indio_dev)
{
	struct nxp_sar_adc *info = iio_priv(indio_dev);
	int current_mode = iio_device_get_current_mode(indio_dev);
	unsigned long channel;
	int ret;

	info->channels_used = 0;

	/*
	 * The SAR-ADC has two groups of channels.
	 *
	 * - Group #0:
	 *   * bit 0-7  : channel 0 -> channel 7
	 *   * bit 8-31 : reserved
	 *
	 * - Group #32:
	 *   * bit 0-7  : internal
	 *   * bit 8-31 : reserved
	 *
	 * The 8 channels from group #0 are the ones exposed by this
	 * driver, with the same mapping as the IIO channel declaration.
	 * That means the active_scan_mask can be used directly to write
	 * the channel interrupt mask.
	 */
	nxp_sar_adc_channels_enable(info, *indio_dev->active_scan_mask);

	for_each_set_bit(channel, indio_dev->active_scan_mask, NXP_SAR_ADC_NR_CHANNELS)
		info->buffered_chan[info->channels_used++] = channel;

	nxp_sar_adc_enable(info);

	if (current_mode == INDIO_BUFFER_SOFTWARE)
		ret = nxp_sar_adc_buffer_software_do_postenable(indio_dev);
	else
		ret = nxp_sar_adc_buffer_trigger_do_postenable(indio_dev);
	if (ret)
		goto out_postenable;

	return 0;

out_postenable:
	nxp_sar_adc_disable(info);
	nxp_sar_adc_channels_disable(info, *indio_dev->active_scan_mask);

	return ret;
}

static int nxp_sar_adc_buffer_predisable(struct iio_dev *indio_dev)
{
	struct nxp_sar_adc *info = iio_priv(indio_dev);
	int currentmode = iio_device_get_current_mode(indio_dev);

	if (currentmode == INDIO_BUFFER_SOFTWARE)
		nxp_sar_adc_buffer_software_do_predisable(indio_dev);
	else
		nxp_sar_adc_buffer_trigger_do_predisable(indio_dev);

	nxp_sar_adc_disable(info);

	nxp_sar_adc_channels_disable(info, *indio_dev->active_scan_mask);

	return 0;
}

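/*
 * Triggered buffer handler: only kick the conversion chain here; the samples
 * are collected and pushed from the end-of-chain interrupt handler.
 */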
static irqreturn_t nxp_sar_adc_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	struct nxp_sar_adc *info = iio_priv(indio_dev);
	int ret;

	ret = nxp_sar_adc_start_conversion(info, true);
	if (ret < 0)
		dev_dbg(&indio_dev->dev, "Failed to start conversion\n");

	return IRQ_HANDLED;
}

static const struct iio_buffer_setup_ops iio_triggered_buffer_setup_ops = {
	.postenable = nxp_sar_adc_buffer_postenable,
	.predisable = nxp_sar_adc_buffer_predisable,
};

static const struct iio_info nxp_sar_adc_iio_info = {
	.read_raw = nxp_sar_adc_read_raw,
	.write_raw = nxp_sar_adc_write_raw,
};

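/* Allocate the coherent buffer the cyclic DMA transfer writes the samples into. */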
static int nxp_sar_adc_dma_probe(struct device *dev, struct nxp_sar_adc *info)
{
	u8 *rx_buf;

	rx_buf = dmam_alloc_coherent(dev, NXP_SAR_ADC_DMA_BUFF_SZ,
				     &info->rx_dma_buf, GFP_KERNEL);
	if (!rx_buf)
		return -ENOMEM;

	info->dma_buf.buf = rx_buf;

	return 0;
}

/*
 * The documentation describes reset values for the registers, but some
 * registers do not actually hold these values after a reset. For other SoC
 * families NXP recommends not assuming the default values are set and
 * initializing the registers to the documented reset values instead. Assume
 * the same rule applies here, given the discrepancy between what is read
 * from the registers at reset time and the documentation.
 */
static void nxp_sar_adc_set_default_values(struct nxp_sar_adc *info)
{
	writel(0x00003901, NXP_SAR_ADC_MCR(info->regs));
	writel(0x00000001, NXP_SAR_ADC_MSR(info->regs));
	writel(0x00000014, NXP_SAR_ADC_CTR0(info->regs));
	writel(0x00000014, NXP_SAR_ADC_CTR1(info->regs));
	writel(0x00000000, NXP_SAR_ADC_CIMR0(info->regs));
	writel(0x00000000, NXP_SAR_ADC_CIMR1(info->regs));
	writel(0x00000000, NXP_SAR_ADC_NCMR0(info->regs));
	writel(0x00000000, NXP_SAR_ADC_NCMR1(info->regs));
}

static int nxp_sar_adc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct nxp_sar_adc_data *data = device_get_match_data(dev);
	struct nxp_sar_adc *info;
	struct iio_dev *indio_dev;
	struct resource *mem;
	int irq, ret;

	indio_dev = devm_iio_device_alloc(dev, sizeof(*info));
	if (!indio_dev)
		return -ENOMEM;

	info = iio_priv(indio_dev);
	info->vref_mV = data->vref_mV;
	spin_lock_init(&info->lock);
	info->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &mem);
	if (IS_ERR(info->regs))
		return dev_err_probe(dev, PTR_ERR(info->regs),
				     "Failed to get and remap resource");

	info->regs_phys = mem->start;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(dev, irq, nxp_sar_adc_isr, 0, dev_name(dev),
			       indio_dev);
	if (ret < 0)
		return ret;

	info->clk = devm_clk_get_enabled(dev, NULL);
	if (IS_ERR(info->clk))
		return dev_err_probe(dev, PTR_ERR(info->clk),
				     "Failed to get the clock\n");

	platform_set_drvdata(pdev, indio_dev);

	init_completion(&info->completion);

	indio_dev->name = data->model;
	indio_dev->info = &nxp_sar_adc_iio_info;
	indio_dev->modes = INDIO_DIRECT_MODE | INDIO_BUFFER_SOFTWARE;
	indio_dev->channels = nxp_sar_adc_iio_channels;
	indio_dev->num_channels = ARRAY_SIZE(nxp_sar_adc_iio_channels);

	nxp_sar_adc_set_default_values(info);

	ret = nxp_sar_adc_calibration(info);
	if (ret)
		dev_err_probe(dev, ret, "Calibration failed\n");

	ret = nxp_sar_adc_dma_probe(dev, info);
	if (ret)
		return dev_err_probe(dev, ret, "Failed to initialize the DMA\n");

	ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
					      &iio_pollfunc_store_time,
					      &nxp_sar_adc_trigger_handler,
					      &iio_triggered_buffer_setup_ops);
	if (ret < 0)
		return dev_err_probe(dev, ret, "Couldn't initialise the buffer\n");

	ret = devm_iio_device_register(dev, indio_dev);
	if (ret)
		return dev_err_probe(dev, ret, "Couldn't register the device\n");

	return 0;
}

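/* Save the power state and sampling timing so they can be restored on resume. */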
static int nxp_sar_adc_suspend(struct device *dev)
{
	struct nxp_sar_adc *info = iio_priv(dev_get_drvdata(dev));

	info->pwdn = nxp_sar_adc_disable(info);
	info->inpsamp = nxp_sar_adc_conversion_timing_get(info);

	clk_disable_unprepare(info->clk);

	return 0;
}

static int nxp_sar_adc_resume(struct device *dev)
{
	struct nxp_sar_adc *info = iio_priv(dev_get_drvdata(dev));
	int ret;

	ret = clk_prepare_enable(info->clk);
	if (ret)
		return ret;

	nxp_sar_adc_conversion_timing_set(info, info->inpsamp);

	if (!info->pwdn)
		nxp_sar_adc_enable(info);

	return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(nxp_sar_adc_pm_ops, nxp_sar_adc_suspend,
				nxp_sar_adc_resume);

static const struct nxp_sar_adc_data s32g2_sar_adc_data = {
	.vref_mV = 1800,
	.model = "s32g2-sar-adc",
};

static const struct of_device_id nxp_sar_adc_match[] = {
	{ .compatible = "nxp,s32g2-sar-adc", .data = &s32g2_sar_adc_data },
	{ }
};
MODULE_DEVICE_TABLE(of, nxp_sar_adc_match);

static struct platform_driver nxp_sar_adc_driver = {
	.probe = nxp_sar_adc_probe,
	.driver = {
		.name = "nxp-sar-adc",
		.of_match_table = nxp_sar_adc_match,
		.pm = pm_sleep_ptr(&nxp_sar_adc_pm_ops),
	},
};
module_platform_driver(nxp_sar_adc_driver);

MODULE_AUTHOR("NXP");
MODULE_DESCRIPTION("NXP SAR-ADC driver");
MODULE_LICENSE("GPL");