// SPDX-License-Identifier: GPL-2.0-only
/*
 * NXP SAR-ADC driver (adapted from Freescale Vybrid vf610 ADC driver
 * by Fugang Duan <B38611@freescale.com>)
 *
 * Copyright 2013 Freescale Semiconductor, Inc.
 * Copyright 2017, 2020-2025 NXP
 * Copyright 2025, Linaro Ltd
 */
#include <linux/bitfield.h>
#include <linux/bitops.h>
#include <linux/circ_buf.h>
#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/math64.h>
#include <linux/minmax.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/property.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/time.h>
#include <linux/types.h>
#include <linux/units.h>

#include <linux/iio/iio.h>
#include <linux/iio/triggered_buffer.h>
#include <linux/iio/trigger_consumer.h>

/* SAR ADC registers. */
#define NXP_SAR_ADC_CDR(__base, __channel)	(((__base) + 0x100) + ((__channel) * 0x4))

#define NXP_SAR_ADC_CDR_CDATA_MASK	GENMASK(11, 0)
#define NXP_SAR_ADC_CDR_VALID		BIT(19)

/* Main Configuration Register */
#define NXP_SAR_ADC_MCR(__base)		((__base) + 0x00)

#define NXP_SAR_ADC_MCR_PWDN		BIT(0)
#define NXP_SAR_ADC_MCR_ACKO		BIT(5)
#define NXP_SAR_ADC_MCR_ADCLKSEL	BIT(8)
#define NXP_SAR_ADC_MCR_TSAMP_MASK	GENMASK(10, 9)
#define NXP_SAR_ADC_MCR_NRSMPL_MASK	GENMASK(12, 11)
#define NXP_SAR_ADC_MCR_AVGEN		BIT(13)
#define NXP_SAR_ADC_MCR_CALSTART	BIT(14)
#define NXP_SAR_ADC_MCR_NSTART		BIT(24)
#define NXP_SAR_ADC_MCR_MODE		BIT(29)
#define NXP_SAR_ADC_MCR_OWREN		BIT(31)

/* Main Status Register */
#define NXP_SAR_ADC_MSR(__base)		((__base) + 0x04)

#define NXP_SAR_ADC_MSR_CALBUSY		BIT(29)
#define NXP_SAR_ADC_MSR_CALFAIL		BIT(30)

/* Interrupt Status Register */
#define NXP_SAR_ADC_ISR(__base)		((__base) + 0x10)

#define NXP_SAR_ADC_ISR_ECH		BIT(0)

/* Channel Pending Register */
#define NXP_SAR_ADC_CEOCFR0(__base)	((__base) + 0x14)
#define NXP_SAR_ADC_CEOCFR1(__base)	((__base) + 0x18)

#define NXP_SAR_ADC_EOC_CH(c)		BIT(c)

/* Interrupt Mask Register */
#define NXP_SAR_ADC_IMR(__base)		((__base) + 0x20)

/* Channel Interrupt Mask Register */
#define NXP_SAR_ADC_CIMR0(__base)	((__base) + 0x24)
#define NXP_SAR_ADC_CIMR1(__base)	((__base) + 0x28)

/* DMA Setting Register */
#define NXP_SAR_ADC_DMAE(__base)	((__base) + 0x40)

#define NXP_SAR_ADC_DMAE_DMAEN		BIT(0)
#define NXP_SAR_ADC_DMAE_DCLR		BIT(1)

/* DMA Control Register */
#define NXP_SAR_ADC_DMAR0(__base)	((__base) + 0x44)
#define NXP_SAR_ADC_DMAR1(__base)	((__base) + 0x48)

/* Conversion Timing Register */
#define NXP_SAR_ADC_CTR0(__base)	((__base) + 0x94)
#define NXP_SAR_ADC_CTR1(__base)	((__base) + 0x98)

#define NXP_SAR_ADC_CTR_INPSAMP_MIN	0x08
#define NXP_SAR_ADC_CTR_INPSAMP_MAX	0xff

/* Normal Conversion Mask Register */
#define NXP_SAR_ADC_NCMR0(__base)	((__base) + 0xa4)
#define NXP_SAR_ADC_NCMR1(__base)	((__base) + 0xa8)

/* Normal Conversion Mask Register field define */
#define NXP_SAR_ADC_CH_MASK		GENMASK(7, 0)

/* Other field define */
#define NXP_SAR_ADC_CONV_TIMEOUT	(msecs_to_jiffies(100))
#define NXP_SAR_ADC_CAL_TIMEOUT_US	(100 * USEC_PER_MSEC)
#define NXP_SAR_ADC_WAIT_US		(2 * USEC_PER_MSEC)
#define NXP_SAR_ADC_RESOLUTION		12

/* Duration of conversion phases */
#define NXP_SAR_ADC_TPT			2
#define NXP_SAR_ADC_DP			2
#define NXP_SAR_ADC_CT			((NXP_SAR_ADC_RESOLUTION + 2) * 4)
#define NXP_SAR_ADC_CONV_TIME		(NXP_SAR_ADC_TPT + NXP_SAR_ADC_CT + NXP_SAR_ADC_DP)
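
/*
 * With the 12-bit resolution above, the conversion phases add up to
 * NXP_SAR_ADC_TPT + (12 + 2) * 4 + NXP_SAR_ADC_DP = 2 + 56 + 2 = 60
 * ADC clock cycles per conversion, excluding the programmable input
 * sampling time. This is simply the sum of the macros above, not an
 * additional hardware specification.
 */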

#define NXP_SAR_ADC_NR_CHANNELS		8

#define NXP_PAGE_SIZE			SZ_4K
#define NXP_SAR_ADC_DMA_SAMPLE_SZ	DMA_SLAVE_BUSWIDTH_4_BYTES
#define NXP_SAR_ADC_DMA_BUFF_SZ		(NXP_PAGE_SIZE * NXP_SAR_ADC_DMA_SAMPLE_SZ)
#define NXP_SAR_ADC_DMA_SAMPLE_CNT	(NXP_SAR_ADC_DMA_BUFF_SZ / NXP_SAR_ADC_DMA_SAMPLE_SZ)
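
/*
 * Each DMA sample is one 32-bit CDR word, so the DMA buffer holds
 * NXP_PAGE_SIZE * 4 = 16 KiB, i.e. 4096 samples. These figures simply
 * follow from the macros above rather than from a separate hardware
 * limit.
 */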

struct nxp_sar_adc {
	void __iomem *regs;
	phys_addr_t regs_phys;
	u8 current_channel;
	u8 channels_used;
	u16 value;
	u32 vref_mV;

	/* Save and restore context. */
	u32 inpsamp;
	u32 pwdn;

	struct clk *clk;
	struct dma_chan *dma_chan;
	struct completion completion;
	struct circ_buf dma_buf;

	dma_addr_t rx_dma_buf;
	dma_cookie_t cookie;

	/* Protect circular buffer access. */
	spinlock_t lock;

	/* Array of enabled channels. */
	u16 buffered_chan[NXP_SAR_ADC_NR_CHANNELS];

	/* Buffer to be filled by the DMA. */
	IIO_DECLARE_BUFFER_WITH_TS(u16, buffer, NXP_SAR_ADC_NR_CHANNELS);
};

struct nxp_sar_adc_data {
	u32 vref_mV;
	const char *model;
};

#define ADC_CHAN(_idx, _chan_type) {					\
	.type = (_chan_type),						\
	.indexed = 1,							\
	.channel = (_idx),						\
	.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),			\
	.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |		\
				    BIT(IIO_CHAN_INFO_SAMP_FREQ),	\
	.scan_index = (_idx),						\
	.scan_type = {							\
		.sign = 'u',						\
		.realbits = 12,						\
		.storagebits = 16,					\
	},								\
}

static const struct iio_chan_spec nxp_sar_adc_iio_channels[] = {
	ADC_CHAN(0, IIO_VOLTAGE),
	ADC_CHAN(1, IIO_VOLTAGE),
	ADC_CHAN(2, IIO_VOLTAGE),
	ADC_CHAN(3, IIO_VOLTAGE),
	ADC_CHAN(4, IIO_VOLTAGE),
	ADC_CHAN(5, IIO_VOLTAGE),
	ADC_CHAN(6, IIO_VOLTAGE),
	ADC_CHAN(7, IIO_VOLTAGE),
	/*
	 * The NXP SAR ADC documentation marks channels 8 to 31 as
	 * "Reserved". Reflect the same in the driver in case new ADC
	 * variants come with more channels.
	 */
	IIO_CHAN_SOFT_TIMESTAMP(32),
};

static void nxp_sar_adc_irq_cfg(struct nxp_sar_adc *info, bool enable)
{
	if (enable)
		writel(NXP_SAR_ADC_ISR_ECH, NXP_SAR_ADC_IMR(info->regs));
	else
		writel(0, NXP_SAR_ADC_IMR(info->regs));
}

static bool nxp_sar_adc_set_enabled(struct nxp_sar_adc *info, bool enable)
{
	u32 mcr;
	bool pwdn;

	mcr = readl(NXP_SAR_ADC_MCR(info->regs));

	/*
	 * Get the current state and return it later. This is used by
	 * suspend/resume to know the power state.
	 */
	pwdn = FIELD_GET(NXP_SAR_ADC_MCR_PWDN, mcr);

	/* When the enable flag is not set, set the power-down bit. */
	FIELD_MODIFY(NXP_SAR_ADC_MCR_PWDN, &mcr, !enable);

	writel(mcr, NXP_SAR_ADC_MCR(info->regs));

	/*
	 * Ensure there are at least three cycles between the
	 * configuration of NCMR and the setting of NSTART.
	 */
	if (enable)
		ndelay(div64_u64(NSEC_PER_SEC, clk_get_rate(info->clk)) * 3);

	return pwdn;
}

static inline bool nxp_sar_adc_enable(struct nxp_sar_adc *info)
{
	return nxp_sar_adc_set_enabled(info, true);
}

static inline bool nxp_sar_adc_disable(struct nxp_sar_adc *info)
{
	return nxp_sar_adc_set_enabled(info, false);
}

static inline void nxp_sar_adc_calibration_start(void __iomem *base)
{
	u32 mcr = readl(NXP_SAR_ADC_MCR(base));

	FIELD_MODIFY(NXP_SAR_ADC_MCR_CALSTART, &mcr, 0x1);

	writel(mcr, NXP_SAR_ADC_MCR(base));
}

static inline int nxp_sar_adc_calibration_wait(void __iomem *base)
{
	u32 msr;
	int ret;

	ret = readl_poll_timeout(NXP_SAR_ADC_MSR(base), msr,
				 !FIELD_GET(NXP_SAR_ADC_MSR_CALBUSY, msr),
				 NXP_SAR_ADC_WAIT_US,
				 NXP_SAR_ADC_CAL_TIMEOUT_US);
	if (ret)
		return ret;

	if (FIELD_GET(NXP_SAR_ADC_MSR_CALFAIL, msr)) {
		/*
		 * If the calibration fails, the status register bit must be
		 * cleared.
		 */
		FIELD_MODIFY(NXP_SAR_ADC_MSR_CALFAIL, &msr, 0x0);
		writel(msr, NXP_SAR_ADC_MSR(base));

		return -EAGAIN;
	}

	return 0;
}

static int nxp_sar_adc_calibration(struct nxp_sar_adc *info)
{
	int ret;

	/* Calibration works only if the ADC is powered up. */
	nxp_sar_adc_enable(info);

	/* Start the calibration operation. */
	nxp_sar_adc_calibration_start(info->regs);

	ret = nxp_sar_adc_calibration_wait(info->regs);

	/*
	 * Calibration works only if the ADC is powered up. However,
	 * calibration is called from the probe function where the IIO
	 * device is not yet enabled, so disable the ADC again after
	 * calibrating.
	 */
	nxp_sar_adc_disable(info);

	return ret;
}

static void nxp_sar_adc_conversion_timing_set(struct nxp_sar_adc *info, u32 inpsamp)
{
	inpsamp = clamp(inpsamp, NXP_SAR_ADC_CTR_INPSAMP_MIN, NXP_SAR_ADC_CTR_INPSAMP_MAX);

	writel(inpsamp, NXP_SAR_ADC_CTR0(info->regs));
}

static u32 nxp_sar_adc_conversion_timing_get(struct nxp_sar_adc *info)
{
	return readl(NXP_SAR_ADC_CTR0(info->regs));
}

static void nxp_sar_adc_read_notify(struct nxp_sar_adc *info)
{
	writel(NXP_SAR_ADC_CH_MASK, NXP_SAR_ADC_CEOCFR0(info->regs));
	writel(NXP_SAR_ADC_CH_MASK, NXP_SAR_ADC_CEOCFR1(info->regs));
}

static int nxp_sar_adc_read_data(struct nxp_sar_adc *info, unsigned int chan)
{
	u32 ceocfr, cdr;

	ceocfr = readl(NXP_SAR_ADC_CEOCFR0(info->regs));

	/*
	 * FIELD_GET() cannot be used here because EOC_CH is not constant.
	 * TODO: switch to field_get() when it becomes available.
	 */
	if (!(NXP_SAR_ADC_EOC_CH(chan) & ceocfr))
		return -EIO;

	cdr = readl(NXP_SAR_ADC_CDR(info->regs, chan));
	if (!(FIELD_GET(NXP_SAR_ADC_CDR_VALID, cdr)))
		return -EIO;

	return FIELD_GET(NXP_SAR_ADC_CDR_CDATA_MASK, cdr);
}

static void nxp_sar_adc_isr_buffer(struct iio_dev *indio_dev)
{
	struct nxp_sar_adc *info = iio_priv(indio_dev);
	unsigned int i;
	int ret;

	for (i = 0; i < info->channels_used; i++) {
		ret = nxp_sar_adc_read_data(info, info->buffered_chan[i]);
		if (ret < 0) {
			nxp_sar_adc_read_notify(info);
			return;
		}

		info->buffer[i] = ret;
	}

	nxp_sar_adc_read_notify(info);

	iio_push_to_buffers_with_ts(indio_dev, info->buffer, sizeof(info->buffer),
				    iio_get_time_ns(indio_dev));

	iio_trigger_notify_done(indio_dev->trig);
}

static void nxp_sar_adc_isr_read_raw(struct iio_dev *indio_dev)
{
	struct nxp_sar_adc *info = iio_priv(indio_dev);
	int ret;

	ret = nxp_sar_adc_read_data(info, info->current_channel);
	nxp_sar_adc_read_notify(info);
	if (ret < 0)
		return;

	info->value = ret;
	complete(&info->completion);
}

static irqreturn_t nxp_sar_adc_isr(int irq, void *dev_id)
{
	struct iio_dev *indio_dev = dev_id;
	struct nxp_sar_adc *info = iio_priv(indio_dev);
	int isr;

	isr = readl(NXP_SAR_ADC_ISR(info->regs));
	if (!(FIELD_GET(NXP_SAR_ADC_ISR_ECH, isr)))
		return IRQ_NONE;

	if (iio_buffer_enabled(indio_dev))
		nxp_sar_adc_isr_buffer(indio_dev);
	else
		nxp_sar_adc_isr_read_raw(indio_dev);

	writel(NXP_SAR_ADC_ISR_ECH, NXP_SAR_ADC_ISR(info->regs));

	return IRQ_HANDLED;
}

static void nxp_sar_adc_channels_disable(struct nxp_sar_adc *info, u32 mask)
{
	u32 ncmr, cimr;

	ncmr = readl(NXP_SAR_ADC_NCMR0(info->regs));
	cimr = readl(NXP_SAR_ADC_CIMR0(info->regs));

	/* FIELD_MODIFY() cannot be used because the mask is not constant. */
	ncmr &= ~mask;
	cimr &= ~mask;

	writel(ncmr, NXP_SAR_ADC_NCMR0(info->regs));
	writel(cimr, NXP_SAR_ADC_CIMR0(info->regs));
}

static void nxp_sar_adc_channels_enable(struct nxp_sar_adc *info, u32 mask)
{
	u32 ncmr, cimr;

	ncmr = readl(NXP_SAR_ADC_NCMR0(info->regs));
	cimr = readl(NXP_SAR_ADC_CIMR0(info->regs));

	ncmr |= mask;
	cimr |= mask;

	writel(ncmr, NXP_SAR_ADC_NCMR0(info->regs));
	writel(cimr, NXP_SAR_ADC_CIMR0(info->regs));
}

static void nxp_sar_adc_dma_channels_enable(struct nxp_sar_adc *info, u32 mask)
{
	u32 dmar;

	dmar = readl(NXP_SAR_ADC_DMAR0(info->regs));

	dmar |= mask;

	writel(dmar, NXP_SAR_ADC_DMAR0(info->regs));
}

static void nxp_sar_adc_dma_channels_disable(struct nxp_sar_adc *info, u32 mask)
{
	u32 dmar;

	dmar = readl(NXP_SAR_ADC_DMAR0(info->regs));

	dmar &= ~mask;

	writel(dmar, NXP_SAR_ADC_DMAR0(info->regs));
}

static void nxp_sar_adc_dma_cfg(struct nxp_sar_adc *info, bool enable)
{
	u32 dmae;

	dmae = readl(NXP_SAR_ADC_DMAE(info->regs));

	FIELD_MODIFY(NXP_SAR_ADC_DMAE_DMAEN, &dmae, enable);

	writel(dmae, NXP_SAR_ADC_DMAE(info->regs));
}

static void nxp_sar_adc_stop_conversion(struct nxp_sar_adc *info)
{
	u32 mcr;

	mcr = readl(NXP_SAR_ADC_MCR(info->regs));

	FIELD_MODIFY(NXP_SAR_ADC_MCR_NSTART, &mcr, 0x0);

	writel(mcr, NXP_SAR_ADC_MCR(info->regs));

	/*
	 * On disable, we have to wait for the transaction to finish.
	 * The ADC does not abort the transaction if a chain conversion
	 * is in progress. Wait for the worst-case scenario: 80 ADC
	 * clock cycles. The clock rate is 80 MHz and this routine is
	 * called only when the capture finishes, so the delay is very
	 * short (in the microsecond range), which is acceptable in
	 * atomic context.
	 */
	ndelay(div64_u64(NSEC_PER_SEC, clk_get_rate(info->clk)) * 80);
}

static int nxp_sar_adc_start_conversion(struct nxp_sar_adc *info, bool raw)
{
	u32 mcr;

	mcr = readl(NXP_SAR_ADC_MCR(info->regs));

	FIELD_MODIFY(NXP_SAR_ADC_MCR_NSTART, &mcr, 0x1);
	FIELD_MODIFY(NXP_SAR_ADC_MCR_MODE, &mcr, raw ? 0 : 1);

	writel(mcr, NXP_SAR_ADC_MCR(info->regs));

	return 0;
}

static int nxp_sar_adc_read_channel(struct nxp_sar_adc *info, int channel)
{
	int ret;

	info->current_channel = channel;
	nxp_sar_adc_channels_enable(info, BIT(channel));
	nxp_sar_adc_irq_cfg(info, true);
	nxp_sar_adc_enable(info);

	reinit_completion(&info->completion);
	ret = nxp_sar_adc_start_conversion(info, true);
	if (ret < 0)
		goto out_disable;

	if (!wait_for_completion_interruptible_timeout(&info->completion,
						       NXP_SAR_ADC_CONV_TIMEOUT))
		ret = -ETIMEDOUT;

	nxp_sar_adc_stop_conversion(info);

out_disable:
	nxp_sar_adc_channels_disable(info, BIT(channel));
	nxp_sar_adc_irq_cfg(info, false);
	nxp_sar_adc_disable(info);

	return ret;
}

static int nxp_sar_adc_read_raw(struct iio_dev *indio_dev,
				struct iio_chan_spec const *chan, int *val,
				int *val2, long mask)
{
	struct nxp_sar_adc *info = iio_priv(indio_dev);
	u32 inpsamp;
	int ret;

	switch (mask) {
	case IIO_CHAN_INFO_RAW:
		if (!iio_device_claim_direct(indio_dev))
			return -EBUSY;

		ret = nxp_sar_adc_read_channel(info, chan->channel);

		iio_device_release_direct(indio_dev);

		if (ret)
			return ret;

		*val = info->value;
		return IIO_VAL_INT;

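	/*
	 * The scale returned below is Vref / 2^resolution. As an
	 * illustration, with the 1800 mV reference of the s32g2 data at
	 * the bottom of this file, one LSB is about 1800 / 4096 ~= 0.44 mV.
	 */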
	case IIO_CHAN_INFO_SCALE:
		*val = info->vref_mV;
		*val2 = NXP_SAR_ADC_RESOLUTION;
		return IIO_VAL_FRACTIONAL_LOG2;

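	/*
	 * The effective sampling rate follows from the conversion timing
	 * programmed in CTR0: clk_rate / (inpsamp + NXP_SAR_ADC_CONV_TIME),
	 * as computed below.
	 */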
	case IIO_CHAN_INFO_SAMP_FREQ:
		inpsamp = nxp_sar_adc_conversion_timing_get(info);
		*val = clk_get_rate(info->clk) / (inpsamp + NXP_SAR_ADC_CONV_TIME);
		return IIO_VAL_INT;

	default:
		return -EINVAL;
	}
}

static int nxp_sar_adc_write_raw(struct iio_dev *indio_dev, struct iio_chan_spec const *chan,
				 int val, int val2, long mask)
{
	struct nxp_sar_adc *info = iio_priv(indio_dev);
	u32 inpsamp;

	switch (mask) {
	case IIO_CHAN_INFO_SAMP_FREQ:
		/*
		 * Configure the sample phase duration in SAR controller
		 * clock cycles. The minimum acceptable value is 8;
		 * configuring a value lower than 8 sets the sample
		 * period to 8 cycles anyway. Divide the clock rate by
		 * the requested sampling frequency to get the number of
		 * cycles per conversion, then subtract the fixed
		 * conversion time. The field is 8 bits wide, so the
		 * maximum value is 0xFF.
		 */
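		/*
		 * Worked example (the clock rate is purely illustrative,
		 * borrowed from the nxp_sar_adc_stop_conversion() comment):
		 * requesting 1 MS/s with an 80 MHz ADC clock gives
		 * inpsamp = 80000000 / 1000000 - 60 = 20 cycles, well
		 * within the [8, 255] range enforced by
		 * nxp_sar_adc_conversion_timing_set().
		 */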
		inpsamp = clk_get_rate(info->clk) / val - NXP_SAR_ADC_CONV_TIME;
		nxp_sar_adc_conversion_timing_set(info, inpsamp);
		return 0;

	default:
		return -EINVAL;
	}
}

static void nxp_sar_adc_dma_cb(void *data)
{
	struct iio_dev *indio_dev = data;
	struct nxp_sar_adc *info = iio_priv(indio_dev);
	struct dma_tx_state state;
	struct circ_buf *dma_buf;
	struct device *dev_dma;
	u32 *dma_samples;
	s64 timestamp;
	int idx, ret;

	guard(spinlock_irqsave)(&info->lock);

	dma_buf = &info->dma_buf;
	dma_samples = (u32 *)dma_buf->buf;
	dev_dma = info->dma_chan->device->dev;

	/*
	 * In some corner cases the DMA might already have been set up
	 * for the next transfer. There is potentially a race where the
	 * residue changes while the DMA engine updates the buffer. That
	 * could be handled by using callback_result() instead of
	 * callback(), because the residue would then be passed as a
	 * parameter to the function. However, callback_result() is
	 * fairly new and the backend does not update the residue, so
	 * stick with the approach other drivers use, which has proven
	 * to run well in production for several years.
	 */
	dmaengine_tx_status(info->dma_chan, info->cookie, &state);

	dma_sync_single_for_cpu(dev_dma, info->rx_dma_buf,
				NXP_SAR_ADC_DMA_BUFF_SZ, DMA_FROM_DEVICE);

	/* Current head position. */
	dma_buf->head = (NXP_SAR_ADC_DMA_BUFF_SZ - state.residue) /
		NXP_SAR_ADC_DMA_SAMPLE_SZ;

	/* If everything was transferred, avoid an off-by-one error. */
	if (!state.residue)
		dma_buf->head--;

	/*
	 * Process the samples only if something was actually
	 * transferred: a residue equal to the whole buffer size means
	 * nothing was transferred.
	 */
	if (state.residue != NXP_SAR_ADC_DMA_BUFF_SZ) {
		/* Make sure that head is a multiple of info->channels_used. */
		dma_buf->head -= dma_buf->head % info->channels_used;

		/*
		 * The dma_buf->tail != dma_buf->head condition eventually
		 * becomes false because dma_buf->tail is incremented by
		 * one on each iteration.
		 */
		while (dma_buf->tail != dma_buf->head) {
			idx = dma_buf->tail % info->channels_used;
			info->buffer[idx] = dma_samples[dma_buf->tail];
			dma_buf->tail = (dma_buf->tail + 1) % NXP_SAR_ADC_DMA_SAMPLE_CNT;
			if (idx != info->channels_used - 1)
				continue;

			/*
			 * iio_push_to_buffers_with_ts() must not be called
			 * with dma_samples as a parameter: the samples
			 * would be overwritten if the timestamp is enabled.
			 */
			timestamp = iio_get_time_ns(indio_dev);
			ret = iio_push_to_buffers_with_ts(indio_dev, info->buffer,
							  sizeof(info->buffer),
							  timestamp);
			if (ret < 0 && ret != -EBUSY)
				dev_err_ratelimited(&indio_dev->dev,
						    "failed to push iio buffer: %d",
						    ret);
		}

		dma_buf->tail = dma_buf->head;
	}

	dma_sync_single_for_device(dev_dma, info->rx_dma_buf,
				   NXP_SAR_ADC_DMA_BUFF_SZ, DMA_FROM_DEVICE);
}

static int nxp_sar_adc_start_cyclic_dma(struct iio_dev *indio_dev)
{
	struct nxp_sar_adc *info = iio_priv(indio_dev);
	struct dma_slave_config config = { };
	struct dma_async_tx_descriptor *desc;
	int ret;

	info->dma_buf.head = 0;
	info->dma_buf.tail = 0;

	config.direction = DMA_DEV_TO_MEM;
	config.src_addr_width = NXP_SAR_ADC_DMA_SAMPLE_SZ;
	config.src_addr = NXP_SAR_ADC_CDR(info->regs_phys, info->buffered_chan[0]);
	config.src_port_window_size = info->channels_used;
	config.src_maxburst = info->channels_used;
	ret = dmaengine_slave_config(info->dma_chan, &config);
	if (ret < 0)
		return ret;

	desc = dmaengine_prep_dma_cyclic(info->dma_chan,
					 info->rx_dma_buf,
					 NXP_SAR_ADC_DMA_BUFF_SZ,
					 NXP_SAR_ADC_DMA_BUFF_SZ / 2,
					 DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
	if (!desc)
		return -EINVAL;

	desc->callback = nxp_sar_adc_dma_cb;
	desc->callback_param = indio_dev;
	info->cookie = dmaengine_submit(desc);
	ret = dma_submit_error(info->cookie);
	if (ret) {
		dmaengine_terminate_async(info->dma_chan);
		return ret;
	}

	dma_async_issue_pending(info->dma_chan);

	return 0;
}

static void nxp_sar_adc_buffer_software_do_predisable(struct iio_dev *indio_dev)
{
	struct nxp_sar_adc *info = iio_priv(indio_dev);

	/*
	 * The ADC DMAEN bit should be cleared before the DMA
	 * transaction is canceled.
	 */
	nxp_sar_adc_stop_conversion(info);
	dmaengine_terminate_sync(info->dma_chan);
	nxp_sar_adc_dma_cfg(info, false);
	nxp_sar_adc_dma_channels_disable(info, *indio_dev->active_scan_mask);

	dma_release_channel(info->dma_chan);
}

static int nxp_sar_adc_buffer_software_do_postenable(struct iio_dev *indio_dev)
{
	struct nxp_sar_adc *info = iio_priv(indio_dev);
	int ret;

	info->dma_chan = dma_request_chan(indio_dev->dev.parent, "rx");
	if (IS_ERR(info->dma_chan))
		return PTR_ERR(info->dma_chan);

	nxp_sar_adc_dma_channels_enable(info, *indio_dev->active_scan_mask);

	nxp_sar_adc_dma_cfg(info, true);

	ret = nxp_sar_adc_start_cyclic_dma(indio_dev);
	if (ret)
		goto out_dma_channels_disable;

	ret = nxp_sar_adc_start_conversion(info, false);
	if (ret)
		goto out_stop_cyclic_dma;

	return 0;

out_stop_cyclic_dma:
	dmaengine_terminate_sync(info->dma_chan);

out_dma_channels_disable:
	nxp_sar_adc_dma_cfg(info, false);
	nxp_sar_adc_dma_channels_disable(info, *indio_dev->active_scan_mask);
	dma_release_channel(info->dma_chan);

	return ret;
}

static void nxp_sar_adc_buffer_trigger_do_predisable(struct iio_dev *indio_dev)
{
	struct nxp_sar_adc *info = iio_priv(indio_dev);

	nxp_sar_adc_irq_cfg(info, false);
}

static int nxp_sar_adc_buffer_trigger_do_postenable(struct iio_dev *indio_dev)
{
	struct nxp_sar_adc *info = iio_priv(indio_dev);

	nxp_sar_adc_irq_cfg(info, true);

	return 0;
}

static int nxp_sar_adc_buffer_postenable(struct iio_dev *indio_dev)
{
	struct nxp_sar_adc *info = iio_priv(indio_dev);
	int current_mode = iio_device_get_current_mode(indio_dev);
	unsigned long channel;
	int ret;

	info->channels_used = 0;

	/*
	 * The SAR-ADC has two groups of channels.
	 *
	 * - Group #0:
	 *   * bits 0-7  : channel 0 -> channel 7
	 *   * bits 8-31 : reserved
	 *
	 * - Group #32:
	 *   * bits 0-7  : internal
	 *   * bits 8-31 : reserved
	 *
	 * The 8 channels from group #0 are the ones exposed by this
	 * driver, declared with the same mapping in the IIO channel
	 * array above. That means the active_scan_mask can be used
	 * directly to write the channel interrupt mask.
	 */
	nxp_sar_adc_channels_enable(info, *indio_dev->active_scan_mask);

	for_each_set_bit(channel, indio_dev->active_scan_mask, NXP_SAR_ADC_NR_CHANNELS)
		info->buffered_chan[info->channels_used++] = channel;

	nxp_sar_adc_enable(info);

	if (current_mode == INDIO_BUFFER_SOFTWARE)
		ret = nxp_sar_adc_buffer_software_do_postenable(indio_dev);
	else
		ret = nxp_sar_adc_buffer_trigger_do_postenable(indio_dev);
	if (ret)
		goto out_postenable;

	return 0;

out_postenable:
	nxp_sar_adc_disable(info);
	nxp_sar_adc_channels_disable(info, *indio_dev->active_scan_mask);

	return ret;
}

static int nxp_sar_adc_buffer_predisable(struct iio_dev *indio_dev)
{
	struct nxp_sar_adc *info = iio_priv(indio_dev);
	int currentmode = iio_device_get_current_mode(indio_dev);

	if (currentmode == INDIO_BUFFER_SOFTWARE)
		nxp_sar_adc_buffer_software_do_predisable(indio_dev);
	else
		nxp_sar_adc_buffer_trigger_do_predisable(indio_dev);

	nxp_sar_adc_disable(info);

	nxp_sar_adc_channels_disable(info, *indio_dev->active_scan_mask);

	return 0;
}

static irqreturn_t nxp_sar_adc_trigger_handler(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	struct nxp_sar_adc *info = iio_priv(indio_dev);
	int ret;

	ret = nxp_sar_adc_start_conversion(info, true);
	if (ret < 0)
		dev_dbg(&indio_dev->dev, "Failed to start conversion\n");

	return IRQ_HANDLED;
}

static const struct iio_buffer_setup_ops iio_triggered_buffer_setup_ops = {
	.postenable = nxp_sar_adc_buffer_postenable,
	.predisable = nxp_sar_adc_buffer_predisable,
};

static const struct iio_info nxp_sar_adc_iio_info = {
	.read_raw = nxp_sar_adc_read_raw,
	.write_raw = nxp_sar_adc_write_raw,
};

static int nxp_sar_adc_dma_probe(struct device *dev, struct nxp_sar_adc *info)
{
	u8 *rx_buf;

	rx_buf = dmam_alloc_coherent(dev, NXP_SAR_ADC_DMA_BUFF_SZ,
				     &info->rx_dma_buf, GFP_KERNEL);
	if (!rx_buf)
		return -ENOMEM;

	info->dma_buf.buf = rx_buf;

	return 0;
}

/*
 * The documentation describes the reset values for the registers.
 * However, some registers do not have these values after a reset,
 * which is not desirable. For some other SoC families, the NXP
 * documentation recommends not assuming the default values are set
 * and initializing the registers with the documented reset values to
 * prevent this situation. Assume the same rule applies here, as there
 * is a discrepancy between what is read from the registers at reset
 * time and the documentation.
 */
static void nxp_sar_adc_set_default_values(struct nxp_sar_adc *info)
{
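	/*
	 * Decoded against the MCR field macros above, 0x00003901 sets
	 * PWDN, ADCLKSEL, NRSMPL = 0b11 and AVGEN: the ADC starts out
	 * powered down with hardware averaging enabled. This is only a
	 * reading of the value written below, not an extra programming
	 * step.
	 */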
	writel(0x00003901, NXP_SAR_ADC_MCR(info->regs));
	writel(0x00000001, NXP_SAR_ADC_MSR(info->regs));
	writel(0x00000014, NXP_SAR_ADC_CTR0(info->regs));
	writel(0x00000014, NXP_SAR_ADC_CTR1(info->regs));
	writel(0x00000000, NXP_SAR_ADC_CIMR0(info->regs));
	writel(0x00000000, NXP_SAR_ADC_CIMR1(info->regs));
	writel(0x00000000, NXP_SAR_ADC_NCMR0(info->regs));
	writel(0x00000000, NXP_SAR_ADC_NCMR1(info->regs));
}

static int nxp_sar_adc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	const struct nxp_sar_adc_data *data = device_get_match_data(dev);
	struct nxp_sar_adc *info;
	struct iio_dev *indio_dev;
	struct resource *mem;
	int irq, ret;

	indio_dev = devm_iio_device_alloc(dev, sizeof(*info));
	if (!indio_dev)
		return -ENOMEM;

	info = iio_priv(indio_dev);
	info->vref_mV = data->vref_mV;
	spin_lock_init(&info->lock);
	info->regs = devm_platform_get_and_ioremap_resource(pdev, 0, &mem);
	if (IS_ERR(info->regs))
		return dev_err_probe(dev, PTR_ERR(info->regs),
				     "Failed to get and remap resource");

	info->regs_phys = mem->start;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_irq(dev, irq, nxp_sar_adc_isr, 0, dev_name(dev),
			       indio_dev);
	if (ret < 0)
		return ret;

	info->clk = devm_clk_get_enabled(dev, NULL);
	if (IS_ERR(info->clk))
		return dev_err_probe(dev, PTR_ERR(info->clk),
				     "Failed to get the clock\n");

	platform_set_drvdata(pdev, indio_dev);

	init_completion(&info->completion);

	indio_dev->name = data->model;
	indio_dev->info = &nxp_sar_adc_iio_info;
	indio_dev->modes = INDIO_DIRECT_MODE | INDIO_BUFFER_SOFTWARE;
	indio_dev->channels = nxp_sar_adc_iio_channels;
	indio_dev->num_channels = ARRAY_SIZE(nxp_sar_adc_iio_channels);

	nxp_sar_adc_set_default_values(info);

	ret = nxp_sar_adc_calibration(info);
	if (ret)
		dev_err_probe(dev, ret, "Calibration failed\n");

	ret = nxp_sar_adc_dma_probe(dev, info);
	if (ret)
		return dev_err_probe(dev, ret, "Failed to initialize the DMA\n");

	ret = devm_iio_triggered_buffer_setup(dev, indio_dev,
					      &iio_pollfunc_store_time,
					      &nxp_sar_adc_trigger_handler,
					      &iio_triggered_buffer_setup_ops);
	if (ret < 0)
		return dev_err_probe(dev, ret, "Couldn't initialise the buffer\n");

	ret = devm_iio_device_register(dev, indio_dev);
	if (ret)
		return dev_err_probe(dev, ret, "Couldn't register the device\n");

	return 0;
}

static int nxp_sar_adc_suspend(struct device *dev)
{
	struct nxp_sar_adc *info = iio_priv(dev_get_drvdata(dev));

	info->pwdn = nxp_sar_adc_disable(info);
	info->inpsamp = nxp_sar_adc_conversion_timing_get(info);

	clk_disable_unprepare(info->clk);

	return 0;
}

static int nxp_sar_adc_resume(struct device *dev)
{
	struct nxp_sar_adc *info = iio_priv(dev_get_drvdata(dev));
	int ret;

	ret = clk_prepare_enable(info->clk);
	if (ret)
		return ret;

	nxp_sar_adc_conversion_timing_set(info, info->inpsamp);

	if (!info->pwdn)
		nxp_sar_adc_enable(info);

	return 0;
}

static DEFINE_SIMPLE_DEV_PM_OPS(nxp_sar_adc_pm_ops, nxp_sar_adc_suspend,
				nxp_sar_adc_resume);

static const struct nxp_sar_adc_data s32g2_sar_adc_data = {
	.vref_mV = 1800,
	.model = "s32g2-sar-adc",
};

static const struct of_device_id nxp_sar_adc_match[] = {
	{ .compatible = "nxp,s32g2-sar-adc", .data = &s32g2_sar_adc_data },
	{ }
};
MODULE_DEVICE_TABLE(of, nxp_sar_adc_match);

static struct platform_driver nxp_sar_adc_driver = {
	.probe = nxp_sar_adc_probe,
	.driver = {
		.name = "nxp-sar-adc",
		.of_match_table = nxp_sar_adc_match,
		.pm = pm_sleep_ptr(&nxp_sar_adc_pm_ops),
	},
};
module_platform_driver(nxp_sar_adc_driver);

MODULE_AUTHOR("NXP");
MODULE_DESCRIPTION("NXP SAR-ADC driver");
MODULE_LICENSE("GPL");