// SPDX-License-Identifier: GPL-2.0-only
/*
 * Analog Devices Generic AXI ADC IP core
 * Link: https://wiki.analog.com/resources/fpga/docs/axi_adc_ip
 *
 * Copyright 2012-2020 Analog Devices Inc.
 */

#include <linux/bitfield.h>
#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/slab.h>

#include <linux/fpga/adi-axi-common.h>

#include <linux/iio/backend.h>
#include <linux/iio/buffer-dmaengine.h>
#include <linux/iio/buffer.h>
#include <linux/iio/iio.h>

/*
 * Register definitions:
 *   https://wiki.analog.com/resources/fpga/docs/axi_adc_ip#register_map
 */

/* ADC controls */

#define ADI_AXI_REG_RSTN			0x0040
#define   ADI_AXI_REG_RSTN_CE_N			BIT(2)
#define   ADI_AXI_REG_RSTN_MMCM_RSTN		BIT(1)
#define   ADI_AXI_REG_RSTN_RSTN			BIT(0)

#define ADI_AXI_ADC_REG_CTRL			0x0044
#define    ADI_AXI_ADC_CTRL_DDR_EDGESEL_MASK	BIT(1)

#define ADI_AXI_ADC_REG_DRP_STATUS		0x0074
#define   ADI_AXI_ADC_DRP_LOCKED		BIT(17)

/* ADC Channel controls */

#define ADI_AXI_REG_CHAN_CTRL(c)		(0x0400 + (c) * 0x40)
#define   ADI_AXI_REG_CHAN_CTRL_LB_OWR		BIT(11)
#define   ADI_AXI_REG_CHAN_CTRL_PN_SEL_OWR	BIT(10)
#define   ADI_AXI_REG_CHAN_CTRL_IQCOR_EN	BIT(9)
#define   ADI_AXI_REG_CHAN_CTRL_DCFILT_EN	BIT(8)
#define   ADI_AXI_REG_CHAN_CTRL_FMT_MASK	GENMASK(6, 4)
#define   ADI_AXI_REG_CHAN_CTRL_FMT_SIGNEXT	BIT(6)
#define   ADI_AXI_REG_CHAN_CTRL_FMT_TYPE	BIT(5)
#define   ADI_AXI_REG_CHAN_CTRL_FMT_EN		BIT(4)
#define   ADI_AXI_REG_CHAN_CTRL_PN_TYPE_OWR	BIT(1)
#define   ADI_AXI_REG_CHAN_CTRL_ENABLE		BIT(0)

#define ADI_AXI_ADC_REG_CHAN_STATUS(c)		(0x0404 + (c) * 0x40)
#define   ADI_AXI_ADC_CHAN_STAT_PN_MASK		GENMASK(2, 1)
/* out of sync */
#define   ADI_AXI_ADC_CHAN_STAT_PN_OOS		BIT(1)
/* spurious out of sync */
#define   ADI_AXI_ADC_CHAN_STAT_PN_ERR		BIT(2)

#define ADI_AXI_ADC_REG_CHAN_CTRL_3(c)		(0x0418 + (c) * 0x40)
#define   ADI_AXI_ADC_CHAN_PN_SEL_MASK		GENMASK(19, 16)

/* IO Delays */
#define ADI_AXI_ADC_REG_DELAY(l)		(0x0800 + (l) * 0x4)
#define   AXI_ADC_DELAY_CTRL_MASK		GENMASK(4, 0)

#define ADI_AXI_ADC_MAX_IO_NUM_LANES		15

#define ADI_AXI_REG_CHAN_CTRL_DEFAULTS		\
	(ADI_AXI_REG_CHAN_CTRL_FMT_SIGNEXT |	\
	 ADI_AXI_REG_CHAN_CTRL_FMT_EN |		\
	 ADI_AXI_REG_CHAN_CTRL_ENABLE)

struct adi_axi_adc_state {
	struct regmap *regmap;
	struct device *dev;
	/* lock to protect multiple accesses to the device registers */
	struct mutex lock;
};

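/*
 * Bring the core out of reset: release the MMCM/clock-management reset
 * first, wait for the DRP to report lock, and only then release the full
 * core reset.
 */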
static int axi_adc_enable(struct iio_backend *back)
{
	struct adi_axi_adc_state *st = iio_backend_get_priv(back);
	unsigned int __val;
	int ret;

	guard(mutex)(&st->lock);
	ret = regmap_set_bits(st->regmap, ADI_AXI_REG_RSTN,
			      ADI_AXI_REG_RSTN_MMCM_RSTN);
	if (ret)
		return ret;

	/*
	 * Make sure the DRP (Dynamic Reconfiguration Port) is locked. Not all
	 * designs actually use it, but the lock bit is set even when it is
	 * unused, so check it unconditionally to keep the code generic.
	 */
	ret = regmap_read_poll_timeout(st->regmap, ADI_AXI_ADC_REG_DRP_STATUS,
				       __val, __val & ADI_AXI_ADC_DRP_LOCKED,
				       100, 1000);
	if (ret)
		return ret;

	return regmap_set_bits(st->regmap, ADI_AXI_REG_RSTN,
			       ADI_AXI_REG_RSTN_RSTN | ADI_AXI_REG_RSTN_MMCM_RSTN);
}

static void axi_adc_disable(struct iio_backend *back)
{
	struct adi_axi_adc_state *st = iio_backend_get_priv(back);

	guard(mutex)(&st->lock);
	regmap_write(st->regmap, ADI_AXI_REG_RSTN, 0);
}

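/*
 * Program the per-channel data format: enable formatting, optionally sign
 * extend the samples and select offset binary rather than two's complement
 * representation.
 */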
static int axi_adc_data_format_set(struct iio_backend *back, unsigned int chan,
				   const struct iio_backend_data_fmt *data)
{
	struct adi_axi_adc_state *st = iio_backend_get_priv(back);
	u32 val;

	if (!data->enable)
		return regmap_clear_bits(st->regmap,
					 ADI_AXI_REG_CHAN_CTRL(chan),
					 ADI_AXI_REG_CHAN_CTRL_FMT_EN);

	val = FIELD_PREP(ADI_AXI_REG_CHAN_CTRL_FMT_EN, true);
	if (data->sign_extend)
		val |= FIELD_PREP(ADI_AXI_REG_CHAN_CTRL_FMT_SIGNEXT, true);
	if (data->type == IIO_BACKEND_OFFSET_BINARY)
		val |= FIELD_PREP(ADI_AXI_REG_CHAN_CTRL_FMT_TYPE, true);

	return regmap_update_bits(st->regmap, ADI_AXI_REG_CHAN_CTRL(chan),
				  ADI_AXI_REG_CHAN_CTRL_FMT_MASK, val);
}

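/* Select which clock edge (DDR edge select) the samples are captured on. */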
static int axi_adc_data_sample_trigger(struct iio_backend *back,
				       enum iio_backend_sample_trigger trigger)
{
	struct adi_axi_adc_state *st = iio_backend_get_priv(back);

	switch (trigger) {
	case IIO_BACKEND_SAMPLE_TRIGGER_EDGE_RISING:
		return regmap_clear_bits(st->regmap, ADI_AXI_ADC_REG_CTRL,
					 ADI_AXI_ADC_CTRL_DDR_EDGESEL_MASK);
	case IIO_BACKEND_SAMPLE_TRIGGER_EDGE_FALLING:
		return regmap_set_bits(st->regmap, ADI_AXI_ADC_REG_CTRL,
				       ADI_AXI_ADC_CTRL_DDR_EDGESEL_MASK);
	default:
		return -EINVAL;
	}
}

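/*
 * Program the IO delay tap for one input lane. The tap value is bounded by
 * the width of the delay control field and the lane index by the number of
 * delay registers exposed by the core.
 */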
static int axi_adc_iodelays_set(struct iio_backend *back, unsigned int lane,
				unsigned int tap)
{
	struct adi_axi_adc_state *st = iio_backend_get_priv(back);
	int ret;
	u32 val;

	if (tap > FIELD_MAX(AXI_ADC_DELAY_CTRL_MASK))
		return -EINVAL;
	if (lane > ADI_AXI_ADC_MAX_IO_NUM_LANES)
		return -EINVAL;

	guard(mutex)(&st->lock);
	ret = regmap_write(st->regmap, ADI_AXI_ADC_REG_DELAY(lane), tap);
	if (ret)
		return ret;
	/*
	 * If readback is ~0, that means there are issues with the
	 * delay_clk.
	 */
	ret = regmap_read(st->regmap, ADI_AXI_ADC_REG_DELAY(lane), &val);
	if (ret)
		return ret;
	if (val == U32_MAX)
		return -EIO;

	return 0;
}

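/*
 * Select the pseudo-random test pattern the core checks the incoming data
 * against: PN_SEL 0 corresponds to the ADI PRBS9 sequence and PN_SEL 1 to
 * the ADI PRBS23 sequence.
 */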
static int axi_adc_test_pattern_set(struct iio_backend *back,
				    unsigned int chan,
				    enum iio_backend_test_pattern pattern)
{
	struct adi_axi_adc_state *st = iio_backend_get_priv(back);

	switch (pattern) {
	case IIO_BACKEND_NO_TEST_PATTERN:
		/* nothing to do */
		return 0;
	case IIO_BACKEND_ADI_PRBS_9A:
		return regmap_update_bits(st->regmap, ADI_AXI_ADC_REG_CHAN_CTRL_3(chan),
					  ADI_AXI_ADC_CHAN_PN_SEL_MASK,
					  FIELD_PREP(ADI_AXI_ADC_CHAN_PN_SEL_MASK, 0));
	case IIO_BACKEND_ADI_PRBS_23A:
		return regmap_update_bits(st->regmap, ADI_AXI_ADC_REG_CHAN_CTRL_3(chan),
					  ADI_AXI_ADC_CHAN_PN_SEL_MASK,
					  FIELD_PREP(ADI_AXI_ADC_CHAN_PN_SEL_MASK, 1));
	default:
		return -EINVAL;
	}
}

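/*
 * Read back the pattern checker status for one channel: clear the status
 * bits, let the checker run for a while and then read the result.
 */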
static int axi_adc_read_chan_status(struct adi_axi_adc_state *st, unsigned int chan,
				    unsigned int *status)
{
	int ret;

	guard(mutex)(&st->lock);
	/* reset the test status bits by writing 1 to them */
	ret = regmap_write(st->regmap, ADI_AXI_ADC_REG_CHAN_STATUS(chan),
			   ADI_AXI_ADC_CHAN_STAT_PN_MASK);
	if (ret)
		return ret;

	/* give the core enough time to validate or flag errors on the incoming pattern */
	fsleep(1000);

	return regmap_read(st->regmap, ADI_AXI_ADC_REG_CHAN_STATUS(chan),
			   status);
}

static int axi_adc_chan_status(struct iio_backend *back, unsigned int chan,
			       bool *error)
{
	struct adi_axi_adc_state *st = iio_backend_get_priv(back);
	u32 val;
	int ret;

	ret = axi_adc_read_chan_status(st, chan, &val);
	if (ret)
		return ret;

	if (ADI_AXI_ADC_CHAN_STAT_PN_MASK & val)
		*error = true;
	else
		*error = false;

	return 0;
}

static int axi_adc_debugfs_print_chan_status(struct iio_backend *back,
					     unsigned int chan, char *buf,
					     size_t len)
{
	struct adi_axi_adc_state *st = iio_backend_get_priv(back);
	u32 val;
	int ret;

	ret = axi_adc_read_chan_status(st, chan, &val);
	if (ret)
		return ret;

	/*
	 * PN_ERR is cleared in case out of sync is set. Hence, no point in
	 * checking both bits.
	 */
	if (val & ADI_AXI_ADC_CHAN_STAT_PN_OOS)
		return scnprintf(buf, len, "CH%u: Out of Sync.\n", chan);
	if (val & ADI_AXI_ADC_CHAN_STAT_PN_ERR)
		return scnprintf(buf, len, "CH%u: Spurious Out of Sync.\n", chan);

	return scnprintf(buf, len, "CH%u: OK.\n", chan);
}

static int axi_adc_chan_enable(struct iio_backend *back, unsigned int chan)
{
	struct adi_axi_adc_state *st = iio_backend_get_priv(back);

	return regmap_set_bits(st->regmap, ADI_AXI_REG_CHAN_CTRL(chan),
			       ADI_AXI_REG_CHAN_CTRL_ENABLE);
}

static int axi_adc_chan_disable(struct iio_backend *back, unsigned int chan)
{
	struct adi_axi_adc_state *st = iio_backend_get_priv(back);

	return regmap_clear_bits(st->regmap, ADI_AXI_REG_CHAN_CTRL(chan),
				 ADI_AXI_REG_CHAN_CTRL_ENABLE);
}

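/*
 * Hook up the DMA buffer for the frontend device. The DMA channel name is
 * taken from the "dma-names" property and defaults to "rx" when the property
 * is absent.
 */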
static struct iio_buffer *axi_adc_request_buffer(struct iio_backend *back,
						 struct iio_dev *indio_dev)
{
	struct adi_axi_adc_state *st = iio_backend_get_priv(back);
	const char *dma_name;

	if (device_property_read_string(st->dev, "dma-names", &dma_name))
		dma_name = "rx";

	return iio_dmaengine_buffer_setup(st->dev, indio_dev, dma_name);
}

static void axi_adc_free_buffer(struct iio_backend *back,
				struct iio_buffer *buffer)
{
	iio_dmaengine_buffer_free(buffer);
}

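/* debugfs direct register access: read when readval is set, write otherwise */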
static int axi_adc_reg_access(struct iio_backend *back, unsigned int reg,
			      unsigned int writeval, unsigned int *readval)
{
	struct adi_axi_adc_state *st = iio_backend_get_priv(back);

	if (readval)
		return regmap_read(st->regmap, reg, readval);

	return regmap_write(st->regmap, reg, writeval);
}

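/* 32-bit registers, 4-byte stride, accessed through MMIO */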
static const struct regmap_config axi_adc_regmap_config = {
	.val_bits = 32,
	.reg_bits = 32,
	.reg_stride = 4,
};

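/* Operations exposed to ADC frontend drivers through the IIO backend framework */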
static const struct iio_backend_ops adi_axi_adc_ops = {
	.enable = axi_adc_enable,
	.disable = axi_adc_disable,
	.data_format_set = axi_adc_data_format_set,
	.chan_enable = axi_adc_chan_enable,
	.chan_disable = axi_adc_chan_disable,
	.request_buffer = axi_adc_request_buffer,
	.free_buffer = axi_adc_free_buffer,
	.data_sample_trigger = axi_adc_data_sample_trigger,
	.iodelay_set = axi_adc_iodelays_set,
	.test_pattern_set = axi_adc_test_pattern_set,
	.chan_status = axi_adc_chan_status,
	.debugfs_reg_access = iio_backend_debugfs_ptr(axi_adc_reg_access),
	.debugfs_print_chan_status = iio_backend_debugfs_ptr(axi_adc_debugfs_print_chan_status),
};

static const struct iio_backend_info adi_axi_adc_generic = {
	.name = "axi-adc",
	.ops = &adi_axi_adc_ops,
};

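/*
 * Map the core, keep it disabled until a frontend enables it, validate the
 * IP core major version against the one matched from the device tree and
 * register the device as an IIO backend.
 */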
static int adi_axi_adc_probe(struct platform_device *pdev)
{
	const unsigned int *expected_ver;
	struct adi_axi_adc_state *st;
	void __iomem *base;
	unsigned int ver;
	struct clk *clk;
	int ret;

	st = devm_kzalloc(&pdev->dev, sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	st->dev = &pdev->dev;
	st->regmap = devm_regmap_init_mmio(&pdev->dev, base,
					   &axi_adc_regmap_config);
	if (IS_ERR(st->regmap))
		return dev_err_probe(&pdev->dev, PTR_ERR(st->regmap),
				     "failed to init register map\n");

	expected_ver = device_get_match_data(&pdev->dev);
	if (!expected_ver)
		return -ENODEV;

	clk = devm_clk_get_enabled(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "failed to get clock\n");

	/*
	 * Force disable the core. It is up to the frontend to enable it
	 * later on. Registers can still be read and written while the core
	 * is disabled.
	 */
	ret = regmap_write(st->regmap, ADI_AXI_REG_RSTN, 0);
	if (ret)
		return ret;

	ret = regmap_read(st->regmap, ADI_AXI_REG_VERSION, &ver);
	if (ret)
		return ret;

	if (ADI_AXI_PCORE_VER_MAJOR(ver) != ADI_AXI_PCORE_VER_MAJOR(*expected_ver)) {
		dev_err(&pdev->dev,
			"Major version mismatch. Expected %d.%.2d.%c, Reported %d.%.2d.%c\n",
			ADI_AXI_PCORE_VER_MAJOR(*expected_ver),
			ADI_AXI_PCORE_VER_MINOR(*expected_ver),
			ADI_AXI_PCORE_VER_PATCH(*expected_ver),
			ADI_AXI_PCORE_VER_MAJOR(ver),
			ADI_AXI_PCORE_VER_MINOR(ver),
			ADI_AXI_PCORE_VER_PATCH(ver));
		return -ENODEV;
	}

	ret = devm_iio_backend_register(&pdev->dev, &adi_axi_adc_generic, st);
	if (ret)
		return dev_err_probe(&pdev->dev, ret,
				     "failed to register iio backend\n");

	dev_info(&pdev->dev, "AXI ADC IP core (%d.%.2d.%c) probed\n",
		 ADI_AXI_PCORE_VER_MAJOR(ver),
		 ADI_AXI_PCORE_VER_MINOR(ver),
		 ADI_AXI_PCORE_VER_PATCH(ver));

	return 0;
}

static unsigned int adi_axi_adc_10_0_a_info = ADI_AXI_PCORE_VER(10, 0, 'a');

/* Match table for of_platform binding */
static const struct of_device_id adi_axi_adc_of_match[] = {
	{ .compatible = "adi,axi-adc-10.0.a", .data = &adi_axi_adc_10_0_a_info },
	{ /* end of list */ }
};
MODULE_DEVICE_TABLE(of, adi_axi_adc_of_match);

static struct platform_driver adi_axi_adc_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = adi_axi_adc_of_match,
	},
	.probe = adi_axi_adc_probe,
};
module_platform_driver(adi_axi_adc_driver);

MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
MODULE_DESCRIPTION("Analog Devices Generic AXI ADC IP core driver");
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(IIO_DMAENGINE_BUFFER);
MODULE_IMPORT_NS(IIO_BACKEND);