// SPDX-License-Identifier: GPL-2.0-only
/*
 * Analog Devices Generic AXI ADC IP core
 * Link: https://wiki.analog.com/resources/fpga/docs/axi_adc_ip
 *
 * Copyright 2012-2020 Analog Devices Inc.
 *
 * This driver is an IIO "backend": it exposes the AXI ADC IP core's
 * register-level controls (reset, channel enable, data format, IO delays,
 * test patterns, DMA buffer setup) to a separate converter frontend driver
 * through the iio_backend_ops table below.
 */

#include <linux/bitfield.h>
#include <linux/cleanup.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/property.h>
#include <linux/regmap.h>
#include <linux/slab.h>

#include <linux/fpga/adi-axi-common.h>

#include <linux/iio/backend.h>
#include <linux/iio/buffer-dmaengine.h>
#include <linux/iio/buffer.h>
#include <linux/iio/iio.h>

/*
 * Register definitions:
 *   https://wiki.analog.com/resources/fpga/docs/axi_adc_ip#register_map
 */

/* ADC controls */

/* Reset register: all resets are active low (…_N / …_RSTN naming) */
#define ADI_AXI_REG_RSTN			0x0040
#define   ADI_AXI_REG_RSTN_CE_N			BIT(2)
#define   ADI_AXI_REG_RSTN_MMCM_RSTN		BIT(1)
#define   ADI_AXI_REG_RSTN_RSTN			BIT(0)

#define ADI_AXI_ADC_REG_CTRL			0x0044
/* selects the DDR capture edge (see axi_adc_data_sample_trigger()) */
#define   ADI_AXI_ADC_CTRL_DDR_EDGESEL_MASK	BIT(1)

#define ADI_AXI_ADC_REG_DRP_STATUS		0x0074
/* set once the Dynamic Reconfiguration Port (clock infrastructure) locked */
#define   ADI_AXI_ADC_DRP_LOCKED		BIT(17)

/* ADC Channel controls */

/* per-channel register banks are 0x40 bytes apart */
#define ADI_AXI_REG_CHAN_CTRL(c)		(0x0400 + (c) * 0x40)
#define   ADI_AXI_REG_CHAN_CTRL_LB_OWR		BIT(11)
#define   ADI_AXI_REG_CHAN_CTRL_PN_SEL_OWR	BIT(10)
#define   ADI_AXI_REG_CHAN_CTRL_IQCOR_EN	BIT(9)
#define   ADI_AXI_REG_CHAN_CTRL_DCFILT_EN	BIT(8)
/* data format field: the three FMT_* bits below */
#define   ADI_AXI_REG_CHAN_CTRL_FMT_MASK	GENMASK(6, 4)
#define   ADI_AXI_REG_CHAN_CTRL_FMT_SIGNEXT	BIT(6)
#define   ADI_AXI_REG_CHAN_CTRL_FMT_TYPE	BIT(5)
#define   ADI_AXI_REG_CHAN_CTRL_FMT_EN		BIT(4)
#define   ADI_AXI_REG_CHAN_CTRL_PN_TYPE_OWR	BIT(1)
#define   ADI_AXI_REG_CHAN_CTRL_ENABLE		BIT(0)

#define ADI_AXI_ADC_REG_CHAN_STATUS(c)		(0x0404 + (c) * 0x40)
/* pseudo-noise (PN) monitor error bits; write-1-to-clear (see chan_status) */
#define   ADI_AXI_ADC_CHAN_STAT_PN_MASK		GENMASK(2, 1)

#define ADI_AXI_ADC_REG_CHAN_CTRL_3(c)		(0x0418 + (c) * 0x40)
#define   ADI_AXI_ADC_CHAN_PN_SEL_MASK		GENMASK(19, 16)

/* IO Delays */
#define ADI_AXI_ADC_REG_DELAY(l)		(0x0800 + (l) * 0x4)
/* delay tap value, 0..31 */
#define   AXI_ADC_DELAY_CTRL_MASK		GENMASK(4, 0)

#define ADI_AXI_ADC_MAX_IO_NUM_LANES		15

/*
 * Default channel setup: sign-extended output, format conversion enabled,
 * channel enabled. Not referenced in this file; kept for use by frontends.
 */
#define ADI_AXI_REG_CHAN_CTRL_DEFAULTS		\
	(ADI_AXI_REG_CHAN_CTRL_FMT_SIGNEXT |	\
	 ADI_AXI_REG_CHAN_CTRL_FMT_EN |		\
	 ADI_AXI_REG_CHAN_CTRL_ENABLE)

struct adi_axi_adc_state {
	struct regmap *regmap;
	struct device *dev;
	/* lock to protect multiple accesses to the device registers */
	struct mutex lock;
};

/*
 * Bring the core out of reset.
 *
 * Ordering matters: first release only the MMCM (clock manager) reset,
 * wait for the DRP to report lock, and only then release the core reset
 * (keeping the MMCM reset deasserted).
 */
static int axi_adc_enable(struct iio_backend *back)
{
	struct adi_axi_adc_state *st = iio_backend_get_priv(back);
	unsigned int __val;
	int ret;

	guard(mutex)(&st->lock);
	ret = regmap_set_bits(st->regmap, ADI_AXI_REG_RSTN,
			      ADI_AXI_REG_RSTN_MMCM_RSTN);
	if (ret)
		return ret;

	/*
	 * Make sure the DRP (Dynamic Reconfiguration Port) is locked. Not all
	 * designs really use it but if they don't we still get the lock bit
	 * set. So let's do it all the time so the code is generic.
	 */
	ret = regmap_read_poll_timeout(st->regmap, ADI_AXI_ADC_REG_DRP_STATUS,
				       __val, __val & ADI_AXI_ADC_DRP_LOCKED,
				       100, 1000);
	if (ret)
		return ret;

	return regmap_set_bits(st->regmap, ADI_AXI_REG_RSTN,
			       ADI_AXI_REG_RSTN_RSTN | ADI_AXI_REG_RSTN_MMCM_RSTN);
}

/*
 * Put the whole core back in reset. Registers remain accessible in this
 * state (see the comment in probe()).
 */
static void axi_adc_disable(struct iio_backend *back)
{
	struct adi_axi_adc_state *st = iio_backend_get_priv(back);

	guard(mutex)(&st->lock);
	regmap_write(st->regmap, ADI_AXI_REG_RSTN, 0);
}

/*
 * Configure the per-channel output data format.
 *
 * When @data->enable is false, only the format-enable bit is cleared and
 * the other format bits are left untouched. Otherwise the whole FMT field
 * is rewritten: sign extension per @data->sign_extend, and the FMT_TYPE
 * bit set for offset-binary input (clear for two's complement).
 */
static int axi_adc_data_format_set(struct iio_backend *back, unsigned int chan,
				   const struct iio_backend_data_fmt *data)
{
	struct adi_axi_adc_state *st = iio_backend_get_priv(back);
	u32 val;

	if (!data->enable)
		return regmap_clear_bits(st->regmap,
					 ADI_AXI_REG_CHAN_CTRL(chan),
					 ADI_AXI_REG_CHAN_CTRL_FMT_EN);

	val = FIELD_PREP(ADI_AXI_REG_CHAN_CTRL_FMT_EN, true);
	if (data->sign_extend)
		val |= FIELD_PREP(ADI_AXI_REG_CHAN_CTRL_FMT_SIGNEXT, true);
	if (data->type == IIO_BACKEND_OFFSET_BINARY)
		val |= FIELD_PREP(ADI_AXI_REG_CHAN_CTRL_FMT_TYPE, true);

	return regmap_update_bits(st->regmap, ADI_AXI_REG_CHAN_CTRL(chan),
				  ADI_AXI_REG_CHAN_CTRL_FMT_MASK, val);
}

/*
 * Select the sampling clock edge: DDR_EDGESEL clear for rising edge,
 * set for falling edge.
 */
static int axi_adc_data_sample_trigger(struct iio_backend *back,
				       enum iio_backend_sample_trigger trigger)
{
	struct adi_axi_adc_state *st = iio_backend_get_priv(back);

	switch (trigger) {
	case IIO_BACKEND_SAMPLE_TRIGGER_EDGE_RISING:
		return regmap_clear_bits(st->regmap, ADI_AXI_ADC_REG_CTRL,
					 ADI_AXI_ADC_CTRL_DDR_EDGESEL_MASK);
	case IIO_BACKEND_SAMPLE_TRIGGER_EDGE_FALLING:
		return regmap_set_bits(st->regmap, ADI_AXI_ADC_REG_CTRL,
				       ADI_AXI_ADC_CTRL_DDR_EDGESEL_MASK);
	default:
		return -EINVAL;
	}
}

/*
 * Program the IO delay tap for one lane.
 *
 * @lane: 0..ADI_AXI_ADC_MAX_IO_NUM_LANES
 * @tap:  0..FIELD_MAX(AXI_ADC_DELAY_CTRL_MASK) (i.e. 0..31)
 *
 * The written value is read back as a sanity check: a readback of all
 * ones indicates a problem with the delay clock, reported as -EIO.
 */
static int axi_adc_iodelays_set(struct iio_backend *back, unsigned int lane,
				unsigned int tap)
{
	struct adi_axi_adc_state *st = iio_backend_get_priv(back);
	int ret;
	u32 val;

	if (tap > FIELD_MAX(AXI_ADC_DELAY_CTRL_MASK))
		return -EINVAL;
	if (lane > ADI_AXI_ADC_MAX_IO_NUM_LANES)
		return -EINVAL;

	guard(mutex)(&st->lock);
	ret = regmap_write(st->regmap, ADI_AXI_ADC_REG_DELAY(lane), tap);
	if (ret)
		return ret;
	/*
	 * If readback is ~0, that means there are issues with the
	 * delay_clk.
	 */
	ret = regmap_read(st->regmap, ADI_AXI_ADC_REG_DELAY(lane), &val);
	if (ret)
		return ret;
	if (val == U32_MAX)
		return -EIO;

	return 0;
}

/*
 * Select the hardware test pattern for a channel. Only the ADI PRBS-9A
 * sequence (PN_SEL value 0) is supported; IIO_BACKEND_NO_TEST_PATTERN is
 * a no-op (the pattern monitor is not explicitly disabled here).
 */
static int axi_adc_test_pattern_set(struct iio_backend *back,
				    unsigned int chan,
				    enum iio_backend_test_pattern pattern)
{
	struct adi_axi_adc_state *st = iio_backend_get_priv(back);

	switch (pattern) {
	case IIO_BACKEND_NO_TEST_PATTERN:
		/* nothing to do */
		return 0;
	case IIO_BACKEND_ADI_PRBS_9A:
		return regmap_update_bits(st->regmap, ADI_AXI_ADC_REG_CHAN_CTRL_3(chan),
					  ADI_AXI_ADC_CHAN_PN_SEL_MASK,
					  FIELD_PREP(ADI_AXI_ADC_CHAN_PN_SEL_MASK, 0));
	default:
		return -EINVAL;
	}
}

/*
 * Check the pattern monitor for a channel.
 *
 * The sticky PN error bits are cleared by writing 1 to them; after a 1 ms
 * settling period the status is read back and *error is set if any PN
 * error bit is set again.
 */
static int axi_adc_chan_status(struct iio_backend *back, unsigned int chan,
			       bool *error)
{
	struct adi_axi_adc_state *st = iio_backend_get_priv(back);
	int ret;
	u32 val;

	guard(mutex)(&st->lock);
	/* reset test bits by setting them */
	ret = regmap_write(st->regmap, ADI_AXI_ADC_REG_CHAN_STATUS(chan),
			   ADI_AXI_ADC_CHAN_STAT_PN_MASK);
	if (ret)
		return ret;

	/* let's give enough time to validate or erroring the incoming pattern */
	fsleep(1000);

	ret = regmap_read(st->regmap, ADI_AXI_ADC_REG_CHAN_STATUS(chan), &val);
	if (ret)
		return ret;

	if (ADI_AXI_ADC_CHAN_STAT_PN_MASK & val)
		*error = true;
	else
		*error = false;

	return 0;
}

/* Enable one ADC channel. */
static int axi_adc_chan_enable(struct iio_backend *back, unsigned int chan)
{
	struct adi_axi_adc_state *st = iio_backend_get_priv(back);

	return regmap_set_bits(st->regmap, ADI_AXI_REG_CHAN_CTRL(chan),
			       ADI_AXI_REG_CHAN_CTRL_ENABLE);
}

/* Disable one ADC channel. */
static int axi_adc_chan_disable(struct iio_backend *back, unsigned int chan)
{
	struct adi_axi_adc_state *st = iio_backend_get_priv(back);

	return regmap_clear_bits(st->regmap, ADI_AXI_REG_CHAN_CTRL(chan),
				 ADI_AXI_REG_CHAN_CTRL_ENABLE);
}

/*
 * Set up the DMA buffer for the frontend's IIO device. The DMA channel
 * name is taken from the "dma-names" property, defaulting to "rx".
 */
static struct iio_buffer *axi_adc_request_buffer(struct iio_backend *back,
						 struct iio_dev *indio_dev)
{
	struct adi_axi_adc_state *st = iio_backend_get_priv(back);
	const char *dma_name;

	if (device_property_read_string(st->dev, "dma-names", &dma_name))
		dma_name = "rx";

	return iio_dmaengine_buffer_setup(st->dev, indio_dev, dma_name);
}

static void axi_adc_free_buffer(struct iio_backend *back,
				struct iio_buffer *buffer)
{
	iio_dmaengine_buffer_free(buffer);
}

static const struct regmap_config axi_adc_regmap_config = {
	.val_bits = 32,
	.reg_bits = 32,
	.reg_stride = 4,
};

static const struct iio_backend_ops adi_axi_adc_generic = {
	.enable = axi_adc_enable,
	.disable = axi_adc_disable,
	.data_format_set = axi_adc_data_format_set,
	.chan_enable = axi_adc_chan_enable,
	.chan_disable = axi_adc_chan_disable,
	.request_buffer = axi_adc_request_buffer,
	.free_buffer = axi_adc_free_buffer,
	.data_sample_trigger = axi_adc_data_sample_trigger,
	.iodelay_set = axi_adc_iodelays_set,
	.test_pattern_set = axi_adc_test_pattern_set,
	.chan_status = axi_adc_chan_status,
};

/*
 * Map the core's registers, hold it in reset, verify the IP major version
 * against the match data, and register as an IIO backend. All resources
 * are devm-managed, hence no remove callback.
 */
static int adi_axi_adc_probe(struct platform_device *pdev)
{
	const unsigned int *expected_ver;
	struct adi_axi_adc_state *st;
	void __iomem *base;
	unsigned int ver;
	struct clk *clk;
	int ret;

	st = devm_kzalloc(&pdev->dev, sizeof(*st), GFP_KERNEL);
	if (!st)
		return -ENOMEM;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	st->dev = &pdev->dev;
	st->regmap = devm_regmap_init_mmio(&pdev->dev, base,
					   &axi_adc_regmap_config);
	if (IS_ERR(st->regmap))
		return dev_err_probe(&pdev->dev, PTR_ERR(st->regmap),
				     "failed to init register map\n");

	expected_ver = device_get_match_data(&pdev->dev);
	if (!expected_ver)
		return -ENODEV;

	/* bus clock; only needs to be enabled, rate is not used here */
	clk = devm_clk_get_enabled(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return dev_err_probe(&pdev->dev, PTR_ERR(clk),
				     "failed to get clock\n");

	/*
	 * Force disable the core. Up to the frontend to enable us. And we can
	 * still read/write registers...
	 */
	ret = regmap_write(st->regmap, ADI_AXI_REG_RSTN, 0);
	if (ret)
		return ret;

	ret = regmap_read(st->regmap, ADI_AXI_REG_VERSION, &ver);
	if (ret)
		return ret;

	/* only the major version must match; minor/patch are informational */
	if (ADI_AXI_PCORE_VER_MAJOR(ver) != ADI_AXI_PCORE_VER_MAJOR(*expected_ver)) {
		dev_err(&pdev->dev,
			"Major version mismatch. Expected %d.%.2d.%c, Reported %d.%.2d.%c\n",
			ADI_AXI_PCORE_VER_MAJOR(*expected_ver),
			ADI_AXI_PCORE_VER_MINOR(*expected_ver),
			ADI_AXI_PCORE_VER_PATCH(*expected_ver),
			ADI_AXI_PCORE_VER_MAJOR(ver),
			ADI_AXI_PCORE_VER_MINOR(ver),
			ADI_AXI_PCORE_VER_PATCH(ver));
		return -ENODEV;
	}

	ret = devm_iio_backend_register(&pdev->dev, &adi_axi_adc_generic, st);
	if (ret)
		return dev_err_probe(&pdev->dev, ret,
				     "failed to register iio backend\n");

	dev_info(&pdev->dev, "AXI ADC IP core (%d.%.2d.%c) probed\n",
		 ADI_AXI_PCORE_VER_MAJOR(ver),
		 ADI_AXI_PCORE_VER_MINOR(ver),
		 ADI_AXI_PCORE_VER_PATCH(ver));

	return 0;
}

static unsigned int adi_axi_adc_10_0_a_info = ADI_AXI_PCORE_VER(10, 0, 'a');

/* Match table for of_platform binding */
static const struct of_device_id adi_axi_adc_of_match[] = {
	{ .compatible = "adi,axi-adc-10.0.a", .data = &adi_axi_adc_10_0_a_info },
	{ /* end of list */ }
};
MODULE_DEVICE_TABLE(of, adi_axi_adc_of_match);

static struct platform_driver adi_axi_adc_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = adi_axi_adc_of_match,
	},
	.probe = adi_axi_adc_probe,
};
module_platform_driver(adi_axi_adc_driver);

MODULE_AUTHOR("Michael Hennerich <michael.hennerich@analog.com>");
MODULE_DESCRIPTION("Analog Devices Generic AXI ADC IP core driver");
MODULE_LICENSE("GPL v2");
MODULE_IMPORT_NS(IIO_DMAENGINE_BUFFER);
MODULE_IMPORT_NS(IIO_BACKEND);