// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for the Diolan DLN-2 USB-ADC adapter
 *
 * Copyright (c) 2017 Jack Andersen
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/mfd/dln2.h>

#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/trigger.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
#include <linux/iio/buffer.h>
#include <linux/iio/kfifo_buf.h>

#define DLN2_ADC_MOD_NAME "dln2-adc"

#define DLN2_ADC_ID 0x06

#define DLN2_ADC_GET_CHANNEL_COUNT DLN2_CMD(0x01, DLN2_ADC_ID)
#define DLN2_ADC_ENABLE DLN2_CMD(0x02, DLN2_ADC_ID)
#define DLN2_ADC_DISABLE DLN2_CMD(0x03, DLN2_ADC_ID)
#define DLN2_ADC_CHANNEL_ENABLE DLN2_CMD(0x05, DLN2_ADC_ID)
#define DLN2_ADC_CHANNEL_DISABLE DLN2_CMD(0x06, DLN2_ADC_ID)
#define DLN2_ADC_SET_RESOLUTION DLN2_CMD(0x08, DLN2_ADC_ID)
#define DLN2_ADC_CHANNEL_GET_VAL DLN2_CMD(0x0A, DLN2_ADC_ID)
#define DLN2_ADC_CHANNEL_GET_ALL_VAL DLN2_CMD(0x0B, DLN2_ADC_ID)
#define DLN2_ADC_CHANNEL_SET_CFG DLN2_CMD(0x0C, DLN2_ADC_ID)
#define DLN2_ADC_CHANNEL_GET_CFG DLN2_CMD(0x0D, DLN2_ADC_ID)
#define DLN2_ADC_CONDITION_MET_EV DLN2_CMD(0x10, DLN2_ADC_ID)

#define DLN2_ADC_EVENT_NONE 0
#define DLN2_ADC_EVENT_BELOW 1
#define DLN2_ADC_EVENT_LEVEL_ABOVE 2
#define DLN2_ADC_EVENT_OUTSIDE 3
#define DLN2_ADC_EVENT_INSIDE 4
#define DLN2_ADC_EVENT_ALWAYS 5

#define DLN2_ADC_MAX_CHANNELS 8
#define DLN2_ADC_DATA_BITS 10

/*
 * Plays similar role to iio_demux_table in subsystem core; except allocated
 * in a fixed 8-element array.
 */
struct dln2_adc_demux_table {
	unsigned int from;
	unsigned int to;
	unsigned int length;
};

struct dln2_adc {
	struct platform_device *pdev;
	struct iio_chan_spec iio_channels[DLN2_ADC_MAX_CHANNELS + 1];
	int port, trigger_chan;
	struct iio_trigger *trig;
	struct mutex mutex;
	/* Cached sample period in milliseconds */
	unsigned int sample_period;
	/* Demux table */
	unsigned int demux_count;
	struct dln2_adc_demux_table demux[DLN2_ADC_MAX_CHANNELS];
};

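/*
 * Common request header carrying the ADC port and channel numbers. The
 * SET_RESOLUTION command reuses the chan field to carry the resolution
 * in bits rather than a channel index.
 */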
struct dln2_adc_port_chan {
	u8 port;
	u8 chan;
};

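/*
 * Response payload of DLN2_ADC_CHANNEL_GET_ALL_VAL: a channel bitmask
 * followed by one little-endian sample slot per possible channel.
 */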
struct dln2_adc_get_all_vals {
	__le16 channel_mask;
	__le16 values[DLN2_ADC_MAX_CHANNELS];
};

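/*
 * Add a copy region to the demux table, merging it into the previous
 * entry when both source and destination are contiguous so the trigger
 * handler can demux with as few memcpy() calls as possible.
 */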
static void dln2_adc_add_demux(struct dln2_adc *dln2,
			       unsigned int in_loc, unsigned int out_loc,
			       unsigned int length)
{
	struct dln2_adc_demux_table *p = dln2->demux_count ?
		&dln2->demux[dln2->demux_count - 1] : NULL;

	if (p && p->from + p->length == in_loc &&
	    p->to + p->length == out_loc) {
		p->length += length;
	} else if (dln2->demux_count < DLN2_ADC_MAX_CHANNELS) {
		p = &dln2->demux[dln2->demux_count++];
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
	}
}

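/*
 * Rebuild the demux table that maps the fixed 8-channel report returned
 * by DLN2_ADC_CHANNEL_GET_ALL_VAL onto the packed layout of the active
 * scan mask. When all 8 channels are enabled this collapses to a single
 * 16-byte copy.
 */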
static void dln2_adc_update_demux(struct dln2_adc *dln2)
{
	int in_ind = -1, out_ind;
	unsigned int in_loc = 0, out_loc = 0;
	struct iio_dev *indio_dev = platform_get_drvdata(dln2->pdev);

	/* Clear out any old demux */
	dln2->demux_count = 0;

	/* Optimize all 8-channels case */
	if (iio_get_masklength(indio_dev) &&
	    (*indio_dev->active_scan_mask & 0xff) == 0xff) {
		dln2_adc_add_demux(dln2, 0, 0, 16);
		return;
	}

	/* Build demux table from fixed 8-channels to active_scan_mask */
	iio_for_each_active_channel(indio_dev, out_ind) {
		/* Handle timestamp separately */
		if (out_ind == DLN2_ADC_MAX_CHANNELS)
			break;
		for (++in_ind; in_ind != out_ind; ++in_ind)
			in_loc += 2;
		dln2_adc_add_demux(dln2, in_loc, out_loc, 2);
		out_loc += 2;
		in_loc += 2;
	}
}

static int dln2_adc_get_chan_count(struct dln2_adc *dln2)
{
	int ret;
	u8 port = dln2->port;
	u8 count;
	int olen = sizeof(count);

	ret = dln2_transfer(dln2->pdev, DLN2_ADC_GET_CHANNEL_COUNT,
			    &port, sizeof(port), &count, &olen);
	if (ret < 0) {
		dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__);
		return ret;
	}
	if (olen < sizeof(count))
		return -EPROTO;

	return count;
}

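/* Fix the port resolution to DLN2_ADC_DATA_BITS (10 bits). */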
static int dln2_adc_set_port_resolution(struct dln2_adc *dln2)
{
	int ret;
	struct dln2_adc_port_chan port_chan = {
		.port = dln2->port,
		.chan = DLN2_ADC_DATA_BITS,
	};

	ret = dln2_transfer_tx(dln2->pdev, DLN2_ADC_SET_RESOLUTION,
			       &port_chan, sizeof(port_chan));
	if (ret < 0)
		dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__);

	return ret;
}

static int dln2_adc_set_chan_enabled(struct dln2_adc *dln2,
				     int channel, bool enable)
{
	int ret;
	struct dln2_adc_port_chan port_chan = {
		.port = dln2->port,
		.chan = channel,
	};
	u16 cmd = enable ? DLN2_ADC_CHANNEL_ENABLE : DLN2_ADC_CHANNEL_DISABLE;

	ret = dln2_transfer_tx(dln2->pdev, cmd, &port_chan, sizeof(port_chan));
	if (ret < 0)
		dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__);

	return ret;
}

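/*
 * Enable or disable the whole ADC port. On a failed enable the device
 * may report a non-zero conflict mask for pins that are already claimed
 * elsewhere; it is passed back through @conflict_out so callers can
 * return -EBUSY with a meaningful message.
 */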
static int dln2_adc_set_port_enabled(struct dln2_adc *dln2, bool enable,
				     u16 *conflict_out)
{
	int ret;
	u8 port = dln2->port;
	__le16 conflict;
	int olen = sizeof(conflict);
	u16 cmd = enable ? DLN2_ADC_ENABLE : DLN2_ADC_DISABLE;

	if (conflict_out)
		*conflict_out = 0;

	ret = dln2_transfer(dln2->pdev, cmd, &port, sizeof(port),
			    &conflict, &olen);
	if (ret < 0) {
		dev_dbg(&dln2->pdev->dev, "Problem in %s(%d)\n",
			__func__, (int)enable);
		if (conflict_out && enable && olen >= sizeof(conflict))
			*conflict_out = le16_to_cpu(conflict);
		return ret;
	}
	if (enable && olen < sizeof(conflict))
		return -EPROTO;

	return ret;
}

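/*
 * Configure periodic sampling on one channel: a non-zero period (in
 * milliseconds) arms DLN2_ADC_EVENT_ALWAYS so the device emits
 * DLN2_ADC_CONDITION_MET_EV events at that rate; a zero period selects
 * DLN2_ADC_EVENT_NONE and stops them. The low/high thresholds are left
 * at zero for these event types.
 */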
static int dln2_adc_set_chan_period(struct dln2_adc *dln2,
	unsigned int channel, unsigned int period)
{
	int ret;
	struct {
		struct dln2_adc_port_chan port_chan;
		__u8 type;
		__le16 period;
		__le16 low;
		__le16 high;
	} __packed set_cfg = {
		.port_chan.port = dln2->port,
		.port_chan.chan = channel,
		.type = period ? DLN2_ADC_EVENT_ALWAYS : DLN2_ADC_EVENT_NONE,
		.period = cpu_to_le16(period)
	};

	ret = dln2_transfer_tx(dln2->pdev, DLN2_ADC_CHANNEL_SET_CFG,
			       &set_cfg, sizeof(set_cfg));
	if (ret < 0)
		dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__);

	return ret;
}

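/*
 * Single-shot read used for IIO_CHAN_INFO_RAW: temporarily enable the
 * channel and the port, fetch the value, then tear both down again.
 */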
static int dln2_adc_read(struct dln2_adc *dln2, unsigned int channel)
{
	int ret, i;
	u16 conflict;
	__le16 value;
	int olen = sizeof(value);
	struct dln2_adc_port_chan port_chan = {
		.port = dln2->port,
		.chan = channel,
	};

	ret = dln2_adc_set_chan_enabled(dln2, channel, true);
	if (ret < 0)
		return ret;

	ret = dln2_adc_set_port_enabled(dln2, true, &conflict);
	if (ret < 0) {
		if (conflict) {
			dev_err(&dln2->pdev->dev,
				"ADC pins conflict with mask %04X\n",
				(int)conflict);
			ret = -EBUSY;
		}
		goto disable_chan;
	}

	/*
	 * Call GET_VAL twice due to initial zero-return immediately after
	 * enabling channel.
	 */
	for (i = 0; i < 2; ++i) {
		ret = dln2_transfer(dln2->pdev, DLN2_ADC_CHANNEL_GET_VAL,
				    &port_chan, sizeof(port_chan),
				    &value, &olen);
		if (ret < 0) {
			dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__);
			goto disable_port;
		}
		if (olen < sizeof(value)) {
			ret = -EPROTO;
			goto disable_port;
		}
	}

	ret = le16_to_cpu(value);

disable_port:
	dln2_adc_set_port_enabled(dln2, false, NULL);
disable_chan:
	dln2_adc_set_chan_enabled(dln2, channel, false);

	return ret;
}

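/*
 * Fetch one sample for every channel of the port in a single transfer;
 * used by the trigger handler to keep the readings synchronized.
 */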
static int dln2_adc_read_all(struct dln2_adc *dln2,
			     struct dln2_adc_get_all_vals *get_all_vals)
{
	int ret;
	__u8 port = dln2->port;
	int olen = sizeof(*get_all_vals);

	ret = dln2_transfer(dln2->pdev, DLN2_ADC_CHANNEL_GET_ALL_VAL,
			    &port, sizeof(port), get_all_vals, &olen);
	if (ret < 0) {
		dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__);
		return ret;
	}
	if (olen < sizeof(*get_all_vals))
		return -EPROTO;

	return ret;
}

static int dln2_adc_read_raw(struct iio_dev *indio_dev,
			     struct iio_chan_spec const *chan,
			     int *val,
			     int *val2,
			     long mask)
{
	int ret;
	unsigned int microhertz;
	struct dln2_adc *dln2 = iio_priv(indio_dev);

	switch (mask) {
	case IIO_CHAN_INFO_RAW:
		if (!iio_device_claim_direct(indio_dev))
			return -EBUSY;

		mutex_lock(&dln2->mutex);
		ret = dln2_adc_read(dln2, chan->channel);
		mutex_unlock(&dln2->mutex);

		iio_device_release_direct(indio_dev);

		if (ret < 0)
			return ret;

		*val = ret;
		return IIO_VAL_INT;

	case IIO_CHAN_INFO_SCALE:
		/*
		 * Voltage reference is fixed at 3.3v
		 * 3.3 / (1 << 10) * 1000000000
		 */
		*val = 0;
		*val2 = 3222656;
		return IIO_VAL_INT_PLUS_NANO;

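	/*
	 * sample_period is in milliseconds, so the frequency in
	 * microhertz is 10^9 / sample_period; split it into the integer
	 * and fractional parts expected by IIO_VAL_INT_PLUS_MICRO.
	 * For example, a 250 ms period reads back as 4.000000 Hz.
	 */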
	case IIO_CHAN_INFO_SAMP_FREQ:
		if (dln2->sample_period) {
			microhertz = 1000000000 / dln2->sample_period;
			*val = microhertz / 1000000;
			*val2 = microhertz % 1000000;
		} else {
			*val = 0;
			*val2 = 0;
		}

		return IIO_VAL_INT_PLUS_MICRO;

	default:
		return -EINVAL;
	}
}

static int dln2_adc_write_raw(struct iio_dev *indio_dev,
			      struct iio_chan_spec const *chan,
			      int val,
			      int val2,
			      long mask)
{
	int ret;
	unsigned int microhertz;
	struct dln2_adc *dln2 = iio_priv(indio_dev);

	switch (mask) {
	case IIO_CHAN_INFO_SAMP_FREQ:
		microhertz = 1000000 * val + val2;

		mutex_lock(&dln2->mutex);

		dln2->sample_period =
			microhertz ? 1000000000 / microhertz : UINT_MAX;
		if (dln2->sample_period > 65535) {
			dln2->sample_period = 65535;
			dev_warn(&dln2->pdev->dev,
				 "clamping period to 65535ms\n");
		}

		/*
		 * The first requested channel is arbitrated as a shared
		 * trigger source, so only one event is registered with the
		 * DLN. The event handler will then read all enabled channel
		 * values using DLN2_ADC_CHANNEL_GET_ALL_VAL to maintain
		 * synchronization between ADC readings.
		 */
		if (dln2->trigger_chan != -1)
			ret = dln2_adc_set_chan_period(dln2,
				dln2->trigger_chan, dln2->sample_period);
		else
			ret = 0;

		mutex_unlock(&dln2->mutex);

		return ret;

	default:
		return -EINVAL;
	}
}

static int dln2_update_scan_mode(struct iio_dev *indio_dev,
				 const unsigned long *scan_mask)
{
	struct dln2_adc *dln2 = iio_priv(indio_dev);
	int chan_count = indio_dev->num_channels - 1;
	int ret, i, j;

	mutex_lock(&dln2->mutex);

	for (i = 0; i < chan_count; ++i) {
		ret = dln2_adc_set_chan_enabled(dln2, i,
						test_bit(i, scan_mask));
		if (ret < 0) {
			for (j = 0; j < i; ++j)
				dln2_adc_set_chan_enabled(dln2, j, false);
			mutex_unlock(&dln2->mutex);
			dev_err(&dln2->pdev->dev,
				"Unable to enable ADC channel %d\n", i);
			return -EBUSY;
		}
	}

	dln2_adc_update_demux(dln2);

	mutex_unlock(&dln2->mutex);

	return 0;
}

#define DLN2_ADC_CHAN(lval, idx) { \
	lval.type = IIO_VOLTAGE; \
	lval.channel = idx; \
	lval.indexed = 1; \
	lval.info_mask_separate = BIT(IIO_CHAN_INFO_RAW); \
	lval.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SCALE) | \
				       BIT(IIO_CHAN_INFO_SAMP_FREQ); \
	lval.scan_index = idx; \
	lval.scan_type.sign = 'u'; \
	lval.scan_type.realbits = DLN2_ADC_DATA_BITS; \
	lval.scan_type.storagebits = 16; \
	lval.scan_type.endianness = IIO_LE; \
}

/* Assignment version of IIO_CHAN_SOFT_TIMESTAMP */
#define IIO_CHAN_SOFT_TIMESTAMP_ASSIGN(lval, _si) { \
	lval.type = IIO_TIMESTAMP; \
	lval.channel = -1; \
	lval.scan_index = _si; \
	lval.scan_type.sign = 's'; \
	lval.scan_type.realbits = 64; \
	lval.scan_type.storagebits = 64; \
}

static const struct iio_info dln2_adc_info = {
	.read_raw = dln2_adc_read_raw,
	.write_raw = dln2_adc_write_raw,
	.update_scan_mode = dln2_update_scan_mode,
};

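/*
 * Triggered-buffer handler: read all channel values in one transfer,
 * demux them into the active scan layout and push the scan, with a
 * timestamp, to the IIO buffers.
 */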
static irqreturn_t dln2_adc_trigger_h(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	struct {
		__le16 values[DLN2_ADC_MAX_CHANNELS];
		aligned_s64 timestamp_space;
	} data = { };
	struct dln2_adc_get_all_vals dev_data;
	struct dln2_adc *dln2 = iio_priv(indio_dev);
	const struct dln2_adc_demux_table *t;
	int ret, i;

	mutex_lock(&dln2->mutex);
	ret = dln2_adc_read_all(dln2, &dev_data);
	mutex_unlock(&dln2->mutex);
	if (ret < 0)
		goto done;

	/* Demux operation */
	for (i = 0; i < dln2->demux_count; ++i) {
		t = &dln2->demux[i];
		memcpy((void *)data.values + t->to,
		       (void *)dev_data.values + t->from, t->length);
	}

	iio_push_to_buffers_with_ts(indio_dev, &data, sizeof(data),
				    iio_get_time_ns(indio_dev));

done:
	iio_trigger_notify_done(indio_dev->trig);
	return IRQ_HANDLED;
}

static int dln2_adc_triggered_buffer_postenable(struct iio_dev *indio_dev)
{
	int ret;
	struct dln2_adc *dln2 = iio_priv(indio_dev);
	u16 conflict;
	unsigned int trigger_chan;

	mutex_lock(&dln2->mutex);

	/* Enable ADC */
	ret = dln2_adc_set_port_enabled(dln2, true, &conflict);
	if (ret < 0) {
		mutex_unlock(&dln2->mutex);
		dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__);
		if (conflict) {
			dev_err(&dln2->pdev->dev,
				"ADC pins conflict with mask %04X\n",
				(int)conflict);
			ret = -EBUSY;
		}
		return ret;
	}

	/* Assign trigger channel based on first enabled channel */
	trigger_chan = find_first_bit(indio_dev->active_scan_mask,
				      iio_get_masklength(indio_dev));
	if (trigger_chan < DLN2_ADC_MAX_CHANNELS) {
		dln2->trigger_chan = trigger_chan;
		ret = dln2_adc_set_chan_period(dln2, dln2->trigger_chan,
					       dln2->sample_period);
		mutex_unlock(&dln2->mutex);
		if (ret < 0) {
			dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__);
			return ret;
		}
	} else {
		dln2->trigger_chan = -1;
		mutex_unlock(&dln2->mutex);
	}

	return 0;
}

static int dln2_adc_triggered_buffer_predisable(struct iio_dev *indio_dev)
{
	int ret;
	struct dln2_adc *dln2 = iio_priv(indio_dev);

	mutex_lock(&dln2->mutex);

	/* Disable trigger channel */
	if (dln2->trigger_chan != -1) {
		dln2_adc_set_chan_period(dln2, dln2->trigger_chan, 0);
		dln2->trigger_chan = -1;
	}

	/* Disable ADC */
	ret = dln2_adc_set_port_enabled(dln2, false, NULL);

	mutex_unlock(&dln2->mutex);
	if (ret < 0)
		dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__);

	return ret;
}

static const struct iio_buffer_setup_ops dln2_adc_buffer_setup_ops = {
	.postenable = dln2_adc_triggered_buffer_postenable,
	.predisable = dln2_adc_triggered_buffer_predisable,
};

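/*
 * DLN2_ADC_CONDITION_MET_EV callback, invoked by the dln2 MFD core for
 * each periodic sample event. It only kicks the IIO trigger; the actual
 * reads happen later in the pollfunc thread, since this callback runs
 * from the URB completion path.
 */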
static void dln2_adc_event(struct platform_device *pdev, u16 echo,
			   const void *data, int len)
{
	struct iio_dev *indio_dev = platform_get_drvdata(pdev);
	struct dln2_adc *dln2 = iio_priv(indio_dev);

	/* Called via URB completion handler */
	iio_trigger_poll(dln2->trig);
}

static int dln2_adc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dln2_adc *dln2;
	struct dln2_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct iio_dev *indio_dev;
	int i, ret, chans;

	indio_dev = devm_iio_device_alloc(dev, sizeof(*dln2));
	if (!indio_dev) {
		dev_err(dev, "failed allocating iio device\n");
		return -ENOMEM;
	}

	dln2 = iio_priv(indio_dev);
	dln2->pdev = pdev;
	dln2->port = pdata->port;
	dln2->trigger_chan = -1;
	mutex_init(&dln2->mutex);

	platform_set_drvdata(pdev, indio_dev);

	ret = dln2_adc_set_port_resolution(dln2);
	if (ret < 0) {
		dev_err(dev, "failed to set ADC resolution to 10 bits\n");
		return ret;
	}

	chans = dln2_adc_get_chan_count(dln2);
	if (chans < 0) {
		dev_err(dev, "failed to get channel count: %d\n", chans);
		return chans;
	}
	if (chans > DLN2_ADC_MAX_CHANNELS) {
		chans = DLN2_ADC_MAX_CHANNELS;
		dev_warn(dev, "clamping channels to %d\n",
			 DLN2_ADC_MAX_CHANNELS);
	}

	for (i = 0; i < chans; ++i)
		DLN2_ADC_CHAN(dln2->iio_channels[i], i)
	IIO_CHAN_SOFT_TIMESTAMP_ASSIGN(dln2->iio_channels[i], i);

	indio_dev->name = DLN2_ADC_MOD_NAME;
	indio_dev->info = &dln2_adc_info;
	indio_dev->modes = INDIO_DIRECT_MODE;
	indio_dev->channels = dln2->iio_channels;
	indio_dev->num_channels = chans + 1;
	indio_dev->setup_ops = &dln2_adc_buffer_setup_ops;

	dln2->trig = devm_iio_trigger_alloc(dev, "%s-dev%d",
					    indio_dev->name,
					    iio_device_id(indio_dev));
	if (!dln2->trig) {
		dev_err(dev, "failed to allocate trigger\n");
		return -ENOMEM;
	}
	iio_trigger_set_drvdata(dln2->trig, dln2);
	ret = devm_iio_trigger_register(dev, dln2->trig);
	if (ret) {
		dev_err(dev, "failed to register trigger: %d\n", ret);
		return ret;
	}
	iio_trigger_set_immutable(indio_dev, dln2->trig);

	ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL,
					      dln2_adc_trigger_h,
					      &dln2_adc_buffer_setup_ops);
	if (ret) {
		dev_err(dev, "failed to allocate triggered buffer: %d\n", ret);
		return ret;
	}

	ret = dln2_register_event_cb(pdev, DLN2_ADC_CONDITION_MET_EV,
				     dln2_adc_event);
	if (ret) {
		dev_err(dev, "failed to setup DLN2 periodic event: %d\n", ret);
		return ret;
	}

	ret = iio_device_register(indio_dev);
	if (ret) {
		dev_err(dev, "failed to register iio device: %d\n", ret);
		goto unregister_event;
	}

	return ret;

unregister_event:
	dln2_unregister_event_cb(pdev, DLN2_ADC_CONDITION_MET_EV);

	return ret;
}

static void dln2_adc_remove(struct platform_device *pdev)
{
	struct iio_dev *indio_dev = platform_get_drvdata(pdev);

	iio_device_unregister(indio_dev);
	dln2_unregister_event_cb(pdev, DLN2_ADC_CONDITION_MET_EV);
}

static struct platform_driver dln2_adc_driver = {
	.driver.name = DLN2_ADC_MOD_NAME,
	.probe = dln2_adc_probe,
	.remove = dln2_adc_remove,
};

module_platform_driver(dln2_adc_driver);

MODULE_AUTHOR("Jack Andersen <jackoalan@gmail.com>");
MODULE_DESCRIPTION("Driver for the Diolan DLN2 ADC interface");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:dln2-adc");