// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for the Diolan DLN-2 USB-ADC adapter
 *
 * Copyright (c) 2017 Jack Andersen
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/platform_device.h>
#include <linux/mfd/dln2.h>

#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/trigger.h>
#include <linux/iio/trigger_consumer.h>
#include <linux/iio/triggered_buffer.h>
#include <linux/iio/buffer.h>
#include <linux/iio/kfifo_buf.h>

#define DLN2_ADC_MOD_NAME	"dln2-adc"

#define DLN2_ADC_ID	0x06

#define DLN2_ADC_GET_CHANNEL_COUNT	DLN2_CMD(0x01, DLN2_ADC_ID)
#define DLN2_ADC_ENABLE			DLN2_CMD(0x02, DLN2_ADC_ID)
#define DLN2_ADC_DISABLE		DLN2_CMD(0x03, DLN2_ADC_ID)
#define DLN2_ADC_CHANNEL_ENABLE		DLN2_CMD(0x05, DLN2_ADC_ID)
#define DLN2_ADC_CHANNEL_DISABLE	DLN2_CMD(0x06, DLN2_ADC_ID)
#define DLN2_ADC_SET_RESOLUTION		DLN2_CMD(0x08, DLN2_ADC_ID)
#define DLN2_ADC_CHANNEL_GET_VAL	DLN2_CMD(0x0A, DLN2_ADC_ID)
#define DLN2_ADC_CHANNEL_GET_ALL_VAL	DLN2_CMD(0x0B, DLN2_ADC_ID)
#define DLN2_ADC_CHANNEL_SET_CFG	DLN2_CMD(0x0C, DLN2_ADC_ID)
#define DLN2_ADC_CHANNEL_GET_CFG	DLN2_CMD(0x0D, DLN2_ADC_ID)
#define DLN2_ADC_CONDITION_MET_EV	DLN2_CMD(0x10, DLN2_ADC_ID)

#define DLN2_ADC_EVENT_NONE		0
#define DLN2_ADC_EVENT_BELOW		1
#define DLN2_ADC_EVENT_LEVEL_ABOVE	2
#define DLN2_ADC_EVENT_OUTSIDE		3
#define DLN2_ADC_EVENT_INSIDE		4
#define DLN2_ADC_EVENT_ALWAYS		5

#define DLN2_ADC_MAX_CHANNELS	8
#define DLN2_ADC_DATA_BITS	10

/*
 * Plays similar role to iio_demux_table in subsystem core; except allocated
 * in a fixed 8-element array.
 */
struct dln2_adc_demux_table {
	unsigned int from;
	unsigned int to;
	unsigned int length;
};

struct dln2_adc {
	struct platform_device *pdev;
	struct iio_chan_spec iio_channels[DLN2_ADC_MAX_CHANNELS + 1];
	int port, trigger_chan;
	struct iio_trigger *trig;
	struct mutex mutex;
	/* Cached sample period in milliseconds */
	unsigned int sample_period;
	/* Demux table */
	unsigned int demux_count;
	struct dln2_adc_demux_table demux[DLN2_ADC_MAX_CHANNELS];
};

struct dln2_adc_port_chan {
	u8 port;
	u8 chan;
};
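
/*
 * Layout of the DLN2_ADC_CHANNEL_GET_ALL_VAL response. channel_mask
 * presumably flags which of the fixed 8 value slots the device actually
 * sampled; the driver does not inspect it and instead demuxes values[]
 * according to the current active_scan_mask.
 */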
struct dln2_adc_get_all_vals {
	__le16 channel_mask;
	__le16 values[DLN2_ADC_MAX_CHANNELS];
};

static void dln2_adc_add_demux(struct dln2_adc *dln2,
	unsigned int in_loc, unsigned int out_loc,
	unsigned int length)
{
	struct dln2_adc_demux_table *p = dln2->demux_count ?
		&dln2->demux[dln2->demux_count - 1] : NULL;

	if (p && p->from + p->length == in_loc &&
	    p->to + p->length == out_loc) {
		p->length += length;
	} else if (dln2->demux_count < DLN2_ADC_MAX_CHANNELS) {
		p = &dln2->demux[dln2->demux_count++];
		p->from = in_loc;
		p->to = out_loc;
		p->length = length;
	}
}

static void dln2_adc_update_demux(struct dln2_adc *dln2)
{
	int in_ind = -1, out_ind;
	unsigned int in_loc = 0, out_loc = 0;
	struct iio_dev *indio_dev = platform_get_drvdata(dln2->pdev);

	/* Clear out any old demux */
	dln2->demux_count = 0;

	/* Optimize all 8-channels case */
	if (iio_get_masklength(indio_dev) &&
	    (*indio_dev->active_scan_mask & 0xff) == 0xff) {
		dln2_adc_add_demux(dln2, 0, 0, 16);
		return;
	}

	/* Build demux table from fixed 8-channels to active_scan_mask */
	iio_for_each_active_channel(indio_dev, out_ind) {
		/* Handle timestamp separately */
		if (out_ind == DLN2_ADC_MAX_CHANNELS)
			break;
		for (++in_ind; in_ind != out_ind; ++in_ind)
			in_loc += 2;
		dln2_adc_add_demux(dln2, in_loc, out_loc, 2);
		out_loc += 2;
		in_loc += 2;
	}
}
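
/*
 * Example: with active_scan_mask 0b00001101 (channels 0, 2 and 3),
 * dln2_adc_update_demux() produces two entries,
 * { .from = 0, .to = 0, .length = 2 } and { .from = 4, .to = 2, .length = 4 },
 * because the channel 2 and channel 3 samples are contiguous in both the
 * source and destination buffers and get coalesced by dln2_adc_add_demux().
 */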

static int dln2_adc_get_chan_count(struct dln2_adc *dln2)
{
	int ret;
	u8 port = dln2->port;
	u8 count;
	int olen = sizeof(count);

	ret = dln2_transfer(dln2->pdev, DLN2_ADC_GET_CHANNEL_COUNT,
			&port, sizeof(port), &count, &olen);
	if (ret < 0) {
		dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__);
		return ret;
	}
	if (olen < sizeof(count))
		return -EPROTO;

	return count;
}
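
/*
 * DLN2_ADC_SET_RESOLUTION reuses struct dln2_adc_port_chan as its request
 * payload; here the second byte carries the resolution in bits rather than
 * a channel number.
 */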
static int dln2_adc_set_port_resolution(struct dln2_adc *dln2)
{
	int ret;
	struct dln2_adc_port_chan port_chan = {
		.port = dln2->port,
		.chan = DLN2_ADC_DATA_BITS,
	};

	ret = dln2_transfer_tx(dln2->pdev, DLN2_ADC_SET_RESOLUTION,
			&port_chan, sizeof(port_chan));
	if (ret < 0)
		dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__);

	return ret;
}

static int dln2_adc_set_chan_enabled(struct dln2_adc *dln2,
				     int channel, bool enable)
{
	int ret;
	struct dln2_adc_port_chan port_chan = {
		.port = dln2->port,
		.chan = channel,
	};
	u16 cmd = enable ? DLN2_ADC_CHANNEL_ENABLE : DLN2_ADC_CHANNEL_DISABLE;

	ret = dln2_transfer_tx(dln2->pdev, cmd, &port_chan, sizeof(port_chan));
	if (ret < 0)
		dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__);

	return ret;
}
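
/*
 * Enable or disable the whole ADC port. On a failed enable, the device may
 * still return a bitmask of conflicting pins; it is passed back through
 * conflict_out so callers can report a more specific error.
 */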
static int dln2_adc_set_port_enabled(struct dln2_adc *dln2, bool enable,
				     u16 *conflict_out)
{
	int ret;
	u8 port = dln2->port;
	__le16 conflict;
	int olen = sizeof(conflict);
	u16 cmd = enable ? DLN2_ADC_ENABLE : DLN2_ADC_DISABLE;

	if (conflict_out)
		*conflict_out = 0;

	ret = dln2_transfer(dln2->pdev, cmd, &port, sizeof(port),
			&conflict, &olen);
	if (ret < 0) {
		dev_dbg(&dln2->pdev->dev, "Problem in %s(%d)\n",
			__func__, (int)enable);
		if (conflict_out && enable && olen >= sizeof(conflict))
			*conflict_out = le16_to_cpu(conflict);
		return ret;
	}
	if (enable && olen < sizeof(conflict))
		return -EPROTO;

	return ret;
}
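
/*
 * Configure periodic sampling events on one channel. A non-zero period arms
 * DLN2_ADC_EVENT_ALWAYS, so the device raises DLN2_ADC_CONDITION_MET_EV every
 * period milliseconds; period == 0 selects DLN2_ADC_EVENT_NONE and stops the
 * events.
 */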
static int dln2_adc_set_chan_period(struct dln2_adc *dln2,
	unsigned int channel, unsigned int period)
{
	int ret;
	struct {
		struct dln2_adc_port_chan port_chan;
		__u8 type;
		__le16 period;
		__le16 low;
		__le16 high;
	} __packed set_cfg = {
		.port_chan.port = dln2->port,
		.port_chan.chan = channel,
		.type = period ? DLN2_ADC_EVENT_ALWAYS : DLN2_ADC_EVENT_NONE,
		.period = cpu_to_le16(period)
	};

	ret = dln2_transfer_tx(dln2->pdev, DLN2_ADC_CHANNEL_SET_CFG,
			&set_cfg, sizeof(set_cfg));
	if (ret < 0)
		dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__);

	return ret;
}

static int dln2_adc_read(struct dln2_adc *dln2, unsigned int channel)
{
	int ret, i;
	u16 conflict;
	__le16 value;
	int olen = sizeof(value);
	struct dln2_adc_port_chan port_chan = {
		.port = dln2->port,
		.chan = channel,
	};

	ret = dln2_adc_set_chan_enabled(dln2, channel, true);
	if (ret < 0)
		return ret;

	ret = dln2_adc_set_port_enabled(dln2, true, &conflict);
	if (ret < 0) {
		if (conflict) {
			dev_err(&dln2->pdev->dev,
				"ADC pins conflict with mask %04X\n",
				(int)conflict);
			ret = -EBUSY;
		}
		goto disable_chan;
	}

	/*
	 * Call GET_VAL twice due to initial zero-return immediately after
	 * enabling channel.
	 */
	for (i = 0; i < 2; ++i) {
		ret = dln2_transfer(dln2->pdev, DLN2_ADC_CHANNEL_GET_VAL,
				&port_chan, sizeof(port_chan),
				&value, &olen);
		if (ret < 0) {
			dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__);
			goto disable_port;
		}
		if (olen < sizeof(value)) {
			ret = -EPROTO;
			goto disable_port;
		}
	}

	ret = le16_to_cpu(value);

disable_port:
	dln2_adc_set_port_enabled(dln2, false, NULL);
disable_chan:
	dln2_adc_set_chan_enabled(dln2, channel, false);

	return ret;
}

static int dln2_adc_read_all(struct dln2_adc *dln2,
			     struct dln2_adc_get_all_vals *get_all_vals)
{
	int ret;
	__u8 port = dln2->port;
	int olen = sizeof(*get_all_vals);

	ret = dln2_transfer(dln2->pdev, DLN2_ADC_CHANNEL_GET_ALL_VAL,
			&port, sizeof(port), get_all_vals, &olen);
	if (ret < 0) {
		dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__);
		return ret;
	}
	if (olen < sizeof(*get_all_vals))
		return -EPROTO;

	return ret;
}

static int dln2_adc_read_raw(struct iio_dev *indio_dev,
			     struct iio_chan_spec const *chan,
			     int *val,
			     int *val2,
			     long mask)
{
	int ret;
	unsigned int microhertz;
	struct dln2_adc *dln2 = iio_priv(indio_dev);

	switch (mask) {
	case IIO_CHAN_INFO_RAW:
		if (!iio_device_claim_direct(indio_dev))
			return -EBUSY;

		mutex_lock(&dln2->mutex);
		ret = dln2_adc_read(dln2, chan->channel);
		mutex_unlock(&dln2->mutex);

		iio_device_release_direct(indio_dev);

		if (ret < 0)
			return ret;

		*val = ret;
		return IIO_VAL_INT;

	case IIO_CHAN_INFO_SCALE:
		/*
		 * Voltage reference is fixed at 3.3v
		 * 3.3 / (1 << 10) * 1000000000
		 */
		*val = 0;
		*val2 = 3222656;
		return IIO_VAL_INT_PLUS_NANO;

	case IIO_CHAN_INFO_SAMP_FREQ:
		if (dln2->sample_period) {
			microhertz = 1000000000 / dln2->sample_period;
			*val = microhertz / 1000000;
			*val2 = microhertz % 1000000;
		} else {
			*val = 0;
			*val2 = 0;
		}

		return IIO_VAL_INT_PLUS_MICRO;

	default:
		return -EINVAL;
	}
}
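
/*
 * sampling_frequency writes are converted to a period in milliseconds:
 * sample_period = 10^9 / (requested frequency in microhertz). For example,
 * requesting 2.5 Hz (val = 2, val2 = 500000) yields a 400 ms period.
 */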
static int dln2_adc_write_raw(struct iio_dev *indio_dev,
			      struct iio_chan_spec const *chan,
			      int val,
			      int val2,
			      long mask)
{
	int ret;
	unsigned int microhertz;
	struct dln2_adc *dln2 = iio_priv(indio_dev);

	switch (mask) {
	case IIO_CHAN_INFO_SAMP_FREQ:
		microhertz = 1000000 * val + val2;

		mutex_lock(&dln2->mutex);

		dln2->sample_period =
			microhertz ? 1000000000 / microhertz : UINT_MAX;
		if (dln2->sample_period > 65535) {
			dln2->sample_period = 65535;
			dev_warn(&dln2->pdev->dev,
				 "clamping period to 65535ms\n");
		}

		/*
		 * The first requested channel is arbitrated as a shared
		 * trigger source, so only one event is registered with the
		 * DLN. The event handler will then read all enabled channel
		 * values using DLN2_ADC_CHANNEL_GET_ALL_VAL to maintain
		 * synchronization between ADC readings.
		 */
		if (dln2->trigger_chan != -1)
			ret = dln2_adc_set_chan_period(dln2,
				dln2->trigger_chan, dln2->sample_period);
		else
			ret = 0;

		mutex_unlock(&dln2->mutex);

		return ret;

	default:
		return -EINVAL;
	}
}

static int dln2_update_scan_mode(struct iio_dev *indio_dev,
				 const unsigned long *scan_mask)
{
	struct dln2_adc *dln2 = iio_priv(indio_dev);
	int chan_count = indio_dev->num_channels - 1;
	int ret, i, j;

	mutex_lock(&dln2->mutex);

	for (i = 0; i < chan_count; ++i) {
		ret = dln2_adc_set_chan_enabled(dln2, i,
						test_bit(i, scan_mask));
		if (ret < 0) {
			for (j = 0; j < i; ++j)
				dln2_adc_set_chan_enabled(dln2, j, false);
			mutex_unlock(&dln2->mutex);
			dev_err(&dln2->pdev->dev,
				"Unable to enable ADC channel %d\n", i);
			return -EBUSY;
		}
	}

	dln2_adc_update_demux(dln2);

	mutex_unlock(&dln2->mutex);

	return 0;
}

#define DLN2_ADC_CHAN(lval, idx) {					\
	lval.type = IIO_VOLTAGE;					\
	lval.channel = idx;						\
	lval.indexed = 1;						\
	lval.info_mask_separate = BIT(IIO_CHAN_INFO_RAW);		\
	lval.info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SCALE) |	\
				       BIT(IIO_CHAN_INFO_SAMP_FREQ);	\
	lval.scan_index = idx;						\
	lval.scan_type.sign = 'u';					\
	lval.scan_type.realbits = DLN2_ADC_DATA_BITS;			\
	lval.scan_type.storagebits = 16;				\
	lval.scan_type.endianness = IIO_LE;				\
}

/* Assignment version of IIO_CHAN_SOFT_TIMESTAMP */
#define IIO_CHAN_SOFT_TIMESTAMP_ASSIGN(lval, _si) {			\
	lval.type = IIO_TIMESTAMP;					\
	lval.channel = -1;						\
	lval.scan_index = _si;						\
	lval.scan_type.sign = 's';					\
	lval.scan_type.realbits = 64;					\
	lval.scan_type.storagebits = 64;				\
}

static const struct iio_info dln2_adc_info = {
	.read_raw = dln2_adc_read_raw,
	.write_raw = dln2_adc_write_raw,
	.update_scan_mode = dln2_update_scan_mode,
};
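
/*
 * With the channel layout above, each input is expected to surface through
 * the usual IIO sysfs names (e.g. in_voltage0_raw and a shared
 * sampling_frequency attribute); the exact attribute set is generated by the
 * IIO core rather than by this driver.
 */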

static irqreturn_t dln2_adc_trigger_h(int irq, void *p)
{
	struct iio_poll_func *pf = p;
	struct iio_dev *indio_dev = pf->indio_dev;
	struct {
		__le16 values[DLN2_ADC_MAX_CHANNELS];
		aligned_s64 timestamp_space;
	} data;
	struct dln2_adc_get_all_vals dev_data;
	struct dln2_adc *dln2 = iio_priv(indio_dev);
	const struct dln2_adc_demux_table *t;
	int ret, i;

	mutex_lock(&dln2->mutex);
	ret = dln2_adc_read_all(dln2, &dev_data);
	mutex_unlock(&dln2->mutex);
	if (ret < 0)
		goto done;

	memset(&data, 0, sizeof(data));

	/* Demux operation */
	for (i = 0; i < dln2->demux_count; ++i) {
		t = &dln2->demux[i];
		memcpy((void *)data.values + t->to,
		       (void *)dev_data.values + t->from, t->length);
	}

	iio_push_to_buffers_with_ts(indio_dev, &data, sizeof(data),
				    iio_get_time_ns(indio_dev));

done:
	iio_trigger_notify_done(indio_dev->trig);
	return IRQ_HANDLED;
}

static int dln2_adc_triggered_buffer_postenable(struct iio_dev *indio_dev)
{
	int ret;
	struct dln2_adc *dln2 = iio_priv(indio_dev);
	u16 conflict;
	unsigned int trigger_chan;

	mutex_lock(&dln2->mutex);

	/* Enable ADC */
	ret = dln2_adc_set_port_enabled(dln2, true, &conflict);
	if (ret < 0) {
		mutex_unlock(&dln2->mutex);
		dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__);
		if (conflict) {
			dev_err(&dln2->pdev->dev,
				"ADC pins conflict with mask %04X\n",
				(int)conflict);
			ret = -EBUSY;
		}
		return ret;
	}

	/* Assign trigger channel based on first enabled channel */
	trigger_chan = find_first_bit(indio_dev->active_scan_mask,
				      iio_get_masklength(indio_dev));
	if (trigger_chan < DLN2_ADC_MAX_CHANNELS) {
		dln2->trigger_chan = trigger_chan;
		ret = dln2_adc_set_chan_period(dln2, dln2->trigger_chan,
					       dln2->sample_period);
		mutex_unlock(&dln2->mutex);
		if (ret < 0) {
			dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__);
			return ret;
		}
	} else {
		dln2->trigger_chan = -1;
		mutex_unlock(&dln2->mutex);
	}

	return 0;
}

static int dln2_adc_triggered_buffer_predisable(struct iio_dev *indio_dev)
{
	int ret;
	struct dln2_adc *dln2 = iio_priv(indio_dev);

	mutex_lock(&dln2->mutex);

	/* Disable trigger channel */
	if (dln2->trigger_chan != -1) {
		dln2_adc_set_chan_period(dln2, dln2->trigger_chan, 0);
		dln2->trigger_chan = -1;
	}

	/* Disable ADC */
	ret = dln2_adc_set_port_enabled(dln2, false, NULL);

	mutex_unlock(&dln2->mutex);
	if (ret < 0)
		dev_dbg(&dln2->pdev->dev, "Problem in %s\n", __func__);

	return ret;
}

static const struct iio_buffer_setup_ops dln2_adc_buffer_setup_ops = {
	.postenable = dln2_adc_triggered_buffer_postenable,
	.predisable = dln2_adc_triggered_buffer_predisable,
};
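
/*
 * Periodic DLN2_ADC_CONDITION_MET_EV notifications from the device land
 * here; the callback only kicks the IIO trigger, and the actual readout
 * happens in dln2_adc_trigger_h() from the pollfunc thread.
 */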
static void dln2_adc_event(struct platform_device *pdev, u16 echo,
			   const void *data, int len)
{
	struct iio_dev *indio_dev = platform_get_drvdata(pdev);
	struct dln2_adc *dln2 = iio_priv(indio_dev);

	/* Called via URB completion handler */
	iio_trigger_poll(dln2->trig);
}

static int dln2_adc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct dln2_adc *dln2;
	struct dln2_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct iio_dev *indio_dev;
	int i, ret, chans;

	indio_dev = devm_iio_device_alloc(dev, sizeof(*dln2));
	if (!indio_dev) {
		dev_err(dev, "failed allocating iio device\n");
		return -ENOMEM;
	}

	dln2 = iio_priv(indio_dev);
	dln2->pdev = pdev;
	dln2->port = pdata->port;
	dln2->trigger_chan = -1;
	mutex_init(&dln2->mutex);

	platform_set_drvdata(pdev, indio_dev);

	ret = dln2_adc_set_port_resolution(dln2);
	if (ret < 0) {
		dev_err(dev, "failed to set ADC resolution to 10 bits\n");
		return ret;
	}

	chans = dln2_adc_get_chan_count(dln2);
	if (chans < 0) {
		dev_err(dev, "failed to get channel count: %d\n", chans);
		return chans;
	}
	if (chans > DLN2_ADC_MAX_CHANNELS) {
		chans = DLN2_ADC_MAX_CHANNELS;
		dev_warn(dev, "clamping channels to %d\n",
			 DLN2_ADC_MAX_CHANNELS);
	}
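
	/*
	 * After the loop below, i == chans, so the soft timestamp channel is
	 * assigned to the final slot of iio_channels[] with a matching
	 * scan_index.
	 */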
	for (i = 0; i < chans; ++i)
		DLN2_ADC_CHAN(dln2->iio_channels[i], i)
	IIO_CHAN_SOFT_TIMESTAMP_ASSIGN(dln2->iio_channels[i], i);

	indio_dev->name = DLN2_ADC_MOD_NAME;
	indio_dev->info = &dln2_adc_info;
	indio_dev->modes = INDIO_DIRECT_MODE;
	indio_dev->channels = dln2->iio_channels;
	indio_dev->num_channels = chans + 1;
	indio_dev->setup_ops = &dln2_adc_buffer_setup_ops;

	dln2->trig = devm_iio_trigger_alloc(dev, "%s-dev%d",
					    indio_dev->name,
					    iio_device_id(indio_dev));
	if (!dln2->trig) {
		dev_err(dev, "failed to allocate trigger\n");
		return -ENOMEM;
	}
	iio_trigger_set_drvdata(dln2->trig, dln2);
	ret = devm_iio_trigger_register(dev, dln2->trig);
	if (ret) {
		dev_err(dev, "failed to register trigger: %d\n", ret);
		return ret;
	}
	iio_trigger_set_immutable(indio_dev, dln2->trig);

	ret = devm_iio_triggered_buffer_setup(dev, indio_dev, NULL,
					      dln2_adc_trigger_h,
					      &dln2_adc_buffer_setup_ops);
	if (ret) {
		dev_err(dev, "failed to allocate triggered buffer: %d\n", ret);
		return ret;
	}

	ret = dln2_register_event_cb(pdev, DLN2_ADC_CONDITION_MET_EV,
				     dln2_adc_event);
	if (ret) {
		dev_err(dev, "failed to setup DLN2 periodic event: %d\n", ret);
		return ret;
	}

	ret = iio_device_register(indio_dev);
	if (ret) {
		dev_err(dev, "failed to register iio device: %d\n", ret);
		goto unregister_event;
	}

	return ret;

unregister_event:
	dln2_unregister_event_cb(pdev, DLN2_ADC_CONDITION_MET_EV);

	return ret;
}

static void dln2_adc_remove(struct platform_device *pdev)
{
	struct iio_dev *indio_dev = platform_get_drvdata(pdev);

	iio_device_unregister(indio_dev);
	dln2_unregister_event_cb(pdev, DLN2_ADC_CONDITION_MET_EV);
}

static struct platform_driver dln2_adc_driver = {
	.driver.name = DLN2_ADC_MOD_NAME,
	.probe = dln2_adc_probe,
	.remove = dln2_adc_remove,
};

module_platform_driver(dln2_adc_driver);

MODULE_AUTHOR("Jack Andersen <jackoalan@gmail.com>");
MODULE_DESCRIPTION("Driver for the Diolan DLN2 ADC interface");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:dln2-adc");