1 // SPDX-License-Identifier: GPL-2.0-only
2 /* The industrial I/O core in kernel channel mapping
3 *
4 * Copyright (c) 2011 Jonathan Cameron
5 */
6 #include <linux/cleanup.h>
7 #include <linux/err.h>
8 #include <linux/export.h>
9 #include <linux/minmax.h>
10 #include <linux/mm.h>
11 #include <linux/mutex.h>
12 #include <linux/property.h>
13 #include <linux/slab.h>
14
15 #include <linux/iio/iio.h>
16 #include <linux/iio/iio-opaque.h>
17 #include "iio_core.h"
18 #include <linux/iio/machine.h>
19 #include <linux/iio/driver.h>
20 #include <linux/iio/consumer.h>
21
/* One registered consumer mapping: links an iio_map entry to its provider. */
struct iio_map_internal {
	struct iio_dev *indio_dev;	/* device providing the mapped channel */
	const struct iio_map *map;	/* consumer-side mapping description */
	struct list_head l;		/* entry in the global iio_map_list */
};
27
/* All registered mappings; every access is serialized by iio_map_list_lock. */
static LIST_HEAD(iio_map_list);
static DEFINE_MUTEX(iio_map_list_lock);
30
iio_map_array_unregister_locked(struct iio_dev * indio_dev)31 static int iio_map_array_unregister_locked(struct iio_dev *indio_dev)
32 {
33 int ret = -ENODEV;
34 struct iio_map_internal *mapi, *next;
35
36 list_for_each_entry_safe(mapi, next, &iio_map_list, l) {
37 if (indio_dev == mapi->indio_dev) {
38 list_del(&mapi->l);
39 kfree(mapi);
40 ret = 0;
41 }
42 }
43 return ret;
44 }
45
iio_map_array_register(struct iio_dev * indio_dev,const struct iio_map * maps)46 int iio_map_array_register(struct iio_dev *indio_dev, const struct iio_map *maps)
47 {
48 struct iio_map_internal *mapi;
49 int i = 0;
50 int ret;
51
52 if (!maps)
53 return 0;
54
55 guard(mutex)(&iio_map_list_lock);
56 while (maps[i].consumer_dev_name) {
57 mapi = kzalloc(sizeof(*mapi), GFP_KERNEL);
58 if (!mapi) {
59 ret = -ENOMEM;
60 goto error_ret;
61 }
62 mapi->map = &maps[i];
63 mapi->indio_dev = indio_dev;
64 list_add_tail(&mapi->l, &iio_map_list);
65 i++;
66 }
67
68 return 0;
69 error_ret:
70 iio_map_array_unregister_locked(indio_dev);
71 return ret;
72 }
73 EXPORT_SYMBOL_GPL(iio_map_array_register);
74
/*
 * Remove all map entries associated with the given iio device.
 * Returns 0 if any entry was removed, -ENODEV if none matched.
 */
int iio_map_array_unregister(struct iio_dev *indio_dev)
{
	guard(mutex)(&iio_map_list_lock);
	return iio_map_array_unregister_locked(indio_dev);
}
EXPORT_SYMBOL_GPL(iio_map_array_unregister);
84
/* devm action callback: type-erased wrapper for iio_map_array_unregister(). */
static void iio_map_array_unregister_cb(void *indio_dev)
{
	iio_map_array_unregister(indio_dev);
}
89
devm_iio_map_array_register(struct device * dev,struct iio_dev * indio_dev,const struct iio_map * maps)90 int devm_iio_map_array_register(struct device *dev, struct iio_dev *indio_dev,
91 const struct iio_map *maps)
92 {
93 int ret;
94
95 ret = iio_map_array_register(indio_dev, maps);
96 if (ret)
97 return ret;
98
99 return devm_add_action_or_reset(dev, iio_map_array_unregister_cb, indio_dev);
100 }
101 EXPORT_SYMBOL_GPL(devm_iio_map_array_register);
102
103 static const struct iio_chan_spec
iio_chan_spec_from_name(const struct iio_dev * indio_dev,const char * name)104 *iio_chan_spec_from_name(const struct iio_dev *indio_dev, const char *name)
105 {
106 int i;
107 const struct iio_chan_spec *chan = NULL;
108
109 for (i = 0; i < indio_dev->num_channels; i++)
110 if (indio_dev->channels[i].datasheet_name &&
111 strcmp(name, indio_dev->channels[i].datasheet_name) == 0) {
112 chan = &indio_dev->channels[i];
113 break;
114 }
115 return chan;
116 }
117
118 /**
119 * __fwnode_iio_simple_xlate - translate iiospec to the IIO channel index
120 * @indio_dev: pointer to the iio_dev structure
121 * @iiospec: IIO specifier as found in the device tree
122 *
123 * This is simple translation function, suitable for the most 1:1 mapped
124 * channels in IIO chips. This function performs only one sanity check:
125 * whether IIO index is less than num_channels (that is specified in the
126 * iio_dev).
127 */
__fwnode_iio_simple_xlate(struct iio_dev * indio_dev,const struct fwnode_reference_args * iiospec)128 static int __fwnode_iio_simple_xlate(struct iio_dev *indio_dev,
129 const struct fwnode_reference_args *iiospec)
130 {
131 if (!iiospec->nargs)
132 return 0;
133
134 if (iiospec->args[0] >= indio_dev->num_channels) {
135 dev_err(&indio_dev->dev, "invalid channel index %llu\n",
136 iiospec->args[0]);
137 return -EINVAL;
138 }
139
140 return iiospec->args[0];
141 }
142
/*
 * Fill @channel from the @index'th "io-channels" reference of @fwnode.
 * On success a reference to the provider iio_dev is held by @channel and
 * must later be dropped with iio_device_put().
 */
static int __fwnode_iio_channel_get(struct iio_channel *channel,
				    struct fwnode_handle *fwnode, int index)
{
	struct fwnode_reference_args iiospec;
	struct device *idev;
	struct iio_dev *indio_dev;
	int err;

	err = fwnode_property_get_reference_args(fwnode, "io-channels",
						 "#io-channel-cells", 0,
						 index, &iiospec);
	if (err)
		return err;

	/* Provider not probed yet: ask the caller to retry later. */
	idev = bus_find_device_by_fwnode(&iio_bus_type, iiospec.fwnode);
	if (!idev) {
		fwnode_handle_put(iiospec.fwnode);
		return -EPROBE_DEFER;
	}

	indio_dev = dev_to_iio_dev(idev);
	channel->indio_dev = indio_dev;
	/* Let the driver translate the specifier; fall back to 1:1 mapping. */
	if (indio_dev->info->fwnode_xlate)
		index = indio_dev->info->fwnode_xlate(indio_dev, &iiospec);
	else
		index = __fwnode_iio_simple_xlate(indio_dev, &iiospec);
	fwnode_handle_put(iiospec.fwnode);
	if (index < 0)
		goto err_put;
	channel->channel = &indio_dev->channels[index];

	return 0;

err_put:
	/* Drop the device reference taken by bus_find_device_by_fwnode(). */
	iio_device_put(indio_dev);
	return index;
}
180
/*
 * Allocate an iio_channel and resolve the @index'th "io-channels"
 * reference of @fwnode into it.  The returned channel must be released
 * with iio_channel_release().
 */
static struct iio_channel *fwnode_iio_channel_get(struct fwnode_handle *fwnode,
						  int index)
{
	int err;

	if (index < 0)
		return ERR_PTR(-EINVAL);

	/* Auto-freed on error paths; ownership escapes via return_ptr(). */
	struct iio_channel *channel __free(kfree) =
		kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return ERR_PTR(-ENOMEM);

	err = __fwnode_iio_channel_get(channel, fwnode, index);
	if (err)
		return ERR_PTR(err);

	return_ptr(channel);
}
200
/*
 * Resolve a (possibly named) IIO channel directly on @fwnode.  Returns the
 * channel, a fatal ERR_PTR that must be propagated as-is, or
 * ERR_PTR(-ENODEV) to tell the caller "keep looking" (e.g. in a parent
 * node or via the channel map).
 */
static struct iio_channel *
__fwnode_iio_channel_get_by_name(struct fwnode_handle *fwnode, const char *name)
{
	struct iio_channel *chan;
	int index = 0;

	/*
	 * For named iio channels, first look up the name in the
	 * "io-channel-names" property. If it cannot be found, the
	 * index will be an error code, and fwnode_iio_channel_get()
	 * will fail.
	 */
	if (name)
		index = fwnode_property_match_string(fwnode, "io-channel-names",
						     name);

	chan = fwnode_iio_channel_get(fwnode, index);
	if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
		return chan;
	if (name) {
		if (index >= 0) {
			pr_err("ERROR: could not get IIO channel %pfw:%s(%i)\n",
			       fwnode, name, index);
			/*
			 * In this case, we found 'name' in 'io-channel-names'
			 * but the lookup still failed, so we should not
			 * proceed with any other lookup. Hence, explicitly
			 * return -EINVAL (maybe not the best error code) so
			 * that the caller won't do a system lookup.
			 */
			return ERR_PTR(-EINVAL);
		}
		/*
		 * If index < 0, then fwnode_property_get_reference_args() fails
		 * with -EINVAL or -ENOENT (ACPI case) which is expected. We
		 * should not proceed if we get any other error.
		 */
		if (PTR_ERR(chan) != -EINVAL && PTR_ERR(chan) != -ENOENT)
			return chan;
	} else if (PTR_ERR(chan) != -ENOENT) {
		/*
		 * If !name, then we should only proceed with the lookup if
		 * fwnode_property_get_reference_args() returns -ENOENT.
		 */
		return chan;
	}

	/* so we continue the lookup */
	return ERR_PTR(-ENODEV);
}
251
/*
 * Get an IIO channel by name for @fwnode, walking up the parent chain as
 * long as each parent carries an "io-channel-ranges" property.
 */
struct iio_channel *fwnode_iio_channel_get_by_name(struct fwnode_handle *fwnode,
						   const char *name)
{
	struct fwnode_handle *parent;
	struct iio_channel *chan;

	/* Walk up the tree of devices looking for a matching iio channel */
	chan = __fwnode_iio_channel_get_by_name(fwnode, name);
	if (!IS_ERR(chan) || PTR_ERR(chan) != -ENODEV)
		return chan;

	/*
	 * No matching IIO channel found on this node.
	 * If the parent node has a "io-channel-ranges" property,
	 * then we can try one of its channels.
	 */
	fwnode_for_each_parent_node(fwnode, parent) {
		/* Stop the walk at the first parent without the property. */
		if (!fwnode_property_present(parent, "io-channel-ranges")) {
			fwnode_handle_put(parent);
			return ERR_PTR(-ENODEV);
		}

		chan = __fwnode_iio_channel_get_by_name(parent, name);
		if (!IS_ERR(chan) || PTR_ERR(chan) != -ENODEV) {
			/* Found it (or hit a fatal error): drop the parent ref. */
			fwnode_handle_put(parent);
			return chan;
		}
	}

	return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL_GPL(fwnode_iio_channel_get_by_name);
284
/*
 * Build a NULL-terminated array of iio_channel for every "io-channels"
 * reference of @dev.  Returns ERR_PTR(-ENODEV) when the node has none.
 */
static struct iio_channel *fwnode_iio_channel_get_all(struct device *dev)
{
	struct fwnode_handle *fwnode = dev_fwnode(dev);
	int i, mapind, nummaps = 0;
	int ret;

	/* Count the references by probing increasing indices until failure. */
	do {
		ret = fwnode_property_get_reference_args(fwnode, "io-channels",
							 "#io-channel-cells", 0,
							 nummaps, NULL);
		if (ret < 0)
			break;
	} while (++nummaps);

	if (nummaps == 0)
		return ERR_PTR(-ENODEV);

	/* NULL terminated array to save passing size */
	struct iio_channel *chans __free(kfree) =
		kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
	if (!chans)
		return ERR_PTR(-ENOMEM);

	/* Search for FW matches */
	for (mapind = 0; mapind < nummaps; mapind++) {
		ret = __fwnode_iio_channel_get(&chans[mapind], fwnode, mapind);
		if (ret)
			goto error_free_chans;
	}
	return_ptr(chans);

error_free_chans:
	/* Drop the device references taken for the channels resolved so far. */
	for (i = 0; i < mapind; i++)
		iio_device_put(chans[i].indio_dev);
	return ERR_PTR(ret);
}
321
/*
 * Resolve a consumer (device name, channel name) pair via the registered
 * iio_map entries.  On success the returned channel holds a reference to
 * the provider device, dropped later by iio_channel_release().
 */
static struct iio_channel *iio_channel_get_sys(const char *name,
					       const char *channel_name)
{
	struct iio_map_internal *c_i = NULL, *c = NULL;
	int err;

	if (!(name || channel_name))
		return ERR_PTR(-ENODEV);

	/* first find a matching entry in the channel map */
	scoped_guard(mutex, &iio_map_list_lock) {
		list_for_each_entry(c_i, &iio_map_list, l) {
			if ((name && strcmp(name, c_i->map->consumer_dev_name) != 0) ||
			    (channel_name &&
			     strcmp(channel_name, c_i->map->consumer_channel) != 0))
				continue;
			c = c_i;
			/* Pin the provider before dropping the list lock. */
			iio_device_get(c->indio_dev);
			break;
		}
	}
	if (!c)
		return ERR_PTR(-ENODEV);

	/* Auto-freed on the error paths below; escapes via return_ptr(). */
	struct iio_channel *channel __free(kfree) =
		kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel) {
		err = -ENOMEM;
		goto error_no_mem;
	}

	channel->indio_dev = c->indio_dev;

	if (c->map->adc_channel_label) {
		channel->channel =
			iio_chan_spec_from_name(channel->indio_dev,
						c->map->adc_channel_label);

		/* Label present but no channel with that datasheet name. */
		if (!channel->channel) {
			err = -EINVAL;
			goto error_no_mem;
		}
	}

	return_ptr(channel);

error_no_mem:
	/* Despite the label name, also reached on -EINVAL above. */
	iio_device_put(c->indio_dev);
	return ERR_PTR(err);
}
372
iio_channel_get(struct device * dev,const char * channel_name)373 struct iio_channel *iio_channel_get(struct device *dev,
374 const char *channel_name)
375 {
376 const char *name = dev ? dev_name(dev) : NULL;
377 struct iio_channel *channel;
378
379 if (dev) {
380 channel = fwnode_iio_channel_get_by_name(dev_fwnode(dev),
381 channel_name);
382 if (!IS_ERR(channel) || PTR_ERR(channel) != -ENODEV)
383 return channel;
384 }
385
386 return iio_channel_get_sys(name, channel_name);
387 }
388 EXPORT_SYMBOL_GPL(iio_channel_get);
389
/* Release a channel obtained from iio_channel_get(); NULL is a no-op. */
void iio_channel_release(struct iio_channel *channel)
{
	if (!channel)
		return;
	/* Drop the provider reference taken when the channel was resolved. */
	iio_device_put(channel->indio_dev);
	kfree(channel);
}
EXPORT_SYMBOL_GPL(iio_channel_release);
398
/* devm action callback: type-erased wrapper for iio_channel_release(). */
static void devm_iio_channel_free(void *iio_channel)
{
	iio_channel_release(iio_channel);
}
403
devm_iio_channel_get(struct device * dev,const char * channel_name)404 struct iio_channel *devm_iio_channel_get(struct device *dev,
405 const char *channel_name)
406 {
407 struct iio_channel *channel;
408 int ret;
409
410 channel = iio_channel_get(dev, channel_name);
411 if (IS_ERR(channel))
412 return channel;
413
414 ret = devm_add_action_or_reset(dev, devm_iio_channel_free, channel);
415 if (ret)
416 return ERR_PTR(ret);
417
418 return channel;
419 }
420 EXPORT_SYMBOL_GPL(devm_iio_channel_get);
421
devm_fwnode_iio_channel_get_by_name(struct device * dev,struct fwnode_handle * fwnode,const char * channel_name)422 struct iio_channel *devm_fwnode_iio_channel_get_by_name(struct device *dev,
423 struct fwnode_handle *fwnode,
424 const char *channel_name)
425 {
426 struct iio_channel *channel;
427 int ret;
428
429 channel = fwnode_iio_channel_get_by_name(fwnode, channel_name);
430 if (IS_ERR(channel))
431 return channel;
432
433 ret = devm_add_action_or_reset(dev, devm_iio_channel_free, channel);
434 if (ret)
435 return ERR_PTR(ret);
436
437 return channel;
438 }
439 EXPORT_SYMBOL_GPL(devm_fwnode_iio_channel_get_by_name);
440
/*
 * Get every channel mapped to @dev, first from firmware, then from the
 * registered channel map.  Returns a NULL-terminated array; release it
 * with iio_channel_release_all().
 */
struct iio_channel *iio_channel_get_all(struct device *dev)
{
	const char *name;
	struct iio_map_internal *c = NULL;
	struct iio_channel *fw_chans;
	int nummaps = 0;
	int mapind = 0;
	int i, ret;

	if (!dev)
		return ERR_PTR(-EINVAL);

	fw_chans = fwnode_iio_channel_get_all(dev);
	/*
	 * We only want to carry on if the error is -ENODEV. Anything else
	 * should be reported up the stack.
	 */
	if (!IS_ERR(fw_chans) || PTR_ERR(fw_chans) != -ENODEV)
		return fw_chans;

	name = dev_name(dev);

	guard(mutex)(&iio_map_list_lock);
	/* first count the matching maps */
	list_for_each_entry(c, &iio_map_list, l)
		if (name && strcmp(name, c->map->consumer_dev_name) != 0)
			continue;
		else
			nummaps++;

	if (nummaps == 0)
		return ERR_PTR(-ENODEV);

	/* NULL terminated array to save passing size */
	struct iio_channel *chans __free(kfree) =
		kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
	if (!chans)
		return ERR_PTR(-ENOMEM);

	/* for each map fill in the chans element */
	list_for_each_entry(c, &iio_map_list, l) {
		if (name && strcmp(name, c->map->consumer_dev_name) != 0)
			continue;
		chans[mapind].indio_dev = c->indio_dev;
		chans[mapind].data = c->map->consumer_data;
		chans[mapind].channel =
			iio_chan_spec_from_name(chans[mapind].indio_dev,
						c->map->adc_channel_label);
		if (!chans[mapind].channel) {
			ret = -EINVAL;
			goto error_free_chans;
		}
		/* One provider reference per array entry. */
		iio_device_get(chans[mapind].indio_dev);
		mapind++;
	}
	if (mapind == 0) {
		ret = -ENODEV;
		goto error_free_chans;
	}

	return_ptr(chans);

error_free_chans:
	/* Undo the references taken for the entries filled so far. */
	for (i = 0; i < mapind; i++)
		iio_device_put(chans[i].indio_dev);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iio_channel_get_all);
509
iio_channel_release_all(struct iio_channel * channels)510 void iio_channel_release_all(struct iio_channel *channels)
511 {
512 struct iio_channel *chan = &channels[0];
513
514 while (chan->indio_dev) {
515 iio_device_put(chan->indio_dev);
516 chan++;
517 }
518 kfree(channels);
519 }
520 EXPORT_SYMBOL_GPL(iio_channel_release_all);
521
/* devm action callback: type-erased wrapper for iio_channel_release_all(). */
static void devm_iio_channel_free_all(void *iio_channels)
{
	iio_channel_release_all(iio_channels);
}
526
devm_iio_channel_get_all(struct device * dev)527 struct iio_channel *devm_iio_channel_get_all(struct device *dev)
528 {
529 struct iio_channel *channels;
530 int ret;
531
532 channels = iio_channel_get_all(dev);
533 if (IS_ERR(channels))
534 return channels;
535
536 ret = devm_add_action_or_reset(dev, devm_iio_channel_free_all,
537 channels);
538 if (ret)
539 return ERR_PTR(ret);
540
541 return channels;
542 }
543 EXPORT_SYMBOL_GPL(devm_iio_channel_get_all);
544
iio_channel_read(struct iio_channel * chan,int * val,int * val2,enum iio_chan_info_enum info)545 static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
546 enum iio_chan_info_enum info)
547 {
548 const struct iio_info *iio_info = chan->indio_dev->info;
549 int unused;
550 int vals[INDIO_MAX_RAW_ELEMENTS];
551 int ret;
552 int val_len = 2;
553
554 if (!val2)
555 val2 = &unused;
556
557 if (!iio_channel_has_info(chan->channel, info))
558 return -EINVAL;
559
560 if (iio_info->read_raw_multi) {
561 ret = iio_info->read_raw_multi(chan->indio_dev,
562 chan->channel,
563 INDIO_MAX_RAW_ELEMENTS,
564 vals, &val_len, info);
565 *val = vals[0];
566 *val2 = vals[1];
567 } else if (iio_info->read_raw) {
568 ret = iio_info->read_raw(chan->indio_dev,
569 chan->channel, val, val2, info);
570 } else {
571 return -EINVAL;
572 }
573
574 return ret;
575 }
576
iio_read_channel_raw(struct iio_channel * chan,int * val)577 int iio_read_channel_raw(struct iio_channel *chan, int *val)
578 {
579 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
580
581 guard(mutex)(&iio_dev_opaque->info_exist_lock);
582 if (!chan->indio_dev->info)
583 return -ENODEV;
584
585 return iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
586 }
587 EXPORT_SYMBOL_GPL(iio_read_channel_raw);
588
iio_read_channel_average_raw(struct iio_channel * chan,int * val)589 int iio_read_channel_average_raw(struct iio_channel *chan, int *val)
590 {
591 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
592
593 guard(mutex)(&iio_dev_opaque->info_exist_lock);
594 if (!chan->indio_dev->info)
595 return -ENODEV;
596
597 return iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_AVERAGE_RAW);
598 }
599 EXPORT_SYMBOL_GPL(iio_read_channel_average_raw);
600
/*
 * Convert @raw to a processed value in *@processed, applying the channel's
 * offset and scale plus the consumer-supplied @scale multiplier.  Caller
 * must hold the device's info_exist_lock.
 */
static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
						 int raw, int *processed,
						 unsigned int scale)
{
	int scale_type, scale_val, scale_val2;
	int offset_type, offset_val, offset_val2;
	s64 raw64 = raw;

	/* Offset is optional; a read failure just means "no offset". */
	offset_type = iio_channel_read(chan, &offset_val, &offset_val2,
				       IIO_CHAN_INFO_OFFSET);
	if (offset_type >= 0) {
		switch (offset_type) {
		case IIO_VAL_INT:
			break;
		case IIO_VAL_INT_PLUS_MICRO:
		case IIO_VAL_INT_PLUS_NANO:
			/*
			 * Both IIO_VAL_INT_PLUS_MICRO and IIO_VAL_INT_PLUS_NANO
			 * implicitly truncate the offset to its integer form.
			 */
			break;
		case IIO_VAL_FRACTIONAL:
			offset_val /= offset_val2;
			break;
		case IIO_VAL_FRACTIONAL_LOG2:
			offset_val >>= offset_val2;
			break;
		default:
			return -EINVAL;
		}

		raw64 += offset_val;
	}

	scale_type = iio_channel_read(chan, &scale_val, &scale_val2,
				      IIO_CHAN_INFO_SCALE);
	if (scale_type < 0) {
		/*
		 * If no channel scaling is available apply consumer scale to
		 * raw value and return.
		 */
		*processed = raw * scale;
		return 0;
	}

	switch (scale_type) {
	case IIO_VAL_INT:
		*processed = raw64 * scale_val * scale;
		break;
	case IIO_VAL_INT_PLUS_MICRO:
		/* A negative micro part means the whole scale is negative. */
		if (scale_val2 < 0)
			*processed = -raw64 * scale_val * scale;
		else
			*processed = raw64 * scale_val * scale;
		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
				      1000000LL);
		break;
	case IIO_VAL_INT_PLUS_NANO:
		if (scale_val2 < 0)
			*processed = -raw64 * scale_val * scale;
		else
			*processed = raw64 * scale_val * scale;
		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
				      1000000000LL);
		break;
	case IIO_VAL_FRACTIONAL:
		*processed = div_s64(raw64 * (s64)scale_val * scale,
				     scale_val2);
		break;
	case IIO_VAL_FRACTIONAL_LOG2:
		*processed = (raw64 * (s64)scale_val * scale) >> scale_val2;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}
679
iio_convert_raw_to_processed(struct iio_channel * chan,int raw,int * processed,unsigned int scale)680 int iio_convert_raw_to_processed(struct iio_channel *chan, int raw,
681 int *processed, unsigned int scale)
682 {
683 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
684
685 guard(mutex)(&iio_dev_opaque->info_exist_lock);
686 if (!chan->indio_dev->info)
687 return -ENODEV;
688
689 return iio_convert_raw_to_processed_unlocked(chan, raw, processed,
690 scale);
691 }
692 EXPORT_SYMBOL_GPL(iio_convert_raw_to_processed);
693
iio_read_channel_attribute(struct iio_channel * chan,int * val,int * val2,enum iio_chan_info_enum attribute)694 int iio_read_channel_attribute(struct iio_channel *chan, int *val, int *val2,
695 enum iio_chan_info_enum attribute)
696 {
697 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
698
699 guard(mutex)(&iio_dev_opaque->info_exist_lock);
700 if (!chan->indio_dev->info)
701 return -ENODEV;
702
703 return iio_channel_read(chan, val, val2, attribute);
704 }
705 EXPORT_SYMBOL_GPL(iio_read_channel_attribute);
706
/* Convenience wrapper: read the channel's offset attribute. */
int iio_read_channel_offset(struct iio_channel *chan, int *val, int *val2)
{
	return iio_read_channel_attribute(chan, val, val2, IIO_CHAN_INFO_OFFSET);
}
EXPORT_SYMBOL_GPL(iio_read_channel_offset);
712
iio_read_channel_processed_scale(struct iio_channel * chan,int * val,unsigned int scale)713 int iio_read_channel_processed_scale(struct iio_channel *chan, int *val,
714 unsigned int scale)
715 {
716 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
717 int ret;
718
719 guard(mutex)(&iio_dev_opaque->info_exist_lock);
720 if (!chan->indio_dev->info)
721 return -ENODEV;
722
723 if (iio_channel_has_info(chan->channel, IIO_CHAN_INFO_PROCESSED)) {
724 ret = iio_channel_read(chan, val, NULL,
725 IIO_CHAN_INFO_PROCESSED);
726 if (ret < 0)
727 return ret;
728 *val *= scale;
729
730 return ret;
731 } else {
732 ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
733 if (ret < 0)
734 return ret;
735
736 return iio_convert_raw_to_processed_unlocked(chan, *val, val,
737 scale);
738 }
739 }
740 EXPORT_SYMBOL_GPL(iio_read_channel_processed_scale);
741
/* Read a processed value for @chan with no extra consumer scaling. */
int iio_read_channel_processed(struct iio_channel *chan, int *val)
{
	/* This is just a special case with scale factor 1 */
	return iio_read_channel_processed_scale(chan, val, 1);
}
EXPORT_SYMBOL_GPL(iio_read_channel_processed);
748
/* Convenience wrapper: read the channel's scale attribute. */
int iio_read_channel_scale(struct iio_channel *chan, int *val, int *val2)
{
	return iio_read_channel_attribute(chan, val, val2, IIO_CHAN_INFO_SCALE);
}
EXPORT_SYMBOL_GPL(iio_read_channel_scale);
754
iio_channel_read_avail(struct iio_channel * chan,const int ** vals,int * type,int * length,enum iio_chan_info_enum info)755 static int iio_channel_read_avail(struct iio_channel *chan,
756 const int **vals, int *type, int *length,
757 enum iio_chan_info_enum info)
758 {
759 const struct iio_info *iio_info = chan->indio_dev->info;
760
761 if (!iio_channel_has_available(chan->channel, info))
762 return -EINVAL;
763
764 if (iio_info->read_avail)
765 return iio_info->read_avail(chan->indio_dev, chan->channel,
766 vals, type, length, info);
767 return -EINVAL;
768 }
769
iio_read_avail_channel_attribute(struct iio_channel * chan,const int ** vals,int * type,int * length,enum iio_chan_info_enum attribute)770 int iio_read_avail_channel_attribute(struct iio_channel *chan,
771 const int **vals, int *type, int *length,
772 enum iio_chan_info_enum attribute)
773 {
774 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
775
776 guard(mutex)(&iio_dev_opaque->info_exist_lock);
777 if (!chan->indio_dev->info)
778 return -ENODEV;
779
780 return iio_channel_read_avail(chan, vals, type, length, attribute);
781 }
782 EXPORT_SYMBOL_GPL(iio_read_avail_channel_attribute);
783
iio_read_avail_channel_raw(struct iio_channel * chan,const int ** vals,int * length)784 int iio_read_avail_channel_raw(struct iio_channel *chan,
785 const int **vals, int *length)
786 {
787 int ret;
788 int type;
789
790 ret = iio_read_avail_channel_attribute(chan, vals, &type, length,
791 IIO_CHAN_INFO_RAW);
792
793 if (ret >= 0 && type != IIO_VAL_INT)
794 /* raw values are assumed to be IIO_VAL_INT */
795 ret = -EINVAL;
796
797 return ret;
798 }
799 EXPORT_SYMBOL_GPL(iio_read_avail_channel_raw);
800
/*
 * Determine the maximum available value for @info.
 * An IIO_AVAIL_RANGE is the triplet {min, step, max}: with IIO_VAL_INT
 * that is vals[0..2]; otherwise each element is an integer/fractional
 * pair, so the triplet occupies vals[0..5] and max is vals[4]/vals[5].
 */
static int iio_channel_read_max(struct iio_channel *chan,
				int *val, int *val2, int *type,
				enum iio_chan_info_enum info)
{
	const int *vals;
	int length;
	int ret;

	ret = iio_channel_read_avail(chan, &vals, type, &length, info);
	if (ret < 0)
		return ret;

	switch (ret) {
	case IIO_AVAIL_RANGE:
		switch (*type) {
		case IIO_VAL_INT:
			/* {min, step, max}: max is the third element. */
			*val = vals[2];
			break;
		default:
			/* Two ints per element: max is the third pair. */
			*val = vals[4];
			if (val2)
				*val2 = vals[5];
		}
		return 0;

	case IIO_AVAIL_LIST:
		if (length <= 0)
			return -EINVAL;
		switch (*type) {
		case IIO_VAL_INT:
			*val = max_array(vals, length);
			break;
		default:
			/* TODO: learn about max for other iio values */
			return -EINVAL;
		}
		return 0;

	default:
		return -EINVAL;
	}
}
843
iio_read_max_channel_raw(struct iio_channel * chan,int * val)844 int iio_read_max_channel_raw(struct iio_channel *chan, int *val)
845 {
846 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
847 int type;
848
849 guard(mutex)(&iio_dev_opaque->info_exist_lock);
850 if (!chan->indio_dev->info)
851 return -ENODEV;
852
853 return iio_channel_read_max(chan, val, NULL, &type, IIO_CHAN_INFO_RAW);
854 }
855 EXPORT_SYMBOL_GPL(iio_read_max_channel_raw);
856
/*
 * Determine the minimum available value for @info.
 * An IIO_AVAIL_RANGE is the triplet {min, step, max}, so min is always
 * the first element: vals[0] for IIO_VAL_INT, vals[0]/vals[1] for
 * two-part types.
 */
static int iio_channel_read_min(struct iio_channel *chan,
				int *val, int *val2, int *type,
				enum iio_chan_info_enum info)
{
	const int *vals;
	int length;
	int ret;

	ret = iio_channel_read_avail(chan, &vals, type, &length, info);
	if (ret < 0)
		return ret;

	switch (ret) {
	case IIO_AVAIL_RANGE:
		switch (*type) {
		case IIO_VAL_INT:
			*val = vals[0];
			break;
		default:
			*val = vals[0];
			if (val2)
				*val2 = vals[1];
		}
		return 0;

	case IIO_AVAIL_LIST:
		if (length <= 0)
			return -EINVAL;
		switch (*type) {
		case IIO_VAL_INT:
			*val = min_array(vals, length);
			break;
		default:
			/* TODO: learn about min for other iio values */
			return -EINVAL;
		}
		return 0;

	default:
		return -EINVAL;
	}
}
899
iio_read_min_channel_raw(struct iio_channel * chan,int * val)900 int iio_read_min_channel_raw(struct iio_channel *chan, int *val)
901 {
902 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
903 int type;
904
905 guard(mutex)(&iio_dev_opaque->info_exist_lock);
906 if (!chan->indio_dev->info)
907 return -ENODEV;
908
909 return iio_channel_read_min(chan, val, NULL, &type, IIO_CHAN_INFO_RAW);
910 }
911 EXPORT_SYMBOL_GPL(iio_read_min_channel_raw);
912
iio_get_channel_type(struct iio_channel * chan,enum iio_chan_type * type)913 int iio_get_channel_type(struct iio_channel *chan, enum iio_chan_type *type)
914 {
915 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
916
917 guard(mutex)(&iio_dev_opaque->info_exist_lock);
918 if (!chan->indio_dev->info)
919 return -ENODEV;
920
921 *type = chan->channel->type;
922
923 return 0;
924 }
925 EXPORT_SYMBOL_GPL(iio_get_channel_type);
926
iio_channel_write(struct iio_channel * chan,int val,int val2,enum iio_chan_info_enum info)927 static int iio_channel_write(struct iio_channel *chan, int val, int val2,
928 enum iio_chan_info_enum info)
929 {
930 const struct iio_info *iio_info = chan->indio_dev->info;
931
932 if (iio_info->write_raw)
933 return iio_info->write_raw(chan->indio_dev,
934 chan->channel, val, val2, info);
935 return -EINVAL;
936 }
937
iio_write_channel_attribute(struct iio_channel * chan,int val,int val2,enum iio_chan_info_enum attribute)938 int iio_write_channel_attribute(struct iio_channel *chan, int val, int val2,
939 enum iio_chan_info_enum attribute)
940 {
941 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
942
943 guard(mutex)(&iio_dev_opaque->info_exist_lock);
944 if (!chan->indio_dev->info)
945 return -ENODEV;
946
947 return iio_channel_write(chan, val, val2, attribute);
948 }
949 EXPORT_SYMBOL_GPL(iio_write_channel_attribute);
950
/* Convenience wrapper: write a raw value to @chan. */
int iio_write_channel_raw(struct iio_channel *chan, int val)
{
	return iio_write_channel_attribute(chan, val, 0, IIO_CHAN_INFO_RAW);
}
EXPORT_SYMBOL_GPL(iio_write_channel_raw);
956
iio_get_channel_ext_info_count(struct iio_channel * chan)957 unsigned int iio_get_channel_ext_info_count(struct iio_channel *chan)
958 {
959 const struct iio_chan_spec_ext_info *ext_info;
960 unsigned int i = 0;
961
962 if (!chan->channel->ext_info)
963 return i;
964
965 for (ext_info = chan->channel->ext_info; ext_info->name; ext_info++)
966 ++i;
967
968 return i;
969 }
970 EXPORT_SYMBOL_GPL(iio_get_channel_ext_info_count);
971
972 static const struct iio_chan_spec_ext_info *
iio_lookup_ext_info(const struct iio_channel * chan,const char * attr)973 iio_lookup_ext_info(const struct iio_channel *chan, const char *attr)
974 {
975 const struct iio_chan_spec_ext_info *ext_info;
976
977 if (!chan->channel->ext_info)
978 return NULL;
979
980 for (ext_info = chan->channel->ext_info; ext_info->name; ++ext_info) {
981 if (!strcmp(attr, ext_info->name))
982 return ext_info;
983 }
984
985 return NULL;
986 }
987
iio_read_channel_ext_info(struct iio_channel * chan,const char * attr,char * buf)988 ssize_t iio_read_channel_ext_info(struct iio_channel *chan,
989 const char *attr, char *buf)
990 {
991 const struct iio_chan_spec_ext_info *ext_info;
992
993 if (!buf || offset_in_page(buf)) {
994 pr_err("iio: invalid ext_info read buffer\n");
995 return -EINVAL;
996 }
997
998 ext_info = iio_lookup_ext_info(chan, attr);
999 if (!ext_info)
1000 return -EINVAL;
1001
1002 return ext_info->read(chan->indio_dev, ext_info->private,
1003 chan->channel, buf);
1004 }
1005 EXPORT_SYMBOL_GPL(iio_read_channel_ext_info);
1006
iio_write_channel_ext_info(struct iio_channel * chan,const char * attr,const char * buf,size_t len)1007 ssize_t iio_write_channel_ext_info(struct iio_channel *chan, const char *attr,
1008 const char *buf, size_t len)
1009 {
1010 const struct iio_chan_spec_ext_info *ext_info;
1011
1012 ext_info = iio_lookup_ext_info(chan, attr);
1013 if (!ext_info)
1014 return -EINVAL;
1015
1016 return ext_info->write(chan->indio_dev, ext_info->private,
1017 chan->channel, buf, len);
1018 }
1019 EXPORT_SYMBOL_GPL(iio_write_channel_ext_info);
1020
/*
 * Read the channel's label into @buf.  @buf must be page-aligned —
 * presumably a sysfs attribute buffer; verify against callers.
 */
ssize_t iio_read_channel_label(struct iio_channel *chan, char *buf)
{
	if (!buf || offset_in_page(buf)) {
		pr_err("iio: invalid label read buffer\n");
		return -EINVAL;
	}

	return do_iio_read_channel_label(chan->indio_dev, chan->channel, buf);
}
EXPORT_SYMBOL_GPL(iio_read_channel_label);
1031