// SPDX-License-Identifier: GPL-2.0-only
/* The industrial I/O core in kernel channel mapping
 *
 * Copyright (c) 2011 Jonathan Cameron
 */
#include <linux/err.h>
#include <linux/export.h>
#include <linux/minmax.h>
#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/slab.h>

#include <linux/iio/iio.h>
#include <linux/iio/iio-opaque.h>
#include "iio_core.h"
#include <linux/iio/machine.h>
#include <linux/iio/driver.h>
#include <linux/iio/consumer.h>

struct iio_map_internal {
	struct iio_dev *indio_dev;
	struct iio_map *map;
	struct list_head l;
};

static LIST_HEAD(iio_map_list);
static DEFINE_MUTEX(iio_map_list_lock);

static int iio_map_array_unregister_locked(struct iio_dev *indio_dev)
{
	int ret = -ENODEV;
	struct iio_map_internal *mapi, *next;

	list_for_each_entry_safe(mapi, next, &iio_map_list, l) {
		if (indio_dev == mapi->indio_dev) {
			list_del(&mapi->l);
			kfree(mapi);
			ret = 0;
		}
	}
	return ret;
}

int iio_map_array_register(struct iio_dev *indio_dev, struct iio_map *maps)
{
	int i = 0, ret = 0;
	struct iio_map_internal *mapi;

	if (!maps)
		return 0;

	mutex_lock(&iio_map_list_lock);
	while (maps[i].consumer_dev_name) {
		mapi = kzalloc(sizeof(*mapi), GFP_KERNEL);
		if (!mapi) {
			ret = -ENOMEM;
			goto error_ret;
		}
		mapi->map = &maps[i];
		mapi->indio_dev = indio_dev;
		list_add_tail(&mapi->l, &iio_map_list);
		i++;
	}
error_ret:
	if (ret)
		iio_map_array_unregister_locked(indio_dev);
	mutex_unlock(&iio_map_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_register);

/*
 * Remove all map entries associated with the given iio device
 */
int iio_map_array_unregister(struct iio_dev *indio_dev)
{
	int ret;

	mutex_lock(&iio_map_list_lock);
	ret = iio_map_array_unregister_locked(indio_dev);
	mutex_unlock(&iio_map_list_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_map_array_unregister);

static void iio_map_array_unregister_cb(void *indio_dev)
{
	iio_map_array_unregister(indio_dev);
}

int devm_iio_map_array_register(struct device *dev, struct iio_dev *indio_dev,
				struct iio_map *maps)
{
	int ret;

	ret = iio_map_array_register(indio_dev, maps);
	if (ret)
		return ret;

	return devm_add_action_or_reset(dev, iio_map_array_unregister_cb, indio_dev);
}
EXPORT_SYMBOL_GPL(devm_iio_map_array_register);
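/*
 * Example (illustrative sketch only, not part of this file): a provider
 * driver would typically register a NULL-terminated map array from probe()
 * so consumers can look channels up by name. The "foo"/"vbat"/"vchg" names
 * below are hypothetical.
 *
 *	static struct iio_map foo_adc_maps[] = {
 *		IIO_MAP("CH0", "foo-battery", "vbat"),
 *		IIO_MAP("CH1", "foo-battery", "vchg"),
 *		{ }
 *	};
 *
 *	ret = devm_iio_map_array_register(dev, indio_dev, foo_adc_maps);
 *	if (ret)
 *		return ret;
 */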
static const struct iio_chan_spec
*iio_chan_spec_from_name(const struct iio_dev *indio_dev, const char *name)
{
	int i;
	const struct iio_chan_spec *chan = NULL;

	for (i = 0; i < indio_dev->num_channels; i++)
		if (indio_dev->channels[i].datasheet_name &&
		    strcmp(name, indio_dev->channels[i].datasheet_name) == 0) {
			chan = &indio_dev->channels[i];
			break;
		}
	return chan;
}

/**
 * __fwnode_iio_simple_xlate - translate iiospec to the IIO channel index
 * @indio_dev:	pointer to the iio_dev structure
 * @iiospec:	IIO specifier as found in the device tree
 *
 * This is a simple translation function, suitable for the most common case of
 * 1:1 mapped channels in IIO chips. It performs only one sanity check:
 * whether the IIO index is less than num_channels (as specified in the
 * iio_dev).
 */
static int __fwnode_iio_simple_xlate(struct iio_dev *indio_dev,
				     const struct fwnode_reference_args *iiospec)
{
	if (!iiospec->nargs)
		return 0;

	if (iiospec->args[0] >= indio_dev->num_channels) {
		dev_err(&indio_dev->dev, "invalid channel index %llu\n",
			iiospec->args[0]);
		return -EINVAL;
	}

	return iiospec->args[0];
}

static int __fwnode_iio_channel_get(struct iio_channel *channel,
				    struct fwnode_handle *fwnode, int index)
{
	struct fwnode_reference_args iiospec;
	struct device *idev;
	struct iio_dev *indio_dev;
	int err;

	err = fwnode_property_get_reference_args(fwnode, "io-channels",
						 "#io-channel-cells", 0,
						 index, &iiospec);
	if (err)
		return err;

	idev = bus_find_device_by_fwnode(&iio_bus_type, iiospec.fwnode);
	if (!idev) {
		fwnode_handle_put(iiospec.fwnode);
		return -EPROBE_DEFER;
	}

	indio_dev = dev_to_iio_dev(idev);
	channel->indio_dev = indio_dev;
	if (indio_dev->info->fwnode_xlate)
		index = indio_dev->info->fwnode_xlate(indio_dev, &iiospec);
	else
		index = __fwnode_iio_simple_xlate(indio_dev, &iiospec);
	fwnode_handle_put(iiospec.fwnode);
	if (index < 0)
		goto err_put;
	channel->channel = &indio_dev->channels[index];

	return 0;

err_put:
	iio_device_put(indio_dev);
	return index;
}

static struct iio_channel *fwnode_iio_channel_get(struct fwnode_handle *fwnode,
						  int index)
{
	struct iio_channel *channel;
	int err;

	if (index < 0)
		return ERR_PTR(-EINVAL);

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel)
		return ERR_PTR(-ENOMEM);

	err = __fwnode_iio_channel_get(channel, fwnode, index);
	if (err)
		goto err_free_channel;

	return channel;

err_free_channel:
	kfree(channel);
	return ERR_PTR(err);
}

static struct iio_channel *
__fwnode_iio_channel_get_by_name(struct fwnode_handle *fwnode, const char *name)
{
	struct iio_channel *chan;
	int index = 0;

	/*
	 * For named iio channels, first look up the name in the
	 * "io-channel-names" property. If it cannot be found, the
	 * index will be an error code, and fwnode_iio_channel_get()
	 * will fail.
	 */
	if (name)
		index = fwnode_property_match_string(fwnode, "io-channel-names",
						     name);

	chan = fwnode_iio_channel_get(fwnode, index);
	if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
		return chan;
	if (name) {
		if (index >= 0) {
			pr_err("ERROR: could not get IIO channel %pfw:%s(%i)\n",
			       fwnode, name, index);
			/*
			 * In this case, we found 'name' in 'io-channel-names'
			 * but the channel lookup still failed, so we should
			 * not proceed with any other lookup. Hence, explicitly
			 * return -EINVAL (maybe not the ideal error code) so
			 * that the caller won't do a system-wide lookup.
			 */
			return ERR_PTR(-EINVAL);
		}
		/*
		 * If index < 0, then fwnode_property_get_reference_args() fails
		 * with -EINVAL or -ENOENT (ACPI case) which is expected. We
		 * should not proceed if we get any other error.
		 */
		if (PTR_ERR(chan) != -EINVAL && PTR_ERR(chan) != -ENOENT)
			return chan;
	} else if (PTR_ERR(chan) != -ENOENT) {
		/*
		 * If !name, then we should only proceed with the lookup if
		 * fwnode_property_get_reference_args() returns -ENOENT.
		 */
		return chan;
	}

	/* so we continue the lookup */
	return ERR_PTR(-ENODEV);
}

struct iio_channel *fwnode_iio_channel_get_by_name(struct fwnode_handle *fwnode,
						   const char *name)
{
	struct fwnode_handle *parent;
	struct iio_channel *chan;

	/* Walk up the tree of devices looking for a matching iio channel */
	chan = __fwnode_iio_channel_get_by_name(fwnode, name);
	if (!IS_ERR(chan) || PTR_ERR(chan) != -ENODEV)
		return chan;

	/*
	 * No matching IIO channel found on this node.
	 * If the parent node has an "io-channel-ranges" property,
	 * then we can try one of its channels.
	 */
	fwnode_for_each_parent_node(fwnode, parent) {
		if (!fwnode_property_present(parent, "io-channel-ranges")) {
			fwnode_handle_put(parent);
			return ERR_PTR(-ENODEV);
		}

		/* look the channel up on the parent, not the original node */
		chan = __fwnode_iio_channel_get_by_name(parent, name);
		if (!IS_ERR(chan) || PTR_ERR(chan) != -ENODEV) {
			fwnode_handle_put(parent);
			return chan;
		}
	}

	return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL_GPL(fwnode_iio_channel_get_by_name);
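/*
 * Example (sketch, hypothetical binding): given firmware nodes along the
 * lines of
 *
 *	adc: adc@48 {
 *		#io-channel-cells = <1>;
 *	};
 *
 *	battery {
 *		io-channels = <&adc 2>;
 *		io-channel-names = "vbat";
 *	};
 *
 * a consumer can resolve the named channel with:
 *
 *	chan = fwnode_iio_channel_get_by_name(dev_fwnode(dev), "vbat");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *
 * where a -EPROBE_DEFER return is common while the provider is not yet bound.
 */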
static struct iio_channel *fwnode_iio_channel_get_all(struct device *dev)
{
	struct fwnode_handle *fwnode = dev_fwnode(dev);
	struct iio_channel *chans;
	int i, mapind, nummaps = 0;
	int ret;

	do {
		ret = fwnode_property_get_reference_args(fwnode, "io-channels",
							 "#io-channel-cells", 0,
							 nummaps, NULL);
		if (ret < 0)
			break;
	} while (++nummaps);

	if (nummaps == 0)
		return ERR_PTR(-ENODEV);

	/* NULL terminated array to save passing size */
	chans = kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
	if (!chans)
		return ERR_PTR(-ENOMEM);

	/* Search for FW matches */
	for (mapind = 0; mapind < nummaps; mapind++) {
		ret = __fwnode_iio_channel_get(&chans[mapind], fwnode, mapind);
		if (ret)
			goto error_free_chans;
	}
	return chans;

error_free_chans:
	for (i = 0; i < mapind; i++)
		iio_device_put(chans[i].indio_dev);
	kfree(chans);
	return ERR_PTR(ret);
}

static struct iio_channel *iio_channel_get_sys(const char *name,
					       const char *channel_name)
{
	struct iio_map_internal *c_i = NULL, *c = NULL;
	struct iio_channel *channel;
	int err;

	if (!(name || channel_name))
		return ERR_PTR(-ENODEV);

	/* first find matching entry in the channel map */
	mutex_lock(&iio_map_list_lock);
	list_for_each_entry(c_i, &iio_map_list, l) {
		if ((name && strcmp(name, c_i->map->consumer_dev_name) != 0) ||
		    (channel_name &&
		     strcmp(channel_name, c_i->map->consumer_channel) != 0))
			continue;
		c = c_i;
		iio_device_get(c->indio_dev);
		break;
	}
	mutex_unlock(&iio_map_list_lock);
	if (!c)
		return ERR_PTR(-ENODEV);

	channel = kzalloc(sizeof(*channel), GFP_KERNEL);
	if (!channel) {
		err = -ENOMEM;
		goto error_no_mem;
	}

	channel->indio_dev = c->indio_dev;

	if (c->map->adc_channel_label) {
		channel->channel =
			iio_chan_spec_from_name(channel->indio_dev,
						c->map->adc_channel_label);

		if (!channel->channel) {
			err = -EINVAL;
			goto error_no_chan;
		}
	}

	return channel;

error_no_chan:
	kfree(channel);
error_no_mem:
	iio_device_put(c->indio_dev);
	return ERR_PTR(err);
}

struct iio_channel *iio_channel_get(struct device *dev,
				    const char *channel_name)
{
	const char *name = dev ? dev_name(dev) : NULL;
	struct iio_channel *channel;

	if (dev) {
		channel = fwnode_iio_channel_get_by_name(dev_fwnode(dev),
							 channel_name);
		if (!IS_ERR(channel) || PTR_ERR(channel) != -ENODEV)
			return channel;
	}

	return iio_channel_get_sys(name, channel_name);
}
EXPORT_SYMBOL_GPL(iio_channel_get);
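/*
 * Example (sketch): a consumer using the non-devm API; "vbat" is a
 * hypothetical channel name. The channel must be released when done:
 *
 *	struct iio_channel *chan;
 *
 *	chan = iio_channel_get(&pdev->dev, "vbat");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *	...
 *	iio_channel_release(chan);
 */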
void iio_channel_release(struct iio_channel *channel)
{
	if (!channel)
		return;
	iio_device_put(channel->indio_dev);
	kfree(channel);
}
EXPORT_SYMBOL_GPL(iio_channel_release);

static void devm_iio_channel_free(void *iio_channel)
{
	iio_channel_release(iio_channel);
}

struct iio_channel *devm_iio_channel_get(struct device *dev,
					 const char *channel_name)
{
	struct iio_channel *channel;
	int ret;

	channel = iio_channel_get(dev, channel_name);
	if (IS_ERR(channel))
		return channel;

	ret = devm_add_action_or_reset(dev, devm_iio_channel_free, channel);
	if (ret)
		return ERR_PTR(ret);

	return channel;
}
EXPORT_SYMBOL_GPL(devm_iio_channel_get);

struct iio_channel *devm_fwnode_iio_channel_get_by_name(struct device *dev,
							struct fwnode_handle *fwnode,
							const char *channel_name)
{
	struct iio_channel *channel;
	int ret;

	channel = fwnode_iio_channel_get_by_name(fwnode, channel_name);
	if (IS_ERR(channel))
		return channel;

	ret = devm_add_action_or_reset(dev, devm_iio_channel_free, channel);
	if (ret)
		return ERR_PTR(ret);

	return channel;
}
EXPORT_SYMBOL_GPL(devm_fwnode_iio_channel_get_by_name);
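/*
 * Example (sketch): the devm variant ties the channel's lifetime to the
 * consumer device, so no explicit iio_channel_release() is needed; "vbat"
 * is again hypothetical:
 *
 *	chan = devm_iio_channel_get(&pdev->dev, "vbat");
 *	if (IS_ERR(chan))
 *		return dev_err_probe(&pdev->dev, PTR_ERR(chan),
 *				     "failed to get IIO channel\n");
 */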
struct iio_channel *iio_channel_get_all(struct device *dev)
{
	const char *name;
	struct iio_channel *chans;
	struct iio_map_internal *c = NULL;
	int nummaps = 0;
	int mapind = 0;
	int i, ret;

	if (!dev)
		return ERR_PTR(-EINVAL);

	chans = fwnode_iio_channel_get_all(dev);
	/*
	 * We only want to carry on if the error is -ENODEV. Anything else
	 * should be reported up the stack.
	 */
	if (!IS_ERR(chans) || PTR_ERR(chans) != -ENODEV)
		return chans;

	name = dev_name(dev);

	mutex_lock(&iio_map_list_lock);
	/* first count the matching maps */
	list_for_each_entry(c, &iio_map_list, l) {
		if (name && strcmp(name, c->map->consumer_dev_name) != 0)
			continue;
		nummaps++;
	}

	if (nummaps == 0) {
		ret = -ENODEV;
		goto error_ret;
	}

	/* NULL terminated array to save passing size */
	chans = kcalloc(nummaps + 1, sizeof(*chans), GFP_KERNEL);
	if (!chans) {
		ret = -ENOMEM;
		goto error_ret;
	}

	/* for each map fill in the chans element */
	list_for_each_entry(c, &iio_map_list, l) {
		if (name && strcmp(name, c->map->consumer_dev_name) != 0)
			continue;
		chans[mapind].indio_dev = c->indio_dev;
		chans[mapind].data = c->map->consumer_data;
		chans[mapind].channel =
			iio_chan_spec_from_name(chans[mapind].indio_dev,
						c->map->adc_channel_label);
		if (!chans[mapind].channel) {
			ret = -EINVAL;
			goto error_free_chans;
		}
		iio_device_get(chans[mapind].indio_dev);
		mapind++;
	}
	if (mapind == 0) {
		ret = -ENODEV;
		goto error_free_chans;
	}
	mutex_unlock(&iio_map_list_lock);

	return chans;

error_free_chans:
	/* only the first 'mapind' entries hold a device reference */
	for (i = 0; i < mapind; i++)
		iio_device_put(chans[i].indio_dev);
	kfree(chans);
error_ret:
	mutex_unlock(&iio_map_list_lock);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(iio_channel_get_all);

void iio_channel_release_all(struct iio_channel *channels)
{
	struct iio_channel *chan = &channels[0];

	while (chan->indio_dev) {
		iio_device_put(chan->indio_dev);
		chan++;
	}
	kfree(channels);
}
EXPORT_SYMBOL_GPL(iio_channel_release_all);

static void devm_iio_channel_free_all(void *iio_channels)
{
	iio_channel_release_all(iio_channels);
}

struct iio_channel *devm_iio_channel_get_all(struct device *dev)
{
	struct iio_channel *channels;
	int ret;

	channels = iio_channel_get_all(dev);
	if (IS_ERR(channels))
		return channels;

	ret = devm_add_action_or_reset(dev, devm_iio_channel_free_all,
				       channels);
	if (ret)
		return ERR_PTR(ret);

	return channels;
}
EXPORT_SYMBOL_GPL(devm_iio_channel_get_all);

static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
			    enum iio_chan_info_enum info)
{
	int unused;
	int vals[INDIO_MAX_RAW_ELEMENTS];
	int ret;
	int val_len = 2;

	if (!val2)
		val2 = &unused;

	if (!iio_channel_has_info(chan->channel, info))
		return -EINVAL;

	if (chan->indio_dev->info->read_raw_multi) {
		ret = chan->indio_dev->info->read_raw_multi(chan->indio_dev,
					chan->channel, INDIO_MAX_RAW_ELEMENTS,
					vals, &val_len, info);
		*val = vals[0];
		*val2 = vals[1];
	} else {
		ret = chan->indio_dev->info->read_raw(chan->indio_dev,
					chan->channel, val, val2, info);
	}

	return ret;
}

int iio_read_channel_raw(struct iio_channel *chan, int *val)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_raw);
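/*
 * Example (sketch): reading a raw sample. The raw value is in device units
 * and usually needs offset/scale applied (see
 * iio_convert_raw_to_processed() below) before it is meaningful:
 *
 *	int raw;
 *	int ret = iio_read_channel_raw(chan, &raw);
 *
 *	if (ret < 0)
 *		return ret;
 */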
int iio_read_channel_average_raw(struct iio_channel *chan, int *val)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_AVERAGE_RAW);
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_average_raw);

static int iio_convert_raw_to_processed_unlocked(struct iio_channel *chan,
						 int raw, int *processed,
						 unsigned int scale)
{
	int scale_type, scale_val, scale_val2;
	int offset_type, offset_val, offset_val2;
	s64 raw64 = raw;

	offset_type = iio_channel_read(chan, &offset_val, &offset_val2,
				       IIO_CHAN_INFO_OFFSET);
	if (offset_type >= 0) {
		switch (offset_type) {
		case IIO_VAL_INT:
			break;
		case IIO_VAL_INT_PLUS_MICRO:
		case IIO_VAL_INT_PLUS_NANO:
			/*
			 * Both IIO_VAL_INT_PLUS_MICRO and IIO_VAL_INT_PLUS_NANO
			 * implicitly truncate the offset to its integer form.
			 */
			break;
		case IIO_VAL_FRACTIONAL:
			offset_val /= offset_val2;
			break;
		case IIO_VAL_FRACTIONAL_LOG2:
			offset_val >>= offset_val2;
			break;
		default:
			return -EINVAL;
		}

		raw64 += offset_val;
	}

	scale_type = iio_channel_read(chan, &scale_val, &scale_val2,
				      IIO_CHAN_INFO_SCALE);
	if (scale_type < 0) {
		/*
		 * If no channel scaling is available apply consumer scale to
		 * raw value and return.
		 */
		*processed = raw * scale;
		return 0;
	}

	switch (scale_type) {
	case IIO_VAL_INT:
		*processed = raw64 * scale_val * scale;
		break;
	case IIO_VAL_INT_PLUS_MICRO:
		if (scale_val2 < 0)
			*processed = -raw64 * scale_val;
		else
			*processed = raw64 * scale_val;
		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
				      1000000LL);
		break;
	case IIO_VAL_INT_PLUS_NANO:
		if (scale_val2 < 0)
			*processed = -raw64 * scale_val;
		else
			*processed = raw64 * scale_val;
		*processed += div_s64(raw64 * (s64)scale_val2 * scale,
				      1000000000LL);
		break;
	case IIO_VAL_FRACTIONAL:
		*processed = div_s64(raw64 * (s64)scale_val * scale,
				     scale_val2);
		break;
	case IIO_VAL_FRACTIONAL_LOG2:
		*processed = (raw64 * (s64)scale_val * scale) >> scale_val2;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

int iio_convert_raw_to_processed(struct iio_channel *chan, int raw,
				 int *processed, unsigned int scale)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_convert_raw_to_processed_unlocked(chan, raw, processed,
						    scale);
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_convert_raw_to_processed);
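/*
 * Worked example of the conversion above (values hypothetical): for an ADC
 * reporting scale as IIO_VAL_FRACTIONAL with scale_val = 2500 and
 * scale_val2 = 4096 (i.e. 2500/4096 mV per LSB), no offset, raw = 800 and a
 * consumer scale of 1000 (asking for microvolts):
 *
 *	processed = div_s64(800 * 2500LL * 1000, 4096) = 488281
 *
 * i.e. roughly 488.28 mV expressed in microvolts.
 */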
int iio_read_channel_attribute(struct iio_channel *chan, int *val, int *val2,
			       enum iio_chan_info_enum attribute)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read(chan, val, val2, attribute);
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_attribute);

int iio_read_channel_offset(struct iio_channel *chan, int *val, int *val2)
{
	return iio_read_channel_attribute(chan, val, val2, IIO_CHAN_INFO_OFFSET);
}
EXPORT_SYMBOL_GPL(iio_read_channel_offset);

int iio_read_channel_processed_scale(struct iio_channel *chan, int *val,
				     unsigned int scale)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	if (iio_channel_has_info(chan->channel, IIO_CHAN_INFO_PROCESSED)) {
		ret = iio_channel_read(chan, val, NULL,
				       IIO_CHAN_INFO_PROCESSED);
		if (ret < 0)
			goto err_unlock;
		*val *= scale;
	} else {
		ret = iio_channel_read(chan, val, NULL, IIO_CHAN_INFO_RAW);
		if (ret < 0)
			goto err_unlock;
		ret = iio_convert_raw_to_processed_unlocked(chan, *val, val,
							    scale);
	}

err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_channel_processed_scale);

int iio_read_channel_processed(struct iio_channel *chan, int *val)
{
	/* This is just a special case with scale factor 1 */
	return iio_read_channel_processed_scale(chan, val, 1);
}
EXPORT_SYMBOL_GPL(iio_read_channel_processed);

int iio_read_channel_scale(struct iio_channel *chan, int *val, int *val2)
{
	return iio_read_channel_attribute(chan, val, val2, IIO_CHAN_INFO_SCALE);
}
EXPORT_SYMBOL_GPL(iio_read_channel_scale);

static int iio_channel_read_avail(struct iio_channel *chan,
				  const int **vals, int *type, int *length,
				  enum iio_chan_info_enum info)
{
	if (!iio_channel_has_available(chan->channel, info))
		return -EINVAL;

	return chan->indio_dev->info->read_avail(chan->indio_dev, chan->channel,
						 vals, type, length, info);
}

int iio_read_avail_channel_attribute(struct iio_channel *chan,
				     const int **vals, int *type, int *length,
				     enum iio_chan_info_enum attribute)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read_avail(chan, vals, type, length, attribute);
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_avail_channel_attribute);

int iio_read_avail_channel_raw(struct iio_channel *chan,
			       const int **vals, int *length)
{
	int ret;
	int type;

	ret = iio_read_avail_channel_attribute(chan, vals, &type, length,
					       IIO_CHAN_INFO_RAW);

	if (ret >= 0 && type != IIO_VAL_INT)
		/* raw values are assumed to be IIO_VAL_INT */
		ret = -EINVAL;

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_avail_channel_raw);
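/*
 * Example (sketch): most consumers want a processed value. For a voltage
 * channel the IIO convention is millivolts, so a scale factor of 1000
 * yields microvolts:
 *
 *	int uv;
 *	int ret = iio_read_channel_processed_scale(chan, &uv, 1000);
 *
 *	if (ret < 0)
 *		return ret;
 */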
static int iio_channel_read_max(struct iio_channel *chan,
				int *val, int *val2, int *type,
				enum iio_chan_info_enum info)
{
	const int *vals;
	int length;
	int ret;

	ret = iio_channel_read_avail(chan, &vals, type, &length, info);
	if (ret < 0)
		return ret;

	switch (ret) {
	case IIO_AVAIL_RANGE:
		switch (*type) {
		case IIO_VAL_INT:
			*val = vals[2];
			break;
		default:
			*val = vals[4];
			if (val2)
				*val2 = vals[5];
		}
		return 0;

	case IIO_AVAIL_LIST:
		if (length <= 0)
			return -EINVAL;
		switch (*type) {
		case IIO_VAL_INT:
			*val = max_array(vals, length);
			break;
		default:
			/* TODO: learn about max for other iio values */
			return -EINVAL;
		}
		return 0;

	default:
		return -EINVAL;
	}
}

int iio_read_max_channel_raw(struct iio_channel *chan, int *val)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;
	int type;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read_max(chan, val, NULL, &type, IIO_CHAN_INFO_RAW);
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_max_channel_raw);

static int iio_channel_read_min(struct iio_channel *chan,
				int *val, int *val2, int *type,
				enum iio_chan_info_enum info)
{
	const int *vals;
	int length;
	int ret;

	ret = iio_channel_read_avail(chan, &vals, type, &length, info);
	if (ret < 0)
		return ret;

	switch (ret) {
	case IIO_AVAIL_RANGE:
		switch (*type) {
		case IIO_VAL_INT:
			*val = vals[0];
			break;
		default:
			*val = vals[0];
			if (val2)
				*val2 = vals[1];
		}
		return 0;

	case IIO_AVAIL_LIST:
		if (length <= 0)
			return -EINVAL;
		switch (*type) {
		case IIO_VAL_INT:
			*val = min_array(vals, length);
			break;
		default:
			/* TODO: learn about min for other iio values */
			return -EINVAL;
		}
		return 0;

	default:
		return -EINVAL;
	}
}

int iio_read_min_channel_raw(struct iio_channel *chan, int *val)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;
	int type;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_read_min(chan, val, NULL, &type, IIO_CHAN_INFO_RAW);
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_read_min_channel_raw);

int iio_get_channel_type(struct iio_channel *chan, enum iio_chan_type *type)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret = 0;
	/* Need to verify underlying driver has not gone away */

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	*type = chan->channel->type;
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_get_channel_type);

static int iio_channel_write(struct iio_channel *chan, int val, int val2,
			     enum iio_chan_info_enum info)
{
	return chan->indio_dev->info->write_raw(chan->indio_dev,
						chan->channel, val, val2, info);
}

int iio_write_channel_attribute(struct iio_channel *chan, int val, int val2,
				enum iio_chan_info_enum attribute)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(chan->indio_dev);
	int ret;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	if (!chan->indio_dev->info) {
		ret = -ENODEV;
		goto err_unlock;
	}

	ret = iio_channel_write(chan, val, val2, attribute);
err_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_write_channel_attribute);
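/*
 * Example (sketch): clamping a requested DAC code to the advertised range
 * before writing it out; 'code' is a hypothetical consumer value:
 *
 *	int max, ret;
 *
 *	ret = iio_read_max_channel_raw(chan, &max);
 *	if (ret < 0)
 *		return ret;
 *	ret = iio_write_channel_attribute(chan, min(code, max), 0,
 *					  IIO_CHAN_INFO_RAW);
 */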
int iio_write_channel_raw(struct iio_channel *chan, int val)
{
	return iio_write_channel_attribute(chan, val, 0, IIO_CHAN_INFO_RAW);
}
EXPORT_SYMBOL_GPL(iio_write_channel_raw);

unsigned int iio_get_channel_ext_info_count(struct iio_channel *chan)
{
	const struct iio_chan_spec_ext_info *ext_info;
	unsigned int i = 0;

	if (!chan->channel->ext_info)
		return i;

	for (ext_info = chan->channel->ext_info; ext_info->name; ext_info++)
		++i;

	return i;
}
EXPORT_SYMBOL_GPL(iio_get_channel_ext_info_count);

static const struct iio_chan_spec_ext_info *
iio_lookup_ext_info(const struct iio_channel *chan, const char *attr)
{
	const struct iio_chan_spec_ext_info *ext_info;

	if (!chan->channel->ext_info)
		return NULL;

	for (ext_info = chan->channel->ext_info; ext_info->name; ++ext_info) {
		if (!strcmp(attr, ext_info->name))
			return ext_info;
	}

	return NULL;
}

ssize_t iio_read_channel_ext_info(struct iio_channel *chan,
				  const char *attr, char *buf)
{
	const struct iio_chan_spec_ext_info *ext_info;

	ext_info = iio_lookup_ext_info(chan, attr);
	if (!ext_info)
		return -EINVAL;

	return ext_info->read(chan->indio_dev, ext_info->private,
			      chan->channel, buf);
}
EXPORT_SYMBOL_GPL(iio_read_channel_ext_info);

ssize_t iio_write_channel_ext_info(struct iio_channel *chan, const char *attr,
				   const char *buf, size_t len)
{
	const struct iio_chan_spec_ext_info *ext_info;

	ext_info = iio_lookup_ext_info(chan, attr);
	if (!ext_info)
		return -EINVAL;

	return ext_info->write(chan->indio_dev, ext_info->private,
			       chan->channel, buf, len);
}
EXPORT_SYMBOL_GPL(iio_write_channel_ext_info);
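/*
 * Example (sketch): ext_info attributes are driver-specific, so the
 * "powerdown" name here is hypothetical. Read callbacks are written against
 * sysfs, so pass a PAGE_SIZE buffer:
 *
 *	char *buf = (char *)__get_free_page(GFP_KERNEL);
 *	ssize_t len;
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	len = iio_read_channel_ext_info(chan, "powerdown", buf);
 *	...
 *	free_page((unsigned long)buf);
 */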