// SPDX-License-Identifier: GPL-2.0-only
/*
 * Framework to handle complex IIO aggregate devices.
 *
 * The typical architecture is to have one device as the frontend device which
 * can be "linked" against one or multiple backend devices. All the IIO and
 * userspace interface is expected to be registered/managed by the frontend
 * device which will callback into the backends when needed (to get/set some
 * configuration that it does not directly control).
 *
 *                                           -------------------------------------------------------
 * ------------------                        | ------------         ------------      -------  FPGA|
 * |     ADC        |------------------------| | ADC CORE |---------| DMA CORE |------| RAM |      |
 * | (Frontend/IIO) | Serial Data (eg: LVDS) | |(backend) |---------|          |------|     |      |
 * |                |------------------------| ------------         ------------      -------      |
 * ------------------                        -------------------------------------------------------
 *
 * The framework interface is pretty simple:
 *   - Backends should register themselves with devm_iio_backend_register()
 *   - Frontend devices should get backends with devm_iio_backend_get()
 *
 * Also to note that the primary target for this framework are converters like
 * ADC/DACs so iio_backend_ops will have some operations typical of converter
 * devices. On top of that, this is "generic" for all IIO which means any kind
 * of device can make use of the framework. That said, if the iio_backend_ops
 * struct begins to grow out of control, we can always refactor things so that
 * the industrialio-backend.c is only left with the really generic stuff. Then,
 * we can build on top of it depending on the needs.
 *
 * Copyright (C) 2023-2024 Analog Devices Inc.
 */
#define dev_fmt(fmt) "iio-backend: " fmt

#include <linux/cleanup.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/slab.h>
#include <linux/stringify.h>
#include <linux/types.h>

#include <linux/iio/backend.h>
#include <linux/iio/iio.h>

struct iio_backend {
	struct list_head entry;
	const struct iio_backend_ops *ops;
	struct device *frontend_dev;
	struct device *dev;
	struct module *owner;
	void *priv;
	const char *name;
	unsigned int cached_reg_addr;
	u32 caps;
	/*
	 * This index is relative to the frontend. Meaning that for
	 * frontends with multiple backends, this will be the index of this
	 * backend. Used for the debugfs directory name.
	 */
	u8 idx;
};

/*
 * Helper struct for requesting buffers. This ensures that we have all data
 * that we need to free the buffer in a device managed action.
 */
struct iio_backend_buffer_pair {
	struct iio_backend *back;
	struct iio_buffer *buffer;
};

static LIST_HEAD(iio_back_list);
static DEFINE_MUTEX(iio_back_lock);

/*
 * Helper macros to call backend ops. Makes sure the option is supported.
 */
#define iio_backend_check_op(back, op) ({ \
	struct iio_backend *____back = back; \
	int ____ret = 0; \
 \
	if (!____back->ops->op) \
		____ret = -EOPNOTSUPP; \
 \
	____ret; \
})

#define iio_backend_op_call(back, op, args...) ({ \
	struct iio_backend *__back = back; \
	int __ret; \
 \
	__ret = iio_backend_check_op(__back, op); \
	if (!__ret) \
		__ret = __back->ops->op(__back, ##args); \
 \
	__ret; \
})

#define iio_backend_ptr_op_call(back, op, args...) ({ \
	struct iio_backend *__back = back; \
	void *ptr_err; \
	int __ret; \
 \
	__ret = iio_backend_check_op(__back, op); \
	if (__ret) \
		ptr_err = ERR_PTR(__ret); \
	else \
		ptr_err = __back->ops->op(__back, ##args); \
 \
	ptr_err; \
})

#define iio_backend_void_op_call(back, op, args...) { \
	struct iio_backend *__back = back; \
	int __ret; \
 \
	__ret = iio_backend_check_op(__back, op); \
	if (!__ret) \
		__back->ops->op(__back, ##args); \
	else \
		dev_dbg(__back->dev, "Op(%s) not implemented\n", \
			__stringify(op)); \
}

/* Read back the register at the cached address via the backend op. */
static ssize_t iio_backend_debugfs_read_reg(struct file *file,
					    char __user *userbuf,
					    size_t count, loff_t *ppos)
{
	struct iio_backend *back = file->private_data;
	char read_buf[20];
	unsigned int val;
	int ret, len;

	ret = iio_backend_op_call(back, debugfs_reg_access,
				  back->cached_reg_addr, 0, &val);
	if (ret)
		return ret;

	len = scnprintf(read_buf, sizeof(read_buf), "0x%X\n", val);

	return simple_read_from_buffer(userbuf, count, ppos, read_buf, len);
}

/*
 * Accepts "addr" (cache the address for a later read) or "addr val"
 * (write val to addr through the backend op).
 */
static ssize_t iio_backend_debugfs_write_reg(struct file *file,
					     const char __user *userbuf,
					     size_t count, loff_t *ppos)
{
	struct iio_backend *back = file->private_data;
	unsigned int val;
	char buf[80];
	ssize_t rc;
	int ret;

	if (count >= sizeof(buf))
		return -ENOSPC;

	rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, userbuf, count);
	if (rc < 0)
		return rc;

	buf[rc] = '\0';

	ret = sscanf(buf, "%i %i", &back->cached_reg_addr, &val);

	/* ret is the number of fields parsed: 1 = addr only, 2 = addr + val */
	switch (ret) {
	case 1:
		return count;
	case 2:
		ret = iio_backend_op_call(back, debugfs_reg_access,
					  back->cached_reg_addr, val, NULL);
		if (ret)
			return ret;
		return count;
	default:
		return -EINVAL;
	}
}

static const struct file_operations iio_backend_debugfs_reg_fops = {
	.open = simple_open,
	.read = iio_backend_debugfs_read_reg,
	.write = iio_backend_debugfs_write_reg,
};

static ssize_t iio_backend_debugfs_read_name(struct file *file,
					     char __user *userbuf,
					     size_t count, loff_t *ppos)
{
	struct iio_backend *back = file->private_data;
	char name[128];
	int len;

	len = scnprintf(name, sizeof(name), "%s\n", back->name);

	return simple_read_from_buffer(userbuf, count, ppos, name, len);
}

static const struct file_operations iio_backend_debugfs_name_fops = {
	.open = simple_open,
	.read = iio_backend_debugfs_read_name,
};

/**
 * iio_backend_debugfs_add - Add debugfs interfaces for Backends
 * @back: Backend device
 * @indio_dev: IIO device
 */
void iio_backend_debugfs_add(struct iio_backend *back,
			     struct iio_dev *indio_dev)
{
	struct dentry *d = iio_get_debugfs_dentry(indio_dev);
	struct dentry *back_d;
	char name[128];

	if (!IS_ENABLED(CONFIG_DEBUG_FS) || !d)
		return;
	/* Nothing to expose if neither reg access nor a name is available */
	if (!back->ops->debugfs_reg_access && !back->name)
		return;

	snprintf(name, sizeof(name), "backend%d", back->idx);

	back_d = debugfs_create_dir(name, d);
	if (IS_ERR(back_d))
		return;

	if (back->ops->debugfs_reg_access)
		debugfs_create_file("direct_reg_access", 0600, back_d, back,
				    &iio_backend_debugfs_reg_fops);

	if (back->name)
		debugfs_create_file("name", 0400, back_d, back,
				    &iio_backend_debugfs_name_fops);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_debugfs_add, "IIO_BACKEND");

/**
 * iio_backend_debugfs_print_chan_status - Print channel status
 * @back: Backend device
 * @chan: Channel number
 * @buf: Buffer where to print the status
 * @len: Available space
 *
 * One use case where this is useful is for testing test tones in a digital
 * interface and "ask" the backend to dump more details on why a test tone
 * might have errors.
 *
 * RETURNS:
 * Number of copied bytes on success, negative error code on failure.
 */
ssize_t iio_backend_debugfs_print_chan_status(struct iio_backend *back,
					      unsigned int chan, char *buf,
					      size_t len)
{
	if (!IS_ENABLED(CONFIG_DEBUG_FS))
		return -ENODEV;

	return iio_backend_op_call(back, debugfs_print_chan_status, chan, buf,
				   len);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_debugfs_print_chan_status, "IIO_BACKEND");

/**
 * iio_backend_chan_enable - Enable a backend channel
 * @back: Backend device
 * @chan: Channel number
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_chan_enable(struct iio_backend *back, unsigned int chan)
{
	return iio_backend_op_call(back, chan_enable, chan);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_chan_enable, "IIO_BACKEND");

/**
 * iio_backend_chan_disable - Disable a backend channel
 * @back: Backend device
 * @chan: Channel number
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_chan_disable(struct iio_backend *back, unsigned int chan)
{
	return iio_backend_op_call(back, chan_disable, chan);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_chan_disable, "IIO_BACKEND");

/* devm action callback; also used directly by iio_backend_disable() */
static void __iio_backend_disable(void *back)
{
	iio_backend_void_op_call(back, disable);
}

/**
 * iio_backend_disable - Backend disable
 * @back: Backend device
 */
void iio_backend_disable(struct iio_backend *back)
{
	__iio_backend_disable(back);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_disable, "IIO_BACKEND");

/**
 * iio_backend_enable - Backend enable
 * @back: Backend device
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_enable(struct iio_backend *back)
{
	return iio_backend_op_call(back, enable);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_enable, "IIO_BACKEND");

/**
 * devm_iio_backend_enable - Device managed backend enable
 * @dev: Consumer device for the backend
 * @back: Backend device
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int devm_iio_backend_enable(struct device *dev, struct iio_backend *back)
{
	int ret;

	ret = iio_backend_enable(back);
	if (ret)
		return ret;

	return devm_add_action_or_reset(dev, __iio_backend_disable, back);
}
EXPORT_SYMBOL_NS_GPL(devm_iio_backend_enable, "IIO_BACKEND");

/**
 * iio_backend_data_format_set - Configure the channel data format
 * @back: Backend device
 * @chan: Channel number
 * @data: Data format
 *
 * Properly configure a channel with respect to the expected data format. A
 * @struct iio_backend_data_fmt must be passed with the settings.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_data_format_set(struct iio_backend *back, unsigned int chan,
				const struct iio_backend_data_fmt *data)
{
	if (!data || data->type >= IIO_BACKEND_DATA_TYPE_MAX)
		return -EINVAL;

	return iio_backend_op_call(back, data_format_set, chan, data);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_data_format_set, "IIO_BACKEND");

/**
 * iio_backend_data_source_set - Select data source
 * @back: Backend device
 * @chan: Channel number
 * @data: Data source
 *
 * A given backend may have different sources to stream/sync data. This allows
 * to choose that source.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_data_source_set(struct iio_backend *back, unsigned int chan,
				enum iio_backend_data_source data)
{
	if (data >= IIO_BACKEND_DATA_SOURCE_MAX)
		return -EINVAL;

	return iio_backend_op_call(back, data_source_set, chan, data);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_data_source_set, "IIO_BACKEND");

/**
 * iio_backend_data_source_get - Get current data source
 * @back: Backend device
 * @chan: Channel number
 * @data: Pointer to receive the current source value
 *
 * A given backend may have different sources to stream/sync data. This allows
 * to know what source is in use.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_data_source_get(struct iio_backend *back, unsigned int chan,
				enum iio_backend_data_source *data)
{
	int ret;

	ret = iio_backend_op_call(back, data_source_get, chan, data);
	if (ret)
		return ret;

	/* Sanitize the backend-provided value before handing it back */
	if (*data >= IIO_BACKEND_DATA_SOURCE_MAX)
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_NS_GPL(iio_backend_data_source_get, "IIO_BACKEND");

/**
 * iio_backend_set_sampling_freq - Set channel sampling rate
 * @back: Backend device
 * @chan: Channel number
 * @sample_rate_hz: Sample rate
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_set_sampling_freq(struct iio_backend *back, unsigned int chan,
				  u64 sample_rate_hz)
{
	return iio_backend_op_call(back, set_sample_rate, chan, sample_rate_hz);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_set_sampling_freq, "IIO_BACKEND");

/**
 * iio_backend_test_pattern_set - Configure a test pattern
 * @back: Backend device
 * @chan: Channel number
 * @pattern: Test pattern
 *
 * Configure a test pattern on the backend. This is typically used for
 * calibrating the timings on the data digital interface.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_test_pattern_set(struct iio_backend *back,
				 unsigned int chan,
				 enum iio_backend_test_pattern pattern)
{
	if (pattern >= IIO_BACKEND_TEST_PATTERN_MAX)
		return -EINVAL;

	return iio_backend_op_call(back, test_pattern_set, chan, pattern);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_test_pattern_set, "IIO_BACKEND");

/**
 * iio_backend_chan_status - Get the channel status
 * @back: Backend device
 * @chan: Channel number
 * @error: Error indication
 *
 * Get the current state of the backend channel. Typically used to check if
 * there were any errors sending/receiving data.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_chan_status(struct iio_backend *back, unsigned int chan,
			    bool *error)
{
	return iio_backend_op_call(back, chan_status, chan, error);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_chan_status, "IIO_BACKEND");

/**
 * iio_backend_iodelay_set - Set digital I/O delay
 * @back: Backend device
 * @lane: Lane number
 * @taps: Number of taps
 *
 * Controls delays on sending/receiving data. One use case for this is to
 * calibrate the data digital interface so we get the best results when
 * transferring data. Note that @taps has no unit since the actual delay per
 * tap is very backend specific. Hence, frontend devices typically should go
 * through an array of @taps (the size of that array should typically match
 * the size of calibration points on the frontend device) and call this API.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_iodelay_set(struct iio_backend *back, unsigned int lane,
			    unsigned int taps)
{
	return iio_backend_op_call(back, iodelay_set, lane, taps);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_iodelay_set, "IIO_BACKEND");

/**
 * iio_backend_data_sample_trigger - Control when to sample data
 * @back: Backend device
 * @trigger: Data trigger
 *
 * Mostly useful for input backends. Configures the backend for when to sample
 * data (eg: rising vs falling edge).
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_data_sample_trigger(struct iio_backend *back,
				    enum iio_backend_sample_trigger trigger)
{
	if (trigger >= IIO_BACKEND_SAMPLE_TRIGGER_MAX)
		return -EINVAL;

	return iio_backend_op_call(back, data_sample_trigger, trigger);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_data_sample_trigger, "IIO_BACKEND");

/* devm action: release the buffer obtained via .request_buffer() */
static void iio_backend_free_buffer(void *arg)
{
	struct iio_backend_buffer_pair *pair = arg;

	iio_backend_void_op_call(pair->back, free_buffer, pair->buffer);
}

/**
 * devm_iio_backend_request_buffer - Device managed buffer request
 * @dev: Consumer device for the backend
 * @back: Backend device
 * @indio_dev: IIO device
 *
 * Request an IIO buffer from the backend. The type of the buffer (typically
 * INDIO_BUFFER_HARDWARE) is up to the backend to decide. This is because,
 * normally, the backend dictates what kind of buffering we can get.
 *
 * The backend .free_buffer() hook is automatically called on @dev detach.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int devm_iio_backend_request_buffer(struct device *dev,
				    struct iio_backend *back,
				    struct iio_dev *indio_dev)
{
	struct iio_backend_buffer_pair *pair;
	struct iio_buffer *buffer;

	pair = devm_kzalloc(dev, sizeof(*pair), GFP_KERNEL);
	if (!pair)
		return -ENOMEM;

	buffer = iio_backend_ptr_op_call(back, request_buffer, indio_dev);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	/* weak reference should be all what we need */
	pair->back = back;
	pair->buffer = buffer;

	return devm_add_action_or_reset(dev, iio_backend_free_buffer, pair);
}
EXPORT_SYMBOL_NS_GPL(devm_iio_backend_request_buffer, "IIO_BACKEND");

/**
 * iio_backend_read_raw - Read a channel attribute from a backend device.
 * @back: Backend device
 * @chan: IIO channel reference
 * @val: First returned value
 * @val2: Second returned value
 * @mask: Specify the attribute to return
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_read_raw(struct iio_backend *back,
			 struct iio_chan_spec const *chan, int *val, int *val2,
			 long mask)
{
	return iio_backend_op_call(back, read_raw, chan, val, val2, mask);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_read_raw, "IIO_BACKEND");

static struct iio_backend *iio_backend_from_indio_dev_parent(const struct device *dev)
{
	struct iio_backend *back = ERR_PTR(-ENODEV), *iter;

	/*
	 * We deliberately go through all backends even after finding a match.
	 * The reason is that we want to catch frontend devices which have more
	 * than one backend in which case returning the first we find is bogus.
	 * For those cases, frontends need to explicitly define
	 * get_iio_backend() in struct iio_info.
	 */
	guard(mutex)(&iio_back_lock);
	list_for_each_entry(iter, &iio_back_list, entry) {
		if (dev == iter->frontend_dev) {
			if (!IS_ERR(back)) {
				dev_warn(dev,
					 "Multiple backends! get_iio_backend() needs to be implemented");
				return ERR_PTR(-ENODEV);
			}

			back = iter;
		}
	}

	return back;
}

/**
 * iio_backend_ext_info_get - IIO ext_info read callback
 * @indio_dev: IIO device
 * @private: Data private to the driver
 * @chan: IIO channel
 * @buf: Buffer where to place the attribute data
 *
 * This helper is intended to be used by backends that extend an IIO channel
 * (through iio_backend_extend_chan_spec()) with extended info. In that case,
 * backends are not supposed to give their own callbacks (as they would not
 * have a way to get the backend from indio_dev). This is the getter.
 *
 * RETURNS:
 * Number of bytes written to buf, negative error number on failure.
 */
ssize_t iio_backend_ext_info_get(struct iio_dev *indio_dev, uintptr_t private,
				 const struct iio_chan_spec *chan, char *buf)
{
	struct iio_backend *back;

	/*
	 * The below should work for the majority of the cases. It will not work
	 * when one frontend has multiple backends in which case we'll need a
	 * new callback in struct iio_info so we can directly request the proper
	 * backend from the frontend. Anyways, let's only introduce new options
	 * when really needed...
	 */
	back = iio_backend_from_indio_dev_parent(indio_dev->dev.parent);
	if (IS_ERR(back))
		return PTR_ERR(back);

	return iio_backend_op_call(back, ext_info_get, private, chan, buf);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_ext_info_get, "IIO_BACKEND");

/**
 * iio_backend_ext_info_set - IIO ext_info write callback
 * @indio_dev: IIO device
 * @private: Data private to the driver
 * @chan: IIO channel
 * @buf: Buffer holding the sysfs attribute
 * @len: Buffer length
 *
 * This helper is intended to be used by backends that extend an IIO channel
 * (through iio_backend_extend_chan_spec()) with extended info. In that case,
 * backends are not supposed to give their own callbacks (as they would not
 * have a way to get the backend from indio_dev). This is the setter.
 *
 * RETURNS:
 * Buffer length on success, negative error number on failure.
 */
ssize_t iio_backend_ext_info_set(struct iio_dev *indio_dev, uintptr_t private,
				 const struct iio_chan_spec *chan,
				 const char *buf, size_t len)
{
	struct iio_backend *back;

	back = iio_backend_from_indio_dev_parent(indio_dev->dev.parent);
	if (IS_ERR(back))
		return PTR_ERR(back);

	return iio_backend_op_call(back, ext_info_set, private, chan, buf, len);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_ext_info_set, "IIO_BACKEND");

/**
 * iio_backend_interface_type_get - get the interface type used.
 * @back: Backend device
 * @type: Interface type
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_interface_type_get(struct iio_backend *back,
				   enum iio_backend_interface_type *type)
{
	int ret;

	ret = iio_backend_op_call(back, interface_type_get, type);
	if (ret)
		return ret;

	/* Sanitize the backend-provided value before handing it back */
	if (*type >= IIO_BACKEND_INTERFACE_MAX)
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_NS_GPL(iio_backend_interface_type_get, "IIO_BACKEND");

/**
 * iio_backend_data_size_set - set the data width/size in the data bus.
 * @back: Backend device
 * @size: Size in bits
 *
 * Some frontend devices can dynamically control the word/data size on the
 * interface/data bus. Hence, the backend device needs to be aware of it so
 * data can be correctly transferred.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_data_size_set(struct iio_backend *back, unsigned int size)
{
	if (!size)
		return -EINVAL;

	return iio_backend_op_call(back, data_size_set, size);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_data_size_set, "IIO_BACKEND");

/**
 * iio_backend_oversampling_ratio_set - set the oversampling ratio
 * @back: Backend device
 * @chan: Channel number
 * @ratio: The oversampling ratio - value 1 corresponds to no oversampling.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_oversampling_ratio_set(struct iio_backend *back,
				       unsigned int chan,
				       unsigned int ratio)
{
	return iio_backend_op_call(back, oversampling_ratio_set, chan, ratio);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_oversampling_ratio_set, "IIO_BACKEND");

/**
 * iio_backend_extend_chan_spec - Extend an IIO channel
 * @back: Backend device
 * @chan: IIO channel
 *
 * Some backends may have their own functionalities and hence capable of
 * extending a frontend's channel.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_extend_chan_spec(struct iio_backend *back,
				 struct iio_chan_spec *chan)
{
	const struct iio_chan_spec_ext_info *frontend_ext_info = chan->ext_info;
	const struct iio_chan_spec_ext_info *back_ext_info;
	int ret;

	ret = iio_backend_op_call(back, extend_chan_spec, chan);
	if (ret)
		return ret;
	/*
	 * Let's keep things simple for now. Don't allow to overwrite the
	 * frontend's extended info. If ever needed, we can support appending
	 * it.
	 */
	if (frontend_ext_info && chan->ext_info != frontend_ext_info)
		return -EOPNOTSUPP;
	if (!chan->ext_info)
		return 0;

	/* Don't allow backends to get creative and force their own handlers */
	for (back_ext_info = chan->ext_info; back_ext_info->name; back_ext_info++) {
		if (back_ext_info->read != iio_backend_ext_info_get)
			return -EINVAL;
		if (back_ext_info->write != iio_backend_ext_info_set)
			return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_NS_GPL(iio_backend_extend_chan_spec, "IIO_BACKEND");

/**
 * iio_backend_has_caps - Check if backend has specific capabilities
 * @back: Backend device
 * @caps: Capabilities to check
 *
 * RETURNS:
 * True if backend has all the requested capabilities, false otherwise.
 */
bool iio_backend_has_caps(struct iio_backend *back, u32 caps)
{
	return (back->caps & caps) == caps;
}
EXPORT_SYMBOL_NS_GPL(iio_backend_has_caps, "IIO_BACKEND");

/* devm action: drop the module reference taken in __devm_iio_backend_get() */
static void iio_backend_release(void *arg)
{
	struct iio_backend *back = arg;

	module_put(back->owner);
}

static int __devm_iio_backend_get(struct device *dev, struct iio_backend *back)
{
	struct device_link *link;
	int ret;

	/*
	 * Make sure the provider cannot be unloaded before the consumer module.
	 * Note that device_links would still guarantee that nothing is
	 * accessible (and breaks) but this makes it explicit that the consumer
	 * module must be also unloaded.
	 */
	if (!try_module_get(back->owner))
		return dev_err_probe(dev, -ENODEV,
				     "Cannot get module reference\n");

	ret = devm_add_action_or_reset(dev, iio_backend_release, back);
	if (ret)
		return ret;

	link = device_link_add(dev, back->dev, DL_FLAG_AUTOREMOVE_CONSUMER);
	if (!link)
		return dev_err_probe(dev, -EINVAL,
				     "Could not link to supplier(%s)\n",
				     dev_name(back->dev));

	back->frontend_dev = dev;

	dev_dbg(dev, "Found backend(%s) device\n", dev_name(back->dev));

	return 0;
}

/**
 * iio_backend_filter_type_set - Set filter type
 * @back: Backend device
 * @type: Filter type.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_filter_type_set(struct iio_backend *back,
				enum iio_backend_filter_type type)
{
	if (type >= IIO_BACKEND_FILTER_TYPE_MAX)
		return -EINVAL;

	return iio_backend_op_call(back, filter_type_set, type);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_filter_type_set, "IIO_BACKEND");

/**
 * iio_backend_interface_data_align - Perform the data alignment process.
 * @back: Backend device
 * @timeout_us: Timeout value in us.
 *
 * When activated, it initiates a process that aligns the sample's most
 * significant bit (MSB) based solely on the captured data, without
 * considering any other external signals.
 *
 * The timeout_us value must be greater than 0.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_interface_data_align(struct iio_backend *back, u32 timeout_us)
{
	if (!timeout_us)
		return -EINVAL;

	return iio_backend_op_call(back, interface_data_align, timeout_us);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_interface_data_align, "IIO_BACKEND");

/**
 * iio_backend_num_lanes_set - Number of lanes enabled.
 * @back: Backend device
 * @num_lanes: Number of lanes.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_num_lanes_set(struct iio_backend *back, unsigned int num_lanes)
{
	if (!num_lanes)
		return -EINVAL;

	return iio_backend_op_call(back, num_lanes_set, num_lanes);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_num_lanes_set, "IIO_BACKEND");

/**
 * iio_backend_ddr_enable - Enable interface DDR (Double Data Rate) mode
 * @back: Backend device
 *
 * Enable DDR, data is generated by the IP at each front (raising and falling)
 * of the bus clock signal.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_ddr_enable(struct iio_backend *back)
{
	return iio_backend_op_call(back, ddr_enable);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_ddr_enable, "IIO_BACKEND");

/**
 * iio_backend_ddr_disable - Disable interface DDR (Double Data Rate) mode
 * @back: Backend device
 *
 * Disable DDR, setting into SDR mode (Single Data Rate).
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_ddr_disable(struct iio_backend *back)
{
	return iio_backend_op_call(back, ddr_disable);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_ddr_disable, "IIO_BACKEND");

/**
 * iio_backend_data_stream_enable - Enable data stream
 * @back: Backend device
 *
 * Enable data stream over the bus interface.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_data_stream_enable(struct iio_backend *back)
{
	return iio_backend_op_call(back, data_stream_enable);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_data_stream_enable, "IIO_BACKEND");

/**
 * iio_backend_data_stream_disable - Disable data stream
 * @back: Backend device
 *
 * Disable data stream over the bus interface.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_data_stream_disable(struct iio_backend *back)
{
	return iio_backend_op_call(back, data_stream_disable);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_data_stream_disable, "IIO_BACKEND");

/**
 * iio_backend_data_transfer_addr - Set data address.
 * @back: Backend device
 * @address: Data register address
 *
 * Some devices may need to inform the backend about an address
 * where to read or write the data.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_data_transfer_addr(struct iio_backend *back, u32 address)
{
	return iio_backend_op_call(back, data_transfer_addr, address);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_data_transfer_addr, "IIO_BACKEND");

static struct iio_backend *__devm_iio_backend_fwnode_get(struct device *dev, const char *name,
							 struct fwnode_handle *fwnode)
{
	struct iio_backend *back;
	unsigned int index;
	int ret;

	if (name) {
		/* Resolve the named backend to an index in "io-backends" */
		ret = device_property_match_string(dev, "io-backend-names",
						   name);
		if (ret < 0)
			return ERR_PTR(ret);
		index = ret;
	} else {
		index = 0;
	}

	struct fwnode_handle *fwnode_back __free(fwnode_handle) =
		fwnode_find_reference(fwnode, "io-backends", index);
	if (IS_ERR(fwnode_back))
		return dev_err_cast_probe(dev, fwnode_back,
					  "Cannot get Firmware reference\n");

	guard(mutex)(&iio_back_lock);
	list_for_each_entry(back, &iio_back_list, entry) {
		if (!device_match_fwnode(back->dev, fwnode_back))
			continue;

		ret = __devm_iio_backend_get(dev, back);
		if (ret)
			return ERR_PTR(ret);

		if (name)
			back->idx = index;

		return back;
	}

	/* Backend not registered (yet); let the consumer probe retry later */
	return ERR_PTR(-EPROBE_DEFER);
}

/**
 * devm_iio_backend_get - Device managed backend device get
 * @dev: Consumer device for the backend
 * @name: Backend name
 *
 * Gets the backend associated with @dev.
 *
 * RETURNS:
 * A backend pointer, negative error pointer otherwise.
 */
struct iio_backend *devm_iio_backend_get(struct device *dev, const char *name)
{
	return __devm_iio_backend_fwnode_get(dev, name, dev_fwnode(dev));
}
EXPORT_SYMBOL_NS_GPL(devm_iio_backend_get, "IIO_BACKEND");

/**
 * devm_iio_backend_fwnode_get - Device managed backend firmware node get
 * @dev: Consumer device for the backend
 * @name: Backend name
 * @fwnode: Firmware node of the backend consumer
 *
 * Gets the backend associated with a firmware node.
 *
 * RETURNS:
 * A backend pointer, negative error pointer otherwise.
 */
struct iio_backend *devm_iio_backend_fwnode_get(struct device *dev,
						const char *name,
						struct fwnode_handle *fwnode)
{
	return __devm_iio_backend_fwnode_get(dev, name, fwnode);
}
EXPORT_SYMBOL_NS_GPL(devm_iio_backend_fwnode_get, "IIO_BACKEND");

/**
 * __devm_iio_backend_get_from_fwnode_lookup - Device managed fwnode backend device get
 * @dev: Consumer device for the backend
 * @fwnode: Firmware node of the backend device
 *
 * Search the backend list for a device matching @fwnode.
 * This API should not be used and it's only present for preventing the first
 * user of this framework from breaking its DT ABI.
 *
 * RETURNS:
 * A backend pointer, negative error pointer otherwise.
 */
struct iio_backend *
__devm_iio_backend_get_from_fwnode_lookup(struct device *dev,
					  struct fwnode_handle *fwnode)
{
	struct iio_backend *back;
	int ret;

	guard(mutex)(&iio_back_lock);
	list_for_each_entry(back, &iio_back_list, entry) {
		if (!device_match_fwnode(back->dev, fwnode))
			continue;

		ret = __devm_iio_backend_get(dev, back);
		if (ret)
			return ERR_PTR(ret);

		return back;
	}

	/* Backend not registered (yet); let the consumer probe retry later */
	return ERR_PTR(-EPROBE_DEFER);
}
EXPORT_SYMBOL_NS_GPL(__devm_iio_backend_get_from_fwnode_lookup, "IIO_BACKEND");

/**
 * iio_backend_get_priv - Get driver private data
 * @back: Backend device
 *
 * RETURNS:
 * Pointer to the driver private data associated with the backend.
 */
void *iio_backend_get_priv(const struct iio_backend *back)
{
	return back->priv;
}
EXPORT_SYMBOL_NS_GPL(iio_backend_get_priv, "IIO_BACKEND");

/* devm action: remove the backend from the global list on device detach */
static void iio_backend_unregister(void *arg)
{
	struct iio_backend *back = arg;

	guard(mutex)(&iio_back_lock);
	list_del(&back->entry);
}

/**
 * devm_iio_backend_register - Device managed backend device register
 * @dev: Backend device being registered
 * @info: Backend info
 * @priv: Device private data
 *
 * @info is mandatory. Not providing it results in -EINVAL.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int devm_iio_backend_register(struct device *dev,
			      const struct iio_backend_info *info, void *priv)
{
	struct iio_backend *back;

	if (!info || !info->ops)
		return dev_err_probe(dev, -EINVAL, "No backend ops given\n");

	/*
	 * Through device_links, we guarantee that a frontend device cannot be
	 * bound/exist if the backend driver is not around. Hence, we can bind
	 * the backend object lifetime with the device being passed since
	 * removing it will tear the frontend/consumer down.
	 */
	back = devm_kzalloc(dev, sizeof(*back), GFP_KERNEL);
	if (!back)
		return -ENOMEM;

	back->ops = info->ops;
	back->name = info->name;
	back->caps = info->caps;
	back->owner = dev->driver->owner;
	back->dev = dev;
	back->priv = priv;
	scoped_guard(mutex, &iio_back_lock)
		list_add(&back->entry, &iio_back_list);

	return devm_add_action_or_reset(dev, iio_backend_unregister, back);
}
EXPORT_SYMBOL_NS_GPL(devm_iio_backend_register, "IIO_BACKEND");

MODULE_AUTHOR("Nuno Sa <nuno.sa@analog.com>");
MODULE_DESCRIPTION("Framework to handle complex IIO aggregate devices");
MODULE_LICENSE("GPL");