// SPDX-License-Identifier: GPL-2.0-only
/*
 * Framework to handle complex IIO aggregate devices.
 *
 * The typical architecture is to have one device as the frontend device which
 * can be "linked" against one or multiple backend devices. All the IIO and
 * userspace interfaces are expected to be registered/managed by the frontend
 * device which will call back into the backends when needed (to get/set some
 * configuration that it does not directly control).
 *
 *                                           -------------------------------------------------------
 * ------------------                        | ------------         ------------      -------  FPGA|
 * |     ADC        |------------------------| | ADC CORE |---------| DMA CORE |------| RAM |      |
 * | (Frontend/IIO) | Serial Data (eg: LVDS) | |(backend) |---------|          |------|     |      |
 * |                |------------------------| ------------         ------------      -------      |
 * ------------------                        -------------------------------------------------------
 *
 * The framework interface is pretty simple:
 * - Backends should register themselves with devm_iio_backend_register().
 * - Frontend devices should get backends with devm_iio_backend_get().
 * (A usage sketch follows the includes below.)
 *
 * Also note that the primary targets for this framework are converters like
 * ADCs/DACs, so iio_backend_ops will have some operations typical of converter
 * devices. On top of that, this is "generic" for all of IIO, which means any
 * kind of device can make use of the framework. That said, if the
 * iio_backend_ops struct begins to grow out of control, we can always refactor
 * things so that industrialio-backend.c is only left with the really generic
 * stuff. Then, we can build on top of it depending on the needs.
 *
 * Copyright (C) 2023-2024 Analog Devices Inc.
 */
#define dev_fmt(fmt) "iio-backend: " fmt

#include <linux/cleanup.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/slab.h>
#include <linux/stringify.h>
#include <linux/types.h>

#include <linux/iio/backend.h>
#include <linux/iio/iio.h>
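/*
 * Usage sketch of the register/get pairing described above. The driver and
 * device names are hypothetical; only the framework calls are real:
 *
 *	// Backend side: register ops at probe time.
 *	static const struct iio_backend_ops example_backend_ops = {
 *		.chan_enable = example_backend_chan_enable,
 *		...
 *	};
 *
 *	static const struct iio_backend_info example_backend_info = {
 *		.name = "example-backend",
 *		.ops = &example_backend_ops,
 *	};
 *
 *	ret = devm_iio_backend_register(dev, &example_backend_info, priv);
 *
 *	// Frontend side: look up the backend referenced by the firmware node.
 *	back = devm_iio_backend_get(dev, NULL);
 *	if (IS_ERR(back))
 *		return PTR_ERR(back);
 */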
struct iio_backend {
	struct list_head entry;
	const struct iio_backend_ops *ops;
	struct device *frontend_dev;
	struct device *dev;
	struct module *owner;
	void *priv;
	const char *name;
	unsigned int cached_reg_addr;
	/*
	 * This index is relative to the frontend. Meaning that for
	 * frontends with multiple backends, this will be the index of this
	 * backend. Used for the debugfs directory name.
	 */
	u8 idx;
};

/*
 * Helper struct for requesting buffers. This ensures that we have all data
 * that we need to free the buffer in a device managed action.
 */
struct iio_backend_buffer_pair {
	struct iio_backend *back;
	struct iio_buffer *buffer;
};

static LIST_HEAD(iio_back_list);
static DEFINE_MUTEX(iio_back_lock);

/*
 * Helper macros to call backend ops. Makes sure the op is supported.
 */
#define iio_backend_check_op(back, op) ({ \
	struct iio_backend *____back = back; \
	int ____ret = 0; \
 \
	if (!____back->ops->op) \
		____ret = -EOPNOTSUPP; \
 \
	____ret; \
})

#define iio_backend_op_call(back, op, args...) ({ \
	struct iio_backend *__back = back; \
	int __ret; \
 \
	__ret = iio_backend_check_op(__back, op); \
	if (!__ret) \
		__ret = __back->ops->op(__back, ##args); \
 \
	__ret; \
})

#define iio_backend_ptr_op_call(back, op, args...) ({ \
	struct iio_backend *__back = back; \
	void *ptr_err; \
	int __ret; \
 \
	__ret = iio_backend_check_op(__back, op); \
	if (__ret) \
		ptr_err = ERR_PTR(__ret); \
	else \
		ptr_err = __back->ops->op(__back, ##args); \
 \
	ptr_err; \
})

#define iio_backend_void_op_call(back, op, args...) { \
	struct iio_backend *__back = back; \
	int __ret; \
 \
	__ret = iio_backend_check_op(__back, op); \
	if (!__ret) \
		__back->ops->op(__back, ##args); \
	else \
		dev_dbg(__back->dev, "Op(%s) not implemented\n", \
			__stringify(op)); \
}

static ssize_t iio_backend_debugfs_read_reg(struct file *file,
					    char __user *userbuf,
					    size_t count, loff_t *ppos)
{
	struct iio_backend *back = file->private_data;
	char read_buf[20];
	unsigned int val;
	int ret, len;

	ret = iio_backend_op_call(back, debugfs_reg_access,
				  back->cached_reg_addr, 0, &val);
	if (ret)
		return ret;

	len = scnprintf(read_buf, sizeof(read_buf), "0x%X\n", val);

	return simple_read_from_buffer(userbuf, count, ppos, read_buf, len);
}

static ssize_t iio_backend_debugfs_write_reg(struct file *file,
					     const char __user *userbuf,
					     size_t count, loff_t *ppos)
{
	struct iio_backend *back = file->private_data;
	unsigned int val;
	char buf[80];
	ssize_t rc;
	int ret;

	rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, userbuf, count);
	if (rc < 0)
		return rc;

	/*
	 * NUL terminate at the number of bytes actually copied. Using @count
	 * here would write out of bounds for inputs bigger than the buffer.
	 */
	buf[rc] = '\0';

	ret = sscanf(buf, "%i %i", &back->cached_reg_addr, &val);

	switch (ret) {
	case 1:
		return count;
	case 2:
		ret = iio_backend_op_call(back, debugfs_reg_access,
					  back->cached_reg_addr, val, NULL);
		if (ret)
			return ret;
		return count;
	default:
		return -EINVAL;
	}
}

static const struct file_operations iio_backend_debugfs_reg_fops = {
	.open = simple_open,
	.read = iio_backend_debugfs_read_reg,
	.write = iio_backend_debugfs_write_reg,
};
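/*
 * The direct_reg_access file behaves like the core IIO one: writing "<addr>"
 * caches the register address, writing "<addr> <val>" performs a register
 * write, and a subsequent read returns the value at the cached address.
 * For example (path assuming debugfs is mounted at /sys/kernel/debug):
 *
 *	echo 0x10 > /sys/kernel/debug/iio/iio:device0/backend0/direct_reg_access
 *	cat /sys/kernel/debug/iio/iio:device0/backend0/direct_reg_access
 *	echo "0x10 0x3" > /sys/kernel/debug/iio/iio:device0/backend0/direct_reg_access
 */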
static ssize_t iio_backend_debugfs_read_name(struct file *file,
					     char __user *userbuf,
					     size_t count, loff_t *ppos)
{
	struct iio_backend *back = file->private_data;
	char name[128];
	int len;

	len = scnprintf(name, sizeof(name), "%s\n", back->name);

	return simple_read_from_buffer(userbuf, count, ppos, name, len);
}

static const struct file_operations iio_backend_debugfs_name_fops = {
	.open = simple_open,
	.read = iio_backend_debugfs_read_name,
};

/**
 * iio_backend_debugfs_add - Add debugfs interfaces for backends
 * @back: Backend device
 * @indio_dev: IIO device
 */
void iio_backend_debugfs_add(struct iio_backend *back,
			     struct iio_dev *indio_dev)
{
	struct dentry *d = iio_get_debugfs_dentry(indio_dev);
	struct dentry *back_d;
	char name[128];

	if (!IS_ENABLED(CONFIG_DEBUG_FS) || !d)
		return;
	if (!back->ops->debugfs_reg_access && !back->name)
		return;

	snprintf(name, sizeof(name), "backend%d", back->idx);

	back_d = debugfs_create_dir(name, d);
	if (IS_ERR(back_d))
		return;

	if (back->ops->debugfs_reg_access)
		debugfs_create_file("direct_reg_access", 0600, back_d, back,
				    &iio_backend_debugfs_reg_fops);

	if (back->name)
		debugfs_create_file("name", 0400, back_d, back,
				    &iio_backend_debugfs_name_fops);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_debugfs_add, "IIO_BACKEND");

/**
 * iio_backend_debugfs_print_chan_status - Print channel status
 * @back: Backend device
 * @chan: Channel number
 * @buf: Buffer where to print the status
 * @len: Available space
 *
 * One use case is when testing test tones in a digital interface: the
 * frontend can ask the backend to dump more details on why a test tone might
 * have errors.
 *
 * RETURNS:
 * Number of copied bytes on success, negative error code on failure.
 */
ssize_t iio_backend_debugfs_print_chan_status(struct iio_backend *back,
					      unsigned int chan, char *buf,
					      size_t len)
{
	if (!IS_ENABLED(CONFIG_DEBUG_FS))
		return -ENODEV;

	return iio_backend_op_call(back, debugfs_print_chan_status, chan, buf,
				   len);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_debugfs_print_chan_status, "IIO_BACKEND");

/**
 * iio_backend_chan_enable - Enable a backend channel
 * @back: Backend device
 * @chan: Channel number
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_chan_enable(struct iio_backend *back, unsigned int chan)
{
	return iio_backend_op_call(back, chan_enable, chan);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_chan_enable, "IIO_BACKEND");

/**
 * iio_backend_chan_disable - Disable a backend channel
 * @back: Backend device
 * @chan: Channel number
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_chan_disable(struct iio_backend *back, unsigned int chan)
{
	return iio_backend_op_call(back, chan_disable, chan);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_chan_disable, "IIO_BACKEND");

static void __iio_backend_disable(void *back)
{
	iio_backend_void_op_call(back, disable);
}

/**
 * iio_backend_disable - Backend disable
 * @back: Backend device
 */
void iio_backend_disable(struct iio_backend *back)
{
	__iio_backend_disable(back);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_disable, "IIO_BACKEND");

/**
 * iio_backend_enable - Backend enable
 * @back: Backend device
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_enable(struct iio_backend *back)
{
	return iio_backend_op_call(back, enable);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_enable, "IIO_BACKEND");

/**
 * devm_iio_backend_enable - Device managed backend enable
 * @dev: Consumer device for the backend
 * @back: Backend device
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int devm_iio_backend_enable(struct device *dev, struct iio_backend *back)
{
	int ret;

	ret = iio_backend_enable(back);
	if (ret)
		return ret;

	return devm_add_action_or_reset(dev, __iio_backend_disable, back);
}
EXPORT_SYMBOL_NS_GPL(devm_iio_backend_enable, "IIO_BACKEND");
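/*
 * A minimal sketch of the device managed enable flow in a hypothetical
 * frontend probe: power up the backend, then enable the channels in use.
 * st->back and st->num_channels are assumptions for illustration only.
 *
 *	ret = devm_iio_backend_enable(dev, st->back);
 *	if (ret)
 *		return ret;
 *
 *	for (c = 0; c < st->num_channels; c++) {
 *		ret = iio_backend_chan_enable(st->back, c);
 *		if (ret)
 *			return ret;
 *	}
 */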
/**
 * iio_backend_data_format_set - Configure the channel data format
 * @back: Backend device
 * @chan: Channel number
 * @data: Data format
 *
 * Properly configure a channel with respect to the expected data format. A
 * &struct iio_backend_data_fmt must be passed with the settings.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_data_format_set(struct iio_backend *back, unsigned int chan,
				const struct iio_backend_data_fmt *data)
{
	if (!data || data->type >= IIO_BACKEND_DATA_TYPE_MAX)
		return -EINVAL;

	return iio_backend_op_call(back, data_format_set, chan, data);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_data_format_set, "IIO_BACKEND");
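/*
 * A minimal sketch of configuring the data format, assuming a device that
 * produces two's complement samples needing sign extension (the field
 * values are illustrative, not a recommendation):
 *
 *	struct iio_backend_data_fmt fmt = {
 *		.type = IIO_BACKEND_TWOS_COMPLEMENT,
 *		.sign_extend = true,
 *	};
 *
 *	ret = iio_backend_data_format_set(back, chan, &fmt);
 */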
/**
 * iio_backend_data_source_set - Select data source
 * @back: Backend device
 * @chan: Channel number
 * @data: Data source
 *
 * A given backend may have different sources to stream/sync data. This allows
 * choosing that source.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_data_source_set(struct iio_backend *back, unsigned int chan,
				enum iio_backend_data_source data)
{
	if (data >= IIO_BACKEND_DATA_SOURCE_MAX)
		return -EINVAL;

	return iio_backend_op_call(back, data_source_set, chan, data);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_data_source_set, "IIO_BACKEND");

/**
 * iio_backend_data_source_get - Get current data source
 * @back: Backend device
 * @chan: Channel number
 * @data: Pointer to receive the current source value
 *
 * A given backend may have different sources to stream/sync data. This allows
 * querying which source is in use.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_data_source_get(struct iio_backend *back, unsigned int chan,
				enum iio_backend_data_source *data)
{
	int ret;

	ret = iio_backend_op_call(back, data_source_get, chan, data);
	if (ret)
		return ret;

	if (*data >= IIO_BACKEND_DATA_SOURCE_MAX)
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_NS_GPL(iio_backend_data_source_get, "IIO_BACKEND");

/**
 * iio_backend_set_sampling_freq - Set channel sampling rate
 * @back: Backend device
 * @chan: Channel number
 * @sample_rate_hz: Sample rate in Hz
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_set_sampling_freq(struct iio_backend *back, unsigned int chan,
				  u64 sample_rate_hz)
{
	return iio_backend_op_call(back, set_sample_rate, chan, sample_rate_hz);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_set_sampling_freq, "IIO_BACKEND");

/**
 * iio_backend_test_pattern_set - Configure a test pattern
 * @back: Backend device
 * @chan: Channel number
 * @pattern: Test pattern
 *
 * Configure a test pattern on the backend. This is typically used for
 * calibrating the timings on the digital data interface.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_test_pattern_set(struct iio_backend *back,
				 unsigned int chan,
				 enum iio_backend_test_pattern pattern)
{
	if (pattern >= IIO_BACKEND_TEST_PATTERN_MAX)
		return -EINVAL;

	return iio_backend_op_call(back, test_pattern_set, chan, pattern);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_test_pattern_set, "IIO_BACKEND");

/**
 * iio_backend_chan_status - Get the channel status
 * @back: Backend device
 * @chan: Channel number
 * @error: Error indication
 *
 * Get the current state of the backend channel. Typically used to check if
 * there were any errors sending/receiving data.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_chan_status(struct iio_backend *back, unsigned int chan,
			    bool *error)
{
	return iio_backend_op_call(back, chan_status, chan, error);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_chan_status, "IIO_BACKEND");

/**
 * iio_backend_iodelay_set - Set digital I/O delay
 * @back: Backend device
 * @lane: Lane number
 * @taps: Number of taps
 *
 * Controls delays on sending/receiving data. One use case for this is to
 * calibrate the digital data interface so we get the best results when
 * transferring data. Note that @taps has no unit since the actual delay per
 * tap is very backend specific. Hence, frontend devices should typically
 * iterate over an array of @taps (whose size should typically match the
 * number of calibration points on the frontend device) and call this API.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_iodelay_set(struct iio_backend *back, unsigned int lane,
			    unsigned int taps)
{
	return iio_backend_op_call(back, iodelay_set, lane, taps);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_iodelay_set, "IIO_BACKEND");
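/*
 * A sketch of the calibration loop hinted at above. Everything here is
 * illustrative: IIO_BACKEND_ADI_PRBS_9A is just one possible test pattern,
 * and taps[]/num_taps are hypothetical frontend data.
 *
 *	ret = iio_backend_test_pattern_set(back, chan, IIO_BACKEND_ADI_PRBS_9A);
 *	if (ret)
 *		return ret;
 *
 *	for (t = 0; t < num_taps; t++) {
 *		ret = iio_backend_iodelay_set(back, lane, taps[t]);
 *		if (ret)
 *			return ret;
 *
 *		ret = iio_backend_chan_status(back, chan, &error);
 *		if (ret)
 *			return ret;
 *		// Remember which taps[t] settings had error == false, then
 *		// pick a point in the middle of the largest good window.
 *	}
 */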
/**
 * iio_backend_data_sample_trigger - Control when to sample data
 * @back: Backend device
 * @trigger: Data trigger
 *
 * Mostly useful for input backends. Configures the backend for when to sample
 * data (e.g. rising vs. falling edge).
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_data_sample_trigger(struct iio_backend *back,
				    enum iio_backend_sample_trigger trigger)
{
	if (trigger >= IIO_BACKEND_SAMPLE_TRIGGER_MAX)
		return -EINVAL;

	return iio_backend_op_call(back, data_sample_trigger, trigger);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_data_sample_trigger, "IIO_BACKEND");

static void iio_backend_free_buffer(void *arg)
{
	struct iio_backend_buffer_pair *pair = arg;

	iio_backend_void_op_call(pair->back, free_buffer, pair->buffer);
}

/**
 * devm_iio_backend_request_buffer - Device managed buffer request
 * @dev: Consumer device for the backend
 * @back: Backend device
 * @indio_dev: IIO device
 *
 * Request an IIO buffer from the backend. The type of the buffer (typically
 * INDIO_BUFFER_HARDWARE) is up to the backend to decide. This is because,
 * normally, the backend dictates what kind of buffering we can get.
 *
 * The backend .free_buffer() hook is automatically called on @dev detach.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int devm_iio_backend_request_buffer(struct device *dev,
				    struct iio_backend *back,
				    struct iio_dev *indio_dev)
{
	struct iio_backend_buffer_pair *pair;
	struct iio_buffer *buffer;

	pair = devm_kzalloc(dev, sizeof(*pair), GFP_KERNEL);
	if (!pair)
		return -ENOMEM;

	buffer = iio_backend_ptr_op_call(back, request_buffer, indio_dev);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	/* A weak reference should be all we need */
	pair->back = back;
	pair->buffer = buffer;

	return devm_add_action_or_reset(dev, iio_backend_free_buffer, pair);
}
EXPORT_SYMBOL_NS_GPL(devm_iio_backend_request_buffer, "IIO_BACKEND");

/**
 * iio_backend_read_raw - Read a channel attribute from a backend device
 * @back: Backend device
 * @chan: IIO channel reference
 * @val: First returned value
 * @val2: Second returned value
 * @mask: Specify the attribute to return
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_read_raw(struct iio_backend *back,
			 struct iio_chan_spec const *chan, int *val, int *val2,
			 long mask)
{
	return iio_backend_op_call(back, read_raw, chan, val, val2, mask);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_read_raw, "IIO_BACKEND");

static struct iio_backend *iio_backend_from_indio_dev_parent(const struct device *dev)
{
	struct iio_backend *back = ERR_PTR(-ENODEV), *iter;

	/*
	 * We deliberately go through all backends even after finding a match.
	 * The reason is that we want to catch frontend devices which have more
	 * than one backend, in which case returning the first we find is bogus.
	 * For those cases, frontends need to explicitly define
	 * get_iio_backend() in struct iio_info.
	 */
	guard(mutex)(&iio_back_lock);
	list_for_each_entry(iter, &iio_back_list, entry) {
		if (dev == iter->frontend_dev) {
			if (!IS_ERR(back)) {
				dev_warn(dev,
					 "Multiple backends! get_iio_backend() needs to be implemented");
				return ERR_PTR(-ENODEV);
			}

			back = iter;
		}
	}

	return back;
}

/**
 * iio_backend_ext_info_get - IIO ext_info read callback
 * @indio_dev: IIO device
 * @private: Data private to the driver
 * @chan: IIO channel
 * @buf: Buffer where to place the attribute data
 *
 * This helper is intended to be used by backends that extend an IIO channel
 * (through iio_backend_extend_chan_spec()) with extended info. In that case,
 * backends are not supposed to give their own callbacks (as they would not
 * have a way to get the backend from indio_dev). This is the getter.
 *
 * RETURNS:
 * Number of bytes written to buf, negative error number on failure.
 */
ssize_t iio_backend_ext_info_get(struct iio_dev *indio_dev, uintptr_t private,
				 const struct iio_chan_spec *chan, char *buf)
{
	struct iio_backend *back;

	/*
	 * The below should work for the majority of the cases. It will not
	 * work when one frontend has multiple backends, in which case we'll
	 * need a new callback in struct iio_info so we can directly request
	 * the proper backend from the frontend. Anyway, let's only introduce
	 * new options when really needed...
	 */
	back = iio_backend_from_indio_dev_parent(indio_dev->dev.parent);
	if (IS_ERR(back))
		return PTR_ERR(back);

	return iio_backend_op_call(back, ext_info_get, private, chan, buf);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_ext_info_get, "IIO_BACKEND");
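/*
 * A sketch of the contract described above, from the backend side. The
 * attribute name and private value are hypothetical; the key point is that
 * .read/.write must be these shared helpers:
 *
 *	static const struct iio_chan_spec_ext_info example_ext_info[] = {
 *		{
 *			.name = "filter_enable",
 *			.shared = IIO_SEPARATE,
 *			.read = iio_backend_ext_info_get,
 *			.write = iio_backend_ext_info_set,
 *		},
 *		{ }
 *	};
 *
 * The backend then points chan->ext_info at such an array from its
 * .extend_chan_spec() op and handles the actual access in its
 * .ext_info_get()/.ext_info_set() ops.
 */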
/**
 * iio_backend_ext_info_set - IIO ext_info write callback
 * @indio_dev: IIO device
 * @private: Data private to the driver
 * @chan: IIO channel
 * @buf: Buffer holding the sysfs attribute
 * @len: Buffer length
 *
 * This helper is intended to be used by backends that extend an IIO channel
 * (through iio_backend_extend_chan_spec()) with extended info. In that case,
 * backends are not supposed to give their own callbacks (as they would not
 * have a way to get the backend from indio_dev). This is the setter.
 *
 * RETURNS:
 * Buffer length on success, negative error number on failure.
 */
ssize_t iio_backend_ext_info_set(struct iio_dev *indio_dev, uintptr_t private,
				 const struct iio_chan_spec *chan,
				 const char *buf, size_t len)
{
	struct iio_backend *back;

	back = iio_backend_from_indio_dev_parent(indio_dev->dev.parent);
	if (IS_ERR(back))
		return PTR_ERR(back);

	return iio_backend_op_call(back, ext_info_set, private, chan, buf, len);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_ext_info_set, "IIO_BACKEND");

/**
 * iio_backend_interface_type_get - Get the interface type used
 * @back: Backend device
 * @type: Interface type
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_interface_type_get(struct iio_backend *back,
				   enum iio_backend_interface_type *type)
{
	int ret;

	ret = iio_backend_op_call(back, interface_type_get, type);
	if (ret)
		return ret;

	if (*type >= IIO_BACKEND_INTERFACE_MAX)
		return -EINVAL;

	return 0;
}
EXPORT_SYMBOL_NS_GPL(iio_backend_interface_type_get, "IIO_BACKEND");

/**
 * iio_backend_data_size_set - Set the data width/size on the data bus
 * @back: Backend device
 * @size: Size in bits
 *
 * Some frontend devices can dynamically control the word/data size on the
 * interface/data bus. Hence, the backend device needs to be aware of it so
 * data can be correctly transferred.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_data_size_set(struct iio_backend *back, unsigned int size)
{
	if (!size)
		return -EINVAL;

	return iio_backend_op_call(back, data_size_set, size);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_data_size_set, "IIO_BACKEND");

/**
 * iio_backend_oversampling_ratio_set - Set the oversampling ratio
 * @back: Backend device
 * @ratio: The oversampling ratio - value 1 corresponds to no oversampling.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_oversampling_ratio_set(struct iio_backend *back,
				       unsigned int ratio)
{
	return iio_backend_op_call(back, oversampling_ratio_set, ratio);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_oversampling_ratio_set, "IIO_BACKEND");
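/*
 * A hypothetical sketch tying the interface type query to the data size
 * setting above. The 16/24-bit choice is made up for illustration, and the
 * IIO_BACKEND_INTERFACE_SERIAL_LVDS value is assumed from the backend header:
 *
 *	enum iio_backend_interface_type type;
 *
 *	ret = iio_backend_interface_type_get(back, &type);
 *	if (ret)
 *		return ret;
 *
 *	if (type == IIO_BACKEND_INTERFACE_SERIAL_LVDS)
 *		ret = iio_backend_data_size_set(back, 24);
 *	else
 *		ret = iio_backend_data_size_set(back, 16);
 */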
/**
 * iio_backend_extend_chan_spec - Extend an IIO channel
 * @back: Backend device
 * @chan: IIO channel
 *
 * Some backends may have their own functionalities and hence be capable of
 * extending a frontend's channel.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_extend_chan_spec(struct iio_backend *back,
				 struct iio_chan_spec *chan)
{
	const struct iio_chan_spec_ext_info *frontend_ext_info = chan->ext_info;
	const struct iio_chan_spec_ext_info *back_ext_info;
	int ret;

	ret = iio_backend_op_call(back, extend_chan_spec, chan);
	if (ret)
		return ret;
	/*
	 * Let's keep things simple for now. Don't allow overwriting the
	 * frontend's extended info. If ever needed, we can support appending
	 * it.
	 */
	if (frontend_ext_info && chan->ext_info != frontend_ext_info)
		return -EOPNOTSUPP;
	if (!chan->ext_info)
		return 0;

	/* Don't allow backends to get creative and force their own handlers */
	for (back_ext_info = chan->ext_info; back_ext_info->name; back_ext_info++) {
		if (back_ext_info->read != iio_backend_ext_info_get)
			return -EINVAL;
		if (back_ext_info->write != iio_backend_ext_info_set)
			return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_NS_GPL(iio_backend_extend_chan_spec, "IIO_BACKEND");

static void iio_backend_release(void *arg)
{
	struct iio_backend *back = arg;

	module_put(back->owner);
}

static int __devm_iio_backend_get(struct device *dev, struct iio_backend *back)
{
	struct device_link *link;
	int ret;

	/*
	 * Make sure the provider cannot be unloaded before the consumer
	 * module. Note that device links would still guarantee that nothing
	 * is accessible (and breaks), but this makes it explicit that the
	 * consumer module must also be unloaded first.
	 */
	if (!try_module_get(back->owner))
		return dev_err_probe(dev, -ENODEV,
				     "Cannot get module reference\n");

	ret = devm_add_action_or_reset(dev, iio_backend_release, back);
	if (ret)
		return ret;

	link = device_link_add(dev, back->dev, DL_FLAG_AUTOREMOVE_CONSUMER);
	if (!link)
		return dev_err_probe(dev, -EINVAL,
				     "Could not link to supplier(%s)\n",
				     dev_name(back->dev));

	back->frontend_dev = dev;

	dev_dbg(dev, "Found backend(%s) device\n", dev_name(back->dev));

	return 0;
}

/**
 * iio_backend_ddr_enable - Enable interface DDR (Double Data Rate) mode
 * @back: Backend device
 *
 * Enable DDR; data is generated by the IP on each edge (rising and falling)
 * of the bus clock signal.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_ddr_enable(struct iio_backend *back)
{
	return iio_backend_op_call(back, ddr_enable);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_ddr_enable, "IIO_BACKEND");

/**
 * iio_backend_ddr_disable - Disable interface DDR (Double Data Rate) mode
 * @back: Backend device
 *
 * Disable DDR, moving the interface into SDR (Single Data Rate) mode.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_ddr_disable(struct iio_backend *back)
{
	return iio_backend_op_call(back, ddr_disable);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_ddr_disable, "IIO_BACKEND");

/**
 * iio_backend_data_stream_enable - Enable data stream
 * @back: Backend device
 *
 * Enable the data stream over the bus interface.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_data_stream_enable(struct iio_backend *back)
{
	return iio_backend_op_call(back, data_stream_enable);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_data_stream_enable, "IIO_BACKEND");

/**
 * iio_backend_data_stream_disable - Disable data stream
 * @back: Backend device
 *
 * Disable the data stream over the bus interface.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_data_stream_disable(struct iio_backend *back)
{
	return iio_backend_op_call(back, data_stream_disable);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_data_stream_disable, "IIO_BACKEND");
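/*
 * A sketch (hypothetical buffer setup ops) of gating the data stream with
 * the IIO buffer state, which is a natural fit for the two calls above:
 *
 *	static int example_buffer_postenable(struct iio_dev *indio_dev)
 *	{
 *		struct example_state *st = iio_priv(indio_dev);
 *
 *		return iio_backend_data_stream_enable(st->back);
 *	}
 *
 *	static int example_buffer_predisable(struct iio_dev *indio_dev)
 *	{
 *		struct example_state *st = iio_priv(indio_dev);
 *
 *		return iio_backend_data_stream_disable(st->back);
 *	}
 */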
/**
 * iio_backend_data_transfer_addr - Set data address
 * @back: Backend device
 * @address: Data register address
 *
 * Some devices may need to inform the backend about the address where the
 * data is to be read from or written to.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_data_transfer_addr(struct iio_backend *back, u32 address)
{
	return iio_backend_op_call(back, data_transfer_addr, address);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_data_transfer_addr, "IIO_BACKEND");

static struct iio_backend *__devm_iio_backend_fwnode_get(struct device *dev, const char *name,
							 struct fwnode_handle *fwnode)
{
	struct fwnode_handle *fwnode_back;
	struct iio_backend *back;
	unsigned int index;
	int ret;

	if (name) {
		ret = device_property_match_string(dev, "io-backend-names",
						   name);
		if (ret < 0)
			return ERR_PTR(ret);
		index = ret;
	} else {
		index = 0;
	}

	fwnode_back = fwnode_find_reference(fwnode, "io-backends", index);
	if (IS_ERR(fwnode_back))
		return dev_err_cast_probe(dev, fwnode_back,
					  "Cannot get Firmware reference\n");

	guard(mutex)(&iio_back_lock);
	list_for_each_entry(back, &iio_back_list, entry) {
		if (!device_match_fwnode(back->dev, fwnode_back))
			continue;

		fwnode_handle_put(fwnode_back);
		ret = __devm_iio_backend_get(dev, back);
		if (ret)
			return ERR_PTR(ret);

		if (name)
			back->idx = index;

		return back;
	}

	fwnode_handle_put(fwnode_back);
	return ERR_PTR(-EPROBE_DEFER);
}

/**
 * devm_iio_backend_get - Device managed backend device get
 * @dev: Consumer device for the backend
 * @name: Backend name
 *
 * Gets the backend associated with @dev.
 *
 * RETURNS:
 * A backend pointer, negative error pointer otherwise.
 */
struct iio_backend *devm_iio_backend_get(struct device *dev, const char *name)
{
	return __devm_iio_backend_fwnode_get(dev, name, dev_fwnode(dev));
}
EXPORT_SYMBOL_NS_GPL(devm_iio_backend_get, "IIO_BACKEND");

/**
 * devm_iio_backend_fwnode_get - Device managed backend firmware node get
 * @dev: Consumer device for the backend
 * @name: Backend name
 * @fwnode: Firmware node of the backend consumer
 *
 * Gets the backend associated with a firmware node.
 *
 * RETURNS:
 * A backend pointer, negative error pointer otherwise.
 */
struct iio_backend *devm_iio_backend_fwnode_get(struct device *dev,
						const char *name,
						struct fwnode_handle *fwnode)
{
	return __devm_iio_backend_fwnode_get(dev, name, fwnode);
}
EXPORT_SYMBOL_NS_GPL(devm_iio_backend_fwnode_get, "IIO_BACKEND");
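/*
 * Sketch of a consumer with multiple named backends. Assuming firmware
 * nodes along the lines of (node labels are hypothetical; the property
 * names are the ones this file looks up):
 *
 *	io-backends = <&backend_rx>, <&backend_tx>;
 *	io-backend-names = "rx", "tx";
 *
 * the frontend would do:
 *
 *	rx = devm_iio_backend_get(dev, "rx");
 *	if (IS_ERR(rx))
 *		return PTR_ERR(rx);
 */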
/**
 * __devm_iio_backend_get_from_fwnode_lookup - Device managed fwnode backend device get
 * @dev: Consumer device for the backend
 * @fwnode: Firmware node of the backend device
 *
 * Search the backend list for a device matching @fwnode.
 * This API should not be used; it is only present to prevent the first user
 * of this framework from breaking its DT ABI.
 *
 * RETURNS:
 * A backend pointer, negative error pointer otherwise.
 */
struct iio_backend *
__devm_iio_backend_get_from_fwnode_lookup(struct device *dev,
					  struct fwnode_handle *fwnode)
{
	struct iio_backend *back;
	int ret;

	guard(mutex)(&iio_back_lock);
	list_for_each_entry(back, &iio_back_list, entry) {
		if (!device_match_fwnode(back->dev, fwnode))
			continue;

		ret = __devm_iio_backend_get(dev, back);
		if (ret)
			return ERR_PTR(ret);

		return back;
	}

	return ERR_PTR(-EPROBE_DEFER);
}
EXPORT_SYMBOL_NS_GPL(__devm_iio_backend_get_from_fwnode_lookup, "IIO_BACKEND");

/**
 * iio_backend_get_priv - Get driver private data
 * @back: Backend device
 *
 * RETURNS:
 * The private data pointer given at registration time.
 */
void *iio_backend_get_priv(const struct iio_backend *back)
{
	return back->priv;
}
EXPORT_SYMBOL_NS_GPL(iio_backend_get_priv, "IIO_BACKEND");

static void iio_backend_unregister(void *arg)
{
	struct iio_backend *back = arg;

	guard(mutex)(&iio_back_lock);
	list_del(&back->entry);
}

/**
 * devm_iio_backend_register - Device managed backend device register
 * @dev: Backend device being registered
 * @info: Backend info
 * @priv: Device private data
 *
 * @info is mandatory. Not providing it results in -EINVAL.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int devm_iio_backend_register(struct device *dev,
			      const struct iio_backend_info *info, void *priv)
{
	struct iio_backend *back;

	if (!info || !info->ops)
		return dev_err_probe(dev, -EINVAL, "No backend ops given\n");

	/*
	 * Through device links, we guarantee that a frontend device cannot be
	 * bound (or even exist) if the backend driver is not around. Hence,
	 * we can tie the backend object's lifetime to the device being
	 * passed, since removing it will tear the frontend/consumer down.
	 */
	back = devm_kzalloc(dev, sizeof(*back), GFP_KERNEL);
	if (!back)
		return -ENOMEM;

	back->ops = info->ops;
	back->name = info->name;
	back->owner = dev->driver->owner;
	back->dev = dev;
	back->priv = priv;
	scoped_guard(mutex, &iio_back_lock)
		list_add(&back->entry, &iio_back_list);

	return devm_add_action_or_reset(dev, iio_backend_unregister, back);
}
EXPORT_SYMBOL_NS_GPL(devm_iio_backend_register, "IIO_BACKEND");

MODULE_AUTHOR("Nuno Sa <nuno.sa@analog.com>");
MODULE_DESCRIPTION("Framework to handle complex IIO aggregate devices");
MODULE_LICENSE("GPL");