// SPDX-License-Identifier: GPL-2.0-only
/*
 * Framework to handle complex IIO aggregate devices.
 *
 * The typical architecture is to have one device as the frontend device which
 * can be "linked" against one or multiple backend devices. All the IIO and
 * userspace interfaces are expected to be registered/managed by the frontend
 * device which will callback into the backends when needed (to get/set some
 * configuration that it does not directly control).
 *
 *                                           -------------------------------------------------------
 * ------------------                        | ------------         ------------      -------  FPGA|
 * |     ADC        |------------------------| | ADC CORE |---------| DMA CORE |------| RAM |      |
 * | (Frontend/IIO) | Serial Data (eg: LVDS) | |(backend) |---------|          |------|     |      |
 * |                |------------------------| ------------         ------------      -------      |
 * ------------------                        -------------------------------------------------------
 *
 * The framework interface is pretty simple:
 *   - Backends should register themselves with devm_iio_backend_register()
 *   - Frontend devices should get backends with devm_iio_backend_get()
 *
 * Also note that the primary targets for this framework are converters like
 * ADCs/DACs, so iio_backend_ops will have some operations typical of converter
 * devices. On top of that, this is "generic" for all of IIO, which means any
 * kind of device can make use of the framework. That said, if the
 * iio_backend_ops struct begins to grow out of control, we can always refactor
 * things so that industrialio-backend.c is only left with the really generic
 * stuff. Then, we can build on top of it depending on the needs.
 *
 * Copyright (C) 2023-2024 Analog Devices Inc.
 */
#define dev_fmt(fmt) "iio-backend: " fmt

#include <linux/cleanup.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/slab.h>
#include <linux/stringify.h>
#include <linux/types.h>

#include <linux/iio/backend.h>
#include <linux/iio/iio.h>

struct iio_backend {
	struct list_head entry;
	const struct iio_backend_ops *ops;
	struct device *frontend_dev;
	struct device *dev;
	struct module *owner;
	void *priv;
	const char *name;
	unsigned int cached_reg_addr;
	/*
	 * This index is relative to the frontend, meaning that for frontends
	 * with multiple backends, this will be the index of this backend.
	 * Used for the debugfs directory name.
	 */
	u8 idx;
};

/*
 * Helper struct for requesting buffers. This ensures that we have all data
 * that we need to free the buffer in a device managed action.
 */
struct iio_backend_buffer_pair {
	struct iio_backend *back;
	struct iio_buffer *buffer;
};

static LIST_HEAD(iio_back_list);
static DEFINE_MUTEX(iio_back_lock);

/*
 * Helper macros to call backend ops. They make sure the requested op is
 * actually supported before calling it.
 */
#define iio_backend_check_op(back, op) ({		\
	struct iio_backend *____back = back;		\
	int ____ret = 0;				\
							\
	if (!____back->ops->op)				\
		____ret = -EOPNOTSUPP;			\
							\
	____ret;					\
})

#define iio_backend_op_call(back, op, args...) ({		\
	struct iio_backend *__back = back;			\
	int __ret;						\
								\
	__ret = iio_backend_check_op(__back, op);		\
	if (!__ret)						\
		__ret = __back->ops->op(__back, ##args);	\
								\
	__ret;							\
})

#define iio_backend_ptr_op_call(back, op, args...) ({		\
	struct iio_backend *__back = back;			\
	void *ptr_err;						\
	int __ret;						\
								\
	__ret = iio_backend_check_op(__back, op);		\
	if (__ret)						\
		ptr_err = ERR_PTR(__ret);			\
	else							\
		ptr_err = __back->ops->op(__back, ##args);	\
								\
	ptr_err;						\
})

#define iio_backend_void_op_call(back, op, args...) {		\
	struct iio_backend *__back = back;			\
	int __ret;						\
								\
	__ret = iio_backend_check_op(__back, op);		\
	if (!__ret)						\
		__back->ops->op(__back, ##args);		\
	else							\
		dev_dbg(__back->dev, "Op(%s) not implemented\n",\
			__stringify(op));			\
}
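/*
 * A rough illustration of what the helpers above do (a sketch, not the exact
 * preprocessor output): a call such as
 *
 *	ret = iio_backend_op_call(back, chan_enable, chan);
 *
 * behaves like
 *
 *	if (!back->ops->chan_enable)
 *		ret = -EOPNOTSUPP;
 *	else
 *		ret = back->ops->chan_enable(back, chan);
 */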
static ssize_t iio_backend_debugfs_read_reg(struct file *file,
					    char __user *userbuf,
					    size_t count, loff_t *ppos)
{
	struct iio_backend *back = file->private_data;
	char read_buf[20];
	unsigned int val;
	int ret, len;

	ret = iio_backend_op_call(back, debugfs_reg_access,
				  back->cached_reg_addr, 0, &val);
	if (ret)
		return ret;

	len = scnprintf(read_buf, sizeof(read_buf), "0x%X\n", val);

	return simple_read_from_buffer(userbuf, count, ppos, read_buf, len);
}

static ssize_t iio_backend_debugfs_write_reg(struct file *file,
					     const char __user *userbuf,
					     size_t count, loff_t *ppos)
{
	struct iio_backend *back = file->private_data;
	unsigned int val;
	char buf[80];
	ssize_t rc;
	int ret;

	rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, userbuf, count);
	if (rc < 0)
		return rc;

	/* NUL terminate the buffer before handing it to sscanf() */
	buf[rc] = '\0';

	ret = sscanf(buf, "%i %i", &back->cached_reg_addr, &val);

	switch (ret) {
	case 1:
		return count;
	case 2:
		ret = iio_backend_op_call(back, debugfs_reg_access,
					  back->cached_reg_addr, val, NULL);
		if (ret)
			return ret;
		return count;
	default:
		return -EINVAL;
	}
}

static const struct file_operations iio_backend_debugfs_reg_fops = {
	.open = simple_open,
	.read = iio_backend_debugfs_read_reg,
	.write = iio_backend_debugfs_write_reg,
};
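/*
 * Usage sketch for the direct_reg_access file (hypothetical shell session;
 * the exact path depends on the IIO device and backend index):
 *
 *	# cache register address 0x40 for subsequent accesses
 *	echo 0x40 > /sys/kernel/debug/iio/iio:device0/backend0/direct_reg_access
 *	# read back register 0x40
 *	cat /sys/kernel/debug/iio/iio:device0/backend0/direct_reg_access
 *	# write 0x3 to register 0x40
 *	echo 0x40 0x3 > /sys/kernel/debug/iio/iio:device0/backend0/direct_reg_access
 */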
static ssize_t iio_backend_debugfs_read_name(struct file *file,
					     char __user *userbuf,
					     size_t count, loff_t *ppos)
{
	struct iio_backend *back = file->private_data;
	char name[128];
	int len;

	len = scnprintf(name, sizeof(name), "%s\n", back->name);

	return simple_read_from_buffer(userbuf, count, ppos, name, len);
}

static const struct file_operations iio_backend_debugfs_name_fops = {
	.open = simple_open,
	.read = iio_backend_debugfs_read_name,
};

/**
 * iio_backend_debugfs_add - Add debugfs interfaces for backends
 * @back: Backend device
 * @indio_dev: IIO device
 */
void iio_backend_debugfs_add(struct iio_backend *back,
			     struct iio_dev *indio_dev)
{
	struct dentry *d = iio_get_debugfs_dentry(indio_dev);
	struct dentry *back_d;
	char name[128];

	if (!IS_ENABLED(CONFIG_DEBUG_FS) || !d)
		return;
	if (!back->ops->debugfs_reg_access && !back->name)
		return;

	snprintf(name, sizeof(name), "backend%d", back->idx);

	back_d = debugfs_create_dir(name, d);
	if (IS_ERR(back_d))
		return;

	if (back->ops->debugfs_reg_access)
		debugfs_create_file("direct_reg_access", 0600, back_d, back,
				    &iio_backend_debugfs_reg_fops);

	if (back->name)
		debugfs_create_file("name", 0400, back_d, back,
				    &iio_backend_debugfs_name_fops);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_debugfs_add, IIO_BACKEND);

/**
 * iio_backend_debugfs_print_chan_status - Print channel status
 * @back: Backend device
 * @chan: Channel number
 * @buf: Buffer where to print the status
 * @len: Available space
 *
 * One use case where this is useful is when running test tones in a digital
 * interface and "asking" the backend to dump more details on why a test tone
 * might have errors.
 *
 * RETURNS:
 * Number of copied bytes on success, negative error code on failure.
 */
ssize_t iio_backend_debugfs_print_chan_status(struct iio_backend *back,
					      unsigned int chan, char *buf,
					      size_t len)
{
	if (!IS_ENABLED(CONFIG_DEBUG_FS))
		return -ENODEV;

	return iio_backend_op_call(back, debugfs_print_chan_status, chan, buf,
				   len);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_debugfs_print_chan_status, IIO_BACKEND);

/**
 * iio_backend_chan_enable - Enable a backend channel
 * @back: Backend device
 * @chan: Channel number
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_chan_enable(struct iio_backend *back, unsigned int chan)
{
	return iio_backend_op_call(back, chan_enable, chan);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_chan_enable, IIO_BACKEND);

/**
 * iio_backend_chan_disable - Disable a backend channel
 * @back: Backend device
 * @chan: Channel number
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_chan_disable(struct iio_backend *back, unsigned int chan)
{
	return iio_backend_op_call(back, chan_disable, chan);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_chan_disable, IIO_BACKEND);

static void __iio_backend_disable(void *back)
{
	iio_backend_void_op_call(back, disable);
}

/**
 * iio_backend_disable - Backend disable
 * @back: Backend device
 */
void iio_backend_disable(struct iio_backend *back)
{
	__iio_backend_disable(back);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_disable, IIO_BACKEND);

/**
 * iio_backend_enable - Backend enable
 * @back: Backend device
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_enable(struct iio_backend *back)
{
	return iio_backend_op_call(back, enable);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_enable, IIO_BACKEND);

/**
 * devm_iio_backend_enable - Device managed backend enable
 * @dev: Consumer device for the backend
 * @back: Backend device
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int devm_iio_backend_enable(struct device *dev, struct iio_backend *back)
{
	int ret;

	ret = iio_backend_enable(back);
	if (ret)
		return ret;

	return devm_add_action_or_reset(dev, __iio_backend_disable, back);
}
EXPORT_SYMBOL_NS_GPL(devm_iio_backend_enable, IIO_BACKEND);
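/*
 * A minimal sketch of how a frontend driver typically consumes the above
 * (all names here are hypothetical, not from this file):
 *
 *	static int hypothetical_frontend_probe(struct device *dev)
 *	{
 *		struct iio_backend *back;
 *		int ret;
 *
 *		back = devm_iio_backend_get(dev, NULL);
 *		if (IS_ERR(back))
 *			return PTR_ERR(back);
 *
 *		ret = devm_iio_backend_enable(dev, back);
 *		if (ret)
 *			return ret;
 *		...
 *	}
 */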
/**
 * iio_backend_data_format_set - Configure the channel data format
 * @back: Backend device
 * @chan: Channel number
 * @data: Data format
 *
 * Properly configure a channel with respect to the expected data format. A
 * @struct iio_backend_data_fmt must be passed with the settings.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_data_format_set(struct iio_backend *back, unsigned int chan,
				const struct iio_backend_data_fmt *data)
{
	if (!data || data->type >= IIO_BACKEND_DATA_TYPE_MAX)
		return -EINVAL;

	return iio_backend_op_call(back, data_format_set, chan, data);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_data_format_set, IIO_BACKEND);
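/*
 * Sketch of a frontend setting the data format (assuming the
 * struct iio_backend_data_fmt layout from <linux/iio/backend.h>):
 *
 *	struct iio_backend_data_fmt fmt = {
 *		.type = IIO_BACKEND_TWOS_COMPLEMENT,
 *		.sign_extend = true,
 *		.enable = true,
 *	};
 *
 *	ret = iio_backend_data_format_set(back, chan, &fmt);
 */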
/**
 * iio_backend_data_source_set - Select data source
 * @back: Backend device
 * @chan: Channel number
 * @data: Data source
 *
 * A given backend may have different sources to stream/sync data. This allows
 * one to choose that source.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_data_source_set(struct iio_backend *back, unsigned int chan,
				enum iio_backend_data_source data)
{
	if (data >= IIO_BACKEND_DATA_SOURCE_MAX)
		return -EINVAL;

	return iio_backend_op_call(back, data_source_set, chan, data);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_data_source_set, IIO_BACKEND);

/**
 * iio_backend_set_sampling_freq - Set channel sampling rate
 * @back: Backend device
 * @chan: Channel number
 * @sample_rate_hz: Sample rate
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_set_sampling_freq(struct iio_backend *back, unsigned int chan,
				  u64 sample_rate_hz)
{
	return iio_backend_op_call(back, set_sample_rate, chan, sample_rate_hz);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_set_sampling_freq, IIO_BACKEND);

/**
 * iio_backend_test_pattern_set - Configure a test pattern
 * @back: Backend device
 * @chan: Channel number
 * @pattern: Test pattern
 *
 * Configure a test pattern on the backend. This is typically used for
 * calibrating the timings on the data digital interface.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_test_pattern_set(struct iio_backend *back,
				 unsigned int chan,
				 enum iio_backend_test_pattern pattern)
{
	if (pattern >= IIO_BACKEND_TEST_PATTERN_MAX)
		return -EINVAL;

	return iio_backend_op_call(back, test_pattern_set, chan, pattern);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_test_pattern_set, IIO_BACKEND);

/**
 * iio_backend_chan_status - Get the channel status
 * @back: Backend device
 * @chan: Channel number
 * @error: Error indication
 *
 * Get the current state of the backend channel. Typically used to check if
 * there were any errors sending/receiving data.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_chan_status(struct iio_backend *back, unsigned int chan,
			    bool *error)
{
	return iio_backend_op_call(back, chan_status, chan, error);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_chan_status, IIO_BACKEND);

/**
 * iio_backend_iodelay_set - Set digital I/O delay
 * @back: Backend device
 * @lane: Lane number
 * @taps: Number of taps
 *
 * Controls delays on sending/receiving data. One use case for this is to
 * calibrate the data digital interface so we get the best results when
 * transferring data. Note that @taps has no unit since the actual delay per
 * tap is very backend specific. Hence, frontend devices typically should go
 * through an array of @taps (the size of that array should typically match the
 * size of calibration points on the frontend device) and call this API.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_iodelay_set(struct iio_backend *back, unsigned int lane,
			    unsigned int taps)
{
	return iio_backend_op_call(back, iodelay_set, lane, taps);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_iodelay_set, IIO_BACKEND);
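/*
 * Sketch of the calibration loop hinted at above, combining the test pattern,
 * I/O delay and channel status helpers (hypothetical frontend code; the tap
 * range and the window selection policy are device specific):
 *
 *	ret = iio_backend_test_pattern_set(back, chan, pattern);
 *	if (ret)
 *		return ret;
 *
 *	for (tap = 0; tap < n_taps; tap++) {
 *		ret = iio_backend_iodelay_set(back, lane, tap);
 *		if (ret)
 *			return ret;
 *
 *		ret = iio_backend_chan_status(back, chan, &error);
 *		if (ret)
 *			return ret;
 *		// remember which taps gave error == false, then pick one
 *		// in the middle of the largest error free window
 *	}
 */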
/**
 * iio_backend_data_sample_trigger - Control when to sample data
 * @back: Backend device
 * @trigger: Data trigger
 *
 * Mostly useful for input backends. Configures the backend for when to sample
 * data (eg: rising vs falling edge).
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_data_sample_trigger(struct iio_backend *back,
				    enum iio_backend_sample_trigger trigger)
{
	if (trigger >= IIO_BACKEND_SAMPLE_TRIGGER_MAX)
		return -EINVAL;

	return iio_backend_op_call(back, data_sample_trigger, trigger);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_data_sample_trigger, IIO_BACKEND);

static void iio_backend_free_buffer(void *arg)
{
	struct iio_backend_buffer_pair *pair = arg;

	iio_backend_void_op_call(pair->back, free_buffer, pair->buffer);
}

/**
 * devm_iio_backend_request_buffer - Device managed buffer request
 * @dev: Consumer device for the backend
 * @back: Backend device
 * @indio_dev: IIO device
 *
 * Request an IIO buffer from the backend. The type of the buffer (typically
 * INDIO_BUFFER_HARDWARE) is up to the backend to decide. This is because,
 * normally, the backend dictates what kind of buffering we can get.
 *
 * The backend .free_buffer() hook is automatically called on @dev detach.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int devm_iio_backend_request_buffer(struct device *dev,
				    struct iio_backend *back,
				    struct iio_dev *indio_dev)
{
	struct iio_backend_buffer_pair *pair;
	struct iio_buffer *buffer;

	pair = devm_kzalloc(dev, sizeof(*pair), GFP_KERNEL);
	if (!pair)
		return -ENOMEM;

	buffer = iio_backend_ptr_op_call(back, request_buffer, indio_dev);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	/* A weak reference should be all we need */
	pair->back = back;
	pair->buffer = buffer;

	return devm_add_action_or_reset(dev, iio_backend_free_buffer, pair);
}
EXPORT_SYMBOL_NS_GPL(devm_iio_backend_request_buffer, IIO_BACKEND);

/**
 * iio_backend_read_raw - Read a channel attribute from a backend device
 * @back: Backend device
 * @chan: IIO channel reference
 * @val: First returned value
 * @val2: Second returned value
 * @mask: Specify the attribute to return
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_read_raw(struct iio_backend *back,
			 struct iio_chan_spec const *chan, int *val, int *val2,
			 long mask)
{
	return iio_backend_op_call(back, read_raw, chan, val, val2, mask);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_read_raw, IIO_BACKEND);

static struct iio_backend *iio_backend_from_indio_dev_parent(const struct device *dev)
{
	struct iio_backend *back = ERR_PTR(-ENODEV), *iter;

	/*
	 * We deliberately go through all backends even after finding a match.
	 * The reason is that we want to catch frontend devices which have more
	 * than one backend, in which case returning the first we find is
	 * bogus. For those cases, frontends need to explicitly define
	 * get_iio_backend() in struct iio_info.
	 */
	guard(mutex)(&iio_back_lock);
	list_for_each_entry(iter, &iio_back_list, entry) {
		if (dev == iter->frontend_dev) {
			if (!IS_ERR(back)) {
				dev_warn(dev,
					 "Multiple backends! get_iio_backend() needs to be implemented");
				return ERR_PTR(-ENODEV);
			}

			back = iter;
		}
	}

	return back;
}

/**
 * iio_backend_ext_info_get - IIO ext_info read callback
 * @indio_dev: IIO device
 * @private: Data private to the driver
 * @chan: IIO channel
 * @buf: Buffer where to place the attribute data
 *
 * This helper is intended to be used by backends that extend an IIO channel
 * (through iio_backend_extend_chan_spec()) with extended info. In that case,
 * backends are not supposed to give their own callbacks (as they would not
 * have a way to get the backend from indio_dev). This is the getter.
 *
 * RETURNS:
 * Number of bytes written to buf, negative error number on failure.
 */
ssize_t iio_backend_ext_info_get(struct iio_dev *indio_dev, uintptr_t private,
				 const struct iio_chan_spec *chan, char *buf)
{
	struct iio_backend *back;

	/*
	 * The below should work for the majority of the cases. It will not
	 * work when one frontend has multiple backends, in which case we'll
	 * need a new callback in struct iio_info so we can directly request
	 * the proper backend from the frontend. Anyways, let's only introduce
	 * new options when really needed...
	 */
	back = iio_backend_from_indio_dev_parent(indio_dev->dev.parent);
	if (IS_ERR(back))
		return PTR_ERR(back);

	return iio_backend_op_call(back, ext_info_get, private, chan, buf);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_ext_info_get, IIO_BACKEND);

/**
 * iio_backend_ext_info_set - IIO ext_info write callback
 * @indio_dev: IIO device
 * @private: Data private to the driver
 * @chan: IIO channel
 * @buf: Buffer holding the sysfs attribute
 * @len: Buffer length
 *
 * This helper is intended to be used by backends that extend an IIO channel
 * (through iio_backend_extend_chan_spec()) with extended info. In that case,
 * backends are not supposed to give their own callbacks (as they would not
 * have a way to get the backend from indio_dev). This is the setter.
 *
 * RETURNS:
 * Buffer length on success, negative error number on failure.
 */
ssize_t iio_backend_ext_info_set(struct iio_dev *indio_dev, uintptr_t private,
				 const struct iio_chan_spec *chan,
				 const char *buf, size_t len)
{
	struct iio_backend *back;

	back = iio_backend_from_indio_dev_parent(indio_dev->dev.parent);
	if (IS_ERR(back))
		return PTR_ERR(back);

	return iio_backend_op_call(back, ext_info_set, private, chan, buf, len);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_ext_info_set, IIO_BACKEND);

/**
 * iio_backend_extend_chan_spec - Extend an IIO channel
 * @back: Backend device
 * @chan: IIO channel
 *
 * Some backends may have their own functionalities and hence be capable of
 * extending a frontend's channel.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_extend_chan_spec(struct iio_backend *back,
				 struct iio_chan_spec *chan)
{
	const struct iio_chan_spec_ext_info *frontend_ext_info = chan->ext_info;
	const struct iio_chan_spec_ext_info *back_ext_info;
	int ret;

	ret = iio_backend_op_call(back, extend_chan_spec, chan);
	if (ret)
		return ret;
	/*
	 * Let's keep things simple for now. Don't allow to overwrite the
	 * frontend's extended info. If ever needed, we can support appending
	 * it.
	 */
	if (frontend_ext_info && chan->ext_info != frontend_ext_info)
		return -EOPNOTSUPP;
	if (!chan->ext_info)
		return 0;

	/* Don't allow backends to get creative and force their own handlers */
	for (back_ext_info = chan->ext_info; back_ext_info->name; back_ext_info++) {
		if (back_ext_info->read != iio_backend_ext_info_get)
			return -EINVAL;
		if (back_ext_info->write != iio_backend_ext_info_set)
			return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_NS_GPL(iio_backend_extend_chan_spec, IIO_BACKEND);
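/*
 * Sketch of the ext_info array a backend's .extend_chan_spec() op would hook
 * up (hypothetical backend code). Note that, per the validation above, the
 * read/write handlers must be the framework helpers:
 *
 *	static const struct iio_chan_spec_ext_info hypothetical_ext_info[] = {
 *		{
 *			.name = "test_mode",
 *			.shared = IIO_SEPARATE,
 *			.read = iio_backend_ext_info_get,
 *			.write = iio_backend_ext_info_set,
 *		},
 *		{ }
 *	};
 *
 *	static int hypothetical_extend_chan_spec(struct iio_backend *back,
 *						 struct iio_chan_spec *chan)
 *	{
 *		chan->ext_info = hypothetical_ext_info;
 *		return 0;
 *	}
 */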
static void iio_backend_release(void *arg)
{
	struct iio_backend *back = arg;

	module_put(back->owner);
}

static int __devm_iio_backend_get(struct device *dev, struct iio_backend *back)
{
	struct device_link *link;
	int ret;

	/*
	 * Make sure the provider cannot be unloaded before the consumer
	 * module. Note that device_links would still guarantee that nothing is
	 * accessible (and breaks) but this makes it explicit that the consumer
	 * module must also be unloaded.
	 */
	if (!try_module_get(back->owner))
		return dev_err_probe(dev, -ENODEV,
				     "Cannot get module reference\n");

	ret = devm_add_action_or_reset(dev, iio_backend_release, back);
	if (ret)
		return ret;

	link = device_link_add(dev, back->dev, DL_FLAG_AUTOREMOVE_CONSUMER);
	if (!link)
		return dev_err_probe(dev, -EINVAL,
				     "Could not link to supplier(%s)\n",
				     dev_name(back->dev));

	back->frontend_dev = dev;

	dev_dbg(dev, "Found backend(%s) device\n", dev_name(back->dev));

	return 0;
}

/**
 * iio_backend_ddr_enable - Enable interface DDR (Double Data Rate) mode
 * @back: Backend device
 *
 * Enable DDR: data is generated by the IP at each edge (rising and falling)
 * of the bus clock signal.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_ddr_enable(struct iio_backend *back)
{
	return iio_backend_op_call(back, ddr_enable);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_ddr_enable, IIO_BACKEND);

/**
 * iio_backend_ddr_disable - Disable interface DDR (Double Data Rate) mode
 * @back: Backend device
 *
 * Disable DDR, setting the interface into SDR (Single Data Rate) mode.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_ddr_disable(struct iio_backend *back)
{
	return iio_backend_op_call(back, ddr_disable);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_ddr_disable, IIO_BACKEND);

/**
 * iio_backend_data_stream_enable - Enable data stream
 * @back: Backend device
 *
 * Enable the data stream over the bus interface.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_data_stream_enable(struct iio_backend *back)
{
	return iio_backend_op_call(back, data_stream_enable);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_data_stream_enable, IIO_BACKEND);

/**
 * iio_backend_data_stream_disable - Disable data stream
 * @back: Backend device
 *
 * Disable the data stream over the bus interface.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_data_stream_disable(struct iio_backend *back)
{
	return iio_backend_op_call(back, data_stream_disable);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_data_stream_disable, IIO_BACKEND);

/**
 * iio_backend_data_transfer_addr - Set data address
 * @back: Backend device
 * @address: Data register address
 *
 * Some devices may need to inform the backend about an address
 * where to read or write the data.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_data_transfer_addr(struct iio_backend *back, u32 address)
{
	return iio_backend_op_call(back, data_transfer_addr, address);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_data_transfer_addr, IIO_BACKEND);

static struct iio_backend *__devm_iio_backend_fwnode_get(struct device *dev, const char *name,
							 struct fwnode_handle *fwnode)
{
	struct fwnode_handle *fwnode_back;
	struct iio_backend *back;
	unsigned int index;
	int ret;

	if (name) {
		ret = device_property_match_string(dev, "io-backend-names",
						   name);
		if (ret < 0)
			return ERR_PTR(ret);
		index = ret;
	} else {
		index = 0;
	}

	fwnode_back = fwnode_find_reference(fwnode, "io-backends", index);
	if (IS_ERR(fwnode_back))
		return dev_err_cast_probe(dev, fwnode_back,
					  "Cannot get firmware reference\n");

	guard(mutex)(&iio_back_lock);
	list_for_each_entry(back, &iio_back_list, entry) {
		if (!device_match_fwnode(back->dev, fwnode_back))
			continue;

		fwnode_handle_put(fwnode_back);
		ret = __devm_iio_backend_get(dev, back);
		if (ret)
			return ERR_PTR(ret);

		if (name)
			back->idx = index;

		return back;
	}

	fwnode_handle_put(fwnode_back);
	return ERR_PTR(-EPROBE_DEFER);
}
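/*
 * The lookup above resolves "io-backends" references in the consumer firmware
 * node, optionally by name via "io-backend-names". A hypothetical device tree
 * snippet (node, label and compatible names made up) could look like:
 *
 *	adc: adc@0 {
 *		compatible = "vendor,frontend-adc";
 *		io-backends = <&axi_adc_core>, <&axi_dma_core>;
 *		io-backend-names = "adc-core", "dma-core";
 *	};
 *
 * With that, devm_iio_backend_get(dev, "dma-core") would resolve to the
 * backend registered by the device behind &axi_dma_core.
 */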
/**
 * devm_iio_backend_get - Device managed backend device get
 * @dev: Consumer device for the backend
 * @name: Backend name
 *
 * Gets the backend associated with @dev.
 *
 * RETURNS:
 * A backend pointer, negative error pointer otherwise.
 */
struct iio_backend *devm_iio_backend_get(struct device *dev, const char *name)
{
	return __devm_iio_backend_fwnode_get(dev, name, dev_fwnode(dev));
}
EXPORT_SYMBOL_NS_GPL(devm_iio_backend_get, IIO_BACKEND);

/**
 * devm_iio_backend_fwnode_get - Device managed backend firmware node get
 * @dev: Consumer device for the backend
 * @name: Backend name
 * @fwnode: Firmware node of the backend consumer
 *
 * Gets the backend associated with a firmware node.
 *
 * RETURNS:
 * A backend pointer, negative error pointer otherwise.
 */
struct iio_backend *devm_iio_backend_fwnode_get(struct device *dev,
						const char *name,
						struct fwnode_handle *fwnode)
{
	return __devm_iio_backend_fwnode_get(dev, name, fwnode);
}
EXPORT_SYMBOL_NS_GPL(devm_iio_backend_fwnode_get, IIO_BACKEND);

/**
 * __devm_iio_backend_get_from_fwnode_lookup - Device managed fwnode backend device get
 * @dev: Consumer device for the backend
 * @fwnode: Firmware node of the backend device
 *
 * Search the backend list for a device matching @fwnode.
 * This API should not be used and it's only present for preventing the first
 * user of this framework from breaking its DT ABI.
 *
 * RETURNS:
 * A backend pointer, negative error pointer otherwise.
 */
struct iio_backend *
__devm_iio_backend_get_from_fwnode_lookup(struct device *dev,
					  struct fwnode_handle *fwnode)
{
	struct iio_backend *back;
	int ret;

	guard(mutex)(&iio_back_lock);
	list_for_each_entry(back, &iio_back_list, entry) {
		if (!device_match_fwnode(back->dev, fwnode))
			continue;

		ret = __devm_iio_backend_get(dev, back);
		if (ret)
			return ERR_PTR(ret);

		return back;
	}

	return ERR_PTR(-EPROBE_DEFER);
}
EXPORT_SYMBOL_NS_GPL(__devm_iio_backend_get_from_fwnode_lookup, IIO_BACKEND);

/**
 * iio_backend_get_priv - Get driver private data
 * @back: Backend device
 */
void *iio_backend_get_priv(const struct iio_backend *back)
{
	return back->priv;
}
EXPORT_SYMBOL_NS_GPL(iio_backend_get_priv, IIO_BACKEND);

static void iio_backend_unregister(void *arg)
{
	struct iio_backend *back = arg;

	guard(mutex)(&iio_back_lock);
	list_del(&back->entry);
}

/**
 * devm_iio_backend_register - Device managed backend device register
 * @dev: Backend device being registered
 * @info: Backend info
 * @priv: Device private data
 *
 * @info is mandatory. Not providing it results in -EINVAL.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int devm_iio_backend_register(struct device *dev,
			      const struct iio_backend_info *info, void *priv)
{
	struct iio_backend *back;

	if (!info || !info->ops)
		return dev_err_probe(dev, -EINVAL, "No backend ops given\n");

	/*
	 * Through device_links, we guarantee that a frontend device cannot be
	 * bound/exist if the backend driver is not around. Hence, we can bind
	 * the backend object lifetime with the device being passed since
	 * removing it will tear the frontend/consumer down.
	 */
	back = devm_kzalloc(dev, sizeof(*back), GFP_KERNEL);
	if (!back)
		return -ENOMEM;

	back->ops = info->ops;
	back->name = info->name;
	back->owner = dev->driver->owner;
	back->dev = dev;
	back->priv = priv;
	scoped_guard(mutex, &iio_back_lock)
		list_add(&back->entry, &iio_back_list);

	return devm_add_action_or_reset(dev, iio_backend_unregister, back);
}
EXPORT_SYMBOL_NS_GPL(devm_iio_backend_register, IIO_BACKEND);
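/*
 * A minimal sketch of a backend driver registering itself (all names
 * hypothetical):
 *
 *	static const struct iio_backend_ops hypothetical_ops = {
 *		.chan_enable = hypothetical_chan_enable,
 *		.chan_disable = hypothetical_chan_disable,
 *	};
 *
 *	static const struct iio_backend_info hypothetical_info = {
 *		.name = "hypothetical-backend",
 *		.ops = &hypothetical_ops,
 *	};
 *
 *	static int hypothetical_probe(struct platform_device *pdev)
 *	{
 *		struct hypothetical_state *st;
 *
 *		st = devm_kzalloc(&pdev->dev, sizeof(*st), GFP_KERNEL);
 *		if (!st)
 *			return -ENOMEM;
 *
 *		return devm_iio_backend_register(&pdev->dev,
 *						 &hypothetical_info, st);
 *	}
 */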
MODULE_AUTHOR("Nuno Sa <nuno.sa@analog.com>");
MODULE_DESCRIPTION("Framework to handle complex IIO aggregate devices");
MODULE_LICENSE("GPL");