// SPDX-License-Identifier: GPL-2.0-only
/*
 * Framework to handle complex IIO aggregate devices.
 *
 * The typical architecture is to have one device as the frontend device which
 * can be "linked" against one or multiple backend devices. All the IIO and
 * userspace interfaces are expected to be registered/managed by the frontend
 * device which will call back into the backends when needed (to get/set some
 * configuration that it does not directly control).
 *
 *                                           -------------------------------------------------------
 * ------------------                        | ------------         ------------      -------  FPGA|
 * |     ADC        |------------------------| | ADC CORE |---------| DMA CORE |------| RAM |      |
 * | (Frontend/IIO) | Serial Data (eg: LVDS) | |(backend) |---------|          |------|     |      |
 * |                |------------------------| ------------         ------------      -------      |
 * ------------------                        -------------------------------------------------------
 *
 * The framework interface is pretty simple:
 *   - Backends should register themselves with devm_iio_backend_register()
 *   - Frontend devices should get backends with devm_iio_backend_get()
 *
 * (A minimal usage sketch for each side can be found further down in this
 * file.)
 *
 * Also note that the primary targets for this framework are converters like
 * ADCs/DACs, so iio_backend_ops will have some operations typical of converter
 * devices. On top of that, this is "generic" for all of IIO, which means any
 * kind of device can make use of the framework. That said, if the
 * iio_backend_ops struct begins to grow out of control, we can always refactor
 * things so that industrialio-backend.c is only left with the really generic
 * stuff. Then, we can build on top of it depending on the needs.
 *
 * Copyright (C) 2023-2024 Analog Devices Inc.
 */
#define dev_fmt(fmt) "iio-backend: " fmt

#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <linux/iio/backend.h>
#include <linux/iio/iio.h>

struct iio_backend {
	struct list_head entry;
	const struct iio_backend_ops *ops;
	struct device *frontend_dev;
	struct device *dev;
	struct module *owner;
	void *priv;
};

/*
 * Helper struct for requesting buffers. This ensures that we have all data
 * that we need to free the buffer in a device managed action.
 */
struct iio_backend_buffer_pair {
	struct iio_backend *back;
	struct iio_buffer *buffer;
};

static LIST_HEAD(iio_back_list);
static DEFINE_MUTEX(iio_back_lock);
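
/*
 * Example (informal sketch, not taken from an in-tree driver): the
 * frontend/consumer side of the two-step interface described in the header
 * comment. The function name my_frontend_probe() is hypothetical; only the
 * helpers called are part of this framework.
 *
 *	static int my_frontend_probe(struct platform_device *pdev)
 *	{
 *		struct iio_backend *back;
 *
 *		back = devm_iio_backend_get(&pdev->dev, NULL);
 *		if (IS_ERR(back))
 *			return PTR_ERR(back);
 *
 *		return devm_iio_backend_enable(&pdev->dev, back);
 *	}
 */
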
/*
 * Helper macros to call backend ops. They make sure the op is supported.
 */
#define iio_backend_check_op(back, op) ({		\
	struct iio_backend *____back = back;		\
	int ____ret = 0;				\
							\
	if (!____back->ops->op)				\
		____ret = -EOPNOTSUPP;			\
							\
	____ret;					\
})

#define iio_backend_op_call(back, op, args...) ({		\
	struct iio_backend *__back = back;			\
	int __ret;						\
								\
	__ret = iio_backend_check_op(__back, op);		\
	if (!__ret)						\
		__ret = __back->ops->op(__back, ##args);	\
								\
	__ret;							\
})

#define iio_backend_ptr_op_call(back, op, args...) ({		\
	struct iio_backend *__back = back;			\
	void *ptr_err;						\
	int __ret;						\
								\
	__ret = iio_backend_check_op(__back, op);		\
	if (__ret)						\
		ptr_err = ERR_PTR(__ret);			\
	else							\
		ptr_err = __back->ops->op(__back, ##args);	\
								\
	ptr_err;						\
})

#define iio_backend_void_op_call(back, op, args...) {		\
	struct iio_backend *__back = back;			\
	int __ret;						\
								\
	__ret = iio_backend_check_op(__back, op);		\
	if (!__ret)						\
		__back->ops->op(__back, ##args);		\
}

/**
 * iio_backend_chan_enable - Enable a backend channel
 * @back: Backend device
 * @chan: Channel number
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_chan_enable(struct iio_backend *back, unsigned int chan)
{
	return iio_backend_op_call(back, chan_enable, chan);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_chan_enable, IIO_BACKEND);

/**
 * iio_backend_chan_disable - Disable a backend channel
 * @back: Backend device
 * @chan: Channel number
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_chan_disable(struct iio_backend *back, unsigned int chan)
{
	return iio_backend_op_call(back, chan_disable, chan);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_chan_disable, IIO_BACKEND);

static void __iio_backend_disable(void *back)
{
	iio_backend_void_op_call(back, disable);
}

/**
 * devm_iio_backend_enable - Device managed backend enable
 * @dev: Consumer device for the backend
 * @back: Backend device
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int devm_iio_backend_enable(struct device *dev, struct iio_backend *back)
{
	int ret;

	ret = iio_backend_op_call(back, enable);
	if (ret)
		return ret;

	return devm_add_action_or_reset(dev, __iio_backend_disable, back);
}
EXPORT_SYMBOL_NS_GPL(devm_iio_backend_enable, IIO_BACKEND);

/**
 * iio_backend_data_format_set - Configure the channel data format
 * @back: Backend device
 * @chan: Channel number
 * @data: Data format
 *
 * Properly configure a channel with respect to the expected data format. A
 * @struct iio_backend_data_fmt must be passed with the settings.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_data_format_set(struct iio_backend *back, unsigned int chan,
				const struct iio_backend_data_fmt *data)
{
	if (!data || data->type >= IIO_BACKEND_DATA_TYPE_MAX)
		return -EINVAL;

	return iio_backend_op_call(back, data_format_set, chan, data);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_data_format_set, IIO_BACKEND);
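
/*
 * Example (sketch): a frontend configuring a channel data format before
 * enabling the channel. The surrounding function and the chosen format are
 * hypothetical; IIO_BACKEND_TWOS_COMPLEMENT and the sign_extend field are
 * assumed to come from <linux/iio/backend.h>.
 *
 *	static int my_adc_setup_chan(struct iio_backend *back, unsigned int chan)
 *	{
 *		struct iio_backend_data_fmt fmt = {
 *			.type = IIO_BACKEND_TWOS_COMPLEMENT,
 *			.sign_extend = true,
 *		};
 *		int ret;
 *
 *		ret = iio_backend_data_format_set(back, chan, &fmt);
 *		if (ret)
 *			return ret;
 *
 *		return iio_backend_chan_enable(back, chan);
 *	}
 */
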
/**
 * iio_backend_data_source_set - Select the data source
 * @back: Backend device
 * @chan: Channel number
 * @data: Data source
 *
 * A given backend may have different sources to stream/sync data. This allows
 * selecting that source.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_data_source_set(struct iio_backend *back, unsigned int chan,
				enum iio_backend_data_source data)
{
	if (data >= IIO_BACKEND_DATA_SOURCE_MAX)
		return -EINVAL;

	return iio_backend_op_call(back, data_source_set, chan, data);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_data_source_set, IIO_BACKEND);

/**
 * iio_backend_set_sampling_freq - Set the channel sampling rate
 * @back: Backend device
 * @chan: Channel number
 * @sample_rate_hz: Sample rate, in Hz
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_set_sampling_freq(struct iio_backend *back, unsigned int chan,
				  u64 sample_rate_hz)
{
	return iio_backend_op_call(back, set_sample_rate, chan, sample_rate_hz);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_set_sampling_freq, IIO_BACKEND);

static void iio_backend_free_buffer(void *arg)
{
	struct iio_backend_buffer_pair *pair = arg;

	iio_backend_void_op_call(pair->back, free_buffer, pair->buffer);
}

/**
 * devm_iio_backend_request_buffer - Device managed buffer request
 * @dev: Consumer device for the backend
 * @back: Backend device
 * @indio_dev: IIO device
 *
 * Request an IIO buffer from the backend. The type of the buffer (typically
 * INDIO_BUFFER_HARDWARE) is up to the backend to decide. This is because,
 * normally, the backend dictates what kind of buffering we can get.
 *
 * The backend .free_buffer() hook is automatically called on @dev detach.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int devm_iio_backend_request_buffer(struct device *dev,
				    struct iio_backend *back,
				    struct iio_dev *indio_dev)
{
	struct iio_backend_buffer_pair *pair;
	struct iio_buffer *buffer;

	pair = devm_kzalloc(dev, sizeof(*pair), GFP_KERNEL);
	if (!pair)
		return -ENOMEM;

	buffer = iio_backend_ptr_op_call(back, request_buffer, indio_dev);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	/* A weak reference should be all we need */
	pair->back = back;
	pair->buffer = buffer;

	return devm_add_action_or_reset(dev, iio_backend_free_buffer, pair);
}
EXPORT_SYMBOL_NS_GPL(devm_iio_backend_request_buffer, IIO_BACKEND);

static struct iio_backend *iio_backend_from_indio_dev_parent(const struct device *dev)
{
	struct iio_backend *back = ERR_PTR(-ENODEV), *iter;

	/*
	 * We deliberately go through all backends even after finding a match.
	 * The reason is that we want to catch frontend devices which have more
	 * than one backend, in which case returning the first one we find is
	 * bogus. For those cases, frontends need to explicitly define
	 * get_iio_backend() in struct iio_info.
	 */
	guard(mutex)(&iio_back_lock);
	list_for_each_entry(iter, &iio_back_list, entry) {
		if (dev == iter->frontend_dev) {
			if (!IS_ERR(back)) {
				dev_warn(dev,
					 "Multiple backends! get_iio_backend() needs to be implemented");
				return ERR_PTR(-ENODEV);
			}

			back = iter;
		}
	}

	return back;
}
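
/*
 * Example (sketch): a typical buffered-capture setup path at the end of a
 * frontend probe. The 1 MSPS rate and the indio_dev registration step are
 * hypothetical; only the backend helpers called are from this file.
 *
 *	ret = iio_backend_set_sampling_freq(back, 0, 1000000);
 *	if (ret)
 *		return ret;
 *
 *	ret = devm_iio_backend_request_buffer(dev, back, indio_dev);
 *	if (ret)
 *		return ret;
 *
 *	return devm_iio_device_register(dev, indio_dev);
 */
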
/**
 * iio_backend_ext_info_get - IIO ext_info read callback
 * @indio_dev: IIO device
 * @private: Data private to the driver
 * @chan: IIO channel
 * @buf: Buffer where to place the attribute data
 *
 * This helper is intended to be used by backends that extend an IIO channel
 * (through iio_backend_extend_chan_spec()) with extended info. In that case,
 * backends are not supposed to give their own callbacks (as they would not
 * have a way to get the backend from indio_dev). This is the getter.
 *
 * RETURNS:
 * Number of bytes written to buf, negative error number on failure.
 */
ssize_t iio_backend_ext_info_get(struct iio_dev *indio_dev, uintptr_t private,
				 const struct iio_chan_spec *chan, char *buf)
{
	struct iio_backend *back;

	/*
	 * The below should work for the majority of the cases. It will not
	 * work when one frontend has multiple backends, in which case we'll
	 * need a new callback in struct iio_info so we can directly request
	 * the proper backend from the frontend. Anyway, let's only introduce
	 * new options when really needed...
	 */
	back = iio_backend_from_indio_dev_parent(indio_dev->dev.parent);
	if (IS_ERR(back))
		return PTR_ERR(back);

	return iio_backend_op_call(back, ext_info_get, private, chan, buf);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_ext_info_get, IIO_BACKEND);

/**
 * iio_backend_ext_info_set - IIO ext_info write callback
 * @indio_dev: IIO device
 * @private: Data private to the driver
 * @chan: IIO channel
 * @buf: Buffer holding the sysfs attribute
 * @len: Buffer length
 *
 * This helper is intended to be used by backends that extend an IIO channel
 * (through iio_backend_extend_chan_spec()) with extended info. In that case,
 * backends are not supposed to give their own callbacks (as they would not
 * have a way to get the backend from indio_dev). This is the setter.
 *
 * RETURNS:
 * Buffer length on success, negative error number on failure.
 */
ssize_t iio_backend_ext_info_set(struct iio_dev *indio_dev, uintptr_t private,
				 const struct iio_chan_spec *chan,
				 const char *buf, size_t len)
{
	struct iio_backend *back;

	back = iio_backend_from_indio_dev_parent(indio_dev->dev.parent);
	if (IS_ERR(back))
		return PTR_ERR(back);

	return iio_backend_op_call(back, ext_info_set, private, chan, buf, len);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_ext_info_set, IIO_BACKEND);

/**
 * iio_backend_extend_chan_spec - Extend an IIO channel
 * @indio_dev: IIO device
 * @back: Backend device
 * @chan: IIO channel
 *
 * Some backends may have their own functionalities and hence be capable of
 * extending a frontend's channel.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_extend_chan_spec(struct iio_dev *indio_dev,
				 struct iio_backend *back,
				 struct iio_chan_spec *chan)
{
	const struct iio_chan_spec_ext_info *frontend_ext_info = chan->ext_info;
	const struct iio_chan_spec_ext_info *back_ext_info;
	int ret;

	ret = iio_backend_op_call(back, extend_chan_spec, chan);
	if (ret)
		return ret;
	/*
	 * Let's keep things simple for now. Don't allow overwriting the
	 * frontend's extended info. If ever needed, we can support appending
	 * it.
	 */
	if (frontend_ext_info && chan->ext_info != frontend_ext_info)
		return -EOPNOTSUPP;
	if (!chan->ext_info)
		return 0;

	/* Don't allow backends to get creative and force their own handlers */
	for (back_ext_info = chan->ext_info; back_ext_info->name; back_ext_info++) {
		if (back_ext_info->read != iio_backend_ext_info_get)
			return -EINVAL;
		if (back_ext_info->write != iio_backend_ext_info_set)
			return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_NS_GPL(iio_backend_extend_chan_spec, IIO_BACKEND);
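
/*
 * Example (sketch): how a backend might implement .extend_chan_spec() so that
 * the handler check above passes. The attribute name, the op function and the
 * my_ext_info array are hypothetical; the read/write handlers must be the
 * helpers exported above.
 *
 *	static const struct iio_chan_spec_ext_info my_ext_info[] = {
 *		{
 *			.name = "test_mode",
 *			.shared = IIO_SEPARATE,
 *			.read = iio_backend_ext_info_get,
 *			.write = iio_backend_ext_info_set,
 *		},
 *		{ }
 *	};
 *
 *	static int my_extend_chan_spec(struct iio_backend *back,
 *				       struct iio_chan_spec *chan)
 *	{
 *		if (!chan->ext_info)
 *			chan->ext_info = my_ext_info;
 *
 *		return 0;
 *	}
 */
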
static void iio_backend_release(void *arg)
{
	struct iio_backend *back = arg;

	module_put(back->owner);
}

static int __devm_iio_backend_get(struct device *dev, struct iio_backend *back)
{
	struct device_link *link;
	int ret;

	/*
	 * Make sure the provider cannot be unloaded before the consumer
	 * module. Note that device_links would still guarantee that nothing
	 * is accessible (and breaks) but this makes it explicit that the
	 * consumer module must also be unloaded.
	 */
	if (!try_module_get(back->owner))
		return dev_err_probe(dev, -ENODEV,
				     "Cannot get module reference\n");

	ret = devm_add_action_or_reset(dev, iio_backend_release, back);
	if (ret)
		return ret;

	link = device_link_add(dev, back->dev, DL_FLAG_AUTOREMOVE_CONSUMER);
	if (!link)
		return dev_err_probe(dev, -EINVAL,
				     "Could not link to supplier(%s)\n",
				     dev_name(back->dev));

	back->frontend_dev = dev;

	dev_dbg(dev, "Found backend(%s) device\n", dev_name(back->dev));

	return 0;
}

/**
 * devm_iio_backend_get - Device managed backend device get
 * @dev: Consumer device for the backend
 * @name: Backend name
 *
 * Gets the backend associated with @dev.
 *
 * RETURNS:
 * A backend pointer, negative error pointer otherwise.
 */
struct iio_backend *devm_iio_backend_get(struct device *dev, const char *name)
{
	struct fwnode_handle *fwnode;
	struct iio_backend *back;
	unsigned int index;
	int ret;

	if (name) {
		ret = device_property_match_string(dev, "io-backend-names",
						   name);
		if (ret < 0)
			return ERR_PTR(ret);
		index = ret;
	} else {
		index = 0;
	}

	fwnode = fwnode_find_reference(dev_fwnode(dev), "io-backends", index);
	if (IS_ERR(fwnode)) {
		dev_err_probe(dev, PTR_ERR(fwnode),
			      "Cannot get Firmware reference\n");
		return ERR_CAST(fwnode);
	}

	guard(mutex)(&iio_back_lock);
	list_for_each_entry(back, &iio_back_list, entry) {
		if (!device_match_fwnode(back->dev, fwnode))
			continue;

		fwnode_handle_put(fwnode);
		ret = __devm_iio_backend_get(dev, back);
		if (ret)
			return ERR_PTR(ret);

		return back;
	}

	fwnode_handle_put(fwnode);
	return ERR_PTR(-EPROBE_DEFER);
}
EXPORT_SYMBOL_NS_GPL(devm_iio_backend_get, IIO_BACKEND);
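
/*
 * Example (sketch): the kind of firmware description the lookup above
 * resolves. The node, compatible string and &axi_adc_core label are
 * hypothetical; the "io-backends"/"io-backend-names" property names are the
 * ones used above.
 *
 *	adc@0 {
 *		compatible = "vendor,my-adc";
 *		io-backends = <&axi_adc_core>;
 *		io-backend-names = "rx";
 *	};
 *
 * With that, the frontend can request the backend by name:
 *
 *	back = devm_iio_backend_get(dev, "rx");
 *	if (IS_ERR(back))
 *		return PTR_ERR(back);
 */
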
/**
 * __devm_iio_backend_get_from_fwnode_lookup - Device managed fwnode backend device get
 * @dev: Consumer device for the backend
 * @fwnode: Firmware node of the backend device
 *
 * Search the backend list for a device matching @fwnode.
 * This API should not be used; it is only present to prevent the first user
 * of this framework from breaking its DT ABI.
 *
 * RETURNS:
 * A backend pointer, negative error pointer otherwise.
 */
struct iio_backend *
__devm_iio_backend_get_from_fwnode_lookup(struct device *dev,
					   struct fwnode_handle *fwnode)
{
	struct iio_backend *back;
	int ret;

	guard(mutex)(&iio_back_lock);
	list_for_each_entry(back, &iio_back_list, entry) {
		if (!device_match_fwnode(back->dev, fwnode))
			continue;

		ret = __devm_iio_backend_get(dev, back);
		if (ret)
			return ERR_PTR(ret);

		return back;
	}

	return ERR_PTR(-EPROBE_DEFER);
}
EXPORT_SYMBOL_NS_GPL(__devm_iio_backend_get_from_fwnode_lookup, IIO_BACKEND);

/**
 * iio_backend_get_priv - Get driver private data
 * @back: Backend device
 */
void *iio_backend_get_priv(const struct iio_backend *back)
{
	return back->priv;
}
EXPORT_SYMBOL_NS_GPL(iio_backend_get_priv, IIO_BACKEND);

static void iio_backend_unregister(void *arg)
{
	struct iio_backend *back = arg;

	guard(mutex)(&iio_back_lock);
	list_del(&back->entry);
}

/**
 * devm_iio_backend_register - Device managed backend device register
 * @dev: Backend device being registered
 * @ops: Backend ops
 * @priv: Device private data
 *
 * @ops is mandatory. Not providing it results in -EINVAL.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int devm_iio_backend_register(struct device *dev,
			      const struct iio_backend_ops *ops, void *priv)
{
	struct iio_backend *back;

	if (!ops)
		return dev_err_probe(dev, -EINVAL, "No backend ops given\n");

	/*
	 * Through device_links, we guarantee that a frontend device cannot be
	 * bound/exist if the backend driver is not around. Hence, we can bind
	 * the backend object lifetime with the device being passed since
	 * removing it will tear the frontend/consumer down.
	 */
	back = devm_kzalloc(dev, sizeof(*back), GFP_KERNEL);
	if (!back)
		return -ENOMEM;

	back->ops = ops;
	back->owner = dev->driver->owner;
	back->dev = dev;
	back->priv = priv;
	scoped_guard(mutex, &iio_back_lock)
		list_add(&back->entry, &iio_back_list);

	return devm_add_action_or_reset(dev, iio_backend_unregister, back);
}
EXPORT_SYMBOL_NS_GPL(devm_iio_backend_register, IIO_BACKEND);

MODULE_AUTHOR("Nuno Sa <nuno.sa@analog.com>");
MODULE_DESCRIPTION("Framework to handle complex IIO aggregate devices");
MODULE_LICENSE("GPL");
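
/*
 * Example (sketch): the backend/provider side of the framework. All "my_*"
 * names are hypothetical; the ops shown map to the op-call wrappers above,
 * and the private data passed to devm_iio_backend_register() is later
 * retrieved in the callbacks with iio_backend_get_priv().
 *
 *	static int my_chan_enable(struct iio_backend *back, unsigned int chan)
 *	{
 *		struct my_backend_state *st = iio_backend_get_priv(back);
 *
 *		return my_hw_enable(st, chan);
 *	}
 *
 *	static const struct iio_backend_ops my_backend_ops = {
 *		.chan_enable = my_chan_enable,
 *		.chan_disable = my_chan_disable,
 *		.data_format_set = my_data_format_set,
 *	};
 *
 *	static int my_backend_probe(struct platform_device *pdev)
 *	{
 *		struct my_backend_state *st;
 *		...
 *		return devm_iio_backend_register(&pdev->dev, &my_backend_ops, st);
 *	}
 */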