// SPDX-License-Identifier: GPL-2.0-only
/*
 * Framework to handle complex IIO aggregate devices.
 *
 * The typical architecture is to have one device as the frontend device which
 * can be "linked" against one or multiple backend devices. All the IIO and
 * userspace interfaces are expected to be registered/managed by the frontend
 * device which will call back into the backends when needed (to get/set some
 * configuration that it does not directly control).
 *
 *                                           -------------------------------------------------------
 * ------------------                        | ------------         ------------      -------  FPGA|
 * |     ADC        |------------------------| | ADC CORE |---------| DMA CORE |------| RAM |       |
 * | (Frontend/IIO) | Serial Data (eg: LVDS) | |(backend) |---------|          |------|     |       |
 * |                |------------------------| ------------         ------------      -------       |
 * ------------------                        -------------------------------------------------------
 *
 * The framework interface is pretty simple:
 *   - Backends should register themselves with devm_iio_backend_register()
 *   - Frontend devices should get backends with devm_iio_backend_get()
 *
 * Also note that the primary targets for this framework are converters like
 * ADCs/DACs, so iio_backend_ops will have some operations typical of converter
 * devices. On top of that, this is "generic" for all of IIO, which means any
 * kind of device can make use of the framework. That said, if the
 * iio_backend_ops struct begins to grow out of control, we can always refactor
 * things so that industrialio-backend.c is only left with the really generic
 * stuff. Then, we can build on top of it depending on the needs.
 *
 * Copyright (C) 2023-2024 Analog Devices Inc.
 */
#define dev_fmt(fmt) "iio-backend: " fmt

#include <linux/cleanup.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/slab.h>
#include <linux/types.h>

#include <linux/iio/backend.h>

struct iio_backend {
	struct list_head entry;
	const struct iio_backend_ops *ops;
	struct device *dev;
	struct module *owner;
	void *priv;
};

/*
 * Helper struct for requesting buffers. This ensures that we have all data
 * that we need to free the buffer in a device managed action.
 */
struct iio_backend_buffer_pair {
	struct iio_backend *back;
	struct iio_buffer *buffer;
};

static LIST_HEAD(iio_back_list);
static DEFINE_MUTEX(iio_back_lock);

/*
 * Helper macros to call backend ops. They make sure the requested op is
 * actually supported before calling it.
 */
#define iio_backend_check_op(back, op) ({ \
	struct iio_backend *____back = back; \
	int ____ret = 0; \
\
	if (!____back->ops->op) \
		____ret = -EOPNOTSUPP; \
\
	____ret; \
})

#define iio_backend_op_call(back, op, args...) ({ \
	struct iio_backend *__back = back; \
	int __ret; \
\
	__ret = iio_backend_check_op(__back, op); \
	if (!__ret) \
		__ret = __back->ops->op(__back, ##args); \
\
	__ret; \
})

#define iio_backend_ptr_op_call(back, op, args...) ({ \
	struct iio_backend *__back = back; \
	void *ptr_err; \
	int __ret; \
\
	__ret = iio_backend_check_op(__back, op); \
	if (__ret) \
		ptr_err = ERR_PTR(__ret); \
	else \
		ptr_err = __back->ops->op(__back, ##args); \
\
	ptr_err; \
})

#define iio_backend_void_op_call(back, op, args...) { \
	struct iio_backend *__back = back; \
	int __ret; \
\
	__ret = iio_backend_check_op(__back, op); \
	if (!__ret) \
		__back->ops->op(__back, ##args); \
}
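/*
 * As a hedged illustration of how these helpers are meant to be used: a
 * backend driver fills in a struct iio_backend_ops (declared in
 * <linux/iio/backend.h>) with whatever it supports, e.g. (driver names are
 * hypothetical):
 *
 *	static const struct iio_backend_ops foo_backend_ops = {
 *		.enable = foo_backend_enable,
 *		.chan_enable = foo_backend_chan_enable,
 *		.chan_disable = foo_backend_chan_disable,
 *		.data_format_set = foo_backend_data_format_set,
 *	};
 *
 * The frontend-facing wrappers below then dispatch through the op-call macros
 * above, which return -EOPNOTSUPP for any op the backend chose not to
 * implement.
 */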
/**
 * iio_backend_chan_enable - Enable a backend channel
 * @back: Backend device
 * @chan: Channel number
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_chan_enable(struct iio_backend *back, unsigned int chan)
{
	return iio_backend_op_call(back, chan_enable, chan);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_chan_enable, IIO_BACKEND);

/**
 * iio_backend_chan_disable - Disable a backend channel
 * @back: Backend device
 * @chan: Channel number
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_chan_disable(struct iio_backend *back, unsigned int chan)
{
	return iio_backend_op_call(back, chan_disable, chan);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_chan_disable, IIO_BACKEND);

static void __iio_backend_disable(void *back)
{
	iio_backend_void_op_call(back, disable);
}

/**
 * devm_iio_backend_enable - Device managed backend enable
 * @dev: Consumer device for the backend
 * @back: Backend device
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int devm_iio_backend_enable(struct device *dev, struct iio_backend *back)
{
	int ret;

	ret = iio_backend_op_call(back, enable);
	if (ret)
		return ret;

	return devm_add_action_or_reset(dev, __iio_backend_disable, back);
}
EXPORT_SYMBOL_NS_GPL(devm_iio_backend_enable, IIO_BACKEND);

/**
 * iio_backend_data_format_set - Configure the channel data format
 * @back: Backend device
 * @chan: Channel number
 * @data: Data format
 *
 * Properly configure a channel with respect to the expected data format. A
 * &struct iio_backend_data_fmt must be passed with the settings.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_data_format_set(struct iio_backend *back, unsigned int chan,
				const struct iio_backend_data_fmt *data)
{
	if (!data || data->type >= IIO_BACKEND_DATA_TYPE_MAX)
		return -EINVAL;

	return iio_backend_op_call(back, data_format_set, chan, data);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_data_format_set, IIO_BACKEND);
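/*
 * A hedged usage sketch for iio_backend_data_format_set(): a frontend that
 * expects sign extended two's complement samples could do something like the
 * below, assuming the field names of struct iio_backend_data_fmt in
 * <linux/iio/backend.h>:
 *
 *	struct iio_backend_data_fmt fmt = {
 *		.type = IIO_BACKEND_TWOS_COMPLEMENT,
 *		.sign_extend = true,
 *		.enable = true,
 *	};
 *	int ret;
 *
 *	ret = iio_backend_data_format_set(back, chan, &fmt);
 *	if (ret)
 *		return ret;
 */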
static void iio_backend_free_buffer(void *arg)
{
	struct iio_backend_buffer_pair *pair = arg;

	iio_backend_void_op_call(pair->back, free_buffer, pair->buffer);
}

/**
 * devm_iio_backend_request_buffer - Device managed buffer request
 * @dev: Consumer device for the backend
 * @back: Backend device
 * @indio_dev: IIO device
 *
 * Request an IIO buffer from the backend. The type of the buffer (typically
 * INDIO_BUFFER_HARDWARE) is up to the backend to decide. This is because,
 * normally, the backend dictates what kind of buffering we can get.
 *
 * The backend .free_buffer() hook is automatically called on @dev detach.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int devm_iio_backend_request_buffer(struct device *dev,
				    struct iio_backend *back,
				    struct iio_dev *indio_dev)
{
	struct iio_backend_buffer_pair *pair;
	struct iio_buffer *buffer;

	pair = devm_kzalloc(dev, sizeof(*pair), GFP_KERNEL);
	if (!pair)
		return -ENOMEM;

	buffer = iio_backend_ptr_op_call(back, request_buffer, indio_dev);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	/* a weak reference should be all we need */
	pair->back = back;
	pair->buffer = buffer;

	return devm_add_action_or_reset(dev, iio_backend_free_buffer, pair);
}
EXPORT_SYMBOL_NS_GPL(devm_iio_backend_request_buffer, IIO_BACKEND);

static void iio_backend_release(void *arg)
{
	struct iio_backend *back = arg;

	module_put(back->owner);
}

static int __devm_iio_backend_get(struct device *dev, struct iio_backend *back)
{
	struct device_link *link;
	int ret;

	/*
	 * Make sure the provider cannot be unloaded before the consumer
	 * module. Note that device_links would still guarantee that nothing
	 * is accessible (and breaks) but this makes it explicit that the
	 * consumer module must also be unloaded.
	 */
	if (!try_module_get(back->owner))
		return dev_err_probe(dev, -ENODEV,
				     "Cannot get module reference\n");

	ret = devm_add_action_or_reset(dev, iio_backend_release, back);
	if (ret)
		return ret;

	link = device_link_add(dev, back->dev, DL_FLAG_AUTOREMOVE_CONSUMER);
	if (!link)
		return dev_err_probe(dev, -EINVAL,
				     "Could not link to supplier(%s)\n",
				     dev_name(back->dev));

	dev_dbg(dev, "Found backend(%s) device\n", dev_name(back->dev));

	return 0;
}

/**
 * devm_iio_backend_get - Device managed backend device get
 * @dev: Consumer device for the backend
 * @name: Backend name
 *
 * Gets the backend associated with @dev.
 *
 * RETURNS:
 * A backend pointer, negative error pointer otherwise.
 */
struct iio_backend *devm_iio_backend_get(struct device *dev, const char *name)
{
	struct fwnode_handle *fwnode;
	struct iio_backend *back;
	unsigned int index;
	int ret;

	if (name) {
		ret = device_property_match_string(dev, "io-backend-names",
						   name);
		if (ret < 0)
			return ERR_PTR(ret);
		index = ret;
	} else {
		index = 0;
	}

	fwnode = fwnode_find_reference(dev_fwnode(dev), "io-backends", index);
	if (IS_ERR(fwnode)) {
		dev_err_probe(dev, PTR_ERR(fwnode),
			      "Cannot get firmware reference\n");
		return ERR_CAST(fwnode);
	}

	guard(mutex)(&iio_back_lock);
	list_for_each_entry(back, &iio_back_list, entry) {
		if (!device_match_fwnode(back->dev, fwnode))
			continue;

		fwnode_handle_put(fwnode);
		ret = __devm_iio_backend_get(dev, back);
		if (ret)
			return ERR_PTR(ret);

		return back;
	}

	fwnode_handle_put(fwnode);
	return ERR_PTR(-EPROBE_DEFER);
}
EXPORT_SYMBOL_NS_GPL(devm_iio_backend_get, IIO_BACKEND);
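/*
 * A hedged sketch of a frontend probe path built on top of the getters above
 * (device/driver names are hypothetical and error handling is condensed).
 * Passing a NULL name resolves the first "io-backends" reference of the
 * frontend's firmware node, as implemented in devm_iio_backend_get():
 *
 *	static int foo_adc_probe(struct platform_device *pdev)
 *	{
 *		struct iio_dev *indio_dev;
 *		struct iio_backend *back;
 *		int ret;
 *
 *		indio_dev = devm_iio_device_alloc(&pdev->dev,
 *						  sizeof(struct foo_adc_state));
 *		if (!indio_dev)
 *			return -ENOMEM;
 *
 *		back = devm_iio_backend_get(&pdev->dev, NULL);
 *		if (IS_ERR(back))
 *			return PTR_ERR(back);
 *
 *		ret = devm_iio_backend_enable(&pdev->dev, back);
 *		if (ret)
 *			return ret;
 *
 *		ret = devm_iio_backend_request_buffer(&pdev->dev, back,
 *						      indio_dev);
 *		if (ret)
 *			return ret;
 *
 *		return devm_iio_device_register(&pdev->dev, indio_dev);
 *	}
 */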
/**
 * __devm_iio_backend_get_from_fwnode_lookup - Device managed fwnode backend device get
 * @dev: Consumer device for the backend
 * @fwnode: Firmware node of the backend device
 *
 * Search the backend list for a device matching @fwnode.
 * This API should not be used; it is only present to prevent the first user
 * of this framework from breaking its DT ABI.
 *
 * RETURNS:
 * A backend pointer, negative error pointer otherwise.
 */
struct iio_backend *
__devm_iio_backend_get_from_fwnode_lookup(struct device *dev,
					   struct fwnode_handle *fwnode)
{
	struct iio_backend *back;
	int ret;

	guard(mutex)(&iio_back_lock);
	list_for_each_entry(back, &iio_back_list, entry) {
		if (!device_match_fwnode(back->dev, fwnode))
			continue;

		ret = __devm_iio_backend_get(dev, back);
		if (ret)
			return ERR_PTR(ret);

		return back;
	}

	return ERR_PTR(-EPROBE_DEFER);
}
EXPORT_SYMBOL_NS_GPL(__devm_iio_backend_get_from_fwnode_lookup, IIO_BACKEND);

/**
 * iio_backend_get_priv - Get driver private data
 * @back: Backend device
 */
void *iio_backend_get_priv(const struct iio_backend *back)
{
	return back->priv;
}
EXPORT_SYMBOL_NS_GPL(iio_backend_get_priv, IIO_BACKEND);

static void iio_backend_unregister(void *arg)
{
	struct iio_backend *back = arg;

	guard(mutex)(&iio_back_lock);
	list_del(&back->entry);
}

/**
 * devm_iio_backend_register - Device managed backend device register
 * @dev: Backend device being registered
 * @ops: Backend ops
 * @priv: Device private data
 *
 * @ops is mandatory. Not providing it results in -EINVAL.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int devm_iio_backend_register(struct device *dev,
			      const struct iio_backend_ops *ops, void *priv)
{
	struct iio_backend *back;

	if (!ops)
		return dev_err_probe(dev, -EINVAL, "No backend ops given\n");

	/*
	 * Through device_links, we guarantee that a frontend device cannot be
	 * bound/exist if the backend driver is not around. Hence, we can bind
	 * the backend object lifetime with the device being passed since
	 * removing it will tear the frontend/consumer down.
	 */
	back = devm_kzalloc(dev, sizeof(*back), GFP_KERNEL);
	if (!back)
		return -ENOMEM;

	back->ops = ops;
	back->owner = dev->driver->owner;
	back->dev = dev;
	back->priv = priv;
	scoped_guard(mutex, &iio_back_lock)
		list_add(&back->entry, &iio_back_list);

	return devm_add_action_or_reset(dev, iio_backend_unregister, back);
}
EXPORT_SYMBOL_NS_GPL(devm_iio_backend_register, IIO_BACKEND);

MODULE_AUTHOR("Nuno Sa <nuno.sa@analog.com>");
MODULE_DESCRIPTION("Framework to handle complex IIO aggregate devices");
MODULE_LICENSE("GPL");
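/*
 * A hedged sketch of the backend side (driver and struct names hypothetical):
 * the backend driver registers its ops from probe, after its own hardware
 * setup, and relies on devres for the unregistration:
 *
 *	static int foo_backend_probe(struct platform_device *pdev)
 *	{
 *		struct foo_backend_state *st;
 *
 *		st = devm_kzalloc(&pdev->dev, sizeof(*st), GFP_KERNEL);
 *		if (!st)
 *			return -ENOMEM;
 *
 *		return devm_iio_backend_register(&pdev->dev, &foo_backend_ops,
 *						 st);
 *	}
 *
 * The frontend can later retrieve @st through iio_backend_get_priv() from
 * within the backend's own ops.
 */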