// SPDX-License-Identifier: GPL-2.0-only
/*
 * Framework to handle complex IIO aggregate devices.
 *
 * The typical architecture is to have one device as the frontend device which
 * can be "linked" against one or multiple backend devices. All the IIO and
 * userspace interfaces are expected to be registered/managed by the frontend
 * device which will call back into the backends when needed (to get/set some
 * configuration that it does not directly control).
 *
 *                                            -------------------------------------------------------
 * ------------------                         | ------------         ------------      -------  FPGA|
 * |     ADC        |-------------------------| | ADC CORE |---------| DMA CORE |------| RAM |      |
 * | (Frontend/IIO) | Serial Data (eg: LVDS)  | |(backend) |---------|          |------|     |      |
 * |                |-------------------------| ------------         ------------      -------      |
 * ------------------                         -------------------------------------------------------
 *
 * The framework interface is pretty simple:
 *   - Backends should register themselves with devm_iio_backend_register()
 *   - Frontend devices should get backends with devm_iio_backend_get()
 *
 * Also note that the primary target for this framework are converters like
 * ADCs/DACs so iio_backend_ops will have some operations typical of converter
 * devices. On top of that, this is "generic" for all IIO which means any kind
 * of device can make use of the framework. That said, if the iio_backend_ops
 * struct begins to grow out of control, we can always refactor things so that
 * industrialio-backend.c is only left with the really generic stuff. Then,
 * we can build on top of it depending on the needs.
 *
 * Copyright (C) 2023-2024 Analog Devices Inc.
 */
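/*
 * A hedged sketch of the pairing (hypothetical driver names; see
 * devm_iio_backend_register() and devm_iio_backend_get() below for the
 * actual API):
 *
 *      // In the backend (e.g. FPGA ADC core) driver probe:
 *      ret = devm_iio_backend_register(&pdev->dev, &fake_back_info, st);
 *
 *      // In the frontend (converter) driver probe:
 *      back = devm_iio_backend_get(dev, NULL);
 *      if (IS_ERR(back))
 *              return PTR_ERR(back);
 *
 *      // The frontend then calls back into the backend as needed, eg:
 *      ret = iio_backend_chan_enable(back, chan);
 */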
#define dev_fmt(fmt) "iio-backend: " fmt

#include <linux/cleanup.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/property.h>
#include <linux/slab.h>
#include <linux/stringify.h>
#include <linux/types.h>

#include <linux/iio/backend.h>
#include <linux/iio/iio.h>

struct iio_backend {
        struct list_head entry;
        const struct iio_backend_ops *ops;
        struct device *frontend_dev;
        struct device *dev;
        struct module *owner;
        void *priv;
        const char *name;
        unsigned int cached_reg_addr;
        /*
         * This index is relative to the frontend. Meaning that for
         * frontends with multiple backends, this will be the index of this
         * backend. Used for the debugfs directory name.
         */
        u8 idx;
};

/*
 * Helper struct for requesting buffers. This ensures that we have all data
 * that we need to free the buffer in a device managed action.
 */
struct iio_backend_buffer_pair {
        struct iio_backend *back;
        struct iio_buffer *buffer;
};

static LIST_HEAD(iio_back_list);
static DEFINE_MUTEX(iio_back_lock);

/*
 * Helper macros to call backend ops. They make sure the operation is
 * supported.
 */
#define iio_backend_check_op(back, op) ({ \
        struct iio_backend *____back = back; \
        int ____ret = 0; \
        \
        if (!____back->ops->op) \
                ____ret = -EOPNOTSUPP; \
        \
        ____ret; \
})

#define iio_backend_op_call(back, op, args...) ({ \
        struct iio_backend *__back = back; \
        int __ret; \
        \
        __ret = iio_backend_check_op(__back, op); \
        if (!__ret) \
                __ret = __back->ops->op(__back, ##args); \
        \
        __ret; \
})

#define iio_backend_ptr_op_call(back, op, args...) ({ \
        struct iio_backend *__back = back; \
        void *ptr_err; \
        int __ret; \
        \
        __ret = iio_backend_check_op(__back, op); \
        if (__ret) \
                ptr_err = ERR_PTR(__ret); \
        else \
                ptr_err = __back->ops->op(__back, ##args); \
        \
        ptr_err; \
})

#define iio_backend_void_op_call(back, op, args...) { \
        struct iio_backend *__back = back; \
        int __ret; \
        \
        __ret = iio_backend_check_op(__back, op); \
        if (!__ret) \
                __back->ops->op(__back, ##args); \
        else \
                dev_dbg(__back->dev, "Op(%s) not implemented\n", \
                        __stringify(op)); \
}

static ssize_t iio_backend_debugfs_read_reg(struct file *file,
                                            char __user *userbuf,
                                            size_t count, loff_t *ppos)
{
        struct iio_backend *back = file->private_data;
        char read_buf[20];
        unsigned int val;
        int ret, len;

        ret = iio_backend_op_call(back, debugfs_reg_access,
                                  back->cached_reg_addr, 0, &val);
        if (ret)
                return ret;

        len = scnprintf(read_buf, sizeof(read_buf), "0x%X\n", val);

        return simple_read_from_buffer(userbuf, count, ppos, read_buf, len);
}

static ssize_t iio_backend_debugfs_write_reg(struct file *file,
                                             const char __user *userbuf,
                                             size_t count, loff_t *ppos)
{
        struct iio_backend *back = file->private_data;
        unsigned int val;
        char buf[80];
        ssize_t rc;
        int ret;

        if (count >= sizeof(buf))
                return -ENOSPC;

        rc = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, userbuf, count);
        if (rc < 0)
                return rc;

        buf[rc] = '\0';

        ret = sscanf(buf, "%i %i", &back->cached_reg_addr, &val);

        switch (ret) {
        case 1:
                return count;
        case 2:
                ret = iio_backend_op_call(back, debugfs_reg_access,
                                          back->cached_reg_addr, val, NULL);
                if (ret)
                        return ret;
                return count;
        default:
                return -EINVAL;
        }
}

static const struct file_operations iio_backend_debugfs_reg_fops = {
        .open = simple_open,
        .read = iio_backend_debugfs_read_reg,
        .write = iio_backend_debugfs_write_reg,
};
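/*
 * A hedged usage sketch for the file registered above (the path assumes
 * debugfs is mounted at /sys/kernel/debug and the usual IIO device debugfs
 * directory layout):
 *
 *      # an address alone only caches it for subsequent reads
 *      echo 0x10 > /sys/kernel/debug/iio/iio:device0/backend0/direct_reg_access
 *      cat /sys/kernel/debug/iio/iio:device0/backend0/direct_reg_access
 *      # an address followed by a value performs a write
 *      echo 0x10 0x3 > /sys/kernel/debug/iio/iio:device0/backend0/direct_reg_access
 */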

static ssize_t iio_backend_debugfs_read_name(struct file *file,
                                             char __user *userbuf,
                                             size_t count, loff_t *ppos)
{
        struct iio_backend *back = file->private_data;
        char name[128];
        int len;

        len = scnprintf(name, sizeof(name), "%s\n", back->name);

        return simple_read_from_buffer(userbuf, count, ppos, name, len);
}

static const struct file_operations iio_backend_debugfs_name_fops = {
        .open = simple_open,
        .read = iio_backend_debugfs_read_name,
};

/**
 * iio_backend_debugfs_add - Add debugfs interfaces for Backends
 * @back: Backend device
 * @indio_dev: IIO device
 */
void iio_backend_debugfs_add(struct iio_backend *back,
                             struct iio_dev *indio_dev)
{
        struct dentry *d = iio_get_debugfs_dentry(indio_dev);
        struct dentry *back_d;
        char name[128];

        if (!IS_ENABLED(CONFIG_DEBUG_FS) || !d)
                return;
        if (!back->ops->debugfs_reg_access && !back->name)
                return;

        snprintf(name, sizeof(name), "backend%d", back->idx);

        back_d = debugfs_create_dir(name, d);
        if (IS_ERR(back_d))
                return;

        if (back->ops->debugfs_reg_access)
                debugfs_create_file("direct_reg_access", 0600, back_d, back,
                                    &iio_backend_debugfs_reg_fops);

        if (back->name)
                debugfs_create_file("name", 0400, back_d, back,
                                    &iio_backend_debugfs_name_fops);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_debugfs_add, "IIO_BACKEND");

/**
 * iio_backend_debugfs_print_chan_status - Print channel status
 * @back: Backend device
 * @chan: Channel number
 * @buf: Buffer where to print the status
 * @len: Available space
 *
 * One use case where this is useful is for testing test tones in a digital
 * interface and "asking" the backend to dump more details on why a test tone
 * might have errors.
 *
 * RETURNS:
 * Number of copied bytes on success, negative error code on failure.
 */
ssize_t iio_backend_debugfs_print_chan_status(struct iio_backend *back,
                                              unsigned int chan, char *buf,
                                              size_t len)
{
        if (!IS_ENABLED(CONFIG_DEBUG_FS))
                return -ENODEV;

        return iio_backend_op_call(back, debugfs_print_chan_status, chan, buf,
                                   len);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_debugfs_print_chan_status, "IIO_BACKEND");

/**
 * iio_backend_chan_enable - Enable a backend channel
 * @back: Backend device
 * @chan: Channel number
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_chan_enable(struct iio_backend *back, unsigned int chan)
{
        return iio_backend_op_call(back, chan_enable, chan);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_chan_enable, "IIO_BACKEND");

/**
 * iio_backend_chan_disable - Disable a backend channel
 * @back: Backend device
 * @chan: Channel number
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_chan_disable(struct iio_backend *back, unsigned int chan)
{
        return iio_backend_op_call(back, chan_disable, chan);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_chan_disable, "IIO_BACKEND");

static void __iio_backend_disable(void *back)
{
        iio_backend_void_op_call(back, disable);
}

/**
 * iio_backend_disable - Backend disable
 * @back: Backend device
 */
void iio_backend_disable(struct iio_backend *back)
{
        __iio_backend_disable(back);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_disable, "IIO_BACKEND");

/**
 * iio_backend_enable - Backend enable
 * @back: Backend device
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_enable(struct iio_backend *back)
{
        return iio_backend_op_call(back, enable);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_enable, "IIO_BACKEND");

/**
 * devm_iio_backend_enable - Device managed backend enable
 * @dev: Consumer device for the backend
 * @back: Backend device
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int devm_iio_backend_enable(struct device *dev, struct iio_backend *back)
{
        int ret;

        ret = iio_backend_enable(back);
        if (ret)
                return ret;

        return devm_add_action_or_reset(dev, __iio_backend_disable, back);
}
EXPORT_SYMBOL_NS_GPL(devm_iio_backend_enable, "IIO_BACKEND");

/**
 * iio_backend_data_format_set - Configure the channel data format
 * @back: Backend device
 * @chan: Channel number
 * @data: Data format
 *
 * Properly configure a channel with respect to the expected data format. A
 * @struct iio_backend_data_fmt must be passed with the settings.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_data_format_set(struct iio_backend *back, unsigned int chan,
                                const struct iio_backend_data_fmt *data)
{
        if (!data || data->type >= IIO_BACKEND_DATA_TYPE_MAX)
                return -EINVAL;

        return iio_backend_op_call(back, data_format_set, chan, data);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_data_format_set, "IIO_BACKEND");
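/*
 * A minimal sketch of configuring, say, 16-bit two's complement samples on
 * channel 0 (assuming the fields currently found in
 * struct iio_backend_data_fmt):
 *
 *      struct iio_backend_data_fmt fmt = {
 *              .type = IIO_BACKEND_TWOS_COMPLEMENT,
 *              .sign_extend = true,
 *      };
 *      int ret;
 *
 *      ret = iio_backend_data_format_set(back, 0, &fmt);
 *      if (ret)
 *              return ret;
 */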

/**
 * iio_backend_data_source_set - Select data source
 * @back: Backend device
 * @chan: Channel number
 * @data: Data source
 *
 * A given backend may have different sources to stream/sync data. This allows
 * choosing that source.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_data_source_set(struct iio_backend *back, unsigned int chan,
                                enum iio_backend_data_source data)
{
        if (data >= IIO_BACKEND_DATA_SOURCE_MAX)
                return -EINVAL;

        return iio_backend_op_call(back, data_source_set, chan, data);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_data_source_set, "IIO_BACKEND");

/**
 * iio_backend_data_source_get - Get current data source
 * @back: Backend device
 * @chan: Channel number
 * @data: Pointer to receive the current source value
 *
 * A given backend may have different sources to stream/sync data. This allows
 * knowing which source is in use.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_data_source_get(struct iio_backend *back, unsigned int chan,
                                enum iio_backend_data_source *data)
{
        int ret;

        ret = iio_backend_op_call(back, data_source_get, chan, data);
        if (ret)
                return ret;

        if (*data >= IIO_BACKEND_DATA_SOURCE_MAX)
                return -EINVAL;

        return 0;
}
EXPORT_SYMBOL_NS_GPL(iio_backend_data_source_get, "IIO_BACKEND");

/**
 * iio_backend_set_sampling_freq - Set channel sampling rate
 * @back: Backend device
 * @chan: Channel number
 * @sample_rate_hz: Sample rate
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_set_sampling_freq(struct iio_backend *back, unsigned int chan,
                                  u64 sample_rate_hz)
{
        return iio_backend_op_call(back, set_sample_rate, chan, sample_rate_hz);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_set_sampling_freq, "IIO_BACKEND");

/**
 * iio_backend_test_pattern_set - Configure a test pattern
 * @back: Backend device
 * @chan: Channel number
 * @pattern: Test pattern
 *
 * Configure a test pattern on the backend. This is typically used for
 * calibrating the timings on the data digital interface.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_test_pattern_set(struct iio_backend *back,
                                 unsigned int chan,
                                 enum iio_backend_test_pattern pattern)
{
        if (pattern >= IIO_BACKEND_TEST_PATTERN_MAX)
                return -EINVAL;

        return iio_backend_op_call(back, test_pattern_set, chan, pattern);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_test_pattern_set, "IIO_BACKEND");

/**
 * iio_backend_chan_status - Get the channel status
 * @back: Backend device
 * @chan: Channel number
 * @error: Error indication
 *
 * Get the current state of the backend channel. Typically used to check if
 * there were any errors sending/receiving data.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_chan_status(struct iio_backend *back, unsigned int chan,
                            bool *error)
{
        return iio_backend_op_call(back, chan_status, chan, error);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_chan_status, "IIO_BACKEND");

/**
 * iio_backend_iodelay_set - Set digital I/O delay
 * @back: Backend device
 * @lane: Lane number
 * @taps: Number of taps
 *
 * Controls delays on sending/receiving data. One use case for this is to
 * calibrate the data digital interface so we get the best results when
 * transferring data. Note that @taps has no unit since the actual delay per
 * tap is very backend specific. Hence, frontend devices typically should go
 * through an array of @taps (the size of that array should typically match
 * the number of calibration points on the frontend device) and call this API.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_iodelay_set(struct iio_backend *back, unsigned int lane,
                            unsigned int taps)
{
        return iio_backend_op_call(back, iodelay_set, lane, taps);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_iodelay_set, "IIO_BACKEND");
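/*
 * A hedged sketch of how a frontend might combine this with a test pattern
 * to calibrate the interface (NUM_TAPS, results[], lane and chan are
 * hypothetical; how the pattern is checked is backend specific):
 *
 *      for (tap = 0; tap < NUM_TAPS; tap++) {
 *              ret = iio_backend_iodelay_set(back, lane, tap);
 *              if (ret)
 *                      return ret;
 *              ret = iio_backend_chan_status(back, chan, &error);
 *              if (ret)
 *                      return ret;
 *              // record whether this tap sampled the pattern cleanly
 *              results[tap] = !error;
 *      }
 *      // then pick a tap in the middle of the largest error free window
 */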

/**
 * iio_backend_data_sample_trigger - Control when to sample data
 * @back: Backend device
 * @trigger: Data trigger
 *
 * Mostly useful for input backends. Configures the backend for when to sample
 * data (eg: rising vs falling edge).
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_data_sample_trigger(struct iio_backend *back,
                                    enum iio_backend_sample_trigger trigger)
{
        if (trigger >= IIO_BACKEND_SAMPLE_TRIGGER_MAX)
                return -EINVAL;

        return iio_backend_op_call(back, data_sample_trigger, trigger);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_data_sample_trigger, "IIO_BACKEND");

static void iio_backend_free_buffer(void *arg)
{
        struct iio_backend_buffer_pair *pair = arg;

        iio_backend_void_op_call(pair->back, free_buffer, pair->buffer);
}

/**
 * devm_iio_backend_request_buffer - Device managed buffer request
 * @dev: Consumer device for the backend
 * @back: Backend device
 * @indio_dev: IIO device
 *
 * Request an IIO buffer from the backend. The type of the buffer (typically
 * INDIO_BUFFER_HARDWARE) is up to the backend to decide. This is because,
 * normally, the backend dictates what kind of buffering we can get.
 *
 * The backend .free_buffer() hook is automatically called on @dev detach.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int devm_iio_backend_request_buffer(struct device *dev,
                                    struct iio_backend *back,
                                    struct iio_dev *indio_dev)
{
        struct iio_backend_buffer_pair *pair;
        struct iio_buffer *buffer;

        pair = devm_kzalloc(dev, sizeof(*pair), GFP_KERNEL);
        if (!pair)
                return -ENOMEM;

        buffer = iio_backend_ptr_op_call(back, request_buffer, indio_dev);
        if (IS_ERR(buffer))
                return PTR_ERR(buffer);

        /* weak reference should be all we need */
        pair->back = back;
        pair->buffer = buffer;

        return devm_add_action_or_reset(dev, iio_backend_free_buffer, pair);
}
EXPORT_SYMBOL_NS_GPL(devm_iio_backend_request_buffer, "IIO_BACKEND");
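/*
 * A minimal sketch of the typical frontend probe flow using the device
 * managed APIs in this file (error handling trimmed to the essentials):
 *
 *      back = devm_iio_backend_get(dev, NULL);
 *      if (IS_ERR(back))
 *              return PTR_ERR(back);
 *
 *      ret = devm_iio_backend_request_buffer(dev, back, indio_dev);
 *      if (ret)
 *              return ret;
 *
 *      ret = devm_iio_backend_enable(dev, back);
 *      if (ret)
 *              return ret;
 *
 *      return devm_iio_device_register(dev, indio_dev);
 */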

/**
 * iio_backend_read_raw - Read a channel attribute from a backend device.
 * @back: Backend device
 * @chan: IIO channel reference
 * @val: First returned value
 * @val2: Second returned value
 * @mask: Specify the attribute to return
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_read_raw(struct iio_backend *back,
                         struct iio_chan_spec const *chan, int *val, int *val2,
                         long mask)
{
        return iio_backend_op_call(back, read_raw, chan, val, val2, mask);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_read_raw, "IIO_BACKEND");

static struct iio_backend *iio_backend_from_indio_dev_parent(const struct device *dev)
{
        struct iio_backend *back = ERR_PTR(-ENODEV), *iter;

        /*
         * We deliberately go through all backends even after finding a match.
         * The reason is that we want to catch frontend devices which have more
         * than one backend in which case returning the first we find is bogus.
         * For those cases, frontends need to explicitly define
         * get_iio_backend() in struct iio_info.
         */
        guard(mutex)(&iio_back_lock);
        list_for_each_entry(iter, &iio_back_list, entry) {
                if (dev == iter->frontend_dev) {
                        if (!IS_ERR(back)) {
                                dev_warn(dev,
                                         "Multiple backends! get_iio_backend() needs to be implemented");
                                return ERR_PTR(-ENODEV);
                        }

                        back = iter;
                }
        }

        return back;
}

/**
 * iio_backend_ext_info_get - IIO ext_info read callback
 * @indio_dev: IIO device
 * @private: Data private to the driver
 * @chan: IIO channel
 * @buf: Buffer where to place the attribute data
 *
 * This helper is intended to be used by backends that extend an IIO channel
 * (through iio_backend_extend_chan_spec()) with extended info. In that case,
 * backends are not supposed to give their own callbacks (as they would not
 * have a way to get the backend from indio_dev). This is the getter.
 *
 * RETURNS:
 * Number of bytes written to buf, negative error number on failure.
 */
ssize_t iio_backend_ext_info_get(struct iio_dev *indio_dev, uintptr_t private,
                                 const struct iio_chan_spec *chan, char *buf)
{
        struct iio_backend *back;

        /*
         * The below should work for the majority of the cases. It will not
         * work when one frontend has multiple backends in which case we'll
         * need a new callback in struct iio_info so we can directly request
         * the proper backend from the frontend. Anyways, let's only introduce
         * new options when really needed...
         */
        back = iio_backend_from_indio_dev_parent(indio_dev->dev.parent);
        if (IS_ERR(back))
                return PTR_ERR(back);

        return iio_backend_op_call(back, ext_info_get, private, chan, buf);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_ext_info_get, "IIO_BACKEND");

/**
 * iio_backend_ext_info_set - IIO ext_info write callback
 * @indio_dev: IIO device
 * @private: Data private to the driver
 * @chan: IIO channel
 * @buf: Buffer holding the sysfs attribute
 * @len: Buffer length
 *
 * This helper is intended to be used by backends that extend an IIO channel
 * (through iio_backend_extend_chan_spec()) with extended info. In that case,
 * backends are not supposed to give their own callbacks (as they would not
 * have a way to get the backend from indio_dev). This is the setter.
 *
 * RETURNS:
 * Buffer length on success, negative error number on failure.
 */
ssize_t iio_backend_ext_info_set(struct iio_dev *indio_dev, uintptr_t private,
                                 const struct iio_chan_spec *chan,
                                 const char *buf, size_t len)
{
        struct iio_backend *back;

        back = iio_backend_from_indio_dev_parent(indio_dev->dev.parent);
        if (IS_ERR(back))
                return PTR_ERR(back);

        return iio_backend_op_call(back, ext_info_set, private, chan, buf, len);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_ext_info_set, "IIO_BACKEND");

/**
 * iio_backend_interface_type_get - get the interface type used.
 * @back: Backend device
 * @type: Interface type
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_interface_type_get(struct iio_backend *back,
                                   enum iio_backend_interface_type *type)
{
        int ret;

        ret = iio_backend_op_call(back, interface_type_get, type);
        if (ret)
                return ret;

        if (*type >= IIO_BACKEND_INTERFACE_MAX)
                return -EINVAL;

        return 0;
}
EXPORT_SYMBOL_NS_GPL(iio_backend_interface_type_get, "IIO_BACKEND");

/**
 * iio_backend_data_size_set - set the data width/size in the data bus.
 * @back: Backend device
 * @size: Size in bits
 *
 * Some frontend devices can dynamically control the word/data size on the
 * interface/data bus. Hence, the backend device needs to be aware of it so
 * data can be correctly transferred.
 *
 * Return:
 * 0 on success, negative error number on failure.
 */
int iio_backend_data_size_set(struct iio_backend *back, unsigned int size)
{
        if (!size)
                return -EINVAL;

        return iio_backend_op_call(back, data_size_set, size);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_data_size_set, "IIO_BACKEND");

/**
 * iio_backend_oversampling_ratio_set - set the oversampling ratio
 * @back: Backend device
 * @chan: Channel number
 * @ratio: The oversampling ratio - value 1 corresponds to no oversampling.
 *
 * Return:
 * 0 on success, negative error number on failure.
 */
int iio_backend_oversampling_ratio_set(struct iio_backend *back,
                                       unsigned int chan,
                                       unsigned int ratio)
{
        return iio_backend_op_call(back, oversampling_ratio_set, chan, ratio);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_oversampling_ratio_set, "IIO_BACKEND");

/**
 * iio_backend_extend_chan_spec - Extend an IIO channel
 * @back: Backend device
 * @chan: IIO channel
 *
 * Some backends may have their own functionalities and hence be capable of
 * extending a frontend's channel.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_extend_chan_spec(struct iio_backend *back,
                                 struct iio_chan_spec *chan)
{
        const struct iio_chan_spec_ext_info *frontend_ext_info = chan->ext_info;
        const struct iio_chan_spec_ext_info *back_ext_info;
        int ret;

        ret = iio_backend_op_call(back, extend_chan_spec, chan);
        if (ret)
                return ret;
        /*
         * Let's keep things simple for now. Don't allow overwriting the
         * frontend's extended info. If ever needed, we can support appending
         * it.
         */
        if (frontend_ext_info && chan->ext_info != frontend_ext_info)
                return -EOPNOTSUPP;
        if (!chan->ext_info)
                return 0;

        /* Don't allow backends to get creative and force their own handlers */
        for (back_ext_info = chan->ext_info; back_ext_info->name; back_ext_info++) {
                if (back_ext_info->read != iio_backend_ext_info_get)
                        return -EINVAL;
                if (back_ext_info->write != iio_backend_ext_info_set)
                        return -EINVAL;
        }

        return 0;
}
EXPORT_SYMBOL_NS_GPL(iio_backend_extend_chan_spec, "IIO_BACKEND");
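/*
 * A hedged sketch of a backend extending a channel with its own extended
 * info (hypothetical names; note that .read/.write must be the helpers
 * above or iio_backend_extend_chan_spec() rejects the spec):
 *
 *      static const struct iio_chan_spec_ext_info fake_back_ext_info[] = {
 *              {
 *                      .name = "example_attr",
 *                      .shared = IIO_SEPARATE,
 *                      .read = iio_backend_ext_info_get,
 *                      .write = iio_backend_ext_info_set,
 *              },
 *              { }
 *      };
 *
 *      static int fake_back_extend_chan_spec(struct iio_backend *back,
 *                                            struct iio_chan_spec *chan)
 *      {
 *              if (!chan->ext_info)
 *                      chan->ext_info = fake_back_ext_info;
 *              return 0;
 *      }
 */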

static void iio_backend_release(void *arg)
{
        struct iio_backend *back = arg;

        module_put(back->owner);
}

static int __devm_iio_backend_get(struct device *dev, struct iio_backend *back)
{
        struct device_link *link;
        int ret;

        /*
         * Make sure the provider cannot be unloaded before the consumer
         * module. Note that device_links would still guarantee that nothing
         * is accessible (and breaks) but this makes it explicit that the
         * consumer module must also be unloaded.
         */
        if (!try_module_get(back->owner))
                return dev_err_probe(dev, -ENODEV,
                                     "Cannot get module reference\n");

        ret = devm_add_action_or_reset(dev, iio_backend_release, back);
        if (ret)
                return ret;

        link = device_link_add(dev, back->dev, DL_FLAG_AUTOREMOVE_CONSUMER);
        if (!link)
                return dev_err_probe(dev, -EINVAL,
                                     "Could not link to supplier(%s)\n",
                                     dev_name(back->dev));

        back->frontend_dev = dev;

        dev_dbg(dev, "Found backend(%s) device\n", dev_name(back->dev));

        return 0;
}

/**
 * iio_backend_filter_type_set - Set filter type
 * @back: Backend device
 * @type: Filter type.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_filter_type_set(struct iio_backend *back,
                                enum iio_backend_filter_type type)
{
        if (type >= IIO_BACKEND_FILTER_TYPE_MAX)
                return -EINVAL;

        return iio_backend_op_call(back, filter_type_set, type);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_filter_type_set, "IIO_BACKEND");

/**
 * iio_backend_interface_data_align - Perform the data alignment process.
 * @back: Backend device
 * @timeout_us: Timeout value in us.
 *
 * When activated, it initiates a process that aligns the sample's most
 * significant bit (MSB) based solely on the captured data, without
 * considering any other external signals.
 *
 * The timeout_us value must be greater than 0.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_interface_data_align(struct iio_backend *back, u32 timeout_us)
{
        if (!timeout_us)
                return -EINVAL;

        return iio_backend_op_call(back, interface_data_align, timeout_us);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_interface_data_align, "IIO_BACKEND");

/**
 * iio_backend_num_lanes_set - Number of lanes enabled.
 * @back: Backend device
 * @num_lanes: Number of lanes.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_num_lanes_set(struct iio_backend *back, unsigned int num_lanes)
{
        if (!num_lanes)
                return -EINVAL;

        return iio_backend_op_call(back, num_lanes_set, num_lanes);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_num_lanes_set, "IIO_BACKEND");

/**
 * iio_backend_ddr_enable - Enable interface DDR (Double Data Rate) mode
 * @back: Backend device
 *
 * Enable DDR; data is generated by the IP on each edge (rising and falling)
 * of the bus clock signal.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_ddr_enable(struct iio_backend *back)
{
        return iio_backend_op_call(back, ddr_enable);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_ddr_enable, "IIO_BACKEND");

/**
 * iio_backend_ddr_disable - Disable interface DDR (Double Data Rate) mode
 * @back: Backend device
 *
 * Disable DDR, setting the interface into SDR (Single Data Rate) mode.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_ddr_disable(struct iio_backend *back)
{
        return iio_backend_op_call(back, ddr_disable);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_ddr_disable, "IIO_BACKEND");

/**
 * iio_backend_data_stream_enable - Enable data stream
 * @back: Backend device
 *
 * Enable data streaming over the bus interface.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_data_stream_enable(struct iio_backend *back)
{
        return iio_backend_op_call(back, data_stream_enable);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_data_stream_enable, "IIO_BACKEND");

/**
 * iio_backend_data_stream_disable - Disable data stream
 * @back: Backend device
 *
 * Disable data streaming over the bus interface.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_data_stream_disable(struct iio_backend *back)
{
        return iio_backend_op_call(back, data_stream_disable);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_data_stream_disable, "IIO_BACKEND");

/**
 * iio_backend_data_transfer_addr - Set data address.
 * @back: Backend device
 * @address: Data register address
 *
 * Some devices may need to inform the backend about an address
 * where to read or write the data.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int iio_backend_data_transfer_addr(struct iio_backend *back, u32 address)
{
        return iio_backend_op_call(back, data_transfer_addr, address);
}
EXPORT_SYMBOL_NS_GPL(iio_backend_data_transfer_addr, "IIO_BACKEND");

static struct iio_backend *__devm_iio_backend_fwnode_get(struct device *dev, const char *name,
                                                         struct fwnode_handle *fwnode)
{
        struct fwnode_handle *fwnode_back;
        struct iio_backend *back;
        unsigned int index;
        int ret;

        if (name) {
                ret = device_property_match_string(dev, "io-backend-names",
                                                   name);
                if (ret < 0)
                        return ERR_PTR(ret);
                index = ret;
        } else {
                index = 0;
        }

        fwnode_back = fwnode_find_reference(fwnode, "io-backends", index);
        if (IS_ERR(fwnode_back))
                return dev_err_cast_probe(dev, fwnode_back,
                                          "Cannot get Firmware reference\n");

        guard(mutex)(&iio_back_lock);
        list_for_each_entry(back, &iio_back_list, entry) {
                if (!device_match_fwnode(back->dev, fwnode_back))
                        continue;

                fwnode_handle_put(fwnode_back);
                ret = __devm_iio_backend_get(dev, back);
                if (ret)
                        return ERR_PTR(ret);

                if (name)
                        back->idx = index;

                return back;
        }

        fwnode_handle_put(fwnode_back);
        return ERR_PTR(-EPROBE_DEFER);
}

/**
 * devm_iio_backend_get - Device managed backend device get
 * @dev: Consumer device for the backend
 * @name: Backend name
 *
 * Gets the backend associated with @dev.
 *
 * RETURNS:
 * A backend pointer, negative error pointer otherwise.
 */
struct iio_backend *devm_iio_backend_get(struct device *dev, const char *name)
{
        return __devm_iio_backend_fwnode_get(dev, name, dev_fwnode(dev));
}
EXPORT_SYMBOL_NS_GPL(devm_iio_backend_get, "IIO_BACKEND");
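/*
 * The lookup above is driven by the "io-backends" (and optional
 * "io-backend-names") firmware properties. A hedged devicetree sketch
 * (hypothetical nodes/compatibles):
 *
 *      axi_adc: backend@44a00000 {
 *              compatible = "vendor,fpga-adc-core";
 *              ...
 *      };
 *
 *      adc@0 {
 *              ...
 *              io-backends = <&axi_adc>;
 *      };
 */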

/**
 * devm_iio_backend_fwnode_get - Device managed backend firmware node get
 * @dev: Consumer device for the backend
 * @name: Backend name
 * @fwnode: Firmware node of the backend consumer
 *
 * Gets the backend associated with a firmware node.
 *
 * RETURNS:
 * A backend pointer, negative error pointer otherwise.
 */
struct iio_backend *devm_iio_backend_fwnode_get(struct device *dev,
                                                const char *name,
                                                struct fwnode_handle *fwnode)
{
        return __devm_iio_backend_fwnode_get(dev, name, fwnode);
}
EXPORT_SYMBOL_NS_GPL(devm_iio_backend_fwnode_get, "IIO_BACKEND");

/**
 * __devm_iio_backend_get_from_fwnode_lookup - Device managed fwnode backend device get
 * @dev: Consumer device for the backend
 * @fwnode: Firmware node of the backend device
 *
 * Search the backend list for a device matching @fwnode.
 * This API should not be used; it is only present to prevent the first
 * user of this framework from breaking its DT ABI.
 *
 * RETURNS:
 * A backend pointer, negative error pointer otherwise.
 */
struct iio_backend *
__devm_iio_backend_get_from_fwnode_lookup(struct device *dev,
                                          struct fwnode_handle *fwnode)
{
        struct iio_backend *back;
        int ret;

        guard(mutex)(&iio_back_lock);
        list_for_each_entry(back, &iio_back_list, entry) {
                if (!device_match_fwnode(back->dev, fwnode))
                        continue;

                ret = __devm_iio_backend_get(dev, back);
                if (ret)
                        return ERR_PTR(ret);

                return back;
        }

        return ERR_PTR(-EPROBE_DEFER);
}
EXPORT_SYMBOL_NS_GPL(__devm_iio_backend_get_from_fwnode_lookup, "IIO_BACKEND");

/**
 * iio_backend_get_priv - Get driver private data
 * @back: Backend device
 */
void *iio_backend_get_priv(const struct iio_backend *back)
{
        return back->priv;
}
EXPORT_SYMBOL_NS_GPL(iio_backend_get_priv, "IIO_BACKEND");

static void iio_backend_unregister(void *arg)
{
        struct iio_backend *back = arg;

        guard(mutex)(&iio_back_lock);
        list_del(&back->entry);
}

/**
 * devm_iio_backend_register - Device managed backend device register
 * @dev: Backend device being registered
 * @info: Backend info
 * @priv: Device private data
 *
 * @info is mandatory. Not providing it results in -EINVAL.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
int devm_iio_backend_register(struct device *dev,
                              const struct iio_backend_info *info, void *priv)
{
        struct iio_backend *back;

        if (!info || !info->ops)
                return dev_err_probe(dev, -EINVAL, "No backend ops given\n");

        /*
         * Through device_links, we guarantee that a frontend device cannot be
         * bound/exist if the backend driver is not around. Hence, we can bind
         * the backend object lifetime with the device being passed since
         * removing it will tear the frontend/consumer down.
         */
        back = devm_kzalloc(dev, sizeof(*back), GFP_KERNEL);
        if (!back)
                return -ENOMEM;

        back->ops = info->ops;
        back->name = info->name;
        back->owner = dev->driver->owner;
        back->dev = dev;
        back->priv = priv;
        scoped_guard(mutex, &iio_back_lock)
                list_add(&back->entry, &iio_back_list);

        return devm_add_action_or_reset(dev, iio_backend_unregister, back);
}
EXPORT_SYMBOL_NS_GPL(devm_iio_backend_register, "IIO_BACKEND");
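/*
 * A minimal sketch of a backend driver registering itself (hypothetical
 * names; the ops list trimmed):
 *
 *      static const struct iio_backend_ops fake_back_ops = {
 *              .chan_enable = fake_back_chan_enable,
 *              .chan_disable = fake_back_chan_disable,
 *              ...
 *      };
 *
 *      static const struct iio_backend_info fake_back_info = {
 *              .name = "fake-backend",
 *              .ops = &fake_back_ops,
 *      };
 *
 *      static int fake_back_probe(struct platform_device *pdev)
 *      {
 *              struct fake_back_state *st;
 *              ...
 *              return devm_iio_backend_register(&pdev->dev,
 *                                               &fake_back_info, st);
 *      }
 */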

MODULE_AUTHOR("Nuno Sa <nuno.sa@analog.com>");
MODULE_DESCRIPTION("Framework to handle complex IIO aggregate devices");
MODULE_LICENSE("GPL");