xref: /linux/include/linux/iio/iio.h (revision 3a39d672e7f48b8d6b91a09afa4b55352773b4b5)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 
3 /* The industrial I/O core
4  *
5  * Copyright (c) 2008 Jonathan Cameron
6  */
7 #ifndef _INDUSTRIAL_IO_H_
8 #define _INDUSTRIAL_IO_H_
9 
10 #include <linux/device.h>
11 #include <linux/cdev.h>
12 #include <linux/cleanup.h>
13 #include <linux/slab.h>
14 #include <linux/iio/types.h>
15 /* IIO TODO LIST */
16 /*
17  * Provide means of adjusting timer accuracy.
18  * Currently assumes nano seconds.
19  */
20 
21 struct fwnode_reference_args;
22 
/**
 * enum iio_shared_by - whether an info attribute is per-channel or shared
 * @IIO_SEPARATE:	attribute belongs to one specific channel
 * @IIO_SHARED_BY_TYPE:	attribute is shared by all channels of the same type
 * @IIO_SHARED_BY_DIR:	attribute is shared by all channels of the same
 *			direction
 * @IIO_SHARED_BY_ALL:	attribute is shared by all channels
 */
enum iio_shared_by {
	IIO_SEPARATE,
	IIO_SHARED_BY_TYPE,
	IIO_SHARED_BY_DIR,
	IIO_SHARED_BY_ALL
};
29 
/**
 * enum iio_endian - byte order of a scan element
 * @IIO_CPU:	native CPU byte order
 * @IIO_BE:	big endian
 * @IIO_LE:	little endian
 */
enum iio_endian {
	IIO_CPU,
	IIO_BE,
	IIO_LE,
};
35 
36 struct iio_chan_spec;
37 struct iio_dev;
38 
/**
 * struct iio_chan_spec_ext_info - Extended channel info attribute
 * @name:	Info attribute name
 * @shared:	Whether this attribute is shared between all channels.
 * @read:	Read callback for this info attribute, may be NULL.
 * @write:	Write callback for this info attribute, may be NULL.
 * @private:	Data private to the driver. Passed as the second argument
 *		to the @read and @write callbacks.
 */
struct iio_chan_spec_ext_info {
	const char *name;
	enum iio_shared_by shared;
	ssize_t (*read)(struct iio_dev *, uintptr_t private,
			struct iio_chan_spec const *, char *buf);
	ssize_t (*write)(struct iio_dev *, uintptr_t private,
			 struct iio_chan_spec const *, const char *buf,
			 size_t len);
	uintptr_t private;
};
57 
/**
 * struct iio_enum - Enum channel info attribute
 * @items:	An array of strings.
 * @num_items:	Length of the item array.
 * @set:	Set callback function, may be NULL.
 * @get:	Get callback function, may be NULL.
 *
 * The iio_enum struct can be used to implement enum style channel attributes.
 * Enum style attributes are those which have a set of strings which map to
 * unsigned integer values. The IIO enum helper code takes care of mapping
 * between value and string as well as generating a "_available" file which
 * contains a list of all available items. The set callback will be called when
 * the attribute is updated. The last parameter is the index to the newly
 * activated item. The get callback will be used to query the currently active
 * item and is supposed to return the index for it.
 */
struct iio_enum {
	/* item strings, indexed by the integer value they map to */
	const char * const *items;
	unsigned int num_items;
	int (*set)(struct iio_dev *, const struct iio_chan_spec *, unsigned int);
	int (*get)(struct iio_dev *, const struct iio_chan_spec *);
};
80 
81 ssize_t iio_enum_available_read(struct iio_dev *indio_dev,
82 	uintptr_t priv, const struct iio_chan_spec *chan, char *buf);
83 ssize_t iio_enum_read(struct iio_dev *indio_dev,
84 	uintptr_t priv, const struct iio_chan_spec *chan, char *buf);
85 ssize_t iio_enum_write(struct iio_dev *indio_dev,
86 	uintptr_t priv, const struct iio_chan_spec *chan, const char *buf,
87 	size_t len);
88 
/**
 * IIO_ENUM() - Initialize enum extended channel attribute
 * @_name:	Attribute name
 * @_shared:	Whether the attribute is shared between all channels
 * @_e:		Pointer to an iio_enum struct
 *
 * Expands to a struct iio_chan_spec_ext_info initializer wiring up the
 * generic enum read/write helpers, with @_e stashed in @private.
 * This should usually be used together with IIO_ENUM_AVAILABLE()
 */
#define IIO_ENUM(_name, _shared, _e) \
{ \
	.name = (_name), \
	.shared = (_shared), \
	.read = iio_enum_read, \
	.write = iio_enum_write, \
	.private = (uintptr_t)(_e), \
}
105 
/**
 * IIO_ENUM_AVAILABLE() - Initialize enum available extended channel attribute
 * @_name:	Attribute name ("_available" will be appended to the name)
 * @_shared:	Whether the attribute is shared between all channels
 * @_e:		Pointer to an iio_enum struct
 *
 * Creates a read only attribute which lists all the available enum items in a
 * space separated list. This should usually be used together with IIO_ENUM()
 */
#define IIO_ENUM_AVAILABLE(_name, _shared, _e) \
{ \
	.name = (_name "_available"), \
	.shared = (_shared), \
	.read = iio_enum_available_read, \
	.private = (uintptr_t)(_e), \
}
122 
/**
 * struct iio_mount_matrix - iio mounting matrix
 * @rotation: 3 dimensional space rotation matrix defining sensor alignment with
 *            main hardware. Stored row-major as nine textual coefficients.
 */
struct iio_mount_matrix {
	const char *rotation[9];
};
131 
132 ssize_t iio_show_mount_matrix(struct iio_dev *indio_dev, uintptr_t priv,
133 			      const struct iio_chan_spec *chan, char *buf);
134 int iio_read_mount_matrix(struct device *dev, struct iio_mount_matrix *matrix);
/* Accessor returning the mount matrix associated with a channel. */
typedef const struct iio_mount_matrix *
	(iio_get_mount_matrix_t)(const struct iio_dev *indio_dev,
				 const struct iio_chan_spec *chan);

/**
 * IIO_MOUNT_MATRIX() - Initialize mount matrix extended channel attribute
 * @_shared:	Whether the attribute is shared between all channels
 * @_get:	Pointer to an iio_get_mount_matrix_t accessor
 *
 * The attribute is always named "mount_matrix"; @_get is stored in @private
 * for use by the iio_show_mount_matrix() read callback.
 */
#define IIO_MOUNT_MATRIX(_shared, _get) \
{ \
	.name = "mount_matrix", \
	.shared = (_shared), \
	.read = iio_show_mount_matrix, \
	.private = (uintptr_t)(_get), \
}
152 
/**
 * struct iio_event_spec - specification for a channel event
 * @type:		    Type of the event
 * @dir:		    Direction of the event
 * @mask_separate:	    Bit mask of enum iio_event_info values. Attributes
 *			    set in this mask will be registered per channel.
 * @mask_shared_by_type:    Bit mask of enum iio_event_info values. Attributes
 *			    set in this mask will be shared by channel type.
 * @mask_shared_by_dir:	    Bit mask of enum iio_event_info values. Attributes
 *			    set in this mask will be shared by channel type and
 *			    direction.
 * @mask_shared_by_all:	    Bit mask of enum iio_event_info values. Attributes
 *			    set in this mask will be shared by all channels.
 *
 * Each mask is a bitmap indexed by enum iio_event_info (i.e. BIT(info)).
 */
struct iio_event_spec {
	enum iio_event_type type;
	enum iio_event_direction dir;
	unsigned long mask_separate;
	unsigned long mask_shared_by_type;
	unsigned long mask_shared_by_dir;
	unsigned long mask_shared_by_all;
};
175 
/**
 * struct iio_scan_type - specification for channel data format in buffer
 * @sign:		's' or 'u' to specify signed or unsigned
 * @realbits:		Number of valid bits of data
 * @storagebits:	Realbits + padding, i.e. the total number of bits one
 *			scan element occupies in the buffer
 * @shift:		Shift right by this before masking out realbits.
 * @repeat:		Number of times real/storage bits repeats. When the
 *			repeat element is more than 1, then the type element in
 *			sysfs will show a repeat value. Otherwise, the number
 *			of repetitions is omitted.
 * @endianness:		little or big endian
 */
struct iio_scan_type {
	char	sign;
	u8	realbits;
	u8	storagebits;
	u8	shift;
	u8	repeat;
	enum iio_endian endianness;
};
196 
/**
 * struct iio_chan_spec - specification of a single channel
 * @type:		What type of measurement is the channel making.
 * @channel:		What number do we wish to assign the channel.
 * @channel2:		If there is a second number for a differential
 *			channel then this is it. If modified is set then the
 *			value here specifies the modifier.
 * @address:		Driver specific identifier.
 * @scan_index:		Monotonic index to give ordering in scans when read
 *			from a buffer.
 * @scan_type:		struct describing the scan type - mutually exclusive
 *			with ext_scan_type.
 * @ext_scan_type:	Used in rare cases where there is more than one scan
 *			format for a channel. When this is used, the flag
 *			has_ext_scan_type must be set and the driver must
 *			implement get_current_scan_type in struct iio_info.
 * @num_ext_scan_type:	Number of elements in ext_scan_type.
 * @info_mask_separate: What information is to be exported that is specific to
 *			this channel.
 * @info_mask_separate_available: What availability information is to be
 *			exported that is specific to this channel.
 * @info_mask_shared_by_type: What information is to be exported that is shared
 *			by all channels of the same type.
 * @info_mask_shared_by_type_available: What availability information is to be
 *			exported that is shared by all channels of the same
 *			type.
 * @info_mask_shared_by_dir: What information is to be exported that is shared
 *			by all channels of the same direction.
 * @info_mask_shared_by_dir_available: What availability information is to be
 *			exported that is shared by all channels of the same
 *			direction.
 * @info_mask_shared_by_all: What information is to be exported that is shared
 *			by all channels.
 * @info_mask_shared_by_all_available: What availability information is to be
 *			exported that is shared by all channels.
 * @event_spec:		Array of events which should be registered for this
 *			channel.
 * @num_event_specs:	Size of the event_spec array.
 * @ext_info:		Array of extended info attributes for this channel.
 *			The array is NULL terminated, the last element should
 *			have its name field set to NULL.
 * @extend_name:	Allows labeling of channel attributes with an
 *			informative name. Note this has no effect on event
 *			codes etc., unlike modifiers.
 *			This field is deprecated in favour of providing
 *			iio_info->read_label() to override the label, which
 *			unlike @extend_name does not affect sysfs filenames.
 * @datasheet_name:	A name used in in-kernel mapping of channels. It should
 *			correspond to the first name that the channel is referred
 *			to by in the datasheet (e.g. IND), or the nearest
 *			possible compound name (e.g. IND-INC).
 * @modified:		Does a modifier apply to this channel. What these are
 *			depends on the channel type.  Modifier is set in
 *			channel2. Examples are IIO_MOD_X for axial sensors about
 *			the 'x' axis.
 * @indexed:		Specify the channel has a numerical index. If not,
 *			the channel index number will be suppressed for sysfs
 *			attributes but not for event codes.
 * @output:		Channel is output.
 * @differential:	Channel is differential.
 * @has_ext_scan_type:	True if ext_scan_type is used instead of scan_type.
 */
struct iio_chan_spec {
	enum iio_chan_type	type;
	int			channel;
	int			channel2;
	unsigned long		address;
	int			scan_index;
	union {
		struct iio_scan_type scan_type;
		struct {
			const struct iio_scan_type *ext_scan_type;
			unsigned int num_ext_scan_type;
		};
	};
	long			info_mask_separate;
	long			info_mask_separate_available;
	long			info_mask_shared_by_type;
	long			info_mask_shared_by_type_available;
	long			info_mask_shared_by_dir;
	long			info_mask_shared_by_dir_available;
	long			info_mask_shared_by_all;
	long			info_mask_shared_by_all_available;
	const struct iio_event_spec *event_spec;
	unsigned int		num_event_specs;
	const struct iio_chan_spec_ext_info *ext_info;
	const char		*extend_name;
	const char		*datasheet_name;
	unsigned		modified:1;
	unsigned		indexed:1;
	unsigned		output:1;
	unsigned		differential:1;
	unsigned		has_ext_scan_type:1;
};
291 
292 
293 /**
294  * iio_channel_has_info() - Checks whether a channel supports a info attribute
295  * @chan: The channel to be queried
296  * @type: Type of the info attribute to be checked
297  *
298  * Returns true if the channels supports reporting values for the given info
299  * attribute type, false otherwise.
300  */
iio_channel_has_info(const struct iio_chan_spec * chan,enum iio_chan_info_enum type)301 static inline bool iio_channel_has_info(const struct iio_chan_spec *chan,
302 	enum iio_chan_info_enum type)
303 {
304 	return (chan->info_mask_separate & BIT(type)) |
305 		(chan->info_mask_shared_by_type & BIT(type)) |
306 		(chan->info_mask_shared_by_dir & BIT(type)) |
307 		(chan->info_mask_shared_by_all & BIT(type));
308 }
309 
310 /**
311  * iio_channel_has_available() - Checks if a channel has an available attribute
312  * @chan: The channel to be queried
313  * @type: Type of the available attribute to be checked
314  *
315  * Returns true if the channel supports reporting available values for the
316  * given attribute type, false otherwise.
317  */
iio_channel_has_available(const struct iio_chan_spec * chan,enum iio_chan_info_enum type)318 static inline bool iio_channel_has_available(const struct iio_chan_spec *chan,
319 					     enum iio_chan_info_enum type)
320 {
321 	return (chan->info_mask_separate_available & BIT(type)) |
322 		(chan->info_mask_shared_by_type_available & BIT(type)) |
323 		(chan->info_mask_shared_by_dir_available & BIT(type)) |
324 		(chan->info_mask_shared_by_all_available & BIT(type));
325 }
326 
/*
 * Standard software timestamp channel: a signed 64-bit scan element at
 * scan index @_si, with channel number -1.
 */
#define IIO_CHAN_SOFT_TIMESTAMP(_si) {					\
	.type = IIO_TIMESTAMP,						\
	.channel = -1,							\
	.scan_index = _si,						\
	.scan_type = {							\
		.sign = 's',						\
		.realbits = 64,						\
		.storagebits = 64,					\
	},								\
}
337 
338 s64 iio_get_time_ns(const struct iio_dev *indio_dev);
339 
340 /*
341  * Device operating modes
342  * @INDIO_DIRECT_MODE: There is an access to either:
343  * a) The last single value available for devices that do not provide
344  *    on-demand reads.
345  * b) A new value after performing an on-demand read otherwise.
346  * On most devices, this is a single-shot read. On some devices with data
347  * streams without an 'on-demand' function, this might also be the 'last value'
348  * feature. Above all, this mode internally means that we are not in any of the
349  * other modes, and sysfs reads should work.
350  * Device drivers should inform the core if they support this mode.
351  * @INDIO_BUFFER_TRIGGERED: Common mode when dealing with kfifo buffers.
352  * It indicates that an explicit trigger is required. This requests the core to
353  * attach a poll function when enabling the buffer, which is indicated by the
354  * _TRIGGERED suffix.
355  * The core will ensure this mode is set when registering a triggered buffer
356  * with iio_triggered_buffer_setup().
357  * @INDIO_BUFFER_SOFTWARE: Another kfifo buffer mode, but not event triggered.
358  * No poll function can be attached because there is no triggered infrastructure
359  * we can use to cause capture. There is a kfifo that the driver will fill, but
360  * not "only one scan at a time". Typically, hardware will have a buffer that
361  * can hold multiple scans. Software may read one or more scans at a single time
362  * and push the available data to a Kfifo. This means the core will not attach
363  * any poll function when enabling the buffer.
364  * The core will ensure this mode is set when registering a simple kfifo buffer
365  * with devm_iio_kfifo_buffer_setup().
366  * @INDIO_BUFFER_HARDWARE: For specific hardware, if unsure do not use this mode.
367  * Same as above but this time the buffer is not a kfifo where we have direct
368  * access to the data. Instead, the consumer driver must access the data through
369  * non software visible channels (or DMA when there is no demux possible in
370  * software)
371  * The core will ensure this mode is set when registering a dmaengine buffer
372  * with devm_iio_dmaengine_buffer_setup().
373  * @INDIO_EVENT_TRIGGERED: Very unusual mode.
374  * Triggers usually refer to an external event which will start data capture.
375  * Here it is kind of the opposite as, a particular state of the data might
376  * produce an event which can be considered as an event. We don't necessarily
377  * have access to the data itself, but to the event produced. For example, this
378  * can be a threshold detector. The internal path of this mode is very close to
379  * the INDIO_BUFFER_TRIGGERED mode.
380  * The core will ensure this mode is set when registering a triggered event.
381  * @INDIO_HARDWARE_TRIGGERED: Very unusual mode.
382  * Here, triggers can result in data capture and can be routed to multiple
383  * hardware components, which make them close to regular triggers in the way
384  * they must be managed by the core, but without the entire interrupts/poll
385  * functions burden. Interrupts are irrelevant as the data flow is hardware
386  * mediated and distributed.
387  */
388 #define INDIO_DIRECT_MODE		0x01
389 #define INDIO_BUFFER_TRIGGERED		0x02
390 #define INDIO_BUFFER_SOFTWARE		0x04
391 #define INDIO_BUFFER_HARDWARE		0x08
392 #define INDIO_EVENT_TRIGGERED		0x10
393 #define INDIO_HARDWARE_TRIGGERED	0x20
394 
395 #define INDIO_ALL_BUFFER_MODES					\
396 	(INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE | INDIO_BUFFER_SOFTWARE)
397 
398 #define INDIO_ALL_TRIGGERED_MODES	\
399 	(INDIO_BUFFER_TRIGGERED		\
400 	 | INDIO_EVENT_TRIGGERED	\
401 	 | INDIO_HARDWARE_TRIGGERED)
402 
403 #define INDIO_MAX_RAW_ELEMENTS		4
404 
/**
 * struct iio_val_int_plus_micro - value split into integer and micro parts
 * @integer: integer part of the value
 * @micro: fractional part of the value, in micro units
 *	   (the IIO_VAL_INT_PLUS_MICRO representation)
 */
struct iio_val_int_plus_micro {
	int integer;
	int micro;
};
409 
410 struct iio_trigger; /* forward declaration */
411 
/**
 * struct iio_info - constant information about device
 * @event_attrs:	event control attributes
 * @attrs:		general purpose device attributes
 * @read_raw:		function to request a value from the device.
 *			mask specifies which value. Note 0 means a reading of
 *			the channel in question.  Return value will specify the
 *			type of value returned by the device. val and val2 will
 *			contain the elements making up the returned value.
 * @read_raw_multi:	function to return values from the device.
 *			mask specifies which value. Note 0 means a reading of
 *			the channel in question.  Return value will specify the
 *			type of value returned by the device. vals pointer
 *			contain the elements making up the returned value.
 *			max_len specifies maximum number of elements
 *			vals pointer can contain. val_len is used to return
 *			length of valid elements in vals.
 * @read_avail:		function to return the available values from the device.
 *			mask specifies which value. Note 0 means the available
 *			values for the channel in question.  Return value
 *			specifies if a IIO_AVAIL_LIST or a IIO_AVAIL_RANGE is
 *			returned in vals. The type of the vals are returned in
 *			type and the number of vals is returned in length. For
 *			ranges, there are always three vals returned; min, step
 *			and max. For lists, all possible values are enumerated.
 * @write_raw:		function to write a value to the device.
 *			Parameters are the same as for read_raw.
 * @read_label:		function to request label name for a specified channel,
 *			for better channel identification.
 * @write_raw_get_fmt:	callback function to query the expected
 *			format/precision. If not set by the driver, write_raw
 *			returns IIO_VAL_INT_PLUS_MICRO.
 * @read_event_config:	find out if the event is enabled.
 * @write_event_config:	set if the event is enabled.
 * @read_event_value:	read a configuration value associated with the event.
 * @write_event_value:	write a configuration value for the event.
 * @read_event_label:	function to request label name for a specified event,
 *			for better event identification.
 * @validate_trigger:	function to validate the trigger when the
 *			current trigger gets changed.
 * @get_current_scan_type: must be implemented by drivers that use ext_scan_type
 *			in the channel spec to return the index of the currently
 *			active ext_scan type for a channel.
 * @update_scan_mode:	function to configure device and scan buffer when
 *			channels have changed
 * @debugfs_reg_access:	function to read or write register value of device
 * @fwnode_xlate:	fwnode based function pointer to obtain channel specifier index.
 * @hwfifo_set_watermark: function pointer to set the current hardware
 *			fifo watermark level; see hwfifo_* entries in
 *			Documentation/ABI/testing/sysfs-bus-iio for details on
 *			how the hardware fifo operates
 * @hwfifo_flush_to_buffer: function pointer to flush the samples stored
 *			in the hardware fifo to the device buffer. The driver
 *			should not flush more than count samples. The function
 *			must return the number of samples flushed, 0 if no
 *			samples were flushed or a negative integer if no samples
 *			were flushed and there was an error.
 **/
struct iio_info {
	const struct attribute_group	*event_attrs;
	const struct attribute_group	*attrs;

	int (*read_raw)(struct iio_dev *indio_dev,
			struct iio_chan_spec const *chan,
			int *val,
			int *val2,
			long mask);

	int (*read_raw_multi)(struct iio_dev *indio_dev,
			struct iio_chan_spec const *chan,
			int max_len,
			int *vals,
			int *val_len,
			long mask);

	int (*read_avail)(struct iio_dev *indio_dev,
			  struct iio_chan_spec const *chan,
			  const int **vals,
			  int *type,
			  int *length,
			  long mask);

	int (*write_raw)(struct iio_dev *indio_dev,
			 struct iio_chan_spec const *chan,
			 int val,
			 int val2,
			 long mask);

	int (*read_label)(struct iio_dev *indio_dev,
			 struct iio_chan_spec const *chan,
			 char *label);

	int (*write_raw_get_fmt)(struct iio_dev *indio_dev,
			 struct iio_chan_spec const *chan,
			 long mask);

	int (*read_event_config)(struct iio_dev *indio_dev,
				 const struct iio_chan_spec *chan,
				 enum iio_event_type type,
				 enum iio_event_direction dir);

	int (*write_event_config)(struct iio_dev *indio_dev,
				  const struct iio_chan_spec *chan,
				  enum iio_event_type type,
				  enum iio_event_direction dir,
				  int state);

	int (*read_event_value)(struct iio_dev *indio_dev,
				const struct iio_chan_spec *chan,
				enum iio_event_type type,
				enum iio_event_direction dir,
				enum iio_event_info info, int *val, int *val2);

	int (*write_event_value)(struct iio_dev *indio_dev,
				 const struct iio_chan_spec *chan,
				 enum iio_event_type type,
				 enum iio_event_direction dir,
				 enum iio_event_info info, int val, int val2);

	int (*read_event_label)(struct iio_dev *indio_dev,
				struct iio_chan_spec const *chan,
				enum iio_event_type type,
				enum iio_event_direction dir,
				char *label);

	int (*validate_trigger)(struct iio_dev *indio_dev,
				struct iio_trigger *trig);
	int (*get_current_scan_type)(const struct iio_dev *indio_dev,
				     const struct iio_chan_spec *chan);
	int (*update_scan_mode)(struct iio_dev *indio_dev,
				const unsigned long *scan_mask);
	int (*debugfs_reg_access)(struct iio_dev *indio_dev,
				  unsigned reg, unsigned writeval,
				  unsigned *readval);
	int (*fwnode_xlate)(struct iio_dev *indio_dev,
			    const struct fwnode_reference_args *iiospec);
	int (*hwfifo_set_watermark)(struct iio_dev *indio_dev, unsigned val);
	int (*hwfifo_flush_to_buffer)(struct iio_dev *indio_dev,
				      unsigned count);
};
552 
553 /**
554  * struct iio_buffer_setup_ops - buffer setup related callbacks
555  * @preenable:		[DRIVER] function to run prior to marking buffer enabled
556  * @postenable:		[DRIVER] function to run after marking buffer enabled
557  * @predisable:		[DRIVER] function to run prior to marking buffer
558  *			disabled
559  * @postdisable:	[DRIVER] function to run after marking buffer disabled
560  * @validate_scan_mask: [DRIVER] function callback to check whether a given
561  *			scan mask is valid for the device.
562  */
563 struct iio_buffer_setup_ops {
564 	int (*preenable)(struct iio_dev *);
565 	int (*postenable)(struct iio_dev *);
566 	int (*predisable)(struct iio_dev *);
567 	int (*postdisable)(struct iio_dev *);
568 	bool (*validate_scan_mask)(struct iio_dev *indio_dev,
569 				   const unsigned long *scan_mask);
570 };
571 
/**
 * struct iio_dev - industrial I/O device
 * @modes:		[DRIVER] bitmask listing all the operating modes
 *			supported by the IIO device. This list should be
 *			initialized before registering the IIO device. It can
 *			also be filled up by the IIO core, as a result of
 *			enabling particular features in the driver
 *			(see iio_triggered_event_setup()).
 * @dev:		[DRIVER] device structure, should be assigned a parent
 *			and owner
 * @buffer:		[DRIVER] any buffer present
 * @scan_bytes:		[INTERN] num bytes captured to be fed to buffer demux
 * @available_scan_masks: [DRIVER] optional array of allowed bitmasks. Sort the
 *			   array in order of preference, the most preferred
 *			   masks first.
 * @masklength:		[INTERN] the length of the mask established from
 *			channels. Marked __private: not to be accessed by
 *			drivers directly.
 * @active_scan_mask:	[INTERN] union of all scan masks requested by buffers
 * @scan_timestamp:	[INTERN] set if any buffers have requested timestamp
 * @trig:		[INTERN] current device trigger (buffer modes)
 * @pollfunc:		[DRIVER] function run on trigger being received
 * @pollfunc_event:	[DRIVER] function run on events trigger being received
 * @channels:		[DRIVER] channel specification structure table
 * @num_channels:	[DRIVER] number of channels specified in @channels.
 * @name:		[DRIVER] name of the device.
 * @label:              [DRIVER] unique name to identify which device this is
 * @info:		[DRIVER] callbacks and constant info from driver
 * @setup_ops:		[DRIVER] callbacks to call before and after buffer
 *			enable/disable
 * @priv:		[DRIVER] reference to driver's private information
 *			**MUST** be accessed **ONLY** via iio_priv() helper
 */
struct iio_dev {
	int				modes;
	struct device			dev;

	struct iio_buffer		*buffer;
	int				scan_bytes;

	const unsigned long		*available_scan_masks;
	unsigned			__private masklength;
	const unsigned long		*active_scan_mask;
	bool				scan_timestamp;
	struct iio_trigger		*trig;
	struct iio_poll_func		*pollfunc;
	struct iio_poll_func		*pollfunc_event;

	struct iio_chan_spec const	*channels;
	int				num_channels;

	const char			*name;
	const char			*label;
	const struct iio_info		*info;
	const struct iio_buffer_setup_ops	*setup_ops;

	void				*priv;
};
629 
630 int iio_device_id(struct iio_dev *indio_dev);
631 int iio_device_get_current_mode(struct iio_dev *indio_dev);
632 bool iio_buffer_enabled(struct iio_dev *indio_dev);
633 
634 const struct iio_chan_spec
635 *iio_find_channel_from_si(struct iio_dev *indio_dev, int si);
/**
 * iio_device_register() - register a device with the IIO subsystem
 * @indio_dev:		Device structure filled by the device driver
 *
 * Records the caller's THIS_MODULE as the owning module.
 **/
#define iio_device_register(indio_dev) \
	__iio_device_register((indio_dev), THIS_MODULE)
int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod);
void iio_device_unregister(struct iio_dev *indio_dev);
/**
 * devm_iio_device_register - Resource-managed iio_device_register()
 * @dev:	Device to allocate iio_dev for
 * @indio_dev:	Device structure filled by the device driver
 *
 * Managed iio_device_register.  The IIO device registered with this
 * function is automatically unregistered on driver detach. This function
 * calls iio_device_register() internally. Refer to that function for more
 * information. The caller's THIS_MODULE is recorded as the owning module.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
#define devm_iio_device_register(dev, indio_dev) \
	__devm_iio_device_register((dev), (indio_dev), THIS_MODULE)
int __devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev,
			       struct module *this_mod);
661 int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp);
662 int iio_device_claim_direct_mode(struct iio_dev *indio_dev);
663 void iio_device_release_direct_mode(struct iio_dev *indio_dev);
664 
/*
 * This autocleanup logic is normally used via
 * iio_device_claim_direct_scoped(). The guard claims direct mode on entry
 * and releases it automatically when the guard goes out of scope.
 */
DEFINE_GUARD(iio_claim_direct, struct iio_dev *, iio_device_claim_direct_mode(_T),
	     iio_device_release_direct_mode(_T))

/*
 * Conditional form of the guard above: evaluates to the device pointer when
 * direct mode was successfully claimed, and to NULL when
 * iio_device_claim_direct_mode() failed, so scoped_cond_guard() can run the
 * failure path instead of the guarded block.
 */
DEFINE_GUARD_COND(iio_claim_direct, _try, ({
			struct iio_dev *dev;
			int d = iio_device_claim_direct_mode(_T);

			if (d < 0)
				dev = NULL;
			else
				dev = _T;
			dev;
		}))
682 
/**
 * iio_device_claim_direct_scoped() - Scoped call to iio_device_claim_direct.
 * @fail: What to do on failure to claim device.
 * @iio_dev: Pointer to the IIO device structure
 */
#define iio_device_claim_direct_scoped(fail, iio_dev) \
	scoped_cond_guard(iio_claim_direct_try, fail, iio_dev)
690 
691 int iio_device_claim_buffer_mode(struct iio_dev *indio_dev);
692 void iio_device_release_buffer_mode(struct iio_dev *indio_dev);
693 
694 extern const struct bus_type iio_bus_type;
695 
696 /**
697  * iio_device_put() - reference counted deallocation of struct device
698  * @indio_dev: IIO device structure containing the device
699  **/
iio_device_put(struct iio_dev * indio_dev)700 static inline void iio_device_put(struct iio_dev *indio_dev)
701 {
702 	if (indio_dev)
703 		put_device(&indio_dev->dev);
704 }
705 
706 clockid_t iio_device_get_clock(const struct iio_dev *indio_dev);
707 int iio_device_set_clock(struct iio_dev *indio_dev, clockid_t clock_id);
708 
/**
 * dev_to_iio_dev() - Get IIO device struct from a device struct
 * @dev: 		The device embedded in the IIO device
 *
 * Note: The device must be a IIO device, otherwise the result is undefined.
 *
 * Returns: pointer to the enclosing struct iio_dev.
 */
static inline struct iio_dev *dev_to_iio_dev(struct device *dev)
{
	return container_of(dev, struct iio_dev, dev);
}
719 
720 /**
721  * iio_device_get() - increment reference count for the device
722  * @indio_dev: 		IIO device structure
723  *
724  * Returns: The passed IIO device
725  **/
iio_device_get(struct iio_dev * indio_dev)726 static inline struct iio_dev *iio_device_get(struct iio_dev *indio_dev)
727 {
728 	return indio_dev ? dev_to_iio_dev(get_device(&indio_dev->dev)) : NULL;
729 }
730 
/**
 * iio_device_set_parent() - assign parent device to the IIO device object
 * @indio_dev: 		IIO device structure
 * @parent:		reference to parent device object
 *
 * This utility must be called between IIO device allocation
 * (via devm_iio_device_alloc()) & IIO device registration
 * (via iio_device_register() and devm_iio_device_register()).
 * By default, the device allocation will also assign a parent device to
 * the IIO device object. In cases where devm_iio_device_alloc() is used,
 * sometimes the parent device must be different than the device used to
 * manage the allocation.
 * In that case, this helper should be used to change the parent, hence the
 * requirement to call this between allocation & registration.
 **/
static inline void iio_device_set_parent(struct iio_dev *indio_dev,
					 struct device *parent)
{
	indio_dev->dev.parent = parent;
}
751 
/**
 * iio_device_set_drvdata() - Set device driver data
 * @indio_dev: IIO device structure
 * @data: Driver specific data
 *
 * Allows to attach an arbitrary pointer to an IIO device, which can later be
 * retrieved by iio_device_get_drvdata(). Thin wrapper around
 * dev_set_drvdata() on the embedded struct device.
 */
static inline void iio_device_set_drvdata(struct iio_dev *indio_dev, void *data)
{
	dev_set_drvdata(&indio_dev->dev, data);
}
764 
765 /**
766  * iio_device_get_drvdata() - Get device driver data
767  * @indio_dev: IIO device structure
768  *
769  * Returns the data previously set with iio_device_set_drvdata()
770  */
iio_device_get_drvdata(const struct iio_dev * indio_dev)771 static inline void *iio_device_get_drvdata(const struct iio_dev *indio_dev)
772 {
773 	return dev_get_drvdata(&indio_dev->dev);
774 }
775 
776 /*
777  * Used to ensure the iio_priv() structure is aligned to allow that structure
778  * to in turn include IIO_DMA_MINALIGN'd elements such as buffers which
 * must not share cachelines with the rest of the structure, thus making
780  * them safe for use with non-coherent DMA.
781  */
782 #define IIO_DMA_MINALIGN ARCH_DMA_MINALIGN
783 struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv);
784 
785 /* The information at the returned address is guaranteed to be cacheline aligned */
iio_priv(const struct iio_dev * indio_dev)786 static inline void *iio_priv(const struct iio_dev *indio_dev)
787 {
788 	return indio_dev->priv;
789 }
790 
791 void iio_device_free(struct iio_dev *indio_dev);
792 struct iio_dev *devm_iio_device_alloc(struct device *parent, int sizeof_priv);
793 
/*
 * Resource-managed trigger allocation; the wrapper forwards the caller's
 * THIS_MODULE as the owning module so drivers don't pass it explicitly.
 */
#define devm_iio_trigger_alloc(parent, fmt, ...) \
	__devm_iio_trigger_alloc((parent), THIS_MODULE, (fmt), ##__VA_ARGS__)
__printf(3, 4)
struct iio_trigger *__devm_iio_trigger_alloc(struct device *parent,
					     struct module *this_mod,
					     const char *fmt, ...);
/**
 * iio_get_debugfs_dentry() - helper function to get the debugfs_dentry
 * @indio_dev:		IIO device structure for device
 *
 * Returns: the device's debugfs dentry, or NULL when debugfs is disabled.
 **/
#if defined(CONFIG_DEBUG_FS)
struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev);
#else
/* Stub for kernels built without CONFIG_DEBUG_FS: there is no dentry. */
static inline struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev)
{
	return NULL;
}
#endif
812 
813 /**
814  * iio_device_suspend_triggering() - suspend trigger attached to an iio_dev
815  * @indio_dev: iio_dev associated with the device that will have triggers suspended
816  *
 * Returns: 0 if successful, negative otherwise
818  **/
819 int iio_device_suspend_triggering(struct iio_dev *indio_dev);
820 
821 /**
822  * iio_device_resume_triggering() - resume trigger attached to an iio_dev
823  *	that was previously suspended with iio_device_suspend_triggering()
824  * @indio_dev: iio_dev associated with the device that will have triggers resumed
825  *
 * Returns: 0 if successful, negative otherwise
827  **/
828 int iio_device_resume_triggering(struct iio_dev *indio_dev);
829 
/*
 * Read the device mount matrix from the ACPI method named by @acpi_method.
 * NOTE(review): the exact ACPI lookup semantics live in the C implementation;
 * the stub below simply reports "no matrix found" when ACPI is not configured.
 */
#ifdef CONFIG_ACPI
bool iio_read_acpi_mount_matrix(struct device *dev,
				struct iio_mount_matrix *orientation,
				char *acpi_method);
#else
/* !CONFIG_ACPI: no ACPI tables available, so no mount matrix can be read. */
static inline bool iio_read_acpi_mount_matrix(struct device *dev,
					      struct iio_mount_matrix *orientation,
					      char *acpi_method)
{
	return false;
}
#endif
842 
843 /**
844  * iio_get_current_scan_type - Get the current scan type for a channel
845  * @indio_dev:	the IIO device to get the scan type for
846  * @chan:	the channel to get the scan type for
847  *
848  * Most devices only have one scan type per channel and can just access it
849  * directly without calling this function. Core IIO code and drivers that
850  * implement ext_scan_type in the channel spec should use this function to
851  * get the current scan type for a channel.
852  *
853  * Returns: the current scan type for the channel or error.
854  */
855 static inline const struct iio_scan_type
iio_get_current_scan_type(const struct iio_dev * indio_dev,const struct iio_chan_spec * chan)856 *iio_get_current_scan_type(const struct iio_dev *indio_dev,
857 			   const struct iio_chan_spec *chan)
858 {
859 	int ret;
860 
861 	if (chan->has_ext_scan_type) {
862 		ret = indio_dev->info->get_current_scan_type(indio_dev, chan);
863 		if (ret < 0)
864 			return ERR_PTR(ret);
865 
866 		if (ret >= chan->num_ext_scan_type)
867 			return ERR_PTR(-EINVAL);
868 
869 		return &chan->ext_scan_type[ret];
870 	}
871 
872 	return &chan->scan_type;
873 }
874 
875 /**
876  * iio_get_masklength - Get length of the channels mask
877  * @indio_dev: the IIO device to get the masklength for
878  */
iio_get_masklength(const struct iio_dev * indio_dev)879 static inline unsigned int iio_get_masklength(const struct iio_dev *indio_dev)
880 {
881 	return ACCESS_PRIVATE(indio_dev, masklength);
882 }
883 
884 int iio_active_scan_mask_index(struct iio_dev *indio_dev);
885 
/**
 * iio_for_each_active_channel - Iterate over the active channels
 * @indio_dev: the IIO device
 * @chan: Holds the index of the enabled channel
 *
 * Walks every bit set in @indio_dev's active_scan_mask, i.e. each channel
 * currently included in the scan.
 */
#define iio_for_each_active_channel(indio_dev, chan) \
	for_each_set_bit((chan), (indio_dev)->active_scan_mask, \
			 iio_get_masklength(indio_dev))
894 
895 ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals);
896 
897 int iio_str_to_fixpoint(const char *str, int fract_mult, int *integer,
898 	int *fract);
899 
/**
 * IIO_DEGREE_TO_RAD() - Convert degree to rad
 * @deg: A value in degree
 *
 * Fixed-point scale by pi/180 using 314159/18000000 (pi ~= 3.14159);
 * the 9000000 term (half the divisor) rounds to the nearest integer.
 *
 * Returns: the given value converted from degree to rad
 */
#define IIO_DEGREE_TO_RAD(deg) ((9000000ULL + (deg) * 314159ULL) / 18000000ULL)
907 
/**
 * IIO_RAD_TO_DEGREE() - Convert rad to degree
 * @rad: A value in rad
 *
 * Fixed-point scale by 180/pi using 18000000/314159 (pi ~= 3.14159);
 * the 314159/2 term (half the divisor) rounds to the nearest integer.
 *
 * Returns: the given value converted from rad to degree
 */
#define IIO_RAD_TO_DEGREE(rad) \
	((314159ULL / 2 + (rad) * 18000000ULL) / 314159ULL)
916 
/**
 * IIO_G_TO_M_S_2() - Convert g to meter / second**2
 * @g: A value in g
 *
 * Fixed-point scale by standard gravity (9.80665) using 980665/100000.
 * NOTE(review): this truncates rather than rounding to nearest, unlike
 * IIO_M_S_2_TO_G() — kept as-is since callers depend on the exact values.
 *
 * Returns: the given value converted from g to meter / second**2
 */
#define IIO_G_TO_M_S_2(g) (((g) * 980665ULL) / 100000ULL)
924 
/**
 * IIO_M_S_2_TO_G() - Convert meter / second**2 to g
 * @ms2: A value in meter / second**2
 *
 * Fixed-point divide by standard gravity (9.80665) using 100000/980665;
 * the 980665/2 term (half the divisor) rounds to the nearest integer.
 *
 * Returns: the given value converted from meter / second**2 to g
 */
#define IIO_M_S_2_TO_G(ms2) \
	((980665ULL / 2 + (ms2) * 100000ULL) / 980665ULL)
932 
933 #endif /* _INDUSTRIAL_IO_H_ */
934