xref: /linux/include/linux/iio/iio.h (revision c7decec2f2d2ab0366567f9e30c0e1418cece43f)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 
3 /* The industrial I/O core
4  *
5  * Copyright (c) 2008 Jonathan Cameron
6  */
7 #ifndef _INDUSTRIAL_IO_H_
8 #define _INDUSTRIAL_IO_H_
9 
10 #include <linux/align.h>
11 #include <linux/device.h>
12 #include <linux/cdev.h>
13 #include <linux/cleanup.h>
14 #include <linux/compiler_types.h>
15 #include <linux/minmax.h>
16 #include <linux/slab.h>
17 #include <linux/iio/types.h>
18 /* IIO TODO LIST */
19 /*
20  * Provide means of adjusting timer accuracy.
21  * Currently assumes nano seconds.
22  */
23 
24 struct fwnode_reference_args;
25 
/**
 * enum iio_shared_by - scope over which an attribute is shared
 * @IIO_SEPARATE:	attribute belongs to a single channel
 * @IIO_SHARED_BY_TYPE:	attribute is shared by all channels of the same type
 * @IIO_SHARED_BY_DIR:	attribute is shared by all channels of the same
 *			direction
 * @IIO_SHARED_BY_ALL:	attribute is shared by all channels
 */
enum iio_shared_by {
	IIO_SEPARATE,
	IIO_SHARED_BY_TYPE,
	IIO_SHARED_BY_DIR,
	IIO_SHARED_BY_ALL
};
32 
/**
 * enum iio_endian - byte ordering of channel sample data
 * @IIO_CPU:	native CPU byte order
 * @IIO_BE:	big endian
 * @IIO_LE:	little endian
 */
enum iio_endian {
	IIO_CPU,
	IIO_BE,
	IIO_LE,
};
38 
39 struct iio_chan_spec;
40 struct iio_dev;
41 
42 /**
43  * struct iio_chan_spec_ext_info - Extended channel info attribute
44  * @name:	Info attribute name
45  * @shared:	Whether this attribute is shared between all channels.
46  * @read:	Read callback for this info attribute, may be NULL.
47  * @write:	Write callback for this info attribute, may be NULL.
48  * @private:	Data private to the driver.
49  */
50 struct iio_chan_spec_ext_info {
51 	const char *name;
52 	enum iio_shared_by shared;
53 	ssize_t (*read)(struct iio_dev *, uintptr_t private,
54 			struct iio_chan_spec const *, char *buf);
55 	ssize_t (*write)(struct iio_dev *, uintptr_t private,
56 			 struct iio_chan_spec const *, const char *buf,
57 			 size_t len);
58 	uintptr_t private;
59 };
60 
/**
 * struct iio_enum - Enum channel info attribute
 * @items:	An array of strings.
 * @num_items:	Length of the item array.
 * @set:	Set callback function, may be NULL.
 * @get:	Get callback function, may be NULL.
 *
 * The iio_enum struct can be used to implement enum style channel attributes.
 * Enum style attributes are those which have a set of strings which map to
 * unsigned integer values. The IIO enum helper code takes care of mapping
 * between value and string as well as generating a "_available" file which
 * contains a list of all available items. The set callback will be called when
 * the attribute is updated. The last parameter is the index to the newly
 * activated item. The get callback will be used to query the currently active
 * item and is supposed to return the index for it.
 */
struct iio_enum {
	const char * const *items;
	unsigned int num_items;
	/* Parameter names added; @idx is the index into @items being set. */
	int (*set)(struct iio_dev *indio_dev, const struct iio_chan_spec *chan,
		   unsigned int idx);
	int (*get)(struct iio_dev *indio_dev, const struct iio_chan_spec *chan);
};
83 
84 ssize_t iio_enum_available_read(struct iio_dev *indio_dev,
85 	uintptr_t priv, const struct iio_chan_spec *chan, char *buf);
86 ssize_t iio_enum_read(struct iio_dev *indio_dev,
87 	uintptr_t priv, const struct iio_chan_spec *chan, char *buf);
88 ssize_t iio_enum_write(struct iio_dev *indio_dev,
89 	uintptr_t priv, const struct iio_chan_spec *chan, const char *buf,
90 	size_t len);
91 
/**
 * IIO_ENUM() - Initialize enum extended channel attribute
 * @_name:	Attribute name
 * @_shared:	Whether the attribute is shared between all channels
 * @_e:		Pointer to an iio_enum struct
 *
 * This should usually be used together with IIO_ENUM_AVAILABLE().
 * The iio_enum pointer is stashed in the ext_info private field so the
 * iio_enum_read()/iio_enum_write() helpers can recover it.
 */
#define IIO_ENUM(_name, _shared, _e) \
{ \
	.name = (_name), \
	.shared = (_shared), \
	.read = iio_enum_read, \
	.write = iio_enum_write, \
	.private = (uintptr_t)(_e), \
}
108 
/**
 * IIO_ENUM_AVAILABLE() - Initialize enum available extended channel attribute
 * @_name:	Attribute name ("_available" will be appended to the name)
 * @_shared:	Whether the attribute is shared between all channels
 * @_e:		Pointer to an iio_enum struct
 *
 * Creates a read only attribute which lists all the available enum items in a
 * space separated list. This should usually be used together with IIO_ENUM()
 */
#define IIO_ENUM_AVAILABLE(_name, _shared, _e) \
{ \
	.name = (_name "_available"), \
	.shared = (_shared), \
	.read = iio_enum_available_read, \
	.private = (uintptr_t)(_e), \
}
125 
/**
 * struct iio_mount_matrix - iio mounting matrix
 * @rotation: 3 dimensional space rotation matrix defining sensor alignment with
 *            main hardware. Stored as nine strings, one per matrix element.
 */
struct iio_mount_matrix {
	const char *rotation[9];
};
134 
135 ssize_t iio_show_mount_matrix(struct iio_dev *indio_dev, uintptr_t priv,
136 			      const struct iio_chan_spec *chan, char *buf);
137 int iio_read_mount_matrix(struct device *dev, struct iio_mount_matrix *matrix);
138 
139 typedef const struct iio_mount_matrix *
140 	(iio_get_mount_matrix_t)(const struct iio_dev *indio_dev,
141 				 const struct iio_chan_spec *chan);
142 
/**
 * IIO_MOUNT_MATRIX() - Initialize mount matrix extended channel attribute
 * @_shared:	Whether the attribute is shared between all channels
 * @_get:	Pointer to an iio_get_mount_matrix_t accessor
 *
 * The accessor pointer is passed to iio_show_mount_matrix() through the
 * ext_info private field.
 */
#define IIO_MOUNT_MATRIX(_shared, _get) \
{ \
	.name = "mount_matrix", \
	.shared = (_shared), \
	.read = iio_show_mount_matrix, \
	.private = (uintptr_t)(_get), \
}
155 
/**
 * struct iio_event_spec - specification for a channel event
 * @type:		    Type of the event
 * @dir:		    Direction of the event
 * @mask_separate:	    Bit mask of enum iio_event_info values. Attributes
 *			    set in this mask will be registered per channel.
 * @mask_shared_by_type:    Bit mask of enum iio_event_info values. Attributes
 *			    set in this mask will be shared by channel type.
 * @mask_shared_by_dir:	    Bit mask of enum iio_event_info values. Attributes
 *			    set in this mask will be shared by channel type and
 *			    direction.
 * @mask_shared_by_all:	    Bit mask of enum iio_event_info values. Attributes
 *			    set in this mask will be shared by all channels.
 *
 * The mask fields mirror the info_mask_* sharing scheme used in
 * struct iio_chan_spec.
 */
struct iio_event_spec {
	enum iio_event_type type;
	enum iio_event_direction dir;
	unsigned long mask_separate;
	unsigned long mask_shared_by_type;
	unsigned long mask_shared_by_dir;
	unsigned long mask_shared_by_all;
};
178 
/**
 * struct iio_scan_type - specification for channel data format in buffer
 * @sign:		's' or 'u' to specify signed or unsigned
 * @realbits:		Number of valid bits of data
 * @storagebits:	Realbits + padding; the total number of bits a sample
 *			occupies in the buffer
 * @shift:		Shift right by this before masking out realbits.
 * @repeat:		Number of times real/storage bits repeats. When the
 *			repeat element is more than 1, then the type element in
 *			sysfs will show a repeat value. Otherwise, the number
 *			of repetitions is omitted.
 * @endianness:		little or big endian
 */
struct iio_scan_type {
	char	sign;
	u8	realbits;
	u8	storagebits;
	u8	shift;
	u8	repeat;
	enum iio_endian endianness;
};
199 
200 /**
201  * struct iio_chan_spec - specification of a single channel
202  * @type:		What type of measurement is the channel making.
203  * @channel:		What number do we wish to assign the channel.
204  * @channel2:		If there is a second number for a differential
205  *			channel then this is it. If modified is set then the
206  *			value here specifies the modifier.
207  * @address:		Driver specific identifier.
208  * @scan_index:		Monotonic index to give ordering in scans when read
209  *			from a buffer.
210  * @scan_type:		struct describing the scan type - mutually exclusive
211  *			with ext_scan_type.
212  * @ext_scan_type:	Used in rare cases where there is more than one scan
213  *			format for a channel. When this is used, the flag
214  *			has_ext_scan_type must be set and the driver must
215  *			implement get_current_scan_type in struct iio_info.
216  * @num_ext_scan_type:	Number of elements in ext_scan_type.
217  * @info_mask_separate: What information is to be exported that is specific to
218  *			this channel.
219  * @info_mask_separate_available: What availability information is to be
220  *			exported that is specific to this channel.
221  * @info_mask_shared_by_type: What information is to be exported that is shared
222  *			by all channels of the same type.
223  * @info_mask_shared_by_type_available: What availability information is to be
224  *			exported that is shared by all channels of the same
225  *			type.
226  * @info_mask_shared_by_dir: What information is to be exported that is shared
227  *			by all channels of the same direction.
228  * @info_mask_shared_by_dir_available: What availability information is to be
229  *			exported that is shared by all channels of the same
230  *			direction.
231  * @info_mask_shared_by_all: What information is to be exported that is shared
232  *			by all channels.
233  * @info_mask_shared_by_all_available: What availability information is to be
234  *			exported that is shared by all channels.
235  * @event_spec:		Array of events which should be registered for this
236  *			channel.
237  * @num_event_specs:	Size of the event_spec array.
238  * @ext_info:		Array of extended info attributes for this channel.
239  *			The array is NULL terminated, the last element should
240  *			have its name field set to NULL.
 * @extend_name:	Allows labeling of channel attributes with an
 *			informative name. Note this has no effect on event
 *			codes etc., unlike modifiers.
244  *			This field is deprecated in favour of providing
245  *			iio_info->read_label() to override the label, which
246  *			unlike @extend_name does not affect sysfs filenames.
247  * @datasheet_name:	A name used in in-kernel mapping of channels. It should
248  *			correspond to the first name that the channel is referred
249  *			to by in the datasheet (e.g. IND), or the nearest
250  *			possible compound name (e.g. IND-INC).
251  * @modified:		Does a modifier apply to this channel. What these are
252  *			depends on the channel type.  Modifier is set in
253  *			channel2. Examples are IIO_MOD_X for axial sensors about
254  *			the 'x' axis.
255  * @indexed:		Specify the channel has a numerical index. If not,
256  *			the channel index number will be suppressed for sysfs
257  *			attributes but not for event codes.
258  * @output:		Channel is output.
259  * @differential:	Channel is differential.
260  * @has_ext_scan_type:	True if ext_scan_type is used instead of scan_type.
261  */
struct iio_chan_spec {
	enum iio_chan_type	type;
	int			channel;
	int			channel2;
	unsigned long		address;
	int			scan_index;
	/*
	 * scan_type and ext_scan_type/num_ext_scan_type are mutually
	 * exclusive; the has_ext_scan_type flag below selects which union
	 * member is valid.
	 */
	union {
		struct iio_scan_type scan_type;
		struct {
			const struct iio_scan_type *ext_scan_type;
			unsigned int num_ext_scan_type;
		};
	};
	unsigned long			info_mask_separate;
	unsigned long			info_mask_separate_available;
	unsigned long			info_mask_shared_by_type;
	unsigned long			info_mask_shared_by_type_available;
	unsigned long			info_mask_shared_by_dir;
	unsigned long			info_mask_shared_by_dir_available;
	unsigned long			info_mask_shared_by_all;
	unsigned long			info_mask_shared_by_all_available;
	const struct iio_event_spec *event_spec;
	unsigned int		num_event_specs;
	const struct iio_chan_spec_ext_info *ext_info;
	const char		*extend_name;
	const char		*datasheet_name;
	/* single-bit flags, documented in the kerneldoc above */
	unsigned int		modified:1;
	unsigned int		indexed:1;
	unsigned int		output:1;
	unsigned int		differential:1;
	unsigned int		has_ext_scan_type:1;
};
294 
295 
296 /**
297  * iio_channel_has_info() - Checks whether a channel supports a info attribute
298  * @chan: The channel to be queried
299  * @type: Type of the info attribute to be checked
300  *
301  * Returns true if the channels supports reporting values for the given info
302  * attribute type, false otherwise.
303  */
304 static inline bool iio_channel_has_info(const struct iio_chan_spec *chan,
305 	enum iio_chan_info_enum type)
306 {
307 	return (chan->info_mask_separate & BIT(type)) |
308 		(chan->info_mask_shared_by_type & BIT(type)) |
309 		(chan->info_mask_shared_by_dir & BIT(type)) |
310 		(chan->info_mask_shared_by_all & BIT(type));
311 }
312 
313 /**
314  * iio_channel_has_available() - Checks if a channel has an available attribute
315  * @chan: The channel to be queried
316  * @type: Type of the available attribute to be checked
317  *
318  * Returns true if the channel supports reporting available values for the
319  * given attribute type, false otherwise.
320  */
321 static inline bool iio_channel_has_available(const struct iio_chan_spec *chan,
322 					     enum iio_chan_info_enum type)
323 {
324 	return (chan->info_mask_separate_available & BIT(type)) |
325 		(chan->info_mask_shared_by_type_available & BIT(type)) |
326 		(chan->info_mask_shared_by_dir_available & BIT(type)) |
327 		(chan->info_mask_shared_by_all_available & BIT(type));
328 }
329 
/*
 * IIO_CHAN_SOFT_TIMESTAMP() - channel spec for a software generated timestamp
 * @_si: scan index to assign to the timestamp channel
 *
 * Expands to a struct iio_chan_spec initializer describing a signed 64 bit
 * timestamp stored at scan index @_si.
 */
#define IIO_CHAN_SOFT_TIMESTAMP(_si) {					\
	.type = IIO_TIMESTAMP,						\
	.channel = -1,							\
	.scan_index = _si,						\
	.scan_type = {							\
		.sign = 's',						\
		.realbits = 64,					\
		.storagebits = 64,					\
		},							\
}
340 
341 s64 iio_get_time_ns(const struct iio_dev *indio_dev);
342 
/*
 * Device operating modes
 * @INDIO_DIRECT_MODE: There is an access to either:
 * a) The last single value available for devices that do not provide
 *    on-demand reads.
 * b) A new value after performing an on-demand read otherwise.
 * On most devices, this is a single-shot read. On some devices with data
 * streams without an 'on-demand' function, this might also be the 'last value'
 * feature. Above all, this mode internally means that we are not in any of the
 * other modes, and sysfs reads should work.
 * Device drivers should inform the core if they support this mode.
 * @INDIO_BUFFER_TRIGGERED: Common mode when dealing with kfifo buffers.
 * It indicates that an explicit trigger is required. This requests the core to
 * attach a poll function when enabling the buffer, which is indicated by the
 * _TRIGGERED suffix.
 * The core will ensure this mode is set when registering a triggered buffer
 * with iio_triggered_buffer_setup().
 * @INDIO_BUFFER_SOFTWARE: Another kfifo buffer mode, but not event triggered.
 * No poll function can be attached because there is no triggered infrastructure
 * we can use to cause capture. There is a kfifo that the driver will fill, but
 * not "only one scan at a time". Typically, hardware will have a buffer that
 * can hold multiple scans. Software may read one or more scans at a single time
 * and push the available data to a Kfifo. This means the core will not attach
 * any poll function when enabling the buffer.
 * The core will ensure this mode is set when registering a simple kfifo buffer
 * with devm_iio_kfifo_buffer_setup().
 * @INDIO_BUFFER_HARDWARE: For specific hardware, if unsure do not use this mode.
 * Same as above but this time the buffer is not a kfifo where we have direct
 * access to the data. Instead, the consumer driver must access the data through
 * non software visible channels (or DMA when there is no demux possible in
 * software)
 * The core will ensure this mode is set when registering a dmaengine buffer
 * with devm_iio_dmaengine_buffer_setup().
 * @INDIO_EVENT_TRIGGERED: Very unusual mode.
 * Triggers usually refer to an external event which will start data capture.
 * Here it is kind of the opposite: a particular state of the data might
 * produce an event, and it is that event which gets captured. We don't
 * necessarily have access to the data itself, but to the event produced. For
 * example, this can be a threshold detector. The internal path of this mode is
 * very close to the INDIO_BUFFER_TRIGGERED mode.
 * The core will ensure this mode is set when registering a triggered event.
 * @INDIO_HARDWARE_TRIGGERED: Very unusual mode.
 * Here, triggers can result in data capture and can be routed to multiple
 * hardware components, which make them close to regular triggers in the way
 * they must be managed by the core, but without the entire interrupts/poll
 * functions burden. Interrupts are irrelevant as the data flow is hardware
 * mediated and distributed.
 */
#define INDIO_DIRECT_MODE		0x01
#define INDIO_BUFFER_TRIGGERED		0x02
#define INDIO_BUFFER_SOFTWARE		0x04
#define INDIO_BUFFER_HARDWARE		0x08
#define INDIO_EVENT_TRIGGERED		0x10
#define INDIO_HARDWARE_TRIGGERED	0x20

#define INDIO_ALL_BUFFER_MODES					\
	(INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE | INDIO_BUFFER_SOFTWARE)

#define INDIO_ALL_TRIGGERED_MODES	\
	(INDIO_BUFFER_TRIGGERED		\
	 | INDIO_EVENT_TRIGGERED	\
	 | INDIO_HARDWARE_TRIGGERED)

/* Maximum number of integer elements a multi-element raw read may return */
#define INDIO_MAX_RAW_ELEMENTS		4
407 
/**
 * struct iio_val_int_plus_micro - integer value with micro-units fraction
 * @integer: integer part of the value
 * @micro: fractional part of the value, in millionths
 */
struct iio_val_int_plus_micro {
	int integer;
	int micro;
};
412 
413 struct iio_trigger; /* forward declaration */
414 
415 /**
416  * struct iio_info - constant information about device
417  * @event_attrs:	event control attributes
418  * @attrs:		general purpose device attributes
419  * @read_raw:		function to request a value from the device.
420  *			mask specifies which value. Note 0 means a reading of
421  *			the channel in question.  Return value will specify the
422  *			type of value returned by the device. val and val2 will
423  *			contain the elements making up the returned value.
424  * @read_raw_multi:	function to return values from the device.
425  *			mask specifies which value. Note 0 means a reading of
426  *			the channel in question.  Return value will specify the
427  *			type of value returned by the device. vals pointer
428  *			contain the elements making up the returned value.
429  *			max_len specifies maximum number of elements
430  *			vals pointer can contain. val_len is used to return
431  *			length of valid elements in vals.
432  * @read_avail:		function to return the available values from the device.
433  *			mask specifies which value. Note 0 means the available
434  *			values for the channel in question.  Return value
435  *			specifies if a IIO_AVAIL_LIST or a IIO_AVAIL_RANGE is
436  *			returned in vals. The type of the vals are returned in
437  *			type and the number of vals is returned in length. For
438  *			ranges, there are always three vals returned; min, step
439  *			and max. For lists, all possible values are enumerated.
440  * @write_raw:		function to write a value to the device.
441  *			Parameters are the same as for read_raw.
442  * @read_label:		function to request label name for a specified label,
443  *			for better channel identification.
444  * @write_raw_get_fmt:	callback function to query the expected
445  *			format/precision. If not set by the driver, write_raw
446  *			returns IIO_VAL_INT_PLUS_MICRO.
447  * @read_event_config:	find out if the event is enabled.
448  * @write_event_config:	set if the event is enabled.
449  * @read_event_value:	read a configuration value associated with the event.
450  * @write_event_value:	write a configuration value for the event.
451  * @read_event_label:	function to request label name for a specified label,
452  *			for better event identification.
453  * @validate_trigger:	function to validate the trigger when the
454  *			current trigger gets changed.
455  * @get_current_scan_type: must be implemented by drivers that use ext_scan_type
456  *			in the channel spec to return the index of the currently
457  *			active ext_scan type for a channel.
458  * @update_scan_mode:	function to configure device and scan buffer when
459  *			channels have changed
460  * @debugfs_reg_access:	function to read or write register value of device
461  * @fwnode_xlate:	fwnode based function pointer to obtain channel specifier index.
462  * @hwfifo_set_watermark: function pointer to set the current hardware
463  *			fifo watermark level; see hwfifo_* entries in
464  *			Documentation/ABI/testing/sysfs-bus-iio for details on
465  *			how the hardware fifo operates
466  * @hwfifo_flush_to_buffer: function pointer to flush the samples stored
467  *			in the hardware fifo to the device buffer. The driver
468  *			should not flush more than count samples. The function
469  *			must return the number of samples flushed, 0 if no
470  *			samples were flushed or a negative integer if no samples
471  *			were flushed and there was an error.
472  **/
struct iio_info {
	/* sysfs attribute groups: event attributes and general attributes */
	const struct attribute_group	*event_attrs;
	const struct attribute_group	*attrs;

	/* raw channel value access; semantics described in kerneldoc above */
	int (*read_raw)(struct iio_dev *indio_dev,
			struct iio_chan_spec const *chan,
			int *val,
			int *val2,
			long mask);

	int (*read_raw_multi)(struct iio_dev *indio_dev,
			struct iio_chan_spec const *chan,
			int max_len,
			int *vals,
			int *val_len,
			long mask);

	int (*read_avail)(struct iio_dev *indio_dev,
			  struct iio_chan_spec const *chan,
			  const int **vals,
			  int *type,
			  int *length,
			  long mask);

	int (*write_raw)(struct iio_dev *indio_dev,
			 struct iio_chan_spec const *chan,
			 int val,
			 int val2,
			 long mask);

	int (*read_label)(struct iio_dev *indio_dev,
			 struct iio_chan_spec const *chan,
			 char *label);

	int (*write_raw_get_fmt)(struct iio_dev *indio_dev,
			 struct iio_chan_spec const *chan,
			 long mask);

	/* event enable/disable, value and label callbacks */
	int (*read_event_config)(struct iio_dev *indio_dev,
				 const struct iio_chan_spec *chan,
				 enum iio_event_type type,
				 enum iio_event_direction dir);

	int (*write_event_config)(struct iio_dev *indio_dev,
				  const struct iio_chan_spec *chan,
				  enum iio_event_type type,
				  enum iio_event_direction dir,
				  bool state);

	int (*read_event_value)(struct iio_dev *indio_dev,
				const struct iio_chan_spec *chan,
				enum iio_event_type type,
				enum iio_event_direction dir,
				enum iio_event_info info, int *val, int *val2);

	int (*write_event_value)(struct iio_dev *indio_dev,
				 const struct iio_chan_spec *chan,
				 enum iio_event_type type,
				 enum iio_event_direction dir,
				 enum iio_event_info info, int val, int val2);

	int (*read_event_label)(struct iio_dev *indio_dev,
				struct iio_chan_spec const *chan,
				enum iio_event_type type,
				enum iio_event_direction dir,
				char *label);

	/* trigger validation and buffer/scan plumbing */
	int (*validate_trigger)(struct iio_dev *indio_dev,
				struct iio_trigger *trig);
	int (*get_current_scan_type)(const struct iio_dev *indio_dev,
				     const struct iio_chan_spec *chan);
	int (*update_scan_mode)(struct iio_dev *indio_dev,
				const unsigned long *scan_mask);
	/* debugfs register access, firmware channel lookup, hardware fifo */
	int (*debugfs_reg_access)(struct iio_dev *indio_dev,
				  unsigned int reg, unsigned int writeval,
				  unsigned int *readval);
	int (*fwnode_xlate)(struct iio_dev *indio_dev,
			    const struct fwnode_reference_args *iiospec);
	int (*hwfifo_set_watermark)(struct iio_dev *indio_dev, unsigned int val);
	int (*hwfifo_flush_to_buffer)(struct iio_dev *indio_dev,
				      unsigned int count);
};
555 
/**
 * struct iio_buffer_setup_ops - buffer setup related callbacks
 * @preenable:		[DRIVER] function to run prior to marking buffer enabled
 * @postenable:		[DRIVER] function to run after marking buffer enabled
 * @predisable:		[DRIVER] function to run prior to marking buffer
 *			disabled
 * @postdisable:	[DRIVER] function to run after marking buffer disabled
 * @validate_scan_mask: [DRIVER] function callback to check whether a given
 *			scan mask is valid for the device.
 */
struct iio_buffer_setup_ops {
	/* Parameter names added; kernel style keeps names in prototypes. */
	int (*preenable)(struct iio_dev *indio_dev);
	int (*postenable)(struct iio_dev *indio_dev);
	int (*predisable)(struct iio_dev *indio_dev);
	int (*postdisable)(struct iio_dev *indio_dev);
	bool (*validate_scan_mask)(struct iio_dev *indio_dev,
				   const unsigned long *scan_mask);
};
574 
575 /**
576  * struct iio_dev - industrial I/O device
577  * @modes:		[DRIVER] bitmask listing all the operating modes
578  *			supported by the IIO device. This list should be
579  *			initialized before registering the IIO device. It can
580  *			also be filed up by the IIO core, as a result of
581  *			enabling particular features in the driver
582  *			(see iio_triggered_event_setup()).
583  * @dev:		[DRIVER] device structure, should be assigned a parent
584  *			and owner
585  * @buffer:		[DRIVER] any buffer present
586  * @scan_bytes:		[INTERN] num bytes captured to be fed to buffer demux
587  * @available_scan_masks: [DRIVER] optional array of allowed bitmasks. Sort the
588  *			   array in order of preference, the most preferred
589  *			   masks first.
590  * @masklength:		[INTERN] the length of the mask established from
591  *			channels
592  * @active_scan_mask:	[INTERN] union of all scan masks requested by buffers
593  * @scan_timestamp:	[INTERN] set if any buffers have requested timestamp
594  * @trig:		[INTERN] current device trigger (buffer modes)
595  * @pollfunc:		[DRIVER] function run on trigger being received
596  * @pollfunc_event:	[DRIVER] function run on events trigger being received
597  * @channels:		[DRIVER] channel specification structure table
598  * @num_channels:	[DRIVER] number of channels specified in @channels.
599  * @name:		[DRIVER] name of the device.
600  * @label:              [DRIVER] unique name to identify which device this is
601  * @info:		[DRIVER] callbacks and constant info from driver
602  * @setup_ops:		[DRIVER] callbacks to call before and after buffer
603  *			enable/disable
604  * @priv:		[DRIVER] reference to driver's private information
605  *			**MUST** be accessed **ONLY** via iio_priv() helper
606  */
struct iio_dev {
	int				modes;
	struct device			dev;

	/* buffer state */
	struct iio_buffer		*buffer;
	int				scan_bytes;

	/* scan mask bookkeeping; __private fields are core-internal */
	const unsigned long		*available_scan_masks;
	unsigned int			__private masklength;
	const unsigned long		*active_scan_mask;
	bool				__private scan_timestamp;
	struct iio_trigger		*trig;
	struct iio_poll_func		*pollfunc;
	struct iio_poll_func		*pollfunc_event;

	/* channel table supplied by the driver */
	struct iio_chan_spec const	*channels;
	int				num_channels;

	const char			*name;
	const char			*label;
	const struct iio_info		*info;
	const struct iio_buffer_setup_ops	*setup_ops;

	/* driver private data; access only via iio_priv() */
	void				*__private priv;
};
632 
633 int iio_device_id(struct iio_dev *indio_dev);
634 int iio_device_get_current_mode(struct iio_dev *indio_dev);
635 bool iio_buffer_enabled(struct iio_dev *indio_dev);
636 
637 const struct iio_chan_spec
638 *iio_find_channel_from_si(struct iio_dev *indio_dev, int si);
/**
 * iio_device_register() - register a device with the IIO subsystem
 * @indio_dev:		Device structure filled by the device driver
 *
 * Returns: 0 on success, negative error number on failure.
 **/
#define iio_device_register(indio_dev) \
	__iio_device_register((indio_dev), THIS_MODULE)
int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod);
646 void iio_device_unregister(struct iio_dev *indio_dev);
/**
 * devm_iio_device_register - Resource-managed iio_device_register()
 * @dev:	Device to allocate iio_dev for
 * @indio_dev:	Device structure filled by the device driver
 *
 * Managed iio_device_register().  The IIO device registered with this
 * function is automatically unregistered on driver detach. This function
 * calls iio_device_register() internally. Refer to that function for more
 * information.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
#define devm_iio_device_register(dev, indio_dev) \
	__devm_iio_device_register((dev), (indio_dev), THIS_MODULE)
662 int __devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev,
663 			       struct module *this_mod);
664 int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp);
665 
666 void __iio_dev_mode_lock(struct iio_dev *indio_dev) __acquires(indio_dev);
667 void __iio_dev_mode_unlock(struct iio_dev *indio_dev) __releases(indio_dev);
668 
669 /*
670  * Helper functions that allow claim and release of direct mode
671  * in a fashion that doesn't generate many false positives from sparse.
672  * Note this must remain static inline in the header so that sparse
673  * can see the __acquires() and __releases() annotations.
674  */
675 
/**
 * iio_device_claim_direct() - Keep device in direct mode
 * @indio_dev:	the iio_dev associated with the device
 *
 * On success the device is guaranteed to stay in direct mode until
 * iio_device_release_direct() is called; pair every successful claim
 * with that release.
 *
 * Returns: true on success, false on failure.
 */
static inline bool iio_device_claim_direct(struct iio_dev *indio_dev)
{
	__iio_dev_mode_lock(indio_dev);

	/* Direct mode means no buffer is currently enabled. */
	if (!iio_buffer_enabled(indio_dev))
		return true;

	__iio_dev_mode_unlock(indio_dev);
	return false;
}
698 
/**
 * iio_device_release_direct() - Releases claim on direct mode
 * @indio_dev:	the iio_dev associated with the device
 *
 * Release the claim. Device is no longer guaranteed to stay
 * in direct mode.
 *
 * Only call after a successful iio_device_claim_direct().
 */
#define iio_device_release_direct(indio_dev) __iio_dev_mode_unlock(indio_dev)
709 
/**
 * iio_device_try_claim_buffer_mode() - Keep device in buffer mode
 * @indio_dev:	the iio_dev associated with the device
 *
 * On success the device is guaranteed to stay in buffer mode until
 * iio_device_release_buffer_mode() is called; pair every successful
 * claim with that release.
 *
 * Returns: true on success, false on failure.
 */
static inline bool iio_device_try_claim_buffer_mode(struct iio_dev *indio_dev)
{
	__iio_dev_mode_lock(indio_dev);

	/* Buffer mode requires an enabled buffer. */
	if (iio_buffer_enabled(indio_dev))
		return true;

	__iio_dev_mode_unlock(indio_dev);
	return false;
}
732 
/**
 * iio_device_release_buffer_mode() - releases claim on buffer mode
 * @indio_dev:	the iio_dev associated with the device
 *
 * Release the claim. Device is no longer guaranteed to stay
 * in buffer mode.
 *
 * Only call after a successful iio_device_try_claim_buffer_mode().
 */
#define iio_device_release_buffer_mode(indio_dev) __iio_dev_mode_unlock(indio_dev)
743 
/*
 * These classes are not meant to be used directly by drivers (hence the
 * __priv__ prefix). Instead, documented wrapper macros are provided below to
 * enforce the use of ACQUIRE() or guard() semantics and avoid the problematic
 * scoped guard variants.
 */
DEFINE_GUARD(__priv__iio_dev_mode_lock, struct iio_dev *,
	     __iio_dev_mode_lock(_T), __iio_dev_mode_unlock(_T));
/* Conditional variant: only succeeds when direct mode could be claimed. */
DEFINE_GUARD_COND(__priv__iio_dev_mode_lock, _try_direct,
		  iio_device_claim_direct(_T));
754 
/**
 * IIO_DEV_ACQUIRE_DIRECT_MODE() - Tries to acquire the direct mode lock with
 *				   automatic release
 * @dev: IIO device instance
 * @claim: Variable identifier to store acquire result
 *
 * Tries to acquire the direct mode lock with cleanup ACQUIRE() semantics and
 * automatically releases it at the end of the scope. It must always be paired
 * with IIO_DEV_ACQUIRE_FAILED(), for example (notice the scope braces)::
 *
 *	switch() {
 *	case IIO_CHAN_INFO_RAW: {
 *		IIO_DEV_ACQUIRE_DIRECT_MODE(indio_dev, claim);
 *		if (IIO_DEV_ACQUIRE_FAILED(claim))
 *			return -EBUSY;
 *
 *		...
 *	}
 *	case IIO_CHAN_INFO_SCALE:
 *		...
 *	...
 *	}
 *
 * Context: Can sleep
 */
#define IIO_DEV_ACQUIRE_DIRECT_MODE(dev, claim) \
	ACQUIRE(__priv__iio_dev_mode_lock_try_direct, claim)(dev)
782 
/**
 * IIO_DEV_ACQUIRE_FAILED() - ACQUIRE_ERR() wrapper
 * @claim: The claim variable passed to IIO_DEV_ACQUIRE_*_MODE()
 *
 * Checks whether the preceding IIO_DEV_ACQUIRE_*_MODE() claim succeeded; on
 * failure, callers are expected to bail out (typically with -EBUSY, see the
 * example in IIO_DEV_ACQUIRE_DIRECT_MODE()).
 *
 * Return: true if failed to acquire the mode, otherwise false.
 */
#define IIO_DEV_ACQUIRE_FAILED(claim) \
	ACQUIRE_ERR(__priv__iio_dev_mode_lock_try_direct, &(claim))
791 
/**
 * IIO_DEV_GUARD_CURRENT_MODE() - Acquires the mode lock with automatic release
 * @dev: IIO device instance
 *
 * Acquires the mode lock with cleanup guard() semantics. It is usually paired
 * with iio_buffer_enabled().
 *
 * This should *not* be used to protect internal driver state and its use in
 * general is *strongly* discouraged. Use any of the IIO_DEV_ACQUIRE_*_MODE()
 * variants.
 *
 * Context: Can sleep
 */
#define IIO_DEV_GUARD_CURRENT_MODE(dev) \
	guard(__priv__iio_dev_mode_lock)(dev)
807 
808 extern const struct bus_type iio_bus_type;
809 
810 /**
811  * iio_device_put() - reference counted deallocation of struct device
812  * @indio_dev: IIO device structure containing the device
813  **/
814 static inline void iio_device_put(struct iio_dev *indio_dev)
815 {
816 	if (indio_dev)
817 		put_device(&indio_dev->dev);
818 }
819 
820 clockid_t iio_device_get_clock(const struct iio_dev *indio_dev);
821 int iio_device_set_clock(struct iio_dev *indio_dev, clockid_t clock_id);
822 
/**
 * dev_to_iio_dev() - Get IIO device struct from a device struct
 * @dev: 		The device embedded in the IIO device
 *
 * Note: The device must be an IIO device, otherwise the result is undefined.
 *
 * Returns: The struct iio_dev in which @dev is embedded.
 */
static inline struct iio_dev *dev_to_iio_dev(struct device *dev)
{
	return container_of(dev, struct iio_dev, dev);
}
833 
834 /**
835  * iio_device_get() - increment reference count for the device
836  * @indio_dev: 		IIO device structure
837  *
838  * Returns: The passed IIO device
839  **/
840 static inline struct iio_dev *iio_device_get(struct iio_dev *indio_dev)
841 {
842 	return indio_dev ? dev_to_iio_dev(get_device(&indio_dev->dev)) : NULL;
843 }
844 
/**
 * iio_device_set_parent() - assign parent device to the IIO device object
 * @indio_dev: 		IIO device structure
 * @parent:		reference to parent device object
 *
 * This utility must be called between IIO device allocation
 * (via devm_iio_device_alloc()) & IIO device registration
 * (via iio_device_register() and devm_iio_device_register()).
 * By default, the device allocation will also assign a parent device to
 * the IIO device object. In cases where devm_iio_device_alloc() is used,
 * sometimes the parent device must be different than the device used to
 * manage the allocation.
 * In that case, this helper should be used to change the parent, hence the
 * requirement to call this between allocation & registration.
 **/
static inline void iio_device_set_parent(struct iio_dev *indio_dev,
					 struct device *parent)
{
	indio_dev->dev.parent = parent;
}
865 
/**
 * iio_device_set_drvdata() - Set device driver data
 * @indio_dev: IIO device structure
 * @data: Driver specific data
 *
 * Allows attaching an arbitrary pointer to an IIO device, which can later be
 * retrieved by iio_device_get_drvdata().
 */
static inline void iio_device_set_drvdata(struct iio_dev *indio_dev, void *data)
{
	dev_set_drvdata(&indio_dev->dev, data);
}
878 
/**
 * iio_device_get_drvdata() - Get device driver data
 * @indio_dev: IIO device structure
 *
 * Returns: The data previously set with iio_device_set_drvdata()
 */
static inline void *iio_device_get_drvdata(const struct iio_dev *indio_dev)
{
	return dev_get_drvdata(&indio_dev->dev);
}
889 
/*
 * Used to ensure the iio_priv() structure is aligned to allow that structure
 * to in turn include IIO_DMA_MINALIGN'd elements such as buffers which
 * must not share cachelines with the rest of the structure, thus making
 * them safe for use with non-coherent DMA.
 *
 * A number of drivers also use this on buffers that include a 64-bit timestamp
 * that is used with iio_push_to_buffers_with_ts(). Therefore, in the case where
 * DMA alignment is not sufficient for proper timestamp alignment, we align to
 * 8 bytes instead.
 */
#define IIO_DMA_MINALIGN MAX(ARCH_DMA_MINALIGN, sizeof(s64))
902 
/*
 * Internal helper: rounds @count up to a whole multiple of
 * sizeof(s64)/sizeof(type) elements and appends one more such run, i.e. 8
 * extra bytes, to hold the timestamp. Use the documented
 * IIO_DECLARE_*BUFFER_WITH_TS() wrappers below instead.
 */
#define __IIO_DECLARE_BUFFER_WITH_TS(type, name, count) \
	type name[ALIGN((count), sizeof(s64) / sizeof(type)) + sizeof(s64) / sizeof(type)]
905 
/**
 * IIO_DECLARE_BUFFER_WITH_TS() - Declare a buffer with timestamp
 * @type: element type of the buffer
 * @name: identifier name of the buffer
 * @count: number of elements in the buffer
 *
 * Declares a buffer that is safe to use with iio_push_to_buffers_with_ts(). In
 * addition to allocating enough space for @count elements of @type, it also
 * allocates space for a s64 timestamp at the end of the buffer and ensures
 * proper alignment of the timestamp. Note that padding may be inserted after
 * the @count elements so that the timestamp lands on an 8-byte boundary.
 */
#define IIO_DECLARE_BUFFER_WITH_TS(type, name, count) \
	__IIO_DECLARE_BUFFER_WITH_TS(type, name, count) __aligned(sizeof(s64))
919 
/**
 * IIO_DECLARE_DMA_BUFFER_WITH_TS() - Declare a DMA-aligned buffer with timestamp
 * @type: element type of the buffer
 * @name: identifier name of the buffer
 * @count: number of elements in the buffer
 *
 * Same as IIO_DECLARE_BUFFER_WITH_TS(), but it uses __aligned(IIO_DMA_MINALIGN)
 * to ensure that the buffer doesn't share cachelines with anything that comes
 * before it in a struct. This should not be used for stack-allocated buffers
 * as stack memory cannot generally be used for DMA.
 */
#define IIO_DECLARE_DMA_BUFFER_WITH_TS(type, name, count) \
	__IIO_DECLARE_BUFFER_WITH_TS(type, name, count) __aligned(IIO_DMA_MINALIGN)
933 
934 struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv);
935 
/* The information at the returned address is guaranteed to be cacheline aligned */
static inline void *iio_priv(const struct iio_dev *indio_dev)
{
	/* Accessor for the driver-private data attached to @indio_dev. */
	return ACCESS_PRIVATE(indio_dev, priv);
}
941 
942 void iio_device_free(struct iio_dev *indio_dev);
943 struct iio_dev *devm_iio_device_alloc(struct device *parent, int sizeof_priv);
944 
945 #define devm_iio_trigger_alloc(parent, fmt, ...) \
946 	__devm_iio_trigger_alloc((parent), THIS_MODULE, (fmt), ##__VA_ARGS__)
947 __printf(3, 4)
948 struct iio_trigger *__devm_iio_trigger_alloc(struct device *parent,
949 					     struct module *this_mod,
950 					     const char *fmt, ...);
/**
 * iio_get_debugfs_dentry() - helper function to get the debugfs_dentry
 * @indio_dev:		IIO device structure for device
 *
 * Returns: The debugfs dentry, or NULL when CONFIG_DEBUG_FS is disabled.
 **/
#if defined(CONFIG_DEBUG_FS)
struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev);
#else
static inline struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev)
{
	return NULL;
}
#endif
963 
964 /**
965  * iio_device_suspend_triggering() - suspend trigger attached to an iio_dev
966  * @indio_dev: iio_dev associated with the device that will have triggers suspended
967  *
968  * Return 0 if successful, negative otherwise
969  **/
970 int iio_device_suspend_triggering(struct iio_dev *indio_dev);
971 
972 /**
973  * iio_device_resume_triggering() - resume trigger attached to an iio_dev
974  *	that was previously suspended with iio_device_suspend_triggering()
975  * @indio_dev: iio_dev associated with the device that will have triggers resumed
976  *
977  * Return 0 if successful, negative otherwise
978  **/
979 int iio_device_resume_triggering(struct iio_dev *indio_dev);
980 
#ifdef CONFIG_ACPI
bool iio_read_acpi_mount_matrix(struct device *dev,
				struct iio_mount_matrix *orientation,
				char *acpi_method);
const char *iio_get_acpi_device_name_and_data(struct device *dev, const void **data);
#else
/* !CONFIG_ACPI stubs: no mount matrix (false) and no device name (NULL). */
static inline bool iio_read_acpi_mount_matrix(struct device *dev,
					      struct iio_mount_matrix *orientation,
					      char *acpi_method)
{
	return false;
}
static inline const char *
iio_get_acpi_device_name_and_data(struct device *dev, const void **data)
{
	return NULL;
}
#endif
/**
 * iio_get_acpi_device_name() - Shorthand for iio_get_acpi_device_name_and_data()
 * @dev: Device structure
 *
 * Returns: The device name, without fetching the associated driver data.
 */
static inline const char *iio_get_acpi_device_name(struct device *dev)
{
	return iio_get_acpi_device_name_and_data(dev, NULL);
}
1003 
1004 /**
1005  * iio_get_current_scan_type - Get the current scan type for a channel
1006  * @indio_dev:	the IIO device to get the scan type for
1007  * @chan:	the channel to get the scan type for
1008  *
1009  * Most devices only have one scan type per channel and can just access it
1010  * directly without calling this function. Core IIO code and drivers that
1011  * implement ext_scan_type in the channel spec should use this function to
1012  * get the current scan type for a channel.
1013  *
1014  * Returns: the current scan type for the channel or error.
1015  */
1016 static inline const struct iio_scan_type
1017 *iio_get_current_scan_type(const struct iio_dev *indio_dev,
1018 			   const struct iio_chan_spec *chan)
1019 {
1020 	int ret;
1021 
1022 	if (chan->has_ext_scan_type) {
1023 		ret = indio_dev->info->get_current_scan_type(indio_dev, chan);
1024 		if (ret < 0)
1025 			return ERR_PTR(ret);
1026 
1027 		if (ret >= chan->num_ext_scan_type)
1028 			return ERR_PTR(-EINVAL);
1029 
1030 		return &chan->ext_scan_type[ret];
1031 	}
1032 
1033 	return &chan->scan_type;
1034 }
1035 
/**
 * iio_get_masklength - Get length of the channels mask
 * @indio_dev: the IIO device to get the masklength for
 *
 * Returns: Number of bits in the device's channel scan mask.
 */
static inline unsigned int iio_get_masklength(const struct iio_dev *indio_dev)
{
	return ACCESS_PRIVATE(indio_dev, masklength);
}
1044 
1045 int iio_active_scan_mask_index(struct iio_dev *indio_dev);
1046 
/**
 * iio_for_each_active_channel - Iterate over the active channels
 * @indio_dev: the IIO device
 * @chan: Holds the index of the enabled channel
 */
#define iio_for_each_active_channel(indio_dev, chan) \
	for_each_set_bit((chan), (indio_dev)->active_scan_mask, \
			 iio_get_masklength(indio_dev))
1055 
1056 ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals);
1057 
1058 int iio_str_to_fixpoint(const char *str, int fract_mult, int *integer,
1059 	int *fract);
1060 
/**
 * IIO_DEGREE_TO_RAD() - Convert degree to rad
 * @deg: A value in degree
 *
 * Returns the given value converted from degree to rad
 *
 * Integer arithmetic with pi/180 approximated as 314159/18000000; the
 * +9000000ULL term (half the divisor) rounds to the nearest value.
 */
#define IIO_DEGREE_TO_RAD(deg) (((deg) * 314159ULL + 9000000ULL) / 18000000ULL)

/**
 * IIO_RAD_TO_DEGREE() - Convert rad to degree
 * @rad: A value in rad
 *
 * Returns the given value converted from rad to degree
 *
 * Integer arithmetic; the +314159ULL/2 term (half the divisor) rounds to
 * the nearest value.
 */
#define IIO_RAD_TO_DEGREE(rad) \
	(((rad) * 18000000ULL + 314159ULL / 2) / 314159ULL)
1077 
/**
 * IIO_G_TO_M_S_2() - Convert g to meter / second**2
 * @g: A value in g
 *
 * Returns the given value converted from g to meter / second**2
 *
 * Note: unlike IIO_M_S_2_TO_G(), no half-divisor term is added, so the
 * result is truncated rather than rounded to nearest.
 */
#define IIO_G_TO_M_S_2(g) ((g) * 980665ULL / 100000ULL)

/**
 * IIO_M_S_2_TO_G() - Convert meter / second**2 to g
 * @ms2: A value in meter / second**2
 *
 * Returns the given value converted from meter / second**2 to g
 *
 * The +980665ULL/2 term (half the divisor) rounds to the nearest value.
 */
#define IIO_M_S_2_TO_G(ms2) (((ms2) * 100000ULL + 980665ULL / 2) / 980665ULL)
1093 
1094 #endif /* _INDUSTRIAL_IO_H_ */
1095