/* SPDX-License-Identifier: GPL-2.0-only */

/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 */
#ifndef _INDUSTRIAL_IO_H_
#define _INDUSTRIAL_IO_H_

#include <linux/device.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/iio/types.h>
/* IIO TODO LIST */
/*
 * Provide means of adjusting timer accuracy.
 * Currently assumes nanoseconds.
 */

struct of_phandle_args;

enum iio_shared_by {
	IIO_SEPARATE,
	IIO_SHARED_BY_TYPE,
	IIO_SHARED_BY_DIR,
	IIO_SHARED_BY_ALL
};

enum iio_endian {
	IIO_CPU,
	IIO_BE,
	IIO_LE,
};

struct iio_chan_spec;
struct iio_dev;

/**
 * struct iio_chan_spec_ext_info - Extended channel info attribute
 * @name:	Info attribute name
 * @shared:	Whether this attribute is shared between all channels.
 * @read:	Read callback for this info attribute, may be NULL.
 * @write:	Write callback for this info attribute, may be NULL.
 * @private:	Data private to the driver.
 */
struct iio_chan_spec_ext_info {
	const char *name;
	enum iio_shared_by shared;
	ssize_t (*read)(struct iio_dev *, uintptr_t private,
			struct iio_chan_spec const *, char *buf);
	ssize_t (*write)(struct iio_dev *, uintptr_t private,
			 struct iio_chan_spec const *, const char *buf,
			 size_t len);
	uintptr_t private;
};
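/*
 * Example (illustrative sketch only, hypothetical "foo" driver and attribute
 * name): a driver can expose a custom per-channel sysfs attribute by pointing
 * a channel's ext_info at an array terminated by an all-NULL entry:
 *
 *	static ssize_t foo_get_calib(struct iio_dev *indio_dev, uintptr_t priv,
 *				     const struct iio_chan_spec *chan, char *buf)
 *	{
 *		struct foo_state *st = iio_priv(indio_dev);
 *
 *		return sysfs_emit(buf, "%d\n", st->calib[chan->channel]);
 *	}
 *
 *	static const struct iio_chan_spec_ext_info foo_ext_info[] = {
 *		{
 *			.name = "calibration_data",
 *			.shared = IIO_SEPARATE,
 *			.read = foo_get_calib,
 *		},
 *		{ }
 *	};
 */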

/**
 * struct iio_enum - Enum channel info attribute
 * @items:	An array of strings.
 * @num_items:	Length of the item array.
 * @set:	Set callback function, may be NULL.
 * @get:	Get callback function, may be NULL.
 *
 * The iio_enum struct can be used to implement enum-style channel attributes.
 * Enum-style attributes are those which have a set of strings which map to
 * unsigned integer values. The IIO enum helper code takes care of mapping
 * between value and string as well as generating a "_available" file which
 * contains a list of all available items. The set callback will be called when
 * the attribute is updated. The last parameter is the index of the newly
 * activated item. The get callback will be used to query the currently active
 * item and is supposed to return the index for it.
 */
struct iio_enum {
	const char * const *items;
	unsigned int num_items;
	int (*set)(struct iio_dev *, const struct iio_chan_spec *, unsigned int);
	int (*get)(struct iio_dev *, const struct iio_chan_spec *);
};

ssize_t iio_enum_available_read(struct iio_dev *indio_dev,
	uintptr_t priv, const struct iio_chan_spec *chan, char *buf);
ssize_t iio_enum_read(struct iio_dev *indio_dev,
	uintptr_t priv, const struct iio_chan_spec *chan, char *buf);
ssize_t iio_enum_write(struct iio_dev *indio_dev,
	uintptr_t priv, const struct iio_chan_spec *chan, const char *buf,
	size_t len);

/**
 * IIO_ENUM() - Initialize enum extended channel attribute
 * @_name:	Attribute name
 * @_shared:	Whether the attribute is shared between all channels
 * @_e:		Pointer to an iio_enum struct
 *
 * This should usually be used together with IIO_ENUM_AVAILABLE()
 */
#define IIO_ENUM(_name, _shared, _e) \
{ \
	.name = (_name), \
	.shared = (_shared), \
	.read = iio_enum_read, \
	.write = iio_enum_write, \
	.private = (uintptr_t)(_e), \
}

/**
 * IIO_ENUM_AVAILABLE() - Initialize enum available extended channel attribute
 * @_name:	Attribute name ("_available" will be appended to the name)
 * @_shared:	Whether the attribute is shared between all channels
 * @_e:		Pointer to an iio_enum struct
 *
 * Creates a read-only attribute which lists all the available enum items in a
 * space separated list. This should usually be used together with IIO_ENUM()
 */
#define IIO_ENUM_AVAILABLE(_name, _shared, _e) \
{ \
	.name = (_name "_available"), \
	.shared = _shared, \
	.read = iio_enum_available_read, \
	.private = (uintptr_t)(_e), \
}
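/*
 * Example (illustrative sketch only, hypothetical "foo" driver): an enum-style
 * power mode attribute wired up through the helpers above:
 *
 *	static const char * const foo_power_modes[] = { "low", "normal" };
 *
 *	static int foo_set_power_mode(struct iio_dev *indio_dev,
 *				      const struct iio_chan_spec *chan,
 *				      unsigned int mode)
 *	{
 *		// write 'mode' to the device ...
 *		return 0;
 *	}
 *
 *	static int foo_get_power_mode(struct iio_dev *indio_dev,
 *				      const struct iio_chan_spec *chan)
 *	{
 *		// ... return the index of the currently active mode
 *		return 0;
 *	}
 *
 *	static const struct iio_enum foo_power_mode_enum = {
 *		.items = foo_power_modes,
 *		.num_items = ARRAY_SIZE(foo_power_modes),
 *		.set = foo_set_power_mode,
 *		.get = foo_get_power_mode,
 *	};
 *
 *	static const struct iio_chan_spec_ext_info foo_ext_info[] = {
 *		IIO_ENUM("power_mode", IIO_SHARED_BY_TYPE, &foo_power_mode_enum),
 *		IIO_ENUM_AVAILABLE("power_mode", IIO_SHARED_BY_TYPE,
 *				   &foo_power_mode_enum),
 *		{ }
 *	};
 */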

/**
 * struct iio_mount_matrix - iio mounting matrix
 * @rotation: 3 dimensional space rotation matrix defining sensor alignment with
 *            main hardware
 */
struct iio_mount_matrix {
	const char *rotation[9];
};

ssize_t iio_show_mount_matrix(struct iio_dev *indio_dev, uintptr_t priv,
			      const struct iio_chan_spec *chan, char *buf);
int iio_read_mount_matrix(struct device *dev, struct iio_mount_matrix *matrix);

typedef const struct iio_mount_matrix *
	(iio_get_mount_matrix_t)(const struct iio_dev *indio_dev,
				 const struct iio_chan_spec *chan);

/**
 * IIO_MOUNT_MATRIX() - Initialize mount matrix extended channel attribute
 * @_shared:	Whether the attribute is shared between all channels
 * @_get:	Pointer to an iio_get_mount_matrix_t accessor
 */
#define IIO_MOUNT_MATRIX(_shared, _get) \
{ \
	.name = "mount_matrix", \
	.shared = (_shared), \
	.read = iio_show_mount_matrix, \
	.private = (uintptr_t)(_get), \
}
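/*
 * Example (illustrative sketch only, hypothetical "foo" driver): read the
 * mount matrix from firmware at probe time with iio_read_mount_matrix() and
 * expose it through an extended channel attribute:
 *
 *	// in probe(): iio_read_mount_matrix(dev, &st->orientation);
 *
 *	static const struct iio_mount_matrix *
 *	foo_get_mount_matrix(const struct iio_dev *indio_dev,
 *			     const struct iio_chan_spec *chan)
 *	{
 *		struct foo_state *st = iio_priv(indio_dev);
 *
 *		return &st->orientation;
 *	}
 *
 *	static const struct iio_chan_spec_ext_info foo_ext_info[] = {
 *		IIO_MOUNT_MATRIX(IIO_SHARED_BY_DIR, foo_get_mount_matrix),
 *		{ }
 *	};
 */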

/**
 * struct iio_event_spec - specification for a channel event
 * @type:		    Type of the event
 * @dir:		    Direction of the event
 * @mask_separate:	    Bit mask of enum iio_event_info values. Attributes
 *			    set in this mask will be registered per channel.
 * @mask_shared_by_type:    Bit mask of enum iio_event_info values. Attributes
 *			    set in this mask will be shared by channel type.
 * @mask_shared_by_dir:	    Bit mask of enum iio_event_info values. Attributes
 *			    set in this mask will be shared by channel type and
 *			    direction.
 * @mask_shared_by_all:	    Bit mask of enum iio_event_info values. Attributes
 *			    set in this mask will be shared by all channels.
 */
struct iio_event_spec {
	enum iio_event_type type;
	enum iio_event_direction dir;
	unsigned long mask_separate;
	unsigned long mask_shared_by_type;
	unsigned long mask_shared_by_dir;
	unsigned long mask_shared_by_all;
};
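/*
 * Example (illustrative sketch only): a rising-threshold event whose enable
 * and value attributes are registered per channel:
 *
 *	static const struct iio_event_spec foo_threshold_event = {
 *		.type = IIO_EV_TYPE_THRESH,
 *		.dir = IIO_EV_DIR_RISING,
 *		.mask_separate = BIT(IIO_EV_INFO_VALUE) |
 *				 BIT(IIO_EV_INFO_ENABLE),
 *	};
 */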

/**
 * struct iio_chan_spec - specification of a single channel
 * @type:		What type of measurement is the channel making.
 * @channel:		What number do we wish to assign the channel.
 * @channel2:		If there is a second number for a differential
 *			channel then this is it. If modified is set then the
 *			value here specifies the modifier.
 * @address:		Driver specific identifier.
 * @scan_index:		Monotonic index to give ordering in scans when read
 *			from a buffer.
 * @scan_type:		struct describing the scan type
 * @scan_type.sign:		's' or 'u' to specify signed or unsigned
 * @scan_type.realbits:		Number of valid bits of data
 * @scan_type.storagebits:	Realbits + padding
 * @scan_type.shift:		Shift right by this before masking out
 *				realbits.
 * @scan_type.repeat:		Number of times real/storage bits repeats.
 *				When the repeat element is more than 1, then
 *				the type element in sysfs will show a repeat
 *				value. Otherwise, the number of repetitions
 *				is omitted.
 * @scan_type.endianness:	little or big endian
 * @info_mask_separate: What information is to be exported that is specific to
 *			this channel.
 * @info_mask_separate_available: What availability information is to be
 *			exported that is specific to this channel.
 * @info_mask_shared_by_type: What information is to be exported that is shared
 *			by all channels of the same type.
 * @info_mask_shared_by_type_available: What availability information is to be
 *			exported that is shared by all channels of the same
 *			type.
 * @info_mask_shared_by_dir: What information is to be exported that is shared
 *			by all channels of the same direction.
 * @info_mask_shared_by_dir_available: What availability information is to be
 *			exported that is shared by all channels of the same
 *			direction.
 * @info_mask_shared_by_all: What information is to be exported that is shared
 *			by all channels.
 * @info_mask_shared_by_all_available: What availability information is to be
 *			exported that is shared by all channels.
 * @event_spec:		Array of events which should be registered for this
 *			channel.
 * @num_event_specs:	Size of the event_spec array.
 * @ext_info:		Array of extended info attributes for this channel.
 *			The array is NULL terminated, the last element should
 *			have its name field set to NULL.
 * @extend_name:	Allows labeling of channel attributes with an
 *			informative name. Note this has no effect on event
 *			codes etc., unlike modifiers.
 * @datasheet_name:	A name used in in-kernel mapping of channels. It should
 *			correspond to the first name that the channel is referred
 *			to by in the datasheet (e.g. IND), or the nearest
 *			possible compound name (e.g. IND-INC).
 * @modified:		Does a modifier apply to this channel. What these are
 *			depends on the channel type.  The modifier is set in
 *			channel2. An example is IIO_MOD_X for axial sensors about
 *			the 'x' axis.
 * @indexed:		Specify the channel has a numerical index. If not,
 *			the channel index number will be suppressed for sysfs
 *			attributes but not for event codes.
 * @output:		Channel is output.
 * @differential:	Channel is differential.
 */
struct iio_chan_spec {
	enum iio_chan_type	type;
	int			channel;
	int			channel2;
	unsigned long		address;
	int			scan_index;
	struct {
		char	sign;
		u8	realbits;
		u8	storagebits;
		u8	shift;
		u8	repeat;
		enum iio_endian endianness;
	} scan_type;
	long			info_mask_separate;
	long			info_mask_separate_available;
	long			info_mask_shared_by_type;
	long			info_mask_shared_by_type_available;
	long			info_mask_shared_by_dir;
	long			info_mask_shared_by_dir_available;
	long			info_mask_shared_by_all;
	long			info_mask_shared_by_all_available;
	const struct iio_event_spec *event_spec;
	unsigned int		num_event_specs;
	const struct iio_chan_spec_ext_info *ext_info;
	const char		*extend_name;
	const char		*datasheet_name;
	unsigned		modified:1;
	unsigned		indexed:1;
	unsigned		output:1;
	unsigned		differential:1;
};
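/*
 * Example (illustrative sketch only): an indexed raw voltage channel that can
 * also be captured through a buffer, followed by a software timestamp (see
 * IIO_CHAN_SOFT_TIMESTAMP() below):
 *
 *	static const struct iio_chan_spec foo_channels[] = {
 *		{
 *			.type = IIO_VOLTAGE,
 *			.indexed = 1,
 *			.channel = 0,
 *			.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
 *			.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
 *			.scan_index = 0,
 *			.scan_type = {
 *				.sign = 'u',
 *				.realbits = 12,
 *				.storagebits = 16,
 *				.endianness = IIO_BE,
 *			},
 *		},
 *		IIO_CHAN_SOFT_TIMESTAMP(1),
 *	};
 */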


/**
 * iio_channel_has_info() - Checks whether a channel supports an info attribute
 * @chan: The channel to be queried
 * @type: Type of the info attribute to be checked
 *
 * Returns true if the channel supports reporting values for the given info
 * attribute type, false otherwise.
 */
static inline bool iio_channel_has_info(const struct iio_chan_spec *chan,
	enum iio_chan_info_enum type)
{
	return (chan->info_mask_separate & BIT(type)) |
		(chan->info_mask_shared_by_type & BIT(type)) |
		(chan->info_mask_shared_by_dir & BIT(type)) |
		(chan->info_mask_shared_by_all & BIT(type));
}

/**
 * iio_channel_has_available() - Checks if a channel has an available attribute
 * @chan: The channel to be queried
 * @type: Type of the available attribute to be checked
 *
 * Returns true if the channel supports reporting available values for the
 * given attribute type, false otherwise.
 */
static inline bool iio_channel_has_available(const struct iio_chan_spec *chan,
					     enum iio_chan_info_enum type)
{
	return (chan->info_mask_separate_available & BIT(type)) |
		(chan->info_mask_shared_by_type_available & BIT(type)) |
		(chan->info_mask_shared_by_dir_available & BIT(type)) |
		(chan->info_mask_shared_by_all_available & BIT(type));
}
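/*
 * Example (illustrative sketch only, hypothetical helper name): driver code
 * can use these helpers to gate channel-specific work, e.g.
 *
 *	if (iio_channel_has_info(chan, IIO_CHAN_INFO_SCALE))
 *		foo_update_scale(indio_dev, chan);
 */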

#define IIO_CHAN_SOFT_TIMESTAMP(_si) {					\
	.type = IIO_TIMESTAMP,						\
	.channel = -1,							\
	.scan_index = _si,						\
	.scan_type = {							\
		.sign = 's',						\
		.realbits = 64,						\
		.storagebits = 64,					\
		},							\
}

s64 iio_get_time_ns(const struct iio_dev *indio_dev);

/*
 * Device operating modes
 * @INDIO_DIRECT_MODE: There is an access to either:
 * a) The last single value available for devices that do not provide
 *    on-demand reads.
 * b) A new value after performing an on-demand read otherwise.
 * On most devices, this is a single-shot read. On some devices with data
 * streams without an 'on-demand' function, this might also be the 'last value'
 * feature. Above all, this mode internally means that we are not in any of the
 * other modes, and sysfs reads should work.
 * Device drivers should inform the core if they support this mode.
 * @INDIO_BUFFER_TRIGGERED: Common mode when dealing with kfifo buffers.
 * It indicates that an explicit trigger is required. This requests the core to
 * attach a poll function when enabling the buffer, which is indicated by the
 * _TRIGGERED suffix.
 * The core will ensure this mode is set when registering a triggered buffer
 * with iio_triggered_buffer_setup().
 * @INDIO_BUFFER_SOFTWARE: Another kfifo buffer mode, but not trigger driven.
 * No poll function can be attached because there is no trigger infrastructure
 * we can use to cause capture. There is a kfifo that the driver will fill, but
 * not "only one scan at a time". Typically, hardware will have a buffer that
 * can hold multiple scans. Software may read one or more scans at a time and
 * push the available data to a kfifo. This means the core will not attach
 * any poll function when enabling the buffer.
 * The core will ensure this mode is set when registering a simple kfifo buffer
 * with devm_iio_kfifo_buffer_setup().
 * @INDIO_BUFFER_HARDWARE: For specific hardware, if unsure do not use this mode.
 * Same as above but this time the buffer is not a kfifo where we have direct
 * access to the data. Instead, the consumer driver must access the data through
 * non software visible channels (or DMA when there is no demux possible in
 * software).
 * The core will ensure this mode is set when registering a dmaengine buffer
 * with devm_iio_dmaengine_buffer_setup().
 * @INDIO_EVENT_TRIGGERED: Very unusual mode.
 * Triggers usually refer to an external event which will start data capture.
 * Here it is somewhat the opposite: a particular state of the data might
 * produce an event which can itself be considered a trigger. We don't
 * necessarily have access to the data itself, but to the event produced. For
 * example, this can be a threshold detector. The internal path of this mode is
 * very close to the INDIO_BUFFER_TRIGGERED mode.
 * The core will ensure this mode is set when registering a triggered event.
 * @INDIO_HARDWARE_TRIGGERED: Very unusual mode.
 * Here, triggers can result in data capture and can be routed to multiple
 * hardware components, which makes them close to regular triggers in the way
 * they must be managed by the core, but without the entire interrupts/poll
 * functions burden. Interrupts are irrelevant as the data flow is hardware
 * mediated and distributed.
 */
#define INDIO_DIRECT_MODE		0x01
#define INDIO_BUFFER_TRIGGERED		0x02
#define INDIO_BUFFER_SOFTWARE		0x04
#define INDIO_BUFFER_HARDWARE		0x08
#define INDIO_EVENT_TRIGGERED		0x10
#define INDIO_HARDWARE_TRIGGERED	0x20

#define INDIO_ALL_BUFFER_MODES					\
	(INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE | INDIO_BUFFER_SOFTWARE)

#define INDIO_ALL_TRIGGERED_MODES	\
	(INDIO_BUFFER_TRIGGERED		\
	 | INDIO_EVENT_TRIGGERED	\
	 | INDIO_HARDWARE_TRIGGERED)

#define INDIO_MAX_RAW_ELEMENTS		4
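/*
 * Example (illustrative sketch only): a driver that supports sysfs reads
 * declares direct mode before registration; buffer helpers such as
 * iio_triggered_buffer_setup() then add the corresponding buffer modes
 * themselves, as described above:
 *
 *	indio_dev->modes = INDIO_DIRECT_MODE;
 */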

struct iio_trigger; /* forward declaration */

/**
 * struct iio_info - constant information about device
 * @event_attrs:	event control attributes
 * @attrs:		general purpose device attributes
 * @read_raw:		function to request a value from the device.
 *			mask specifies which value. Note 0 means a reading of
 *			the channel in question.  Return value will specify the
 *			type of value returned by the device. val and val2 will
 *			contain the elements making up the returned value.
 * @read_raw_multi:	function to return values from the device.
 *			mask specifies which value. Note 0 means a reading of
 *			the channel in question.  Return value will specify the
 *			type of value returned by the device. The vals pointer
 *			contains the elements making up the returned value.
 *			max_len specifies the maximum number of elements the
 *			vals pointer can contain. val_len is used to return
 *			the number of valid elements in vals.
 * @read_avail:		function to return the available values from the device.
 *			mask specifies which value. Note 0 means the available
 *			values for the channel in question.  Return value
 *			specifies if an IIO_AVAIL_LIST or an IIO_AVAIL_RANGE is
 *			returned in vals. The type of the vals is returned in
 *			type and the number of vals is returned in length. For
 *			ranges, there are always three vals returned: min, step
 *			and max. For lists, all possible values are enumerated.
 * @write_raw:		function to write a value to the device.
 *			Parameters are the same as for read_raw.
 * @read_label:		function to request label name for a specified label,
 *			for better channel identification.
 * @write_raw_get_fmt:	callback function to query the expected
 *			format/precision. If not set by the driver, values
 *			passed to write_raw are interpreted as
 *			IIO_VAL_INT_PLUS_MICRO.
 * @read_event_config:	find out if the event is enabled.
 * @write_event_config:	set if the event is enabled.
 * @read_event_value:	read a configuration value associated with the event.
 * @write_event_value:	write a configuration value for the event.
 * @validate_trigger:	function to validate the trigger when the
 *			current trigger gets changed.
 * @update_scan_mode:	function to configure device and scan buffer when
 *			channels have changed
 * @debugfs_reg_access:	function to read or write register value of device
 * @of_xlate:		function pointer to obtain channel specifier index.
 *			When #iio-cells is greater than '0', the driver could
 *			provide a custom of_xlate function that reads the
 *			*args* and returns the appropriate index in registered
 *			IIO channels array.
 * @hwfifo_set_watermark: function pointer to set the current hardware
 *			fifo watermark level; see hwfifo_* entries in
 *			Documentation/ABI/testing/sysfs-bus-iio for details on
 *			how the hardware fifo operates
 * @hwfifo_flush_to_buffer: function pointer to flush the samples stored
 *			in the hardware fifo to the device buffer. The driver
 *			should not flush more than count samples. The function
 *			must return the number of samples flushed, 0 if no
 *			samples were flushed or a negative integer if no samples
 *			were flushed and there was an error.
 **/
struct iio_info {
	const struct attribute_group	*event_attrs;
	const struct attribute_group	*attrs;

	int (*read_raw)(struct iio_dev *indio_dev,
			struct iio_chan_spec const *chan,
			int *val,
			int *val2,
			long mask);

	int (*read_raw_multi)(struct iio_dev *indio_dev,
			struct iio_chan_spec const *chan,
			int max_len,
			int *vals,
			int *val_len,
			long mask);

	int (*read_avail)(struct iio_dev *indio_dev,
			  struct iio_chan_spec const *chan,
			  const int **vals,
			  int *type,
			  int *length,
			  long mask);

	int (*write_raw)(struct iio_dev *indio_dev,
			 struct iio_chan_spec const *chan,
			 int val,
			 int val2,
			 long mask);

	int (*read_label)(struct iio_dev *indio_dev,
			 struct iio_chan_spec const *chan,
			 char *label);

	int (*write_raw_get_fmt)(struct iio_dev *indio_dev,
			 struct iio_chan_spec const *chan,
			 long mask);

	int (*read_event_config)(struct iio_dev *indio_dev,
				 const struct iio_chan_spec *chan,
				 enum iio_event_type type,
				 enum iio_event_direction dir);

	int (*write_event_config)(struct iio_dev *indio_dev,
				  const struct iio_chan_spec *chan,
				  enum iio_event_type type,
				  enum iio_event_direction dir,
				  int state);

	int (*read_event_value)(struct iio_dev *indio_dev,
				const struct iio_chan_spec *chan,
				enum iio_event_type type,
				enum iio_event_direction dir,
				enum iio_event_info info, int *val, int *val2);

	int (*write_event_value)(struct iio_dev *indio_dev,
				 const struct iio_chan_spec *chan,
				 enum iio_event_type type,
				 enum iio_event_direction dir,
				 enum iio_event_info info, int val, int val2);

	int (*validate_trigger)(struct iio_dev *indio_dev,
				struct iio_trigger *trig);
	int (*update_scan_mode)(struct iio_dev *indio_dev,
				const unsigned long *scan_mask);
	int (*debugfs_reg_access)(struct iio_dev *indio_dev,
				  unsigned reg, unsigned writeval,
				  unsigned *readval);
	int (*of_xlate)(struct iio_dev *indio_dev,
			const struct of_phandle_args *iiospec);
	int (*hwfifo_set_watermark)(struct iio_dev *indio_dev, unsigned val);
	int (*hwfifo_flush_to_buffer)(struct iio_dev *indio_dev,
				      unsigned count);
};
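/*
 * Example (illustrative sketch only, hypothetical "foo" driver): a minimal
 * read_raw() implementation paired with an iio_info instance. foo_read_adc()
 * is assumed to fetch one sample from the hardware:
 *
 *	static int foo_read_raw(struct iio_dev *indio_dev,
 *				struct iio_chan_spec const *chan,
 *				int *val, int *val2, long mask)
 *	{
 *		struct foo_state *st = iio_priv(indio_dev);
 *		int ret;
 *
 *		switch (mask) {
 *		case IIO_CHAN_INFO_RAW:
 *			ret = foo_read_adc(st, chan->channel, val);
 *			if (ret)
 *				return ret;
 *			return IIO_VAL_INT;
 *		case IIO_CHAN_INFO_SCALE:
 *			*val = 0;
 *			*val2 = 610352;	// ~2500 mV / 4096 per LSB
 *			return IIO_VAL_INT_PLUS_MICRO;
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 *
 *	static const struct iio_info foo_info = {
 *		.read_raw = foo_read_raw,
 *	};
 */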

/**
 * struct iio_buffer_setup_ops - buffer setup related callbacks
 * @preenable:		[DRIVER] function to run prior to marking buffer enabled
 * @postenable:		[DRIVER] function to run after marking buffer enabled
 * @predisable:		[DRIVER] function to run prior to marking buffer
 *			disabled
 * @postdisable:	[DRIVER] function to run after marking buffer disabled
 * @validate_scan_mask: [DRIVER] function callback to check whether a given
 *			scan mask is valid for the device.
 */
struct iio_buffer_setup_ops {
	int (*preenable)(struct iio_dev *);
	int (*postenable)(struct iio_dev *);
	int (*predisable)(struct iio_dev *);
	int (*postdisable)(struct iio_dev *);
	bool (*validate_scan_mask)(struct iio_dev *indio_dev,
				   const unsigned long *scan_mask);
};
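/*
 * Example (illustrative sketch only, hypothetical "foo" driver): switch the
 * device in and out of its continuous conversion mode around buffer use:
 *
 *	static int foo_buffer_postenable(struct iio_dev *indio_dev)
 *	{
 *		return foo_start_conversions(iio_priv(indio_dev));
 *	}
 *
 *	static int foo_buffer_predisable(struct iio_dev *indio_dev)
 *	{
 *		return foo_stop_conversions(iio_priv(indio_dev));
 *	}
 *
 *	static const struct iio_buffer_setup_ops foo_buffer_setup_ops = {
 *		.postenable = foo_buffer_postenable,
 *		.predisable = foo_buffer_predisable,
 *	};
 */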

/**
 * struct iio_dev - industrial I/O device
 * @modes:		[DRIVER] bitmask listing all the operating modes
 *			supported by the IIO device. This list should be
 *			initialized before registering the IIO device. It can
 *			also be filled in by the IIO core, as a result of
 *			enabling particular features in the driver
 *			(see iio_triggered_event_setup()).
 * @dev:		[DRIVER] device structure, should be assigned a parent
 *			and owner
 * @buffer:		[DRIVER] any buffer present
 * @scan_bytes:		[INTERN] num bytes captured to be fed to buffer demux
 * @mlock:		[INTERN] lock used to prevent simultaneous device state
 *			changes
 * @available_scan_masks: [DRIVER] optional array of allowed bitmasks
 * @masklength:		[INTERN] the length of the mask established from
 *			channels
 * @active_scan_mask:	[INTERN] union of all scan masks requested by buffers
 * @scan_timestamp:	[INTERN] set if any buffers have requested timestamp
 * @trig:		[INTERN] current device trigger (buffer modes)
 * @pollfunc:		[DRIVER] function run on trigger being received
 * @pollfunc_event:	[DRIVER] function run on events trigger being received
 * @channels:		[DRIVER] channel specification structure table
 * @num_channels:	[DRIVER] number of channels specified in @channels.
 * @name:		[DRIVER] name of the device.
 * @label:              [DRIVER] unique name to identify which device this is
 * @info:		[DRIVER] callbacks and constant info from driver
 * @setup_ops:		[DRIVER] callbacks to call before and after buffer
 *			enable/disable
 * @priv:		[DRIVER] reference to driver's private information
 *			**MUST** be accessed **ONLY** via iio_priv() helper
 */
struct iio_dev {
	int				modes;
	struct device			dev;

	struct iio_buffer		*buffer;
	int				scan_bytes;
	struct mutex			mlock;

	const unsigned long		*available_scan_masks;
	unsigned			masklength;
	const unsigned long		*active_scan_mask;
	bool				scan_timestamp;
	struct iio_trigger		*trig;
	struct iio_poll_func		*pollfunc;
	struct iio_poll_func		*pollfunc_event;

	struct iio_chan_spec const	*channels;
	int				num_channels;

	const char			*name;
	const char			*label;
	const struct iio_info		*info;
	const struct iio_buffer_setup_ops	*setup_ops;

	void				*priv;
};

int iio_device_id(struct iio_dev *indio_dev);
int iio_device_get_current_mode(struct iio_dev *indio_dev);
bool iio_buffer_enabled(struct iio_dev *indio_dev);

const struct iio_chan_spec
*iio_find_channel_from_si(struct iio_dev *indio_dev, int si);
/**
 * iio_device_register() - register a device with the IIO subsystem
 * @indio_dev:		Device structure filled by the device driver
 **/
#define iio_device_register(indio_dev) \
	__iio_device_register((indio_dev), THIS_MODULE)
int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod);
void iio_device_unregister(struct iio_dev *indio_dev);
/**
 * devm_iio_device_register - Resource-managed iio_device_register()
 * @dev:	Device to allocate iio_dev for
 * @indio_dev:	Device structure filled by the device driver
 *
 * Managed iio_device_register.  The IIO device registered with this
 * function is automatically unregistered on driver detach. This function
 * calls iio_device_register() internally. Refer to that function for more
 * information.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
#define devm_iio_device_register(dev, indio_dev) \
	__devm_iio_device_register((dev), (indio_dev), THIS_MODULE)
int __devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev,
			       struct module *this_mod);
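/*
 * Example (illustrative sketch only, hypothetical "foo" SPI driver): typical
 * allocation, setup and registration flow in a probe() routine, using the
 * devm_iio_device_alloc()/iio_priv() helpers declared further down:
 *
 *	static int foo_probe(struct spi_device *spi)
 *	{
 *		struct iio_dev *indio_dev;
 *		struct foo_state *st;
 *
 *		indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
 *		if (!indio_dev)
 *			return -ENOMEM;
 *
 *		st = iio_priv(indio_dev);
 *		st->spi = spi;
 *
 *		indio_dev->name = "foo";
 *		indio_dev->info = &foo_info;
 *		indio_dev->modes = INDIO_DIRECT_MODE;
 *		indio_dev->channels = foo_channels;
 *		indio_dev->num_channels = ARRAY_SIZE(foo_channels);
 *
 *		return devm_iio_device_register(&spi->dev, indio_dev);
 *	}
 */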
int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp);
int iio_device_claim_direct_mode(struct iio_dev *indio_dev);
void iio_device_release_direct_mode(struct iio_dev *indio_dev);
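/*
 * Example (illustrative sketch only): guard a sysfs-triggered conversion so it
 * cannot race with buffered capture; foo_single_conversion() is hypothetical:
 *
 *	ret = iio_device_claim_direct_mode(indio_dev);
 *	if (ret)
 *		return ret;
 *
 *	ret = foo_single_conversion(st, chan, val);
 *
 *	iio_device_release_direct_mode(indio_dev);
 *	return ret;
 */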

extern struct bus_type iio_bus_type;

/**
 * iio_device_put() - reference counted deallocation of struct device
 * @indio_dev: IIO device structure containing the device
 **/
static inline void iio_device_put(struct iio_dev *indio_dev)
{
	if (indio_dev)
		put_device(&indio_dev->dev);
}

clockid_t iio_device_get_clock(const struct iio_dev *indio_dev);
int iio_device_set_clock(struct iio_dev *indio_dev, clockid_t clock_id);

/**
 * dev_to_iio_dev() - Get IIO device struct from a device struct
 * @dev: 		The device embedded in the IIO device
 *
 * Note: The device must be an IIO device, otherwise the result is undefined.
 */
static inline struct iio_dev *dev_to_iio_dev(struct device *dev)
{
	return container_of(dev, struct iio_dev, dev);
}

/**
 * iio_device_get() - increment reference count for the device
 * @indio_dev: 		IIO device structure
 *
 * Returns: The passed IIO device
 **/
static inline struct iio_dev *iio_device_get(struct iio_dev *indio_dev)
{
	return indio_dev ? dev_to_iio_dev(get_device(&indio_dev->dev)) : NULL;
}

/**
 * iio_device_set_parent() - assign parent device to the IIO device object
 * @indio_dev: 		IIO device structure
 * @parent:		reference to parent device object
 *
 * This utility must be called between IIO device allocation
 * (via devm_iio_device_alloc()) & IIO device registration
 * (via iio_device_register() or devm_iio_device_register()).
 * By default, the device allocation will also assign a parent device to
 * the IIO device object. In cases where devm_iio_device_alloc() is used,
 * sometimes the parent device must be different from the device used to
 * manage the allocation.
 * In that case, this helper should be used to change the parent, hence the
 * requirement to call this between allocation & registration.
 **/
static inline void iio_device_set_parent(struct iio_dev *indio_dev,
					 struct device *parent)
{
	indio_dev->dev.parent = parent;
}

/**
 * iio_device_set_drvdata() - Set device driver data
 * @indio_dev: IIO device structure
 * @data: Driver specific data
 *
 * Allows attaching an arbitrary pointer to an IIO device, which can later be
 * retrieved by iio_device_get_drvdata().
 */
static inline void iio_device_set_drvdata(struct iio_dev *indio_dev, void *data)
{
	dev_set_drvdata(&indio_dev->dev, data);
}

/**
 * iio_device_get_drvdata() - Get device driver data
 * @indio_dev: IIO device structure
 *
 * Returns the data previously set with iio_device_set_drvdata()
 */
static inline void *iio_device_get_drvdata(const struct iio_dev *indio_dev)
{
	return dev_get_drvdata(&indio_dev->dev);
}

/*
 * Used to ensure the iio_priv() structure is aligned to allow that structure
 * to in turn include IIO_DMA_MINALIGN'd elements such as buffers which
 * must not share cachelines with the rest of the structure, thus making
 * them safe for use with non-coherent DMA.
 */
#define IIO_DMA_MINALIGN ARCH_KMALLOC_MINALIGN
struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv);

/* The information at the returned address is guaranteed to be cacheline aligned */
static inline void *iio_priv(const struct iio_dev *indio_dev)
{
	return indio_dev->priv;
}
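/*
 * Example (illustrative sketch only, hypothetical "foo" driver state): keep
 * DMA-able transfer buffers in their own cacheline at the end of the private
 * structure, which is then obtained from the iio_dev with iio_priv() as in
 * the probe() sketch above:
 *
 *	struct foo_state {
 *		struct spi_device *spi;
 *		struct iio_mount_matrix orientation;
 *		int calib[4];
 *		// DMA safe: nothing else shares this cacheline
 *		u8 rx_buf[4] __aligned(IIO_DMA_MINALIGN);
 *	};
 */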

void iio_device_free(struct iio_dev *indio_dev);
struct iio_dev *devm_iio_device_alloc(struct device *parent, int sizeof_priv);

#define devm_iio_trigger_alloc(parent, fmt, ...) \
	__devm_iio_trigger_alloc((parent), THIS_MODULE, (fmt), ##__VA_ARGS__)
__printf(3, 4)
struct iio_trigger *__devm_iio_trigger_alloc(struct device *parent,
					     struct module *this_mod,
					     const char *fmt, ...);
/**
 * iio_get_debugfs_dentry() - helper function to get the debugfs_dentry
 * @indio_dev:		IIO device structure for device
 **/
#if defined(CONFIG_DEBUG_FS)
struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev);
#else
static inline struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev)
{
	return NULL;
}
#endif

ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals);

int iio_str_to_fixpoint(const char *str, int fract_mult, int *integer,
	int *fract);

/**
 * IIO_DEGREE_TO_RAD() - Convert degree to rad
 * @deg: A value in degree
 *
 * Returns the given value converted from degree to rad
 */
#define IIO_DEGREE_TO_RAD(deg) (((deg) * 314159ULL + 9000000ULL) / 18000000ULL)

/**
 * IIO_RAD_TO_DEGREE() - Convert rad to degree
 * @rad: A value in rad
 *
 * Returns the given value converted from rad to degree
 */
#define IIO_RAD_TO_DEGREE(rad) \
	(((rad) * 18000000ULL + 314159ULL / 2) / 314159ULL)

/**
 * IIO_G_TO_M_S_2() - Convert g to meter / second**2
 * @g: A value in g
 *
 * Returns the given value converted from g to meter / second**2
 */
#define IIO_G_TO_M_S_2(g) ((g) * 980665ULL / 100000ULL)

/**
 * IIO_M_S_2_TO_G() - Convert meter / second**2 to g
 * @ms2: A value in meter / second**2
 *
 * Returns the given value converted from meter / second**2 to g
 */
#define IIO_M_S_2_TO_G(ms2) (((ms2) * 100000ULL + 980665ULL / 2) / 980665ULL)

#endif /* _INDUSTRIAL_IO_H_ */