1 /* SPDX-License-Identifier: GPL-2.0-only */
2 
3 /* The industrial I/O core
4  *
5  * Copyright (c) 2008 Jonathan Cameron
6  */
7 #ifndef _INDUSTRIAL_IO_H_
8 #define _INDUSTRIAL_IO_H_
9 
10 #include <linux/device.h>
11 #include <linux/cdev.h>
12 #include <linux/slab.h>
13 #include <linux/iio/types.h>
14 /* IIO TODO LIST */
15 /*
16  * Provide means of adjusting timer accuracy.
17  * Currently assumes nanoseconds.
18  */
19 
20 struct fwnode_reference_args;
21 
22 enum iio_shared_by {
23 	IIO_SEPARATE,
24 	IIO_SHARED_BY_TYPE,
25 	IIO_SHARED_BY_DIR,
26 	IIO_SHARED_BY_ALL
27 };
28 
29 enum iio_endian {
30 	IIO_CPU,
31 	IIO_BE,
32 	IIO_LE,
33 };
34 
35 struct iio_chan_spec;
36 struct iio_dev;
37 
38 /**
39  * struct iio_chan_spec_ext_info - Extended channel info attribute
40  * @name:	Info attribute name
41  * @shared:	Whether this attribute is shared between all channels.
42  * @read:	Read callback for this info attribute, may be NULL.
43  * @write:	Write callback for this info attribute, may be NULL.
44  * @private:	Data private to the driver.
45  */
46 struct iio_chan_spec_ext_info {
47 	const char *name;
48 	enum iio_shared_by shared;
49 	ssize_t (*read)(struct iio_dev *, uintptr_t private,
50 			struct iio_chan_spec const *, char *buf);
51 	ssize_t (*write)(struct iio_dev *, uintptr_t private,
52 			 struct iio_chan_spec const *, const char *buf,
53 			 size_t len);
54 	uintptr_t private;
55 };
56 
57 /**
58  * struct iio_enum - Enum channel info attribute
59  * @items:	An array of strings.
60  * @num_items:	Length of the item array.
61  * @set:	Set callback function, may be NULL.
62  * @get:	Get callback function, may be NULL.
63  *
64  * The iio_enum struct can be used to implement enum style channel attributes.
65  * Enum style attributes are those which have a set of strings which map to
66  * unsigned integer values. The IIO enum helper code takes care of mapping
67  * between value and string as well as generating a "_available" file which
68  * contains a list of all available items. The set callback will be called when
69  * the attribute is updated. The last parameter is the index to the newly
70  * the attribute is updated. The last parameter is the index of the newly
71  * item and is supposed to return the index for it.
72  */
73 struct iio_enum {
74 	const char * const *items;
75 	unsigned int num_items;
76 	int (*set)(struct iio_dev *, const struct iio_chan_spec *, unsigned int);
77 	int (*get)(struct iio_dev *, const struct iio_chan_spec *);
78 };
79 
80 ssize_t iio_enum_available_read(struct iio_dev *indio_dev,
81 	uintptr_t priv, const struct iio_chan_spec *chan, char *buf);
82 ssize_t iio_enum_read(struct iio_dev *indio_dev,
83 	uintptr_t priv, const struct iio_chan_spec *chan, char *buf);
84 ssize_t iio_enum_write(struct iio_dev *indio_dev,
85 	uintptr_t priv, const struct iio_chan_spec *chan, const char *buf,
86 	size_t len);
87 
88 /**
89  * IIO_ENUM() - Initialize enum extended channel attribute
90  * @_name:	Attribute name
91  * @_shared:	Whether the attribute is shared between all channels
92  * @_e:		Pointer to an iio_enum struct
93  *
94  * This should usually be used together with IIO_ENUM_AVAILABLE()
95  */
96 #define IIO_ENUM(_name, _shared, _e) \
97 { \
98 	.name = (_name), \
99 	.shared = (_shared), \
100 	.read = iio_enum_read, \
101 	.write = iio_enum_write, \
102 	.private = (uintptr_t)(_e), \
103 }
104 
105 /**
106  * IIO_ENUM_AVAILABLE() - Initialize enum available extended channel attribute
107  * @_name:	Attribute name ("_available" will be appended to the name)
108  * @_shared:	Whether the attribute is shared between all channels
109  * @_e:		Pointer to an iio_enum struct
110  *
111  * Creates a read-only attribute which lists all the available enum items in a
112  * space-separated list. This should usually be used together with IIO_ENUM().
113  */
114 #define IIO_ENUM_AVAILABLE(_name, _shared, _e) \
115 { \
116 	.name = (_name "_available"), \
117 	.shared = _shared, \
118 	.read = iio_enum_available_read, \
119 	.private = (uintptr_t)(_e), \
120 }
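
/*
 * Usage sketch (illustrative only, not taken from an existing driver; the
 * foo_* names are hypothetical). A driver exposing a "filter_mode" enum
 * attribute could combine struct iio_enum with the two macros above:
 *
 *	static const char * const foo_filter_modes[] = { "normal", "low_power" };
 *
 *	static const struct iio_enum foo_filter_mode_enum = {
 *		.items = foo_filter_modes,
 *		.num_items = ARRAY_SIZE(foo_filter_modes),
 *		.get = foo_get_filter_mode,
 *		.set = foo_set_filter_mode,
 *	};
 *
 *	static const struct iio_chan_spec_ext_info foo_ext_info[] = {
 *		IIO_ENUM("filter_mode", IIO_SHARED_BY_TYPE, &foo_filter_mode_enum),
 *		IIO_ENUM_AVAILABLE("filter_mode", IIO_SHARED_BY_TYPE,
 *				   &foo_filter_mode_enum),
 *		{ }
 *	};
 *
 * The core then creates sysfs attributes named after the channel and suffixed
 * with "filter_mode" and "filter_mode_available", and maps between strings
 * and the indices passed to and returned from the set/get callbacks.
 */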
121 
122 /**
123  * struct iio_mount_matrix - iio mounting matrix
124  * @rotation: 3-dimensional space rotation matrix defining the sensor's
125  *            alignment with the main hardware
126  */
127 struct iio_mount_matrix {
128 	const char *rotation[9];
129 };
130 
131 ssize_t iio_show_mount_matrix(struct iio_dev *indio_dev, uintptr_t priv,
132 			      const struct iio_chan_spec *chan, char *buf);
133 int iio_read_mount_matrix(struct device *dev, struct iio_mount_matrix *matrix);
134 
135 typedef const struct iio_mount_matrix *
136 	(iio_get_mount_matrix_t)(const struct iio_dev *indio_dev,
137 				 const struct iio_chan_spec *chan);
138 
139 /**
140  * IIO_MOUNT_MATRIX() - Initialize mount matrix extended channel attribute
141  * @_shared:	Whether the attribute is shared between all channels
142  * @_get:	Pointer to an iio_get_mount_matrix_t accessor
143  */
144 #define IIO_MOUNT_MATRIX(_shared, _get) \
145 { \
146 	.name = "mount_matrix", \
147 	.shared = (_shared), \
148 	.read = iio_show_mount_matrix, \
149 	.private = (uintptr_t)(_get), \
150 }
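
/*
 * Usage sketch (illustrative; the foo_* names are hypothetical). A driver
 * usually fills a struct iio_mount_matrix once at probe time with
 * iio_read_mount_matrix() and hands it back through an iio_get_mount_matrix_t
 * accessor referenced by IIO_MOUNT_MATRIX():
 *
 *	static const struct iio_mount_matrix *
 *	foo_get_mount_matrix(const struct iio_dev *indio_dev,
 *			     const struct iio_chan_spec *chan)
 *	{
 *		struct foo_state *st = iio_priv(indio_dev);
 *
 *		return &st->orientation;
 *	}
 *
 *	static const struct iio_chan_spec_ext_info foo_ext_info[] = {
 *		IIO_MOUNT_MATRIX(IIO_SHARED_BY_DIR, foo_get_mount_matrix),
 *		{ }
 *	};
 *
 * where st->orientation was populated in probe by
 * iio_read_mount_matrix(dev, &st->orientation).
 */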
151 
152 /**
153  * struct iio_event_spec - specification for a channel event
154  * @type:		    Type of the event
155  * @dir:		    Direction of the event
156  * @mask_separate:	    Bit mask of enum iio_event_info values. Attributes
157  *			    set in this mask will be registered per channel.
158  * @mask_shared_by_type:    Bit mask of enum iio_event_info values. Attributes
159  *			    set in this mask will be shared by channel type.
160  * @mask_shared_by_dir:	    Bit mask of enum iio_event_info values. Attributes
161  *			    set in this mask will be shared by channel type and
162  *			    direction.
163  * @mask_shared_by_all:	    Bit mask of enum iio_event_info values. Attributes
164  *			    set in this mask will be shared by all channels.
165  */
166 struct iio_event_spec {
167 	enum iio_event_type type;
168 	enum iio_event_direction dir;
169 	unsigned long mask_separate;
170 	unsigned long mask_shared_by_type;
171 	unsigned long mask_shared_by_dir;
172 	unsigned long mask_shared_by_all;
173 };
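
/*
 * Illustrative sketch (hypothetical values): a rising-threshold event whose
 * enable switch is per channel while the threshold value is shared by all
 * channels of the same type could be described as
 *
 *	static const struct iio_event_spec foo_thresh_event = {
 *		.type = IIO_EV_TYPE_THRESH,
 *		.dir = IIO_EV_DIR_RISING,
 *		.mask_separate = BIT(IIO_EV_INFO_ENABLE),
 *		.mask_shared_by_type = BIT(IIO_EV_INFO_VALUE),
 *	};
 *
 * and referenced from a channel's event_spec/num_event_specs fields.
 */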
174 
175 /**
176  * struct iio_chan_spec - specification of a single channel
177  * @type:		What type of measurement is the channel making.
178  * @channel:		What number do we wish to assign the channel.
179  * @channel2:		If there is a second number for a differential
180  *			channel then this is it. If modified is set then the
181  *			value here specifies the modifier.
182  * @address:		Driver specific identifier.
183  * @scan_index:		Monotonic index to give ordering in scans when read
184  *			from a buffer.
185  * @scan_type:		struct describing the scan type
186  * @scan_type.sign:		's' or 'u' to specify signed or unsigned
187  * @scan_type.realbits:		Number of valid bits of data
188  * @scan_type.storagebits:	Realbits + padding
189  * @scan_type.shift:		Shift right by this before masking out
190  *				realbits.
191  * @scan_type.repeat:		Number of times real/storage bits repeats.
192  *				When the repeat element is more than 1, then
193  *				the type element in sysfs will show a repeat
194  *				value. Otherwise, the number of repetitions
195  *				is omitted.
196  * @scan_type.endianness:	little or big endian
197  * @info_mask_separate: What information is to be exported that is specific to
198  *			this channel.
199  * @info_mask_separate_available: What availability information is to be
200  *			exported that is specific to this channel.
201  * @info_mask_shared_by_type: What information is to be exported that is shared
202  *			by all channels of the same type.
203  * @info_mask_shared_by_type_available: What availability information is to be
204  *			exported that is shared by all channels of the same
205  *			type.
206  * @info_mask_shared_by_dir: What information is to be exported that is shared
207  *			by all channels of the same direction.
208  * @info_mask_shared_by_dir_available: What availability information is to be
209  *			exported that is shared by all channels of the same
210  *			direction.
211  * @info_mask_shared_by_all: What information is to be exported that is shared
212  *			by all channels.
213  * @info_mask_shared_by_all_available: What availability information is to be
214  *			exported that is shared by all channels.
215  * @event_spec:		Array of events which should be registered for this
216  *			channel.
217  * @num_event_specs:	Size of the event_spec array.
218  * @ext_info:		Array of extended info attributes for this channel.
219  *			The array is NULL terminated, the last element should
220  *			have its name field set to NULL.
221  * @extend_name:	Allows labeling of channel attributes with an
222  *			informative name. Note this has no effect on event
223  *			codes etc., unlike modifiers.
224  * @datasheet_name:	A name used in in-kernel mapping of channels. It should
225  *			correspond to the first name that the channel is referred
226  *			to by in the datasheet (e.g. IND), or the nearest
227  *			possible compound name (e.g. IND-INC).
228  * @modified:		Does a modifier apply to this channel. What these are
229  *			depends on the channel type.  Modifier is set in
230  *			channel2. Examples are IIO_MOD_X for axial sensors about
231  *			the 'x' axis.
232  * @indexed:		Specify the channel has a numerical index. If not,
233  *			the channel index number will be suppressed for sysfs
234  *			attributes but not for event codes.
235  * @output:		Channel is output.
236  * @differential:	Channel is differential.
237  */
238 struct iio_chan_spec {
239 	enum iio_chan_type	type;
240 	int			channel;
241 	int			channel2;
242 	unsigned long		address;
243 	int			scan_index;
244 	struct {
245 		char	sign;
246 		u8	realbits;
247 		u8	storagebits;
248 		u8	shift;
249 		u8	repeat;
250 		enum iio_endian endianness;
251 	} scan_type;
252 	long			info_mask_separate;
253 	long			info_mask_separate_available;
254 	long			info_mask_shared_by_type;
255 	long			info_mask_shared_by_type_available;
256 	long			info_mask_shared_by_dir;
257 	long			info_mask_shared_by_dir_available;
258 	long			info_mask_shared_by_all;
259 	long			info_mask_shared_by_all_available;
260 	const struct iio_event_spec *event_spec;
261 	unsigned int		num_event_specs;
262 	const struct iio_chan_spec_ext_info *ext_info;
263 	const char		*extend_name;
264 	const char		*datasheet_name;
265 	unsigned		modified:1;
266 	unsigned		indexed:1;
267 	unsigned		output:1;
268 	unsigned		differential:1;
269 };
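
/*
 * Illustrative sketch (hypothetical values): a single-ended, indexed ADC
 * voltage channel with a per-channel raw value, a scale shared by all voltage
 * channels, and 12-bit big-endian samples stored in 16 bits might look like
 *
 *	{
 *		.type = IIO_VOLTAGE,
 *		.indexed = 1,
 *		.channel = 0,
 *		.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
 *		.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
 *		.scan_index = 0,
 *		.scan_type = {
 *			.sign = 'u',
 *			.realbits = 12,
 *			.storagebits = 16,
 *			.endianness = IIO_BE,
 *		},
 *	}
 *
 * which results in in_voltage0_raw and in_voltage_scale sysfs attributes.
 */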
270 
271 
272 /**
273  * iio_channel_has_info() - Checks whether a channel supports an info attribute
274  * @chan: The channel to be queried
275  * @type: Type of the info attribute to be checked
276  *
277  * Returns true if the channel supports reporting values for the given info
278  * attribute type, false otherwise.
279  */
280 static inline bool iio_channel_has_info(const struct iio_chan_spec *chan,
281 	enum iio_chan_info_enum type)
282 {
283 	return (chan->info_mask_separate & BIT(type)) |
284 		(chan->info_mask_shared_by_type & BIT(type)) |
285 		(chan->info_mask_shared_by_dir & BIT(type)) |
286 		(chan->info_mask_shared_by_all & BIT(type));
287 }
288 
289 /**
290  * iio_channel_has_available() - Checks if a channel has an available attribute
291  * @chan: The channel to be queried
292  * @type: Type of the available attribute to be checked
293  *
294  * Returns true if the channel supports reporting available values for the
295  * given attribute type, false otherwise.
296  */
297 static inline bool iio_channel_has_available(const struct iio_chan_spec *chan,
298 					     enum iio_chan_info_enum type)
299 {
300 	return (chan->info_mask_separate_available & BIT(type)) |
301 		(chan->info_mask_shared_by_type_available & BIT(type)) |
302 		(chan->info_mask_shared_by_dir_available & BIT(type)) |
303 		(chan->info_mask_shared_by_all_available & BIT(type));
304 }
305 
306 #define IIO_CHAN_SOFT_TIMESTAMP(_si) {					\
307 	.type = IIO_TIMESTAMP,						\
308 	.channel = -1,							\
309 	.scan_index = _si,						\
310 	.scan_type = {							\
311 		.sign = 's',						\
312 		.realbits = 64,					\
313 		.storagebits = 64,					\
314 		},							\
315 }
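
/*
 * A buffered driver commonly terminates its channel table with a software
 * timestamp channel so captured scans carry a timestamp. A minimal sketch
 * (the first entry stands in for a real, hypothetical channel definition):
 *
 *	static const struct iio_chan_spec foo_channels[] = {
 *		{ ... data channel with .scan_index = 0 ... },
 *		IIO_CHAN_SOFT_TIMESTAMP(1),
 *	};
 *
 * The timestamp conventionally takes the highest scan_index so it ends up at
 * the end of each scan.
 */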
316 
317 s64 iio_get_time_ns(const struct iio_dev *indio_dev);
318 
319 /*
320  * Device operating modes
321  * @INDIO_DIRECT_MODE: There is an access to either:
322  * a) The last single value available for devices that do not provide
323  *    on-demand reads.
324  * b) A new value after performing an on-demand read otherwise.
325  * On most devices, this is a single-shot read. On some devices with data
326  * streams without an 'on-demand' function, this might also be the 'last value'
327  * feature. Above all, this mode internally means that we are not in any of the
328  * other modes, and sysfs reads should work.
329  * Device drivers should inform the core if they support this mode.
330  * @INDIO_BUFFER_TRIGGERED: Common mode when dealing with kfifo buffers.
331  * It indicates that an explicit trigger is required. This requests the core to
332  * attach a poll function when enabling the buffer, which is indicated by the
333  * _TRIGGERED suffix.
334  * The core will ensure this mode is set when registering a triggered buffer
335  * with iio_triggered_buffer_setup().
336  * @INDIO_BUFFER_SOFTWARE: Another kfifo buffer mode, but not event triggered.
337  * No poll function can be attached because there is no trigger infrastructure
338  * we can use to cause capture. There is a kfifo that the driver will fill, but
339  * not "only one scan at a time". Typically, hardware will have a buffer that
340  * can hold multiple scans. Software may read one or more scans at a time
341  * and push the available data to a kfifo. This means the core will not attach
342  * any poll function when enabling the buffer.
343  * The core will ensure this mode is set when registering a simple kfifo buffer
344  * with devm_iio_kfifo_buffer_setup().
345  * @INDIO_BUFFER_HARDWARE: For specific hardware; if unsure, do not use this mode.
346  * Same as above but this time the buffer is not a kfifo where we have direct
347  * access to the data. Instead, the consumer driver must access the data through
348  * non software visible channels (or DMA when there is no demux possible in
349  * software)
350  * The core will ensure this mode is set when registering a dmaengine buffer
351  * with devm_iio_dmaengine_buffer_setup().
352  * @INDIO_EVENT_TRIGGERED: Very unusual mode.
353  * Triggers usually refer to an external event which will start data capture.
354  * Here it is rather the opposite: a particular state of the data might
355  * produce an event notification instead. We don't necessarily
356  * have access to the data itself, but to the event produced. For example, this
357  * can be a threshold detector. The internal path of this mode is very close to
358  * the INDIO_BUFFER_TRIGGERED mode.
359  * The core will ensure this mode is set when registering a triggered event.
360  * @INDIO_HARDWARE_TRIGGERED: Very unusual mode.
361  * Here, triggers can result in data capture and can be routed to multiple
362  * hardware components, which makes them close to regular triggers in the way
363  * they must be managed by the core, but without the entire interrupts/poll
364  * functions burden. Interrupts are irrelevant as the data flow is hardware
365  * mediated and distributed.
366  */
367 #define INDIO_DIRECT_MODE		0x01
368 #define INDIO_BUFFER_TRIGGERED		0x02
369 #define INDIO_BUFFER_SOFTWARE		0x04
370 #define INDIO_BUFFER_HARDWARE		0x08
371 #define INDIO_EVENT_TRIGGERED		0x10
372 #define INDIO_HARDWARE_TRIGGERED	0x20
373 
374 #define INDIO_ALL_BUFFER_MODES					\
375 	(INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE | INDIO_BUFFER_SOFTWARE)
376 
377 #define INDIO_ALL_TRIGGERED_MODES	\
378 	(INDIO_BUFFER_TRIGGERED		\
379 	 | INDIO_EVENT_TRIGGERED	\
380 	 | INDIO_HARDWARE_TRIGGERED)
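
/*
 * As a rough example of how drivers use these flags, a device that only
 * supports polled (sysfs) reads would typically advertise
 *
 *	indio_dev->modes = INDIO_DIRECT_MODE;
 *
 * while the buffered modes are normally set by the corresponding buffer setup
 * helpers named in the comment above rather than by hand.
 */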
381 
382 #define INDIO_MAX_RAW_ELEMENTS		4
383 
384 struct iio_trigger; /* forward declaration */
385 
386 /**
387  * struct iio_info - constant information about device
388  * @event_attrs:	event control attributes
389  * @attrs:		general purpose device attributes
390  * @read_raw:		function to request a value from the device.
391  *			mask specifies which value. Note 0 means a reading of
392  *			the channel in question.  Return value will specify the
393  *			type of value returned by the device. val and val2 will
394  *			contain the elements making up the returned value.
395  * @read_raw_multi:	function to return values from the device.
396  *			mask specifies which value. Note 0 means a reading of
397  *			the channel in question.  Return value will specify the
398  *			type of value returned by the device. vals pointer
399  *			contains the elements making up the returned value.
400  *			max_len specifies maximum number of elements
401  *			vals pointer can contain. val_len is used to return
402  *			length of valid elements in vals.
403  * @read_avail:		function to return the available values from the device.
404  *			mask specifies which value. Note 0 means the available
405  *			values for the channel in question.  Return value
406  *			specifies whether an IIO_AVAIL_LIST or an IIO_AVAIL_RANGE
407  *			is returned in vals. The type of the vals is returned in
408  *			type and the number of vals is returned in length. For
409  *			ranges, there are always three vals returned; min, step
410  *			ranges, there are always three vals returned: min, step
411  * @write_raw:		function to write a value to the device.
412  *			Parameters are the same as for read_raw.
413  * @read_label:		function to request label name for a specified label,
414  *			for better channel identification.
415  * @write_raw_get_fmt:	callback function to query the expected
416  *			format/precision. If not set by the driver, write_raw
417  *			returns IIO_VAL_INT_PLUS_MICRO.
418  * @read_event_config:	find out if the event is enabled.
419  * @write_event_config:	set if the event is enabled.
420  * @read_event_value:	read a configuration value associated with the event.
421  * @write_event_value:	write a configuration value for the event.
422  * @validate_trigger:	function to validate the trigger when the
423  *			current trigger gets changed.
424  * @update_scan_mode:	function to configure device and scan buffer when
425  *			channels have changed
426  * @debugfs_reg_access:	function to read or write register value of device
427  * @fwnode_xlate:	fwnode based function pointer to obtain channel
428  *			specifier index. When #iio-cells is greater than '0',
429  *			the driver could provide a custom fwnode_xlate
430  *			function that reads the *iiospec* reference
431  *			arguments and returns the appropriate index in the
432  *			registered IIO channels array. This replaces the
433  *			former OF-only of_xlate callback.
434  * @hwfifo_set_watermark: function pointer to set the current hardware
435  *			fifo watermark level; see hwfifo_* entries in
436  *			Documentation/ABI/testing/sysfs-bus-iio for details on
437  *			how the hardware fifo operates
438  * @hwfifo_flush_to_buffer: function pointer to flush the samples stored
439  *			in the hardware fifo to the device buffer. The driver
440  *			should not flush more than count samples. The function
441  *			must return the number of samples flushed, 0 if no
442  *			samples were flushed or a negative integer if no samples
443  *			were flushed and there was an error.
444  **/
445 struct iio_info {
446 	const struct attribute_group	*event_attrs;
447 	const struct attribute_group	*attrs;
448 
449 	int (*read_raw)(struct iio_dev *indio_dev,
450 			struct iio_chan_spec const *chan,
451 			int *val,
452 			int *val2,
453 			long mask);
454 
455 	int (*read_raw_multi)(struct iio_dev *indio_dev,
456 			struct iio_chan_spec const *chan,
457 			int max_len,
458 			int *vals,
459 			int *val_len,
460 			long mask);
461 
462 	int (*read_avail)(struct iio_dev *indio_dev,
463 			  struct iio_chan_spec const *chan,
464 			  const int **vals,
465 			  int *type,
466 			  int *length,
467 			  long mask);
468 
469 	int (*write_raw)(struct iio_dev *indio_dev,
470 			 struct iio_chan_spec const *chan,
471 			 int val,
472 			 int val2,
473 			 long mask);
474 
475 	int (*read_label)(struct iio_dev *indio_dev,
476 			 struct iio_chan_spec const *chan,
477 			 char *label);
478 
479 	int (*write_raw_get_fmt)(struct iio_dev *indio_dev,
480 			 struct iio_chan_spec const *chan,
481 			 long mask);
482 
483 	int (*read_event_config)(struct iio_dev *indio_dev,
484 				 const struct iio_chan_spec *chan,
485 				 enum iio_event_type type,
486 				 enum iio_event_direction dir);
487 
488 	int (*write_event_config)(struct iio_dev *indio_dev,
489 				  const struct iio_chan_spec *chan,
490 				  enum iio_event_type type,
491 				  enum iio_event_direction dir,
492 				  int state);
493 
494 	int (*read_event_value)(struct iio_dev *indio_dev,
495 				const struct iio_chan_spec *chan,
496 				enum iio_event_type type,
497 				enum iio_event_direction dir,
498 				enum iio_event_info info, int *val, int *val2);
499 
500 	int (*write_event_value)(struct iio_dev *indio_dev,
501 				 const struct iio_chan_spec *chan,
502 				 enum iio_event_type type,
503 				 enum iio_event_direction dir,
504 				 enum iio_event_info info, int val, int val2);
505 
506 	int (*validate_trigger)(struct iio_dev *indio_dev,
507 				struct iio_trigger *trig);
508 	int (*update_scan_mode)(struct iio_dev *indio_dev,
509 				const unsigned long *scan_mask);
510 	int (*debugfs_reg_access)(struct iio_dev *indio_dev,
511 				  unsigned reg, unsigned writeval,
512 				  unsigned *readval);
513 	int (*fwnode_xlate)(struct iio_dev *indio_dev,
514 			    const struct fwnode_reference_args *iiospec);
515 	int (*hwfifo_set_watermark)(struct iio_dev *indio_dev, unsigned val);
516 	int (*hwfifo_flush_to_buffer)(struct iio_dev *indio_dev,
517 				      unsigned count);
518 };
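
/*
 * Minimal sketch of wiring up struct iio_info (illustrative; foo_read_raw and
 * foo_read_sample are hypothetical):
 *
 *	static int foo_read_raw(struct iio_dev *indio_dev,
 *				struct iio_chan_spec const *chan,
 *				int *val, int *val2, long mask)
 *	{
 *		switch (mask) {
 *		case IIO_CHAN_INFO_RAW:
 *			*val = foo_read_sample(indio_dev, chan->channel);
 *			return IIO_VAL_INT;
 *		case IIO_CHAN_INFO_SCALE:
 *			*val = 1;
 *			*val2 = 250000;
 *			return IIO_VAL_INT_PLUS_MICRO;
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 *
 *	static const struct iio_info foo_info = {
 *		.read_raw = foo_read_raw,
 *	};
 *
 * &foo_info is then assigned to indio_dev->info before registration.
 */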
519 
520 /**
521  * struct iio_buffer_setup_ops - buffer setup related callbacks
522  * @preenable:		[DRIVER] function to run prior to marking buffer enabled
523  * @postenable:		[DRIVER] function to run after marking buffer enabled
524  * @predisable:		[DRIVER] function to run prior to marking buffer
525  *			disabled
526  * @postdisable:	[DRIVER] function to run after marking buffer disabled
527  * @validate_scan_mask: [DRIVER] function callback to check whether a given
528  *			scan mask is valid for the device.
529  */
530 struct iio_buffer_setup_ops {
531 	int (*preenable)(struct iio_dev *);
532 	int (*postenable)(struct iio_dev *);
533 	int (*predisable)(struct iio_dev *);
534 	int (*postdisable)(struct iio_dev *);
535 	bool (*validate_scan_mask)(struct iio_dev *indio_dev,
536 				   const unsigned long *scan_mask);
537 };
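
/*
 * Illustrative sketch (the foo_* helpers are hypothetical): drivers often use
 * these callbacks to move the hardware in and out of its streaming state
 * around buffer enable/disable:
 *
 *	static int foo_buffer_postenable(struct iio_dev *indio_dev)
 *	{
 *		return foo_start_sampling(iio_priv(indio_dev));
 *	}
 *
 *	static int foo_buffer_predisable(struct iio_dev *indio_dev)
 *	{
 *		return foo_stop_sampling(iio_priv(indio_dev));
 *	}
 *
 *	static const struct iio_buffer_setup_ops foo_buffer_setup_ops = {
 *		.postenable = foo_buffer_postenable,
 *		.predisable = foo_buffer_predisable,
 *	};
 */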
538 
539 /**
540  * struct iio_dev - industrial I/O device
541  * @modes:		[DRIVER] bitmask listing all the operating modes
542  *			supported by the IIO device. This list should be
543  *			initialized before registering the IIO device. It can
544  *			also be filled in by the IIO core, as a result of
545  *			enabling particular features in the driver
546  *			(see iio_triggered_event_setup()).
547  * @dev:		[DRIVER] device structure, should be assigned a parent
548  *			and owner
549  * @buffer:		[DRIVER] any buffer present
550  * @scan_bytes:		[INTERN] num bytes captured to be fed to buffer demux
551  * @available_scan_masks: [DRIVER] optional array of allowed bitmasks
552  * @masklength:		[INTERN] the length of the mask established from
553  *			channels
554  * @active_scan_mask:	[INTERN] union of all scan masks requested by buffers
555  * @scan_timestamp:	[INTERN] set if any buffers have requested timestamp
556  * @trig:		[INTERN] current device trigger (buffer modes)
557  * @pollfunc:		[DRIVER] function run on trigger being received
558  * @pollfunc_event:	[DRIVER] function run on events trigger being received
559  * @channels:		[DRIVER] channel specification structure table
560  * @num_channels:	[DRIVER] number of channels specified in @channels.
561  * @name:		[DRIVER] name of the device.
562  * @label:              [DRIVER] unique name to identify which device this is
563  * @info:		[DRIVER] callbacks and constant info from driver
564  * @setup_ops:		[DRIVER] callbacks to call before and after buffer
565  *			enable/disable
566  * @priv:		[DRIVER] reference to driver's private information
567  *			**MUST** be accessed **ONLY** via iio_priv() helper
568  */
569 struct iio_dev {
570 	int				modes;
571 	struct device			dev;
572 
573 	struct iio_buffer		*buffer;
574 	int				scan_bytes;
575 
576 	const unsigned long		*available_scan_masks;
577 	unsigned			masklength;
578 	const unsigned long		*active_scan_mask;
579 	bool				scan_timestamp;
580 	struct iio_trigger		*trig;
581 	struct iio_poll_func		*pollfunc;
582 	struct iio_poll_func		*pollfunc_event;
583 
584 	struct iio_chan_spec const	*channels;
585 	int				num_channels;
586 
587 	const char			*name;
588 	const char			*label;
589 	const struct iio_info		*info;
590 	const struct iio_buffer_setup_ops	*setup_ops;
591 
592 	void				*priv;
593 };
594 
595 int iio_device_id(struct iio_dev *indio_dev);
596 int iio_device_get_current_mode(struct iio_dev *indio_dev);
597 bool iio_buffer_enabled(struct iio_dev *indio_dev);
598 
599 const struct iio_chan_spec
600 *iio_find_channel_from_si(struct iio_dev *indio_dev, int si);
601 /**
602  * iio_device_register() - register a device with the IIO subsystem
603  * @indio_dev:		Device structure filled by the device driver
604  **/
605 #define iio_device_register(indio_dev) \
606 	__iio_device_register((indio_dev), THIS_MODULE)
607 int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod);
608 void iio_device_unregister(struct iio_dev *indio_dev);
609 /**
610  * devm_iio_device_register - Resource-managed iio_device_register()
611  * @dev:	Device to allocate iio_dev for
612  * @indio_dev:	Device structure filled by the device driver
613  *
614  * Managed iio_device_register.  The IIO device registered with this
615  * function is automatically unregistered on driver detach. This function
616  * calls iio_device_register() internally. Refer to that function for more
617  * information.
618  *
619  * RETURNS:
620  * 0 on success, negative error number on failure.
621  */
622 #define devm_iio_device_register(dev, indio_dev) \
623 	__devm_iio_device_register((dev), (indio_dev), THIS_MODULE)
624 int __devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev,
625 			       struct module *this_mod);
626 int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp);
627 int iio_device_claim_direct_mode(struct iio_dev *indio_dev);
628 void iio_device_release_direct_mode(struct iio_dev *indio_dev);
629 int iio_device_claim_buffer_mode(struct iio_dev *indio_dev);
630 void iio_device_release_buffer_mode(struct iio_dev *indio_dev);
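
/*
 * The claim/release helpers above let a driver make sure it is not racing
 * with buffered capture while performing a one-shot conversion. A minimal
 * sketch (foo_single_conversion is hypothetical):
 *
 *	ret = iio_device_claim_direct_mode(indio_dev);
 *	if (ret)
 *		return ret;
 *	ret = foo_single_conversion(indio_dev, chan, val);
 *	iio_device_release_direct_mode(indio_dev);
 *	return ret;
 *
 * iio_device_claim_direct_mode() fails with -EBUSY if a buffer is currently
 * enabled.
 */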
631 
632 extern struct bus_type iio_bus_type;
633 
634 /**
635  * iio_device_put() - reference counted deallocation of struct device
636  * @indio_dev: IIO device structure containing the device
637  **/
638 static inline void iio_device_put(struct iio_dev *indio_dev)
639 {
640 	if (indio_dev)
641 		put_device(&indio_dev->dev);
642 }
643 
644 clockid_t iio_device_get_clock(const struct iio_dev *indio_dev);
645 int iio_device_set_clock(struct iio_dev *indio_dev, clockid_t clock_id);
646 
647 /**
648  * dev_to_iio_dev() - Get IIO device struct from a device struct
649  * @dev: 		The device embedded in the IIO device
650  *
651  * Note: The device must be an IIO device, otherwise the result is undefined.
652  */
653 static inline struct iio_dev *dev_to_iio_dev(struct device *dev)
654 {
655 	return container_of(dev, struct iio_dev, dev);
656 }
657 
658 /**
659  * iio_device_get() - increment reference count for the device
660  * @indio_dev: 		IIO device structure
661  *
662  * Returns: The passed IIO device
663  **/
664 static inline struct iio_dev *iio_device_get(struct iio_dev *indio_dev)
665 {
666 	return indio_dev ? dev_to_iio_dev(get_device(&indio_dev->dev)) : NULL;
667 }
668 
669 /**
670  * iio_device_set_parent() - assign parent device to the IIO device object
671  * @indio_dev: 		IIO device structure
672  * @parent:		reference to parent device object
673  *
674  * This utility must be called between IIO device allocation
675  * (via devm_iio_device_alloc()) and IIO device registration
676  * (via iio_device_register() or devm_iio_device_register()).
677  * By default, the device allocation will also assign a parent device to
678  * the IIO device object. In cases where devm_iio_device_alloc() is used,
679  * sometimes the parent device must be different from the device used to
680  * manage the allocation.
681  * In that case, this helper should be used to change the parent, hence the
682  * requirement to call this between allocation & registration.
683  **/
684 static inline void iio_device_set_parent(struct iio_dev *indio_dev,
685 					 struct device *parent)
686 {
687 	indio_dev->dev.parent = parent;
688 }
689 
690 /**
691  * iio_device_set_drvdata() - Set device driver data
692  * @indio_dev: IIO device structure
693  * @data: Driver specific data
694  *
695  * Allows attaching an arbitrary pointer to an IIO device, which can later be
696  * retrieved by iio_device_get_drvdata().
697  */
698 static inline void iio_device_set_drvdata(struct iio_dev *indio_dev, void *data)
699 {
700 	dev_set_drvdata(&indio_dev->dev, data);
701 }
702 
703 /**
704  * iio_device_get_drvdata() - Get device driver data
705  * @indio_dev: IIO device structure
706  *
707  * Returns the data previously set with iio_device_set_drvdata()
708  */
709 static inline void *iio_device_get_drvdata(const struct iio_dev *indio_dev)
710 {
711 	return dev_get_drvdata(&indio_dev->dev);
712 }
713 
714 /*
715  * Used to ensure the iio_priv() structure is aligned to allow that structure
716  * to in turn include IIO_DMA_MINALIGN'd elements such as buffers which
717  * must not share cachelines with the rest of the structure, thus making
718  * them safe for use with non-coherent DMA.
719  */
720 #define IIO_DMA_MINALIGN ARCH_KMALLOC_MINALIGN
721 struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv);
722 
723 /* The information at the returned address is guaranteed to be cacheline aligned */
724 static inline void *iio_priv(const struct iio_dev *indio_dev)
725 {
726 	return indio_dev->priv;
727 }
728 
729 void iio_device_free(struct iio_dev *indio_dev);
730 struct iio_dev *devm_iio_device_alloc(struct device *parent, int sizeof_priv);
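
/*
 * Typical allocation pattern (illustrative; struct foo_state, foo_info and
 * foo_channels are hypothetical): the driver state is allocated together with
 * the struct iio_dev and later retrieved with iio_priv():
 *
 *	static int foo_probe(struct spi_device *spi)
 *	{
 *		struct iio_dev *indio_dev;
 *		struct foo_state *st;
 *
 *		indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
 *		if (!indio_dev)
 *			return -ENOMEM;
 *		st = iio_priv(indio_dev);
 *
 *		indio_dev->name = "foo";
 *		indio_dev->info = &foo_info;
 *		indio_dev->channels = foo_channels;
 *		indio_dev->num_channels = ARRAY_SIZE(foo_channels);
 *		indio_dev->modes = INDIO_DIRECT_MODE;
 *
 *		return devm_iio_device_register(&spi->dev, indio_dev);
 *	}
 */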
731 
732 #define devm_iio_trigger_alloc(parent, fmt, ...) \
733 	__devm_iio_trigger_alloc((parent), THIS_MODULE, (fmt), ##__VA_ARGS__)
734 __printf(3, 4)
735 struct iio_trigger *__devm_iio_trigger_alloc(struct device *parent,
736 					     struct module *this_mod,
737 					     const char *fmt, ...);
738 /**
739  * iio_get_debugfs_dentry() - helper function to get the debugfs_dentry
740  * @indio_dev:		IIO device structure for device
741  **/
742 #if defined(CONFIG_DEBUG_FS)
743 struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev);
744 #else
745 static inline struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev)
746 {
747 	return NULL;
748 }
749 #endif
750 
751 ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals);
752 
753 int iio_str_to_fixpoint(const char *str, int fract_mult, int *integer,
754 	int *fract);
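
/*
 * These helpers convert between the IIO fixed-point value representation and
 * strings. A sketch of formatting 2.5 from a custom attribute read callback:
 *
 *	int vals[] = { 2, 500000 };
 *
 *	return iio_format_value(buf, IIO_VAL_INT_PLUS_MICRO, ARRAY_SIZE(vals),
 *				vals);
 *
 * which emits "2.500000" (plus a trailing newline) into buf. Going the other
 * way, iio_str_to_fixpoint("2.5", 100000, &integer, &fract) splits the string
 * into integer = 2 and fract = 500000.
 */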
755 
756 /**
757  * IIO_DEGREE_TO_RAD() - Convert degree to rad
758  * @deg: A value in degree
759  *
760  * Returns the given value converted from degree to rad
761  */
762 #define IIO_DEGREE_TO_RAD(deg) (((deg) * 314159ULL + 9000000ULL) / 18000000ULL)
763 
764 /**
765  * IIO_RAD_TO_DEGREE() - Convert rad to degree
766  * @rad: A value in rad
767  *
768  * Returns the given value converted from rad to degree
769  */
770 #define IIO_RAD_TO_DEGREE(rad) \
771 	(((rad) * 18000000ULL + 314159ULL / 2) / 314159ULL)
772 
773 /**
774  * IIO_G_TO_M_S_2() - Convert g to meter / second**2
775  * @g: A value in g
776  *
777  * Returns the given value converted from g to meter / second**2
778  */
779 #define IIO_G_TO_M_S_2(g) ((g) * 980665ULL / 100000ULL)
780 
781 /**
782  * IIO_M_S_2_TO_G() - Convert meter / second**2 to g
783  * @ms2: A value in meter / second**2
784  *
785  * Returns the given value converted from meter / second**2 to g
786  */
787 #define IIO_M_S_2_TO_G(ms2) (((ms2) * 100000ULL + 980665ULL / 2) / 980665ULL)
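
/*
 * These conversions are pure integer arithmetic, so they are usually applied
 * to scaled values (e.g. micro-g in gives micro-m/s^2 out). As a worked
 * example, IIO_G_TO_M_S_2(61) evaluates to 598, since 61 * 980665 / 100000
 * truncates to 598; the other three macros add half the divisor first and
 * therefore round to nearest.
 */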
788 
789 #endif /* _INDUSTRIAL_IO_H_ */
790