xref: /linux/include/linux/iio/iio.h (revision c26f4fbd58375bd6ef74f95eb73d61762ad97c59)
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 
3 /* The industrial I/O core
4  *
5  * Copyright (c) 2008 Jonathan Cameron
6  */
7 #ifndef _INDUSTRIAL_IO_H_
8 #define _INDUSTRIAL_IO_H_
9 
10 #include <linux/align.h>
11 #include <linux/device.h>
12 #include <linux/cdev.h>
13 #include <linux/compiler_types.h>
14 #include <linux/minmax.h>
15 #include <linux/slab.h>
16 #include <linux/iio/types.h>
17 /* IIO TODO LIST */
18 /*
19  * Provide means of adjusting timer accuracy.
20  * Currently assumes nano seconds.
21  */
22 
23 struct fwnode_reference_args;
24 
/**
 * enum iio_shared_by - granularity at which an info attribute is shared
 * @IIO_SEPARATE:	one attribute instance per channel
 * @IIO_SHARED_BY_TYPE:	shared by all channels of the same type
 * @IIO_SHARED_BY_DIR:	shared by all channels of the same direction
 * @IIO_SHARED_BY_ALL:	shared by all channels of the device
 */
enum iio_shared_by {
	IIO_SEPARATE,
	IIO_SHARED_BY_TYPE,
	IIO_SHARED_BY_DIR,
	IIO_SHARED_BY_ALL
};
31 
/**
 * enum iio_endian - byte order of buffered channel data
 * @IIO_CPU:	native CPU byte order
 * @IIO_BE:	big endian
 * @IIO_LE:	little endian
 */
enum iio_endian {
	IIO_CPU,
	IIO_BE,
	IIO_LE,
};
37 
38 struct iio_chan_spec;
39 struct iio_dev;
40 
/**
 * struct iio_chan_spec_ext_info - Extended channel info attribute
 * @name:	Info attribute name
 * @shared:	Whether this attribute is shared between all channels
 *		(one of enum iio_shared_by).
 * @read:	Read callback for this info attribute, may be NULL.
 * @write:	Write callback for this info attribute, may be NULL.
 * @private:	Data private to the driver, passed back to @read/@write
 *		as their second argument.
 */
struct iio_chan_spec_ext_info {
	const char *name;
	enum iio_shared_by shared;
	ssize_t (*read)(struct iio_dev *, uintptr_t private,
			struct iio_chan_spec const *, char *buf);
	ssize_t (*write)(struct iio_dev *, uintptr_t private,
			 struct iio_chan_spec const *, const char *buf,
			 size_t len);
	uintptr_t private;
};
59 
/**
 * struct iio_enum - Enum channel info attribute
 * @items:	An array of strings.
 * @num_items:	Length of the item array.
 * @set:	Set callback function, may be NULL.
 * @get:	Get callback function, may be NULL.
 *
 * The iio_enum struct can be used to implement enum style channel attributes.
 * Enum style attributes are those which have a set of strings which map to
 * unsigned integer values. The IIO enum helper code takes care of mapping
 * between value and string as well as generating a "_available" file which
 * contains a list of all available items. The set callback will be called when
 * the attribute is updated. The last parameter is the index to the newly
 * activated item. The get callback will be used to query the currently active
 * item and is supposed to return the index for it.
 */
struct iio_enum {
	const char * const *items;
	unsigned int num_items;
	/* Called with the index of the newly selected item. */
	int (*set)(struct iio_dev *, const struct iio_chan_spec *, unsigned int);
	/* Expected to return the index of the currently active item. */
	int (*get)(struct iio_dev *, const struct iio_chan_spec *);
};
82 
/*
 * sysfs helpers matching the iio_chan_spec_ext_info read/write callback
 * signatures. They are installed as the .read/.write handlers by the
 * IIO_ENUM() and IIO_ENUM_AVAILABLE() initializers below, with @priv
 * carrying a pointer to the driver's struct iio_enum.
 */
ssize_t iio_enum_available_read(struct iio_dev *indio_dev,
	uintptr_t priv, const struct iio_chan_spec *chan, char *buf);
ssize_t iio_enum_read(struct iio_dev *indio_dev,
	uintptr_t priv, const struct iio_chan_spec *chan, char *buf);
ssize_t iio_enum_write(struct iio_dev *indio_dev,
	uintptr_t priv, const struct iio_chan_spec *chan, const char *buf,
	size_t len);
90 
/**
 * IIO_ENUM() - Initialize enum extended channel attribute
 * @_name:	Attribute name
 * @_shared:	Whether the attribute is shared between all channels
 *		(one of enum iio_shared_by)
 * @_e:		Pointer to an iio_enum struct
 *
 * Expands to a struct iio_chan_spec_ext_info initializer wiring up the
 * iio_enum_read()/iio_enum_write() helpers.
 * This should usually be used together with IIO_ENUM_AVAILABLE()
 */
#define IIO_ENUM(_name, _shared, _e) \
{ \
	.name = (_name), \
	.shared = (_shared), \
	.read = iio_enum_read, \
	.write = iio_enum_write, \
	.private = (uintptr_t)(_e), \
}
107 
/**
 * IIO_ENUM_AVAILABLE() - Initialize enum available extended channel attribute
 * @_name:	Attribute name ("_available" will be appended to the name)
 * @_shared:	Whether the attribute is shared between all channels
 *		(one of enum iio_shared_by)
 * @_e:		Pointer to an iio_enum struct
 *
 * Creates a read only attribute which lists all the available enum items in a
 * space separated list. This should usually be used together with IIO_ENUM()
 */
#define IIO_ENUM_AVAILABLE(_name, _shared, _e) \
{ \
	.name = (_name "_available"), \
	.shared = (_shared), \
	.read = iio_enum_available_read, \
	.private = (uintptr_t)(_e), \
}
124 
/**
 * struct iio_mount_matrix - iio mounting matrix
 * @rotation: 3 dimensional space rotation matrix defining sensor alignment with
 *            main hardware; the 3x3 matrix is stored as 9 string-encoded
 *            entries.
 */
struct iio_mount_matrix {
	const char *rotation[9];
};
133 
/* ext_info read helper used by IIO_MOUNT_MATRIX() below. */
ssize_t iio_show_mount_matrix(struct iio_dev *indio_dev, uintptr_t priv,
			      const struct iio_chan_spec *chan, char *buf);
/* Populate @matrix from the device's firmware description. */
int iio_read_mount_matrix(struct device *dev, struct iio_mount_matrix *matrix);

/* Driver-supplied accessor returning the mount matrix for a channel. */
typedef const struct iio_mount_matrix *
	(iio_get_mount_matrix_t)(const struct iio_dev *indio_dev,
				 const struct iio_chan_spec *chan);
141 
/**
 * IIO_MOUNT_MATRIX() - Initialize mount matrix extended channel attribute
 * @_shared:	Whether the attribute is shared between all channels
 *		(one of enum iio_shared_by)
 * @_get:	Pointer to an iio_get_mount_matrix_t accessor
 *
 * Expands to a struct iio_chan_spec_ext_info initializer for a read-only
 * "mount_matrix" attribute; @_get is stashed in .private for
 * iio_show_mount_matrix() to call back.
 */
#define IIO_MOUNT_MATRIX(_shared, _get) \
{ \
	.name = "mount_matrix", \
	.shared = (_shared), \
	.read = iio_show_mount_matrix, \
	.private = (uintptr_t)(_get), \
}
154 
/**
 * struct iio_event_spec - specification for a channel event
 * @type:		    Type of the event
 * @dir:		    Direction of the event
 * @mask_separate:	    Bit mask of enum iio_event_info values. Attributes
 *			    set in this mask will be registered per channel.
 * @mask_shared_by_type:    Bit mask of enum iio_event_info values. Attributes
 *			    set in this mask will be shared by channel type.
 * @mask_shared_by_dir:	    Bit mask of enum iio_event_info values. Attributes
 *			    set in this mask will be shared by channel type and
 *			    direction.
 * @mask_shared_by_all:	    Bit mask of enum iio_event_info values. Attributes
 *			    set in this mask will be shared by all channels.
 */
struct iio_event_spec {
	enum iio_event_type type;
	enum iio_event_direction dir;
	unsigned long mask_separate;
	unsigned long mask_shared_by_type;
	unsigned long mask_shared_by_dir;
	unsigned long mask_shared_by_all;
};
177 
/**
 * struct iio_scan_type - specification for channel data format in buffer
 * @sign:		's' or 'u' to specify signed or unsigned
 * @realbits:		Number of valid bits of data
 * @storagebits:	Realbits + padding
 * @shift:		Shift right by this before masking out realbits.
 * @repeat:		Number of times real/storage bits repeats. When the
 *			repeat element is more than 1, then the type element in
 *			sysfs will show a repeat value. Otherwise, the number
 *			of repetitions is omitted.
 * @endianness:		little or big endian (enum iio_endian)
 */
struct iio_scan_type {
	char	sign;
	u8	realbits;
	u8	storagebits;
	u8	shift;
	u8	repeat;
	enum iio_endian endianness;
};
198 
199 /**
200  * struct iio_chan_spec - specification of a single channel
201  * @type:		What type of measurement is the channel making.
202  * @channel:		What number do we wish to assign the channel.
203  * @channel2:		If there is a second number for a differential
204  *			channel then this is it. If modified is set then the
205  *			value here specifies the modifier.
206  * @address:		Driver specific identifier.
207  * @scan_index:		Monotonic index to give ordering in scans when read
208  *			from a buffer.
209  * @scan_type:		struct describing the scan type - mutually exclusive
210  *			with ext_scan_type.
211  * @ext_scan_type:	Used in rare cases where there is more than one scan
212  *			format for a channel. When this is used, the flag
213  *			has_ext_scan_type must be set and the driver must
214  *			implement get_current_scan_type in struct iio_info.
215  * @num_ext_scan_type:	Number of elements in ext_scan_type.
216  * @info_mask_separate: What information is to be exported that is specific to
217  *			this channel.
218  * @info_mask_separate_available: What availability information is to be
219  *			exported that is specific to this channel.
220  * @info_mask_shared_by_type: What information is to be exported that is shared
221  *			by all channels of the same type.
222  * @info_mask_shared_by_type_available: What availability information is to be
223  *			exported that is shared by all channels of the same
224  *			type.
225  * @info_mask_shared_by_dir: What information is to be exported that is shared
226  *			by all channels of the same direction.
227  * @info_mask_shared_by_dir_available: What availability information is to be
228  *			exported that is shared by all channels of the same
229  *			direction.
230  * @info_mask_shared_by_all: What information is to be exported that is shared
231  *			by all channels.
232  * @info_mask_shared_by_all_available: What availability information is to be
233  *			exported that is shared by all channels.
234  * @event_spec:		Array of events which should be registered for this
235  *			channel.
236  * @num_event_specs:	Size of the event_spec array.
237  * @ext_info:		Array of extended info attributes for this channel.
238  *			The array is NULL terminated, the last element should
239  *			have its name field set to NULL.
240  * @extend_name:	Allows labeling of channel attributes with an
241  *			informative name. Note this has no effect codes etc,
242  *			unlike modifiers.
243  *			This field is deprecated in favour of providing
244  *			iio_info->read_label() to override the label, which
245  *			unlike @extend_name does not affect sysfs filenames.
246  * @datasheet_name:	A name used in in-kernel mapping of channels. It should
247  *			correspond to the first name that the channel is referred
248  *			to by in the datasheet (e.g. IND), or the nearest
249  *			possible compound name (e.g. IND-INC).
250  * @modified:		Does a modifier apply to this channel. What these are
251  *			depends on the channel type.  Modifier is set in
252  *			channel2. Examples are IIO_MOD_X for axial sensors about
253  *			the 'x' axis.
254  * @indexed:		Specify the channel has a numerical index. If not,
255  *			the channel index number will be suppressed for sysfs
256  *			attributes but not for event codes.
257  * @output:		Channel is output.
258  * @differential:	Channel is differential.
259  * @has_ext_scan_type:	True if ext_scan_type is used instead of scan_type.
260  */
/* Per-field documentation is in the kernel-doc comment above. */
struct iio_chan_spec {
	enum iio_chan_type	type;
	int			channel;
	int			channel2;
	unsigned long		address;
	int			scan_index;
	/*
	 * Either a single fixed scan_type, or - when has_ext_scan_type is
	 * set - an array of possible formats selected at runtime via
	 * iio_info->get_current_scan_type().
	 */
	union {
		struct iio_scan_type scan_type;
		struct {
			const struct iio_scan_type *ext_scan_type;
			unsigned int num_ext_scan_type;
		};
	};
	long			info_mask_separate;
	long			info_mask_separate_available;
	long			info_mask_shared_by_type;
	long			info_mask_shared_by_type_available;
	long			info_mask_shared_by_dir;
	long			info_mask_shared_by_dir_available;
	long			info_mask_shared_by_all;
	long			info_mask_shared_by_all_available;
	const struct iio_event_spec *event_spec;
	unsigned int		num_event_specs;
	const struct iio_chan_spec_ext_info *ext_info;
	const char		*extend_name;
	const char		*datasheet_name;
	unsigned int		modified:1;
	unsigned int		indexed:1;
	unsigned int		output:1;
	unsigned int		differential:1;
	unsigned int		has_ext_scan_type:1;
};
293 
294 
295 /**
296  * iio_channel_has_info() - Checks whether a channel supports a info attribute
297  * @chan: The channel to be queried
298  * @type: Type of the info attribute to be checked
299  *
300  * Returns true if the channels supports reporting values for the given info
301  * attribute type, false otherwise.
302  */
iio_channel_has_info(const struct iio_chan_spec * chan,enum iio_chan_info_enum type)303 static inline bool iio_channel_has_info(const struct iio_chan_spec *chan,
304 	enum iio_chan_info_enum type)
305 {
306 	return (chan->info_mask_separate & BIT(type)) |
307 		(chan->info_mask_shared_by_type & BIT(type)) |
308 		(chan->info_mask_shared_by_dir & BIT(type)) |
309 		(chan->info_mask_shared_by_all & BIT(type));
310 }
311 
312 /**
313  * iio_channel_has_available() - Checks if a channel has an available attribute
314  * @chan: The channel to be queried
315  * @type: Type of the available attribute to be checked
316  *
317  * Returns true if the channel supports reporting available values for the
318  * given attribute type, false otherwise.
319  */
iio_channel_has_available(const struct iio_chan_spec * chan,enum iio_chan_info_enum type)320 static inline bool iio_channel_has_available(const struct iio_chan_spec *chan,
321 					     enum iio_chan_info_enum type)
322 {
323 	return (chan->info_mask_separate_available & BIT(type)) |
324 		(chan->info_mask_shared_by_type_available & BIT(type)) |
325 		(chan->info_mask_shared_by_dir_available & BIT(type)) |
326 		(chan->info_mask_shared_by_all_available & BIT(type));
327 }
328 
/*
 * IIO_CHAN_SOFT_TIMESTAMP() - initializer for a software timestamp channel
 * @_si: scan index to assign to the timestamp channel
 *
 * Declares the standard signed 64-bit timestamp channel (channel number -1)
 * used when timestamps are captured in software alongside buffered scans.
 */
#define IIO_CHAN_SOFT_TIMESTAMP(_si) {					\
	.type = IIO_TIMESTAMP,						\
	.channel = -1,							\
	.scan_index = (_si),						\
	.scan_type = {							\
		.sign = 's',						\
		.realbits = 64,						\
		.storagebits = 64,					\
	},								\
}
339 
340 s64 iio_get_time_ns(const struct iio_dev *indio_dev);
341 
342 /*
343  * Device operating modes
344  * @INDIO_DIRECT_MODE: There is an access to either:
345  * a) The last single value available for devices that do not provide
346  *    on-demand reads.
347  * b) A new value after performing an on-demand read otherwise.
348  * On most devices, this is a single-shot read. On some devices with data
349  * streams without an 'on-demand' function, this might also be the 'last value'
350  * feature. Above all, this mode internally means that we are not in any of the
351  * other modes, and sysfs reads should work.
352  * Device drivers should inform the core if they support this mode.
353  * @INDIO_BUFFER_TRIGGERED: Common mode when dealing with kfifo buffers.
354  * It indicates that an explicit trigger is required. This requests the core to
355  * attach a poll function when enabling the buffer, which is indicated by the
356  * _TRIGGERED suffix.
357  * The core will ensure this mode is set when registering a triggered buffer
358  * with iio_triggered_buffer_setup().
359  * @INDIO_BUFFER_SOFTWARE: Another kfifo buffer mode, but not event triggered.
360  * No poll function can be attached because there is no triggered infrastructure
361  * we can use to cause capture. There is a kfifo that the driver will fill, but
362  * not "only one scan at a time". Typically, hardware will have a buffer that
363  * can hold multiple scans. Software may read one or more scans at a single time
364  * and push the available data to a Kfifo. This means the core will not attach
365  * any poll function when enabling the buffer.
366  * The core will ensure this mode is set when registering a simple kfifo buffer
367  * with devm_iio_kfifo_buffer_setup().
368  * @INDIO_BUFFER_HARDWARE: For specific hardware, if unsure do not use this mode.
369  * Same as above but this time the buffer is not a kfifo where we have direct
370  * access to the data. Instead, the consumer driver must access the data through
371  * non software visible channels (or DMA when there is no demux possible in
372  * software)
373  * The core will ensure this mode is set when registering a dmaengine buffer
374  * with devm_iio_dmaengine_buffer_setup().
375  * @INDIO_EVENT_TRIGGERED: Very unusual mode.
376  * Triggers usually refer to an external event which will start data capture.
 377  * Here it is kind of the opposite: a particular state of the data might
 378  * produce an event, and that event is what gets captured. We don't necessarily
379  * have access to the data itself, but to the event produced. For example, this
380  * can be a threshold detector. The internal path of this mode is very close to
381  * the INDIO_BUFFER_TRIGGERED mode.
382  * The core will ensure this mode is set when registering a triggered event.
383  * @INDIO_HARDWARE_TRIGGERED: Very unusual mode.
384  * Here, triggers can result in data capture and can be routed to multiple
385  * hardware components, which make them close to regular triggers in the way
386  * they must be managed by the core, but without the entire interrupts/poll
387  * functions burden. Interrupts are irrelevant as the data flow is hardware
388  * mediated and distributed.
389  */
/* Operating-mode bits for iio_dev.modes; see the description above. */
#define INDIO_DIRECT_MODE		0x01	/* sysfs reads, no buffering */
#define INDIO_BUFFER_TRIGGERED		0x02	/* kfifo buffer + explicit trigger */
#define INDIO_BUFFER_SOFTWARE		0x04	/* kfifo buffer, no trigger */
#define INDIO_BUFFER_HARDWARE		0x08	/* hardware-resident buffer */
#define INDIO_EVENT_TRIGGERED		0x10	/* triggered events, not data */
#define INDIO_HARDWARE_TRIGGERED	0x20	/* hardware-mediated triggers */

/* All modes that involve a buffer. */
#define INDIO_ALL_BUFFER_MODES					\
	(INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE | INDIO_BUFFER_SOFTWARE)

/* All modes that involve a trigger. */
#define INDIO_ALL_TRIGGERED_MODES	\
	(INDIO_BUFFER_TRIGGERED		\
	 | INDIO_EVENT_TRIGGERED	\
	 | INDIO_HARDWARE_TRIGGERED)
404 
/* Upper bound on elements for multi-value raw reads (see read_raw_multi). */
#define INDIO_MAX_RAW_ELEMENTS		4

/* Fixed-point value split into an integer part and a micro-units part. */
struct iio_val_int_plus_micro {
	int integer;
	int micro;
};
411 
412 struct iio_trigger; /* forward declaration */
413 
414 /**
415  * struct iio_info - constant information about device
416  * @event_attrs:	event control attributes
417  * @attrs:		general purpose device attributes
418  * @read_raw:		function to request a value from the device.
419  *			mask specifies which value. Note 0 means a reading of
420  *			the channel in question.  Return value will specify the
421  *			type of value returned by the device. val and val2 will
422  *			contain the elements making up the returned value.
423  * @read_raw_multi:	function to return values from the device.
424  *			mask specifies which value. Note 0 means a reading of
425  *			the channel in question.  Return value will specify the
426  *			type of value returned by the device. vals pointer
427  *			contain the elements making up the returned value.
428  *			max_len specifies maximum number of elements
429  *			vals pointer can contain. val_len is used to return
430  *			length of valid elements in vals.
431  * @read_avail:		function to return the available values from the device.
432  *			mask specifies which value. Note 0 means the available
433  *			values for the channel in question.  Return value
434  *			specifies if a IIO_AVAIL_LIST or a IIO_AVAIL_RANGE is
435  *			returned in vals. The type of the vals are returned in
436  *			type and the number of vals is returned in length. For
437  *			ranges, there are always three vals returned; min, step
438  *			and max. For lists, all possible values are enumerated.
439  * @write_raw:		function to write a value to the device.
440  *			Parameters are the same as for read_raw.
441  * @read_label:		function to request label name for a specified label,
442  *			for better channel identification.
443  * @write_raw_get_fmt:	callback function to query the expected
444  *			format/precision. If not set by the driver, write_raw
445  *			returns IIO_VAL_INT_PLUS_MICRO.
446  * @read_event_config:	find out if the event is enabled.
447  * @write_event_config:	set if the event is enabled.
448  * @read_event_value:	read a configuration value associated with the event.
449  * @write_event_value:	write a configuration value for the event.
450  * @read_event_label:	function to request label name for a specified label,
451  *			for better event identification.
452  * @validate_trigger:	function to validate the trigger when the
453  *			current trigger gets changed.
454  * @get_current_scan_type: must be implemented by drivers that use ext_scan_type
455  *			in the channel spec to return the index of the currently
456  *			active ext_scan type for a channel.
457  * @update_scan_mode:	function to configure device and scan buffer when
458  *			channels have changed
459  * @debugfs_reg_access:	function to read or write register value of device
460  * @fwnode_xlate:	fwnode based function pointer to obtain channel specifier index.
461  * @hwfifo_set_watermark: function pointer to set the current hardware
462  *			fifo watermark level; see hwfifo_* entries in
463  *			Documentation/ABI/testing/sysfs-bus-iio for details on
464  *			how the hardware fifo operates
465  * @hwfifo_flush_to_buffer: function pointer to flush the samples stored
466  *			in the hardware fifo to the device buffer. The driver
467  *			should not flush more than count samples. The function
468  *			must return the number of samples flushed, 0 if no
469  *			samples were flushed or a negative integer if no samples
470  *			were flushed and there was an error.
471  **/
/*
 * Constant, driver-provided callbacks and attributes; the kernel-doc comment
 * above documents each callback's full contract.
 */
struct iio_info {
	const struct attribute_group	*event_attrs;
	const struct attribute_group	*attrs;

	/* Single value read; mask selects which info item, 0 = raw reading. */
	int (*read_raw)(struct iio_dev *indio_dev,
			struct iio_chan_spec const *chan,
			int *val,
			int *val2,
			long mask);

	/* Multi-value read: at most max_len values, actual count in *val_len. */
	int (*read_raw_multi)(struct iio_dev *indio_dev,
			struct iio_chan_spec const *chan,
			int max_len,
			int *vals,
			int *val_len,
			long mask);

	/* Enumerate available values (IIO_AVAIL_LIST or IIO_AVAIL_RANGE). */
	int (*read_avail)(struct iio_dev *indio_dev,
			  struct iio_chan_spec const *chan,
			  const int **vals,
			  int *type,
			  int *length,
			  long mask);

	/* Single value write; parameters mirror read_raw. */
	int (*write_raw)(struct iio_dev *indio_dev,
			 struct iio_chan_spec const *chan,
			 int val,
			 int val2,
			 long mask);

	/* Fill in a human readable label for the channel. */
	int (*read_label)(struct iio_dev *indio_dev,
			 struct iio_chan_spec const *chan,
			 char *label);

	/* Expected value format for write_raw; defaults to INT_PLUS_MICRO. */
	int (*write_raw_get_fmt)(struct iio_dev *indio_dev,
			 struct iio_chan_spec const *chan,
			 long mask);

	/* Event enable state query / update. */
	int (*read_event_config)(struct iio_dev *indio_dev,
				 const struct iio_chan_spec *chan,
				 enum iio_event_type type,
				 enum iio_event_direction dir);

	int (*write_event_config)(struct iio_dev *indio_dev,
				  const struct iio_chan_spec *chan,
				  enum iio_event_type type,
				  enum iio_event_direction dir,
				  bool state);

	/* Event configuration value read / write. */
	int (*read_event_value)(struct iio_dev *indio_dev,
				const struct iio_chan_spec *chan,
				enum iio_event_type type,
				enum iio_event_direction dir,
				enum iio_event_info info, int *val, int *val2);

	int (*write_event_value)(struct iio_dev *indio_dev,
				 const struct iio_chan_spec *chan,
				 enum iio_event_type type,
				 enum iio_event_direction dir,
				 enum iio_event_info info, int val, int val2);

	/* Fill in a human readable label for the event. */
	int (*read_event_label)(struct iio_dev *indio_dev,
				struct iio_chan_spec const *chan,
				enum iio_event_type type,
				enum iio_event_direction dir,
				char *label);

	int (*validate_trigger)(struct iio_dev *indio_dev,
				struct iio_trigger *trig);
	/* Required when a channel uses ext_scan_type; returns active index. */
	int (*get_current_scan_type)(const struct iio_dev *indio_dev,
				     const struct iio_chan_spec *chan);
	int (*update_scan_mode)(struct iio_dev *indio_dev,
				const unsigned long *scan_mask);
	/* Debugfs register access: read when readval != NULL, else write. */
	int (*debugfs_reg_access)(struct iio_dev *indio_dev,
				  unsigned int reg, unsigned int writeval,
				  unsigned int *readval);
	int (*fwnode_xlate)(struct iio_dev *indio_dev,
			    const struct fwnode_reference_args *iiospec);
	int (*hwfifo_set_watermark)(struct iio_dev *indio_dev, unsigned int val);
	/* Returns number of samples flushed, 0 if none, or negative errno. */
	int (*hwfifo_flush_to_buffer)(struct iio_dev *indio_dev,
				      unsigned int count);
};
554 
/**
 * struct iio_buffer_setup_ops - buffer setup related callbacks
 * @preenable:		[DRIVER] function to run prior to marking buffer enabled
 * @postenable:		[DRIVER] function to run after marking buffer enabled
 * @predisable:		[DRIVER] function to run prior to marking buffer
 *			disabled
 * @postdisable:	[DRIVER] function to run after marking buffer disabled
 * @validate_scan_mask: [DRIVER] function callback to check whether a given
 *			scan mask is valid for the device.
 *
 * All callbacks are optional (may be NULL).
 */
struct iio_buffer_setup_ops {
	int (*preenable)(struct iio_dev *);
	int (*postenable)(struct iio_dev *);
	int (*predisable)(struct iio_dev *);
	int (*postdisable)(struct iio_dev *);
	bool (*validate_scan_mask)(struct iio_dev *indio_dev,
				   const unsigned long *scan_mask);
};
573 
574 /**
575  * struct iio_dev - industrial I/O device
576  * @modes:		[DRIVER] bitmask listing all the operating modes
577  *			supported by the IIO device. This list should be
578  *			initialized before registering the IIO device. It can
579  *			also be filed up by the IIO core, as a result of
580  *			enabling particular features in the driver
581  *			(see iio_triggered_event_setup()).
582  * @dev:		[DRIVER] device structure, should be assigned a parent
583  *			and owner
584  * @buffer:		[DRIVER] any buffer present
585  * @scan_bytes:		[INTERN] num bytes captured to be fed to buffer demux
586  * @available_scan_masks: [DRIVER] optional array of allowed bitmasks. Sort the
587  *			   array in order of preference, the most preferred
588  *			   masks first.
589  * @masklength:		[INTERN] the length of the mask established from
590  *			channels
591  * @active_scan_mask:	[INTERN] union of all scan masks requested by buffers
592  * @scan_timestamp:	[INTERN] set if any buffers have requested timestamp
593  * @trig:		[INTERN] current device trigger (buffer modes)
594  * @pollfunc:		[DRIVER] function run on trigger being received
595  * @pollfunc_event:	[DRIVER] function run on events trigger being received
596  * @channels:		[DRIVER] channel specification structure table
597  * @num_channels:	[DRIVER] number of channels specified in @channels.
598  * @name:		[DRIVER] name of the device.
599  * @label:              [DRIVER] unique name to identify which device this is
600  * @info:		[DRIVER] callbacks and constant info from driver
601  * @setup_ops:		[DRIVER] callbacks to call before and after buffer
602  *			enable/disable
603  * @priv:		[DRIVER] reference to driver's private information
604  *			**MUST** be accessed **ONLY** via iio_priv() helper
605  */
/* Per-field documentation is in the kernel-doc comment above. */
struct iio_dev {
	int				modes;
	struct device			dev;

	struct iio_buffer		*buffer;
	int				scan_bytes;

	const unsigned long		*available_scan_masks;
	/* __private fields are core-internal; drivers must not touch them. */
	unsigned int			__private masklength;
	const unsigned long		*active_scan_mask;
	bool				__private scan_timestamp;
	struct iio_trigger		*trig;
	struct iio_poll_func		*pollfunc;
	struct iio_poll_func		*pollfunc_event;

	struct iio_chan_spec const	*channels;
	int				num_channels;

	const char			*name;
	const char			*label;
	const struct iio_info		*info;
	const struct iio_buffer_setup_ops	*setup_ops;

	/* Driver private data; access only via the iio_priv() helper. */
	void				*__private priv;
};
631 
632 int iio_device_id(struct iio_dev *indio_dev);
633 int iio_device_get_current_mode(struct iio_dev *indio_dev);
634 bool iio_buffer_enabled(struct iio_dev *indio_dev);
635 
636 const struct iio_chan_spec
637 *iio_find_channel_from_si(struct iio_dev *indio_dev, int si);
/**
 * iio_device_register() - register a device with the IIO subsystem
 * @indio_dev:		Device structure filled by the device driver
 *
 * Convenience wrapper passing THIS_MODULE as the owning module.
 **/
#define iio_device_register(indio_dev) \
	__iio_device_register((indio_dev), THIS_MODULE)
644 int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod);
645 void iio_device_unregister(struct iio_dev *indio_dev);
/**
 * devm_iio_device_register - Resource-managed iio_device_register()
 * @dev:	Device to allocate iio_dev for
 * @indio_dev:	Device structure filled by the device driver
 *
 * Managed iio_device_register.  The IIO device registered with this
 * function is automatically unregistered on driver detach. This function
 * calls iio_device_register() internally. Refer to that function for more
 * information.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
 */
#define devm_iio_device_register(dev, indio_dev) \
	__devm_iio_device_register((dev), (indio_dev), THIS_MODULE)
661 int __devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev,
662 			       struct module *this_mod);
663 int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp);
664 bool __iio_device_claim_direct(struct iio_dev *indio_dev);
665 void __iio_device_release_direct(struct iio_dev *indio_dev);
666 
667 /*
668  * Helper functions that allow claim and release of direct mode
669  * in a fashion that doesn't generate many false positives from sparse.
670  * Note this must remain static inline in the header so that sparse
671  * can see the __acquire() marking. Revisit when sparse supports
672  * __cond_acquires()
673  */
iio_device_claim_direct(struct iio_dev * indio_dev)674 static inline bool iio_device_claim_direct(struct iio_dev *indio_dev)
675 {
676 	if (!__iio_device_claim_direct(indio_dev))
677 		return false;
678 
679 	__acquire(iio_dev);
680 
681 	return true;
682 }
683 
/* Release a claim previously obtained with iio_device_claim_direct(). */
static inline void iio_device_release_direct(struct iio_dev *indio_dev)
{
	__iio_device_release_direct(indio_dev);
	__release(indio_dev);
}
689 
690 int iio_device_claim_buffer_mode(struct iio_dev *indio_dev);
691 void iio_device_release_buffer_mode(struct iio_dev *indio_dev);
692 
693 extern const struct bus_type iio_bus_type;
694 
/**
 * iio_device_put() - reference counted deallocation of struct device
 * @indio_dev: IIO device structure containing the device
 *
 * Safe to call with a NULL @indio_dev (no-op).
 **/
static inline void iio_device_put(struct iio_dev *indio_dev)
{
	if (indio_dev)
		put_device(&indio_dev->dev);
}
704 
705 clockid_t iio_device_get_clock(const struct iio_dev *indio_dev);
706 int iio_device_set_clock(struct iio_dev *indio_dev, clockid_t clock_id);
707 
/**
 * dev_to_iio_dev() - Get IIO device struct from a device struct
 * @dev:		The device embedded in the IIO device
 *
 * Note: The device must be a IIO device, otherwise the result is undefined.
 */
static inline struct iio_dev *dev_to_iio_dev(struct device *dev)
{
	return container_of(dev, struct iio_dev, dev);
}
718 
719 /**
720  * iio_device_get() - increment reference count for the device
721  * @indio_dev: 		IIO device structure
722  *
723  * Returns: The passed IIO device
724  **/
iio_device_get(struct iio_dev * indio_dev)725 static inline struct iio_dev *iio_device_get(struct iio_dev *indio_dev)
726 {
727 	return indio_dev ? dev_to_iio_dev(get_device(&indio_dev->dev)) : NULL;
728 }
729 
/**
 * iio_device_set_parent() - assign parent device to the IIO device object
 * @indio_dev:		IIO device structure
 * @parent:		reference to parent device object
 *
 * This utility must be called between IIO device allocation
 * (via devm_iio_device_alloc()) & IIO device registration
 * (via iio_device_register() or devm_iio_device_register()).
 * By default, the device allocation will also assign a parent device to
 * the IIO device object. In cases where devm_iio_device_alloc() is used,
 * sometimes the parent device must be different than the device used to
 * manage the allocation.
 * In that case, this helper should be used to change the parent, hence the
 * requirement to call this between allocation & registration.
 **/
static inline void iio_device_set_parent(struct iio_dev *indio_dev,
					 struct device *parent)
{
	indio_dev->dev.parent = parent;
}
750 
/**
 * iio_device_set_drvdata() - Set device driver data
 * @indio_dev: IIO device structure
 * @data: Driver specific data
 *
 * Allows to attach an arbitrary pointer to an IIO device, which can later be
 * retrieved by iio_device_get_drvdata().
 */
static inline void iio_device_set_drvdata(struct iio_dev *indio_dev, void *data)
{
	dev_set_drvdata(&indio_dev->dev, data);
}
763 
/**
 * iio_device_get_drvdata() - Get device driver data
 * @indio_dev: IIO device structure
 *
 * Returns: The data previously set with iio_device_set_drvdata()
 */
static inline void *iio_device_get_drvdata(const struct iio_dev *indio_dev)
{
	return dev_get_drvdata(&indio_dev->dev);
}
774 
/*
 * Used to ensure the iio_priv() structure is aligned to allow that structure
 * to in turn include IIO_DMA_MINALIGN'd elements such as buffers which
 * must not share cachelines with the rest of the structure, thus making
 * them safe for use with non-coherent DMA.
 *
 * A number of drivers also use this on buffers that include a 64-bit timestamp
 * that is used with iio_push_to_buffer_with_ts(). Therefore, in the case where
 * DMA alignment is not sufficient for proper timestamp alignment, we align to
 * 8 bytes instead.
 */
#define IIO_DMA_MINALIGN MAX(ARCH_DMA_MINALIGN, sizeof(s64))
787 
/*
 * Round the element count up to a multiple of sizeof(s64)/sizeof(type) so the
 * timestamp lands on an s64 boundary, then add one s64's worth of elements to
 * hold the timestamp itself.
 */
#define __IIO_DECLARE_BUFFER_WITH_TS(type, name, count) \
	type name[ALIGN((count), sizeof(s64) / sizeof(type)) + sizeof(s64) / sizeof(type)]
790 
/**
 * IIO_DECLARE_BUFFER_WITH_TS() - Declare a buffer with timestamp
 * @type: element type of the buffer
 * @name: identifier name of the buffer
 * @count: number of elements in the buffer
 *
 * Declares a buffer that is safe to use with iio_push_to_buffer_with_ts(). In
 * addition to allocating enough space for @count elements of @type, it also
 * allocates space for a s64 timestamp at the end of the buffer and ensures
 * proper alignment of the timestamp.
 */
#define IIO_DECLARE_BUFFER_WITH_TS(type, name, count) \
	__IIO_DECLARE_BUFFER_WITH_TS(type, name, count) __aligned(sizeof(s64))
804 
/**
 * IIO_DECLARE_DMA_BUFFER_WITH_TS() - Declare a DMA-aligned buffer with timestamp
 * @type: element type of the buffer
 * @name: identifier name of the buffer
 * @count: number of elements in the buffer
 *
 * Same as IIO_DECLARE_BUFFER_WITH_TS(), but it uses __aligned(IIO_DMA_MINALIGN)
 * to ensure that the buffer doesn't share cachelines with anything that comes
 * before it in a struct. This should not be used for stack-allocated buffers
 * as stack memory cannot generally be used for DMA.
 */
#define IIO_DECLARE_DMA_BUFFER_WITH_TS(type, name, count) \
	__IIO_DECLARE_BUFFER_WITH_TS(type, name, count) __aligned(IIO_DMA_MINALIGN)
818 
struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv);

/* The information at the returned address is guaranteed to be cacheline aligned */
static inline void *iio_priv(const struct iio_dev *indio_dev)
{
	/* Driver-private area allocated together with the iio_dev (sizeof_priv). */
	return ACCESS_PRIVATE(indio_dev, priv);
}
826 
void iio_device_free(struct iio_dev *indio_dev);
struct iio_dev *devm_iio_device_alloc(struct device *parent, int sizeof_priv);

/*
 * Allocate a device-managed IIO trigger with a printf-style name, passing
 * THIS_MODULE of the caller as the owning module.
 */
#define devm_iio_trigger_alloc(parent, fmt, ...) \
	__devm_iio_trigger_alloc((parent), THIS_MODULE, (fmt), ##__VA_ARGS__)
__printf(3, 4)
struct iio_trigger *__devm_iio_trigger_alloc(struct device *parent,
					     struct module *this_mod,
					     const char *fmt, ...);
/**
 * iio_get_debugfs_dentry() - helper function to get the debugfs_dentry
 * @indio_dev:		IIO device structure for device
 *
 * Returns: The device's debugfs dentry, or NULL when debugfs is disabled.
 **/
#if defined(CONFIG_DEBUG_FS)
struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev);
#else
/* Stub when CONFIG_DEBUG_FS is not enabled. */
static inline struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev)
{
	return NULL;
}
#endif
848 
/**
 * iio_device_suspend_triggering() - suspend trigger attached to an iio_dev
 * @indio_dev: iio_dev associated with the device that will have triggers suspended
 *
 * Returns: 0 if successful, negative otherwise
 **/
int iio_device_suspend_triggering(struct iio_dev *indio_dev);

/**
 * iio_device_resume_triggering() - resume trigger attached to an iio_dev
 *	that was previously suspended with iio_device_suspend_triggering()
 * @indio_dev: iio_dev associated with the device that will have triggers resumed
 *
 * Returns: 0 if successful, negative otherwise
 **/
int iio_device_resume_triggering(struct iio_dev *indio_dev);
865 
#ifdef CONFIG_ACPI
bool iio_read_acpi_mount_matrix(struct device *dev,
				struct iio_mount_matrix *orientation,
				char *acpi_method);
const char *iio_get_acpi_device_name_and_data(struct device *dev, const void **data);
#else
/* Stub when ACPI is not configured: no mount matrix can be read. */
static inline bool iio_read_acpi_mount_matrix(struct device *dev,
					      struct iio_mount_matrix *orientation,
					      char *acpi_method)
{
	return false;
}
/* Stub when ACPI is not configured: no ACPI device name available. */
static inline const char *
iio_get_acpi_device_name_and_data(struct device *dev, const void **data)
{
	return NULL;
}
#endif
/*
 * Convenience wrapper around iio_get_acpi_device_name_and_data() for callers
 * that only need the name, not the associated driver data.
 */
static inline const char *iio_get_acpi_device_name(struct device *dev)
{
	return iio_get_acpi_device_name_and_data(dev, NULL);
}
888 
889 /**
890  * iio_get_current_scan_type - Get the current scan type for a channel
891  * @indio_dev:	the IIO device to get the scan type for
892  * @chan:	the channel to get the scan type for
893  *
894  * Most devices only have one scan type per channel and can just access it
895  * directly without calling this function. Core IIO code and drivers that
896  * implement ext_scan_type in the channel spec should use this function to
897  * get the current scan type for a channel.
898  *
899  * Returns: the current scan type for the channel or error.
900  */
901 static inline const struct iio_scan_type
iio_get_current_scan_type(const struct iio_dev * indio_dev,const struct iio_chan_spec * chan)902 *iio_get_current_scan_type(const struct iio_dev *indio_dev,
903 			   const struct iio_chan_spec *chan)
904 {
905 	int ret;
906 
907 	if (chan->has_ext_scan_type) {
908 		ret = indio_dev->info->get_current_scan_type(indio_dev, chan);
909 		if (ret < 0)
910 			return ERR_PTR(ret);
911 
912 		if (ret >= chan->num_ext_scan_type)
913 			return ERR_PTR(-EINVAL);
914 
915 		return &chan->ext_scan_type[ret];
916 	}
917 
918 	return &chan->scan_type;
919 }
920 
/**
 * iio_get_masklength - Get length of the channels mask
 * @indio_dev: the IIO device to get the masklength for
 *
 * Returns: The length (in bits) of the device's channel scan mask.
 */
static inline unsigned int iio_get_masklength(const struct iio_dev *indio_dev)
{
	return ACCESS_PRIVATE(indio_dev, masklength);
}
929 
int iio_active_scan_mask_index(struct iio_dev *indio_dev);

/**
 * iio_for_each_active_channel - Iterate over active channels
 * @indio_dev: the IIO device
 * @chan: Holds the index of the enabled channel
 */
#define iio_for_each_active_channel(indio_dev, chan) \
	for_each_set_bit((chan), (indio_dev)->active_scan_mask, \
			 iio_get_masklength(indio_dev))
940 
941 ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals);
942 
943 int iio_str_to_fixpoint(const char *str, int fract_mult, int *integer,
944 	int *fract);
945 
/**
 * IIO_DEGREE_TO_RAD() - Convert degree to rad
 * @deg: A value in degree
 *
 * Returns the given value converted from degree to rad. Uses scaled integer
 * arithmetic (pi approximated as 314159/100000), rounded to nearest by adding
 * half the divisor before dividing.
 */
#define IIO_DEGREE_TO_RAD(deg) \
	((9000000ULL + (deg) * 314159ULL) / 18000000ULL)
953 
/**
 * IIO_RAD_TO_DEGREE() - Convert rad to degree
 * @rad: A value in rad
 *
 * Returns the given value converted from rad to degree. Uses scaled integer
 * arithmetic (pi approximated as 314159/100000), rounded to nearest by adding
 * half the divisor before dividing.
 */
#define IIO_RAD_TO_DEGREE(rad) \
	((314159ULL / 2 + (rad) * 18000000ULL) / 314159ULL)
962 
/**
 * IIO_G_TO_M_S_2() - Convert g to meter / second**2
 * @g: A value in g
 *
 * Returns the given value converted from g to meter / second**2.
 *
 * NOTE(review): this macro truncates (no +divisor/2 term), whereas the
 * inverse IIO_M_S_2_TO_G() rounds to nearest -- presumably intentional,
 * but worth confirming before relying on round-trip conversions.
 */
#define IIO_G_TO_M_S_2(g) ((g) * 980665ULL / 100000ULL)
970 
/**
 * IIO_M_S_2_TO_G() - Convert meter / second**2 to g
 * @ms2: A value in meter / second**2
 *
 * Returns the given value converted from meter / second**2 to g. Uses scaled
 * integer arithmetic (standard gravity as 980665/100000), rounded to nearest
 * by adding half the divisor before dividing.
 */
#define IIO_M_S_2_TO_G(ms2) \
	((980665ULL / 2 + (ms2) * 100000ULL) / 980665ULL)
978 
979 #endif /* _INDUSTRIAL_IO_H_ */
980