/* SPDX-License-Identifier: GPL-2.0-only */

/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 */
#ifndef _INDUSTRIAL_IO_H_
#define _INDUSTRIAL_IO_H_

#include <linux/device.h>
#include <linux/cdev.h>
#include <linux/iio/types.h>
#include <linux/of.h>
/* IIO TODO LIST */
/*
 * Provide means of adjusting timer accuracy.
 * Currently assumes nanoseconds.
 */

enum iio_shared_by {
	IIO_SEPARATE,
	IIO_SHARED_BY_TYPE,
	IIO_SHARED_BY_DIR,
	IIO_SHARED_BY_ALL
};

enum iio_endian {
	IIO_CPU,
	IIO_BE,
	IIO_LE,
};

struct iio_chan_spec;
struct iio_dev;

/**
 * struct iio_chan_spec_ext_info - Extended channel info attribute
 * @name:	Info attribute name
 * @shared:	Whether this attribute is shared between all channels.
 * @read:	Read callback for this info attribute, may be NULL.
 * @write:	Write callback for this info attribute, may be NULL.
 * @private:	Data private to the driver.
 */
struct iio_chan_spec_ext_info {
	const char *name;
	enum iio_shared_by shared;
	ssize_t (*read)(struct iio_dev *, uintptr_t private,
			struct iio_chan_spec const *, char *buf);
	ssize_t (*write)(struct iio_dev *, uintptr_t private,
			 struct iio_chan_spec const *, const char *buf,
			 size_t len);
	uintptr_t private;
};

/**
 * struct iio_enum - Enum channel info attribute
 * @items:	An array of strings.
 * @num_items:	Length of the item array.
 * @set:	Set callback function, may be NULL.
 * @get:	Get callback function, may be NULL.
 *
 * The iio_enum struct can be used to implement enum style channel attributes.
 * Enum style attributes are those which have a set of strings which map to
 * unsigned integer values. The IIO enum helper code takes care of mapping
 * between value and string as well as generating a "_available" file which
 * contains a list of all available items. The set callback will be called when
 * the attribute is updated. The last parameter is the index of the newly
 * activated item. The get callback will be used to query the currently active
 * item and is supposed to return the index for it.
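 *
 * A minimal sketch of how a driver might wire this up (hypothetical "foo"
 * driver; foo_write_mode_reg() and foo_read_mode_reg() are illustrative only
 * and not part of any real API):
 *
 *	static const char * const foo_modes[] = { "low_noise", "low_power" };
 *
 *	static int foo_set_mode(struct iio_dev *indio_dev,
 *				const struct iio_chan_spec *chan,
 *				unsigned int mode)
 *	{
 *		return foo_write_mode_reg(iio_priv(indio_dev), mode);
 *	}
 *
 *	static int foo_get_mode(struct iio_dev *indio_dev,
 *				const struct iio_chan_spec *chan)
 *	{
 *		return foo_read_mode_reg(iio_priv(indio_dev));
 *	}
 *
 *	static const struct iio_enum foo_mode_enum = {
 *		.items = foo_modes,
 *		.num_items = ARRAY_SIZE(foo_modes),
 *		.set = foo_set_mode,
 *		.get = foo_get_mode,
 *	};
 *
 * Such an instance is then typically referenced from a channel's ext_info
 * array through the IIO_ENUM() and IIO_ENUM_AVAILABLE() helpers below.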
 */
struct iio_enum {
	const char * const *items;
	unsigned int num_items;
	int (*set)(struct iio_dev *, const struct iio_chan_spec *, unsigned int);
	int (*get)(struct iio_dev *, const struct iio_chan_spec *);
};

ssize_t iio_enum_available_read(struct iio_dev *indio_dev,
	uintptr_t priv, const struct iio_chan_spec *chan, char *buf);
ssize_t iio_enum_read(struct iio_dev *indio_dev,
	uintptr_t priv, const struct iio_chan_spec *chan, char *buf);
ssize_t iio_enum_write(struct iio_dev *indio_dev,
	uintptr_t priv, const struct iio_chan_spec *chan, const char *buf,
	size_t len);

/**
 * IIO_ENUM() - Initialize enum extended channel attribute
 * @_name:	Attribute name
 * @_shared:	Whether the attribute is shared between all channels
 * @_e:		Pointer to an iio_enum struct
 *
 * This should usually be used together with IIO_ENUM_AVAILABLE()
 */
#define IIO_ENUM(_name, _shared, _e) \
{ \
	.name = (_name), \
	.shared = (_shared), \
	.read = iio_enum_read, \
	.write = iio_enum_write, \
	.private = (uintptr_t)(_e), \
}

/**
 * IIO_ENUM_AVAILABLE() - Initialize enum available extended channel attribute
 * @_name:	Attribute name ("_available" will be appended to the name)
 * @_shared:	Whether the attribute is shared between all channels
 * @_e:		Pointer to an iio_enum struct
 *
 * Creates a read only attribute which lists all the available enum items in a
 * space separated list. This should usually be used together with IIO_ENUM()
 */
#define IIO_ENUM_AVAILABLE(_name, _shared, _e) \
{ \
	.name = (_name "_available"), \
	.shared = (_shared), \
	.read = iio_enum_available_read, \
	.private = (uintptr_t)(_e), \
}

/**
 * struct iio_mount_matrix - iio mounting matrix
 * @rotation:	3 dimensional space rotation matrix defining sensor alignment
 *		with main hardware
 */
struct iio_mount_matrix {
	const char *rotation[9];
};

ssize_t iio_show_mount_matrix(struct iio_dev *indio_dev, uintptr_t priv,
			      const struct iio_chan_spec *chan, char *buf);
int iio_read_mount_matrix(struct device *dev, struct iio_mount_matrix *matrix);

typedef const struct iio_mount_matrix *
	(iio_get_mount_matrix_t)(const struct iio_dev *indio_dev,
				 const struct iio_chan_spec *chan);

/**
 * IIO_MOUNT_MATRIX() - Initialize mount matrix extended channel attribute
 * @_shared:	Whether the attribute is shared between all channels
 * @_get:	Pointer to an iio_get_mount_matrix_t accessor
 */
#define IIO_MOUNT_MATRIX(_shared, _get) \
{ \
	.name = "mount_matrix", \
	.shared = (_shared), \
	.read = iio_show_mount_matrix, \
	.private = (uintptr_t)(_get), \
}

/**
 * struct iio_event_spec - specification for a channel event
 * @type:			Type of the event
 * @dir:			Direction of the event
 * @mask_separate:		Bit mask of enum iio_event_info values. Attributes
 *				set in this mask will be registered per channel.
 * @mask_shared_by_type:	Bit mask of enum iio_event_info values. Attributes
 *				set in this mask will be shared by channel type.
 * @mask_shared_by_dir:		Bit mask of enum iio_event_info values. Attributes
 *				set in this mask will be shared by channel type and
 *				direction.
 * @mask_shared_by_all:		Bit mask of enum iio_event_info values. Attributes
 *				set in this mask will be shared by all channels.
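 *
 * For instance, a driver could describe a rising threshold event with a
 * per-channel value and enable attribute along these lines (illustrative
 * sketch, not taken from a real driver):
 *
 *	static const struct iio_event_spec foo_thresh_event = {
 *		.type = IIO_EV_TYPE_THRESH,
 *		.dir = IIO_EV_DIR_RISING,
 *		.mask_separate = BIT(IIO_EV_INFO_VALUE) |
 *				 BIT(IIO_EV_INFO_ENABLE),
 *	};
 *
 * Such a spec is referenced from a channel's event_spec array and serviced
 * through the read/write_event_* callbacks in struct iio_info.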
 */
struct iio_event_spec {
	enum iio_event_type type;
	enum iio_event_direction dir;
	unsigned long mask_separate;
	unsigned long mask_shared_by_type;
	unsigned long mask_shared_by_dir;
	unsigned long mask_shared_by_all;
};

/**
 * struct iio_chan_spec - specification of a single channel
 * @type:		What type of measurement is the channel making.
 * @channel:		What number do we wish to assign the channel.
 * @channel2:		If there is a second number for a differential
 *			channel then this is it. If modified is set then the
 *			value here specifies the modifier.
 * @address:		Driver specific identifier.
 * @scan_index:		Monotonic index to give ordering in scans when read
 *			from a buffer.
 * @scan_type:		struct describing the scan type
 * @scan_type.sign:	's' or 'u' to specify signed or unsigned
 * @scan_type.realbits:	Number of valid bits of data
 * @scan_type.storagebits: Realbits + padding
 * @scan_type.shift:	Shift right by this before masking out
 *			realbits.
 * @scan_type.repeat:	Number of times real/storage bits repeats.
 *			When the repeat element is more than 1, then
 *			the type element in sysfs will show a repeat
 *			value. Otherwise, the number of repetitions
 *			is omitted.
 * @scan_type.endianness: little or big endian
 * @info_mask_separate: What information is to be exported that is specific to
 *			this channel.
 * @info_mask_separate_available: What availability information is to be
 *			exported that is specific to this channel.
 * @info_mask_shared_by_type: What information is to be exported that is shared
 *			by all channels of the same type.
 * @info_mask_shared_by_type_available: What availability information is to be
 *			exported that is shared by all channels of the same
 *			type.
 * @info_mask_shared_by_dir: What information is to be exported that is shared
 *			by all channels of the same direction.
 * @info_mask_shared_by_dir_available: What availability information is to be
 *			exported that is shared by all channels of the same
 *			direction.
 * @info_mask_shared_by_all: What information is to be exported that is shared
 *			by all channels.
 * @info_mask_shared_by_all_available: What availability information is to be
 *			exported that is shared by all channels.
 * @event_spec:		Array of events which should be registered for this
 *			channel.
 * @num_event_specs:	Size of the event_spec array.
 * @ext_info:		Array of extended info attributes for this channel.
 *			The array is NULL terminated, the last element should
 *			have its name field set to NULL.
 * @extend_name:	Allows labeling of channel attributes with an
 *			informative name. Note this has no effect on event
 *			codes etc., unlike modifiers.
 * @datasheet_name:	A name used in in-kernel mapping of channels. It should
 *			correspond to the first name that the channel is
 *			referred to by in the datasheet (e.g. IND), or the
 *			nearest possible compound name (e.g. IND-INC).
 * @modified:		Does a modifier apply to this channel. What these are
 *			depends on the channel type. Modifier is set in
 *			channel2. Examples are IIO_MOD_X for axial sensors
 *			about the 'x' axis.
 * @indexed:		Specify the channel has a numerical index. If not,
 *			the channel index number will be suppressed for sysfs
 *			attributes but not for event codes.
 * @output:		Channel is output.
 * @differential:	Channel is differential.
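 *
 * As an illustration (hypothetical 12-bit ADC, not a real driver), a simple
 * indexed voltage channel plus the software timestamp provided by the
 * IIO_CHAN_SOFT_TIMESTAMP() helper below could be declared as:
 *
 *	static const struct iio_chan_spec foo_channels[] = {
 *		{
 *			.type = IIO_VOLTAGE,
 *			.indexed = 1,
 *			.channel = 0,
 *			.info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
 *			.info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
 *			.scan_index = 0,
 *			.scan_type = {
 *				.sign = 'u',
 *				.realbits = 12,
 *				.storagebits = 16,
 *				.endianness = IIO_BE,
 *			},
 *		},
 *		IIO_CHAN_SOFT_TIMESTAMP(1),
 *	};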
 */
struct iio_chan_spec {
	enum iio_chan_type	type;
	int			channel;
	int			channel2;
	unsigned long		address;
	int			scan_index;
	struct {
		char	sign;
		u8	realbits;
		u8	storagebits;
		u8	shift;
		u8	repeat;
		enum iio_endian endianness;
	} scan_type;
	long			info_mask_separate;
	long			info_mask_separate_available;
	long			info_mask_shared_by_type;
	long			info_mask_shared_by_type_available;
	long			info_mask_shared_by_dir;
	long			info_mask_shared_by_dir_available;
	long			info_mask_shared_by_all;
	long			info_mask_shared_by_all_available;
	const struct iio_event_spec *event_spec;
	unsigned int		num_event_specs;
	const struct iio_chan_spec_ext_info *ext_info;
	const char		*extend_name;
	const char		*datasheet_name;
	unsigned		modified:1;
	unsigned		indexed:1;
	unsigned		output:1;
	unsigned		differential:1;
};


/**
 * iio_channel_has_info() - Checks whether a channel supports an info attribute
 * @chan:	The channel to be queried
 * @type:	Type of the info attribute to be checked
 *
 * Returns true if the channel supports reporting values for the given info
 * attribute type, false otherwise.
 */
static inline bool iio_channel_has_info(const struct iio_chan_spec *chan,
					enum iio_chan_info_enum type)
{
	return (chan->info_mask_separate & BIT(type)) |
		(chan->info_mask_shared_by_type & BIT(type)) |
		(chan->info_mask_shared_by_dir & BIT(type)) |
		(chan->info_mask_shared_by_all & BIT(type));
}

/**
 * iio_channel_has_available() - Checks if a channel has an available attribute
 * @chan:	The channel to be queried
 * @type:	Type of the available attribute to be checked
 *
 * Returns true if the channel supports reporting available values for the
 * given attribute type, false otherwise.
 */
static inline bool iio_channel_has_available(const struct iio_chan_spec *chan,
					     enum iio_chan_info_enum type)
{
	return (chan->info_mask_separate_available & BIT(type)) |
		(chan->info_mask_shared_by_type_available & BIT(type)) |
		(chan->info_mask_shared_by_dir_available & BIT(type)) |
		(chan->info_mask_shared_by_all_available & BIT(type));
}

#define IIO_CHAN_SOFT_TIMESTAMP(_si) {	\
	.type = IIO_TIMESTAMP,		\
	.channel = -1,			\
	.scan_index = _si,		\
	.scan_type = {			\
		.sign = 's',		\
		.realbits = 64,		\
		.storagebits = 64,	\
		},			\
}

s64 iio_get_time_ns(const struct iio_dev *indio_dev);
unsigned int iio_get_time_res(const struct iio_dev *indio_dev);

/*
 * Device operating modes
 * @INDIO_DIRECT_MODE: There is an access to either:
 * a) The last single value available for devices that do not provide
 *    on-demand reads.
 * b) A new value after performing an on-demand read otherwise.
 * On most devices, this is a single-shot read. On some devices with data
 * streams without an 'on-demand' function, this might also be the 'last value'
 * feature. Above all, this mode internally means that we are not in any of the
 * other modes, and sysfs reads should work.
 * Device drivers should inform the core if they support this mode.
 * @INDIO_BUFFER_TRIGGERED: Common mode when dealing with kfifo buffers.
 * It indicates that an explicit trigger is required. This requests the core to
 * attach a poll function when enabling the buffer, which is indicated by the
 * _TRIGGERED suffix.
 * The core will ensure this mode is set when registering a triggered buffer
 * with iio_triggered_buffer_setup().
 * @INDIO_BUFFER_SOFTWARE: Another kfifo buffer mode, but not event triggered.
 * No poll function can be attached because there is no triggered infrastructure
 * we can use to cause capture. There is a kfifo that the driver will fill, but
 * not "only one scan at a time". Typically, hardware will have a buffer that
 * can hold multiple scans. Software may read one or more scans at a single time
 * and push the available data to a kfifo. This means the core will not attach
 * any poll function when enabling the buffer.
 * The core will ensure this mode is set when registering a simple kfifo buffer
 * with devm_iio_kfifo_buffer_setup().
 * @INDIO_BUFFER_HARDWARE: For specific hardware, if unsure do not use this mode.
 * Same as above but this time the buffer is not a kfifo where we have direct
 * access to the data. Instead, the consumer driver must access the data through
 * non software visible channels (or DMA when there is no demux possible in
 * software).
 * The core will ensure this mode is set when registering a dmaengine buffer
 * with devm_iio_dmaengine_buffer_setup().
 * @INDIO_EVENT_TRIGGERED: Very unusual mode.
 * Triggers usually refer to an external event which will start data capture.
 * Here it is kind of the opposite: a particular state of the data might itself
 * produce an event. We don't necessarily have access to the data itself, but to
 * the event produced. For example, this can be a threshold detector. The
 * internal path of this mode is very close to the INDIO_BUFFER_TRIGGERED mode.
 * The core will ensure this mode is set when registering a triggered event.
 * @INDIO_HARDWARE_TRIGGERED: Very unusual mode.
 * Here, triggers can result in data capture and can be routed to multiple
 * hardware components, which makes them close to regular triggers in the way
 * they must be managed by the core, but without the entire interrupts/poll
 * functions burden. Interrupts are irrelevant as the data flow is hardware
 * mediated and distributed.
 */
#define INDIO_DIRECT_MODE		0x01
#define INDIO_BUFFER_TRIGGERED		0x02
#define INDIO_BUFFER_SOFTWARE		0x04
#define INDIO_BUFFER_HARDWARE		0x08
#define INDIO_EVENT_TRIGGERED		0x10
#define INDIO_HARDWARE_TRIGGERED	0x20

#define INDIO_ALL_BUFFER_MODES \
	(INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE | INDIO_BUFFER_SOFTWARE)

#define INDIO_ALL_TRIGGERED_MODES \
	(INDIO_BUFFER_TRIGGERED \
	 | INDIO_EVENT_TRIGGERED \
	 | INDIO_HARDWARE_TRIGGERED)

#define INDIO_MAX_RAW_ELEMENTS		4

struct iio_trigger; /* forward declaration */

/**
 * struct iio_info - constant information about device
 * @event_attrs:	event control attributes
 * @attrs:		general purpose device attributes
 * @read_raw:		function to request a value from the device.
 *			mask specifies which value. Note 0 means a reading of
 *			the channel in question. Return value will specify the
 *			type of value returned by the device. val and val2 will
 *			contain the elements making up the returned value.
 * @read_raw_multi:	function to return values from the device.
 *			mask specifies which value. Note 0 means a reading of
 *			the channel in question. Return value will specify the
 *			type of value returned by the device.
 *			The vals pointer contains the elements making up the
 *			returned value. max_len specifies the maximum number
 *			of elements the vals pointer can contain. val_len is
 *			used to return the length of valid elements in vals.
 * @read_avail:		function to return the available values from the device.
 *			mask specifies which value. Note 0 means the available
 *			values for the channel in question. Return value
 *			specifies if an IIO_AVAIL_LIST or an IIO_AVAIL_RANGE is
 *			returned in vals. The type of the vals is returned in
 *			type and the number of vals is returned in length. For
 *			ranges, there are always three vals returned: min, step
 *			and max. For lists, all possible values are enumerated.
 * @write_raw:		function to write a value to the device.
 *			Parameters are the same as for read_raw.
 * @read_label:		function to request label name for a specified label,
 *			for better channel identification.
 * @write_raw_get_fmt:	callback function to query the expected
 *			format/precision. If not set by the driver, write_raw
 *			returns IIO_VAL_INT_PLUS_MICRO.
 * @read_event_config:	find out if the event is enabled.
 * @write_event_config:	set if the event is enabled.
 * @read_event_value:	read a configuration value associated with the event.
 * @write_event_value:	write a configuration value for the event.
 * @validate_trigger:	function to validate the trigger when the
 *			current trigger gets changed.
 * @update_scan_mode:	function to configure device and scan buffer when
 *			channels have changed
 * @debugfs_reg_access:	function to read or write register value of device
 * @of_xlate:		function pointer to obtain channel specifier index.
 *			When #iio-cells is greater than '0', the driver could
 *			provide a custom of_xlate function that reads the
 *			*args* and returns the appropriate index in registered
 *			IIO channels array.
 * @hwfifo_set_watermark: function pointer to set the current hardware
 *			fifo watermark level; see hwfifo_* entries in
 *			Documentation/ABI/testing/sysfs-bus-iio for details on
 *			how the hardware fifo operates
 * @hwfifo_flush_to_buffer: function pointer to flush the samples stored
 *			in the hardware fifo to the device buffer. The driver
 *			should not flush more than count samples. The function
 *			must return the number of samples flushed, 0 if no
 *			samples were flushed or a negative integer if no samples
 *			were flushed and there was an error.
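 *
 * As a rough sketch (hypothetical driver; struct foo_state, foo_read_adc()
 * and the vref_mv field are illustrative only, and error handling is
 * trimmed), a read_raw() implementation for the channel layout shown earlier
 * could look like:
 *
 *	static int foo_read_raw(struct iio_dev *indio_dev,
 *				struct iio_chan_spec const *chan,
 *				int *val, int *val2, long mask)
 *	{
 *		struct foo_state *st = iio_priv(indio_dev);
 *
 *		switch (mask) {
 *		case IIO_CHAN_INFO_RAW:
 *			*val = foo_read_adc(st, chan->channel);
 *			return IIO_VAL_INT;
 *		case IIO_CHAN_INFO_SCALE:
 *			*val = st->vref_mv;
 *			*val2 = chan->scan_type.realbits;
 *			return IIO_VAL_FRACTIONAL_LOG2;
 *		default:
 *			return -EINVAL;
 *		}
 *	}
 *
 *	static const struct iio_info foo_info = {
 *		.read_raw = foo_read_raw,
 *	};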
 **/
struct iio_info {
	const struct attribute_group	*event_attrs;
	const struct attribute_group	*attrs;

	int (*read_raw)(struct iio_dev *indio_dev,
			struct iio_chan_spec const *chan,
			int *val,
			int *val2,
			long mask);

	int (*read_raw_multi)(struct iio_dev *indio_dev,
			struct iio_chan_spec const *chan,
			int max_len,
			int *vals,
			int *val_len,
			long mask);

	int (*read_avail)(struct iio_dev *indio_dev,
			  struct iio_chan_spec const *chan,
			  const int **vals,
			  int *type,
			  int *length,
			  long mask);

	int (*write_raw)(struct iio_dev *indio_dev,
			 struct iio_chan_spec const *chan,
			 int val,
			 int val2,
			 long mask);

	int (*read_label)(struct iio_dev *indio_dev,
			  struct iio_chan_spec const *chan,
			  char *label);

	int (*write_raw_get_fmt)(struct iio_dev *indio_dev,
				 struct iio_chan_spec const *chan,
				 long mask);

	int (*read_event_config)(struct iio_dev *indio_dev,
				 const struct iio_chan_spec *chan,
				 enum iio_event_type type,
				 enum iio_event_direction dir);

	int (*write_event_config)(struct iio_dev *indio_dev,
				  const struct iio_chan_spec *chan,
				  enum iio_event_type type,
				  enum iio_event_direction dir,
				  int state);

	int (*read_event_value)(struct iio_dev *indio_dev,
				const struct iio_chan_spec *chan,
				enum iio_event_type type,
				enum iio_event_direction dir,
				enum iio_event_info info, int *val, int *val2);

	int (*write_event_value)(struct iio_dev *indio_dev,
				 const struct iio_chan_spec *chan,
				 enum iio_event_type type,
				 enum iio_event_direction dir,
				 enum iio_event_info info, int val, int val2);

	int (*validate_trigger)(struct iio_dev *indio_dev,
				struct iio_trigger *trig);
	int (*update_scan_mode)(struct iio_dev *indio_dev,
				const unsigned long *scan_mask);
	int (*debugfs_reg_access)(struct iio_dev *indio_dev,
				  unsigned reg, unsigned writeval,
				  unsigned *readval);
	int (*of_xlate)(struct iio_dev *indio_dev,
			const struct of_phandle_args *iiospec);
	int (*hwfifo_set_watermark)(struct iio_dev *indio_dev, unsigned val);
	int (*hwfifo_flush_to_buffer)(struct iio_dev *indio_dev,
				      unsigned count);
};

/**
 * struct iio_buffer_setup_ops - buffer setup related callbacks
 * @preenable:		[DRIVER] function to run prior to marking buffer enabled
 * @postenable:		[DRIVER] function to run after marking buffer enabled
 * @predisable:		[DRIVER] function to run prior to marking buffer
 *			disabled
 * @postdisable:	[DRIVER] function to run after marking buffer disabled
 * @validate_scan_mask: [DRIVER] function callback to check whether a given
 *			scan mask is valid for the device.
 */
struct iio_buffer_setup_ops {
	int (*preenable)(struct iio_dev *);
	int (*postenable)(struct iio_dev *);
	int (*predisable)(struct iio_dev *);
	int (*postdisable)(struct iio_dev *);
	bool (*validate_scan_mask)(struct iio_dev *indio_dev,
				   const unsigned long *scan_mask);
};

/**
 * struct iio_dev - industrial I/O device
 * @modes:		[DRIVER] bitmask listing all the operating modes
 *			supported by the IIO device. This list should be
 *			initialized before registering the IIO device. It can
 *			also be filled up by the IIO core, as a result of
 *			enabling particular features in the driver
 *			(see iio_triggered_event_setup()).
 * @dev:		[DRIVER] device structure, should be assigned a parent
 *			and owner
 * @buffer:		[DRIVER] any buffer present
 * @scan_bytes:		[INTERN] num bytes captured to be fed to buffer demux
 * @mlock:		[INTERN] lock used to prevent simultaneous device state
 *			changes
 * @available_scan_masks: [DRIVER] optional array of allowed bitmasks
 * @masklength:		[INTERN] the length of the mask established from
 *			channels
 * @active_scan_mask:	[INTERN] union of all scan masks requested by buffers
 * @scan_timestamp:	[INTERN] set if any buffers have requested timestamp
 * @trig:		[INTERN] current device trigger (buffer modes)
 * @pollfunc:		[DRIVER] function run on trigger being received
 * @pollfunc_event:	[DRIVER] function run on events trigger being received
 * @channels:		[DRIVER] channel specification structure table
 * @num_channels:	[DRIVER] number of channels specified in @channels.
 * @name:		[DRIVER] name of the device.
 * @label:		[DRIVER] unique name to identify which device this is
 * @info:		[DRIVER] callbacks and constant info from driver
 * @setup_ops:		[DRIVER] callbacks to call before and after buffer
 *			enable/disable
 * @priv:		[DRIVER] reference to driver's private information
 *			**MUST** be accessed **ONLY** via iio_priv() helper
 */
struct iio_dev {
	int				modes;
	struct device			dev;

	struct iio_buffer		*buffer;
	int				scan_bytes;
	struct mutex			mlock;

	const unsigned long		*available_scan_masks;
	unsigned			masklength;
	const unsigned long		*active_scan_mask;
	bool				scan_timestamp;
	struct iio_trigger		*trig;
	struct iio_poll_func		*pollfunc;
	struct iio_poll_func		*pollfunc_event;

	struct iio_chan_spec const	*channels;
	int				num_channels;

	const char			*name;
	const char			*label;
	const struct iio_info		*info;
	const struct iio_buffer_setup_ops	*setup_ops;

	void				*priv;
};

int iio_device_id(struct iio_dev *indio_dev);
int iio_device_get_current_mode(struct iio_dev *indio_dev);
bool iio_buffer_enabled(struct iio_dev *indio_dev);

const struct iio_chan_spec
*iio_find_channel_from_si(struct iio_dev *indio_dev, int si);
/**
 * iio_device_register() - register a device with the IIO subsystem
 * @indio_dev:	Device structure filled by the device driver
 **/
#define iio_device_register(indio_dev) \
	__iio_device_register((indio_dev), THIS_MODULE)
int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod);
void iio_device_unregister(struct iio_dev *indio_dev);
/**
 * devm_iio_device_register - Resource-managed iio_device_register()
 * @dev:	Device to allocate iio_dev for
 * @indio_dev:	Device structure filled by the device driver
 *
 * Managed iio_device_register. The IIO device registered with this
 * function is automatically unregistered on driver detach. This function
 * calls iio_device_register() internally. Refer to that function for more
 * information.
 *
 * RETURNS:
 * 0 on success, negative error number on failure.
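 *
 * A typical probe() sequence using the managed helpers might look like the
 * sketch below (the foo_* names reuse the hypothetical examples above and are
 * not real API):
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct iio_dev *indio_dev;
 *		struct foo_state *st;
 *
 *		indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*st));
 *		if (!indio_dev)
 *			return -ENOMEM;
 *
 *		st = iio_priv(indio_dev);
 *		indio_dev->name = "foo";
 *		indio_dev->info = &foo_info;
 *		indio_dev->channels = foo_channels;
 *		indio_dev->num_channels = ARRAY_SIZE(foo_channels);
 *		indio_dev->modes = INDIO_DIRECT_MODE;
 *
 *		return devm_iio_device_register(&pdev->dev, indio_dev);
 *	}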
 */
#define devm_iio_device_register(dev, indio_dev) \
	__devm_iio_device_register((dev), (indio_dev), THIS_MODULE)
int __devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev,
			       struct module *this_mod);
int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp);
int iio_device_claim_direct_mode(struct iio_dev *indio_dev);
void iio_device_release_direct_mode(struct iio_dev *indio_dev);

extern struct bus_type iio_bus_type;

/**
 * iio_device_put() - reference counted deallocation of struct device
 * @indio_dev:	IIO device structure containing the device
 **/
static inline void iio_device_put(struct iio_dev *indio_dev)
{
	if (indio_dev)
		put_device(&indio_dev->dev);
}

clockid_t iio_device_get_clock(const struct iio_dev *indio_dev);
int iio_device_set_clock(struct iio_dev *indio_dev, clockid_t clock_id);

/**
 * dev_to_iio_dev() - Get IIO device struct from a device struct
 * @dev:	The device embedded in the IIO device
 *
 * Note: The device must be an IIO device, otherwise the result is undefined.
 */
static inline struct iio_dev *dev_to_iio_dev(struct device *dev)
{
	return container_of(dev, struct iio_dev, dev);
}

/**
 * iio_device_get() - increment reference count for the device
 * @indio_dev:	IIO device structure
 *
 * Returns: The passed IIO device
 **/
static inline struct iio_dev *iio_device_get(struct iio_dev *indio_dev)
{
	return indio_dev ? dev_to_iio_dev(get_device(&indio_dev->dev)) : NULL;
}

/**
 * iio_device_set_parent() - assign parent device to the IIO device object
 * @indio_dev:	IIO device structure
 * @parent:	reference to parent device object
 *
 * This utility must be called between IIO device allocation
 * (via devm_iio_device_alloc()) & IIO device registration
 * (via iio_device_register() and devm_iio_device_register()).
 * By default, the device allocation will also assign a parent device to
 * the IIO device object. In cases where devm_iio_device_alloc() is used,
 * sometimes the parent device must be different from the device used to
 * manage the allocation.
 * In that case, this helper should be used to change the parent, hence the
 * requirement to call this between allocation & registration.
 **/
static inline void iio_device_set_parent(struct iio_dev *indio_dev,
					 struct device *parent)
{
	indio_dev->dev.parent = parent;
}

/**
 * iio_device_set_drvdata() - Set device driver data
 * @indio_dev:	IIO device structure
 * @data:	Driver specific data
 *
 * Allows attaching an arbitrary pointer to an IIO device, which can later be
 * retrieved by iio_device_get_drvdata().
 */
static inline void iio_device_set_drvdata(struct iio_dev *indio_dev, void *data)
{
	dev_set_drvdata(&indio_dev->dev, data);
}

/**
 * iio_device_get_drvdata() - Get device driver data
 * @indio_dev:	IIO device structure
 *
 * Returns the data previously set with iio_device_set_drvdata()
 */
static inline void *iio_device_get_drvdata(const struct iio_dev *indio_dev)
{
	return dev_get_drvdata(&indio_dev->dev);
}

/* Can we make this smaller? */
#define IIO_ALIGN L1_CACHE_BYTES
struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv);

/* The information at the returned address is guaranteed to be cacheline aligned */
static inline void *iio_priv(const struct iio_dev *indio_dev)
{
	return indio_dev->priv;
}

void iio_device_free(struct iio_dev *indio_dev);
struct iio_dev *devm_iio_device_alloc(struct device *parent, int sizeof_priv);
__printf(2, 3)
struct iio_trigger *devm_iio_trigger_alloc(struct device *parent,
					   const char *fmt, ...);

/**
 * iio_get_debugfs_dentry() - helper function to get the debugfs_dentry
 * @indio_dev:	IIO device structure for device
 **/
#if defined(CONFIG_DEBUG_FS)
struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev);
#else
static inline struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev)
{
	return NULL;
}
#endif

ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals);

int iio_str_to_fixpoint(const char *str, int fract_mult, int *integer,
			int *fract);

/**
 * IIO_DEGREE_TO_RAD() - Convert degree to rad
 * @deg:	A value in degree
 *
 * Returns the given value converted from degree to rad
 */
#define IIO_DEGREE_TO_RAD(deg) (((deg) * 314159ULL + 9000000ULL) / 18000000ULL)

/**
 * IIO_RAD_TO_DEGREE() - Convert rad to degree
 * @rad:	A value in rad
 *
 * Returns the given value converted from rad to degree
 */
#define IIO_RAD_TO_DEGREE(rad) \
	(((rad) * 18000000ULL + 314159ULL / 2) / 314159ULL)

/**
 * IIO_G_TO_M_S_2() - Convert g to meter / second**2
 * @g:		A value in g
 *
 * Returns the given value converted from g to meter / second**2
 */
#define IIO_G_TO_M_S_2(g) ((g) * 980665ULL / 100000ULL)

/**
 * IIO_M_S_2_TO_G() - Convert meter / second**2 to g
 * @ms2:	A value in meter / second**2
 *
 * Returns the given value converted from meter / second**2 to g
 */
#define IIO_M_S_2_TO_G(ms2) (((ms2) * 100000ULL + 980665ULL / 2) / 980665ULL)

#endif /* _INDUSTRIAL_IO_H_ */
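
/*
 * Worked example for the fixed-point helpers above (illustrative only): with
 * the input expressed in micro-degrees, IIO_DEGREE_TO_RAD() yields
 * micro-radians, e.g. IIO_DEGREE_TO_RAD(180000000ULL) == 3141590, which is
 * approximately pi scaled by 10^6.
 */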