xref: /linux/drivers/iio/industrialio-core.c (revision f898c16a0624e7f2dcb0b1cda6916c9be6489197)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * The industrial I/O core
4  *
5  * Copyright (c) 2008 Jonathan Cameron
6  *
7  * Based on elements of hwmon and input subsystems.
8  */
9 
10 #define pr_fmt(fmt) "iio-core: " fmt
11 
12 #include <linux/anon_inodes.h>
13 #include <linux/cdev.h>
14 #include <linux/cleanup.h>
15 #include <linux/debugfs.h>
16 #include <linux/device.h>
17 #include <linux/err.h>
18 #include <linux/fs.h>
19 #include <linux/idr.h>
20 #include <linux/kdev_t.h>
21 #include <linux/kernel.h>
22 #include <linux/module.h>
23 #include <linux/mutex.h>
24 #include <linux/poll.h>
25 #include <linux/property.h>
26 #include <linux/sched.h>
27 #include <linux/slab.h>
28 #include <linux/wait.h>
29 
30 #include <linux/iio/buffer.h>
31 #include <linux/iio/buffer_impl.h>
32 #include <linux/iio/events.h>
33 #include <linux/iio/iio-opaque.h>
34 #include <linux/iio/iio.h>
35 #include <linux/iio/sysfs.h>
36 
37 #include "iio_core.h"
38 #include "iio_core_trigger.h"
39 
/* IDA to assign each registered device a unique id */
static DEFINE_IDA(iio_ida);

/* Base of the char dev region allocated in iio_init(); 0 until then */
static dev_t iio_devt;

/* Number of minors reserved for IIO character devices */
#define IIO_DEV_MAX 256
const struct bus_type iio_bus_type = {
	.name = "iio",
};
EXPORT_SYMBOL(iio_bus_type);

/* Root "iio" directory in debugfs; NULL if debugfs is unavailable */
static struct dentry *iio_debugfs_dentry;

/* Sysfs name prefix selected by iio_chan_spec.output (0 = in, 1 = out) */
static const char * const iio_direction[] = {
	[0] = "in",
	[1] = "out",
};

/*
 * Sysfs channel-type names, indexed by enum iio_chan_type. These strings
 * are userspace ABI — do not change existing entries.
 */
static const char * const iio_chan_type_name_spec[] = {
	[IIO_VOLTAGE] = "voltage",
	[IIO_CURRENT] = "current",
	[IIO_POWER] = "power",
	[IIO_ACCEL] = "accel",
	[IIO_ANGL_VEL] = "anglvel",
	[IIO_MAGN] = "magn",
	[IIO_LIGHT] = "illuminance",
	[IIO_INTENSITY] = "intensity",
	[IIO_PROXIMITY] = "proximity",
	[IIO_TEMP] = "temp",
	[IIO_INCLI] = "incli",
	[IIO_ROT] = "rot",
	[IIO_ANGL] = "angl",
	[IIO_TIMESTAMP] = "timestamp",
	[IIO_CAPACITANCE] = "capacitance",
	[IIO_ALTVOLTAGE] = "altvoltage",
	[IIO_CCT] = "cct",
	[IIO_PRESSURE] = "pressure",
	[IIO_HUMIDITYRELATIVE] = "humidityrelative",
	[IIO_ACTIVITY] = "activity",
	[IIO_STEPS] = "steps",
	[IIO_ENERGY] = "energy",
	[IIO_DISTANCE] = "distance",
	[IIO_VELOCITY] = "velocity",
	[IIO_CONCENTRATION] = "concentration",
	[IIO_RESISTANCE] = "resistance",
	[IIO_PH] = "ph",
	[IIO_UVINDEX] = "uvindex",
	[IIO_ELECTRICALCONDUCTIVITY] = "electricalconductivity",
	[IIO_COUNT] = "count",
	[IIO_INDEX] = "index",
	[IIO_GRAVITY]  = "gravity",
	[IIO_POSITIONRELATIVE]  = "positionrelative",
	[IIO_PHASE] = "phase",
	[IIO_MASSCONCENTRATION] = "massconcentration",
	[IIO_DELTA_ANGL] = "deltaangl",
	[IIO_DELTA_VELOCITY] = "deltavelocity",
	[IIO_COLORTEMP] = "colortemp",
	[IIO_CHROMATICITY] = "chromaticity",
};

/*
 * Sysfs channel-modifier names, indexed by enum iio_modifier. Also
 * userspace ABI — do not change existing entries.
 */
static const char * const iio_modifier_names[] = {
	[IIO_MOD_X] = "x",
	[IIO_MOD_Y] = "y",
	[IIO_MOD_Z] = "z",
	[IIO_MOD_X_AND_Y] = "x&y",
	[IIO_MOD_X_AND_Z] = "x&z",
	[IIO_MOD_Y_AND_Z] = "y&z",
	[IIO_MOD_X_AND_Y_AND_Z] = "x&y&z",
	[IIO_MOD_X_OR_Y] = "x|y",
	[IIO_MOD_X_OR_Z] = "x|z",
	[IIO_MOD_Y_OR_Z] = "y|z",
	[IIO_MOD_X_OR_Y_OR_Z] = "x|y|z",
	[IIO_MOD_ROOT_SUM_SQUARED_X_Y] = "sqrt(x^2+y^2)",
	[IIO_MOD_SUM_SQUARED_X_Y_Z] = "x^2+y^2+z^2",
	[IIO_MOD_LIGHT_BOTH] = "both",
	[IIO_MOD_LIGHT_IR] = "ir",
	[IIO_MOD_LIGHT_CLEAR] = "clear",
	[IIO_MOD_LIGHT_RED] = "red",
	[IIO_MOD_LIGHT_GREEN] = "green",
	[IIO_MOD_LIGHT_BLUE] = "blue",
	[IIO_MOD_LIGHT_UV] = "uv",
	[IIO_MOD_LIGHT_UVA] = "uva",
	[IIO_MOD_LIGHT_UVB] = "uvb",
	[IIO_MOD_LIGHT_DUV] = "duv",
	[IIO_MOD_QUATERNION] = "quaternion",
	[IIO_MOD_TEMP_AMBIENT] = "ambient",
	[IIO_MOD_TEMP_OBJECT] = "object",
	[IIO_MOD_NORTH_MAGN] = "from_north_magnetic",
	[IIO_MOD_NORTH_TRUE] = "from_north_true",
	[IIO_MOD_NORTH_MAGN_TILT_COMP] = "from_north_magnetic_tilt_comp",
	[IIO_MOD_NORTH_TRUE_TILT_COMP] = "from_north_true_tilt_comp",
	[IIO_MOD_RUNNING] = "running",
	[IIO_MOD_JOGGING] = "jogging",
	[IIO_MOD_WALKING] = "walking",
	[IIO_MOD_STILL] = "still",
	[IIO_MOD_ROOT_SUM_SQUARED_X_Y_Z] = "sqrt(x^2+y^2+z^2)",
	[IIO_MOD_I] = "i",
	[IIO_MOD_Q] = "q",
	[IIO_MOD_CO2] = "co2",
	[IIO_MOD_VOC] = "voc",
	[IIO_MOD_PM1] = "pm1",
	[IIO_MOD_PM2P5] = "pm2p5",
	[IIO_MOD_PM4] = "pm4",
	[IIO_MOD_PM10] = "pm10",
	[IIO_MOD_ETHANOL] = "ethanol",
	[IIO_MOD_H2] = "h2",
	[IIO_MOD_O2] = "o2",
	[IIO_MOD_LINEAR_X] = "linear_x",
	[IIO_MOD_LINEAR_Y] = "linear_y",
	[IIO_MOD_LINEAR_Z] = "linear_z",
	[IIO_MOD_PITCH] = "pitch",
	[IIO_MOD_YAW] = "yaw",
	[IIO_MOD_ROLL] = "roll",
};

/*
 * Attribute-name postfixes, indexed by enum iio_chan_info_enum.
 * relies on pairs of these shared then separate
 */
static const char * const iio_chan_info_postfix[] = {
	[IIO_CHAN_INFO_RAW] = "raw",
	[IIO_CHAN_INFO_PROCESSED] = "input",
	[IIO_CHAN_INFO_SCALE] = "scale",
	[IIO_CHAN_INFO_OFFSET] = "offset",
	[IIO_CHAN_INFO_CALIBSCALE] = "calibscale",
	[IIO_CHAN_INFO_CALIBBIAS] = "calibbias",
	[IIO_CHAN_INFO_PEAK] = "peak_raw",
	[IIO_CHAN_INFO_PEAK_SCALE] = "peak_scale",
	[IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW] = "quadrature_correction_raw",
	[IIO_CHAN_INFO_AVERAGE_RAW] = "mean_raw",
	[IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY]
	= "filter_low_pass_3db_frequency",
	[IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY]
	= "filter_high_pass_3db_frequency",
	[IIO_CHAN_INFO_SAMP_FREQ] = "sampling_frequency",
	[IIO_CHAN_INFO_FREQUENCY] = "frequency",
	[IIO_CHAN_INFO_PHASE] = "phase",
	[IIO_CHAN_INFO_HARDWAREGAIN] = "hardwaregain",
	[IIO_CHAN_INFO_HYSTERESIS] = "hysteresis",
	[IIO_CHAN_INFO_HYSTERESIS_RELATIVE] = "hysteresis_relative",
	[IIO_CHAN_INFO_INT_TIME] = "integration_time",
	[IIO_CHAN_INFO_ENABLE] = "en",
	[IIO_CHAN_INFO_CALIBHEIGHT] = "calibheight",
	[IIO_CHAN_INFO_CALIBWEIGHT] = "calibweight",
	[IIO_CHAN_INFO_DEBOUNCE_COUNT] = "debounce_count",
	[IIO_CHAN_INFO_DEBOUNCE_TIME] = "debounce_time",
	[IIO_CHAN_INFO_CALIBEMISSIVITY] = "calibemissivity",
	[IIO_CHAN_INFO_OVERSAMPLING_RATIO] = "oversampling_ratio",
	[IIO_CHAN_INFO_THERMOCOUPLE_TYPE] = "thermocouple_type",
	[IIO_CHAN_INFO_CALIBAMBIENT] = "calibambient",
	[IIO_CHAN_INFO_ZEROPOINT] = "zeropoint",
	[IIO_CHAN_INFO_TROUGH] = "trough_raw",
};
190 /**
191  * iio_device_id() - query the unique ID for the device
192  * @indio_dev:		Device structure whose ID is being queried
193  *
194  * The IIO device ID is a unique index used for example for the naming
195  * of the character device /dev/iio\:device[ID].
196  *
197  * Returns: Unique ID for the device.
198  */
199 int iio_device_id(struct iio_dev *indio_dev)
200 {
201 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
202 
203 	return iio_dev_opaque->id;
204 }
205 EXPORT_SYMBOL_GPL(iio_device_id);
206 
207 /**
208  * iio_buffer_enabled() - helper function to test if the buffer is enabled
209  * @indio_dev:		IIO device structure for device
210  *
211  * Returns: True, if the buffer is enabled.
212  */
213 bool iio_buffer_enabled(struct iio_dev *indio_dev)
214 {
215 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
216 
217 	return iio_dev_opaque->currentmode & INDIO_ALL_BUFFER_MODES;
218 }
219 EXPORT_SYMBOL_GPL(iio_buffer_enabled);
220 
#if defined(CONFIG_DEBUG_FS)
/*
 * There's also a CONFIG_DEBUG_FS guard in include/linux/iio/iio.h for
 * iio_get_debugfs_dentry() to make it inline if CONFIG_DEBUG_FS is undefined
 */
struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev)
{
	return to_iio_dev_opaque(indio_dev)->debugfs_dentry;
}
EXPORT_SYMBOL_GPL(iio_get_debugfs_dentry);
#endif
234 
235 /**
236  * iio_find_channel_from_si() - get channel from its scan index
237  * @indio_dev:		device
238  * @si:			scan index to match
239  *
240  * Returns:
241  * Constant pointer to iio_chan_spec, if scan index matches, NULL on failure.
242  */
243 const struct iio_chan_spec
244 *iio_find_channel_from_si(struct iio_dev *indio_dev, int si)
245 {
246 	int i;
247 
248 	for (i = 0; i < indio_dev->num_channels; i++)
249 		if (indio_dev->channels[i].scan_index == si)
250 			return &indio_dev->channels[i];
251 	return NULL;
252 }
253 
254 /* This turns up an awful lot */
255 ssize_t iio_read_const_attr(struct device *dev,
256 			    struct device_attribute *attr,
257 			    char *buf)
258 {
259 	return sysfs_emit(buf, "%s\n", to_iio_const_attr(attr)->string);
260 }
261 EXPORT_SYMBOL(iio_read_const_attr);
262 
263 /**
264  * iio_device_set_clock() - Set current timestamping clock for the device
265  * @indio_dev: IIO device structure containing the device
266  * @clock_id: timestamping clock POSIX identifier to set.
267  *
268  * Returns: 0 on success, or a negative error code.
269  */
270 int iio_device_set_clock(struct iio_dev *indio_dev, clockid_t clock_id)
271 {
272 	int ret;
273 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
274 	const struct iio_event_interface *ev_int = iio_dev_opaque->event_interface;
275 
276 	ret = mutex_lock_interruptible(&iio_dev_opaque->mlock);
277 	if (ret)
278 		return ret;
279 	if ((ev_int && iio_event_enabled(ev_int)) ||
280 	    iio_buffer_enabled(indio_dev)) {
281 		mutex_unlock(&iio_dev_opaque->mlock);
282 		return -EBUSY;
283 	}
284 	iio_dev_opaque->clock_id = clock_id;
285 	mutex_unlock(&iio_dev_opaque->mlock);
286 
287 	return 0;
288 }
289 EXPORT_SYMBOL(iio_device_set_clock);
290 
291 /**
292  * iio_device_get_clock() - Retrieve current timestamping clock for the device
293  * @indio_dev: IIO device structure containing the device
294  *
295  * Returns: Clock ID of the current timestamping clock for the device.
296  */
297 clockid_t iio_device_get_clock(const struct iio_dev *indio_dev)
298 {
299 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
300 
301 	return iio_dev_opaque->clock_id;
302 }
303 EXPORT_SYMBOL(iio_device_get_clock);
304 
/**
 * iio_get_time_ns() - utility function to get a time stamp for events etc
 * @indio_dev: device
 *
 * Returns: Timestamp of the event in nanoseconds.
 */
s64 iio_get_time_ns(const struct iio_dev *indio_dev)
{
	struct timespec64 tp;

	/* Dispatch on the clock selected via iio_device_set_clock() */
	switch (iio_device_get_clock(indio_dev)) {
	case CLOCK_REALTIME:
		return ktime_get_real_ns();
	case CLOCK_MONOTONIC:
		return ktime_get_ns();
	case CLOCK_MONOTONIC_RAW:
		return ktime_get_raw_ns();
	case CLOCK_REALTIME_COARSE:
		return ktime_to_ns(ktime_get_coarse_real());
	case CLOCK_MONOTONIC_COARSE:
		ktime_get_coarse_ts64(&tp);
		return timespec64_to_ns(&tp);
	case CLOCK_BOOTTIME:
		return ktime_get_boottime_ns();
	case CLOCK_TAI:
		return ktime_get_clocktai_ns();
	default:
		/* clock_id is validated on the way in, so this is a bug */
		BUG();
	}
}
EXPORT_SYMBOL(iio_get_time_ns);
336 
337 static int __init iio_init(void)
338 {
339 	int ret;
340 
341 	/* Register sysfs bus */
342 	ret  = bus_register(&iio_bus_type);
343 	if (ret < 0) {
344 		pr_err("could not register bus type\n");
345 		goto error_nothing;
346 	}
347 
348 	ret = alloc_chrdev_region(&iio_devt, 0, IIO_DEV_MAX, "iio");
349 	if (ret < 0) {
350 		pr_err("failed to allocate char dev region\n");
351 		goto error_unregister_bus_type;
352 	}
353 
354 	iio_debugfs_dentry = debugfs_create_dir("iio", NULL);
355 
356 	return 0;
357 
358 error_unregister_bus_type:
359 	bus_unregister(&iio_bus_type);
360 error_nothing:
361 	return ret;
362 }
363 
/* Tear down everything set up by iio_init(), in reverse order. */
static void __exit iio_exit(void)
{
	/* iio_devt is non-zero only if the char dev region was allocated */
	if (iio_devt)
		unregister_chrdev_region(iio_devt, IIO_DEV_MAX);
	bus_unregister(&iio_bus_type);
	debugfs_remove(iio_debugfs_dentry);
}
371 
372 #if defined(CONFIG_DEBUG_FS)
/*
 * Read handler for debugfs "direct_reg_access": reads the register at
 * cached_reg_addr via the driver's debugfs_reg_access() callback and
 * returns it formatted as hex text.
 */
static ssize_t iio_debugfs_read_reg(struct file *file, char __user *userbuf,
			      size_t count, loff_t *ppos)
{
	struct iio_dev *indio_dev = file->private_data;
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	unsigned int val = 0;
	int ret;

	/*
	 * On a continued read serve the text cached by the initial read
	 * instead of hitting the hardware again.
	 */
	if (*ppos > 0)
		return simple_read_from_buffer(userbuf, count, ppos,
					       iio_dev_opaque->read_buf,
					       iio_dev_opaque->read_buf_len);

	/* writeval is ignored for reads; readval (&val) receives the data */
	ret = indio_dev->info->debugfs_reg_access(indio_dev,
						  iio_dev_opaque->cached_reg_addr,
						  0, &val);
	if (ret) {
		dev_err(indio_dev->dev.parent, "%s: read failed\n", __func__);
		return ret;
	}

	/* Cache the formatted value for any follow-up partial reads */
	iio_dev_opaque->read_buf_len = snprintf(iio_dev_opaque->read_buf,
						sizeof(iio_dev_opaque->read_buf),
						"0x%X\n", val);

	return simple_read_from_buffer(userbuf, count, ppos,
				       iio_dev_opaque->read_buf,
				       iio_dev_opaque->read_buf_len);
}
402 
/*
 * Write handler for debugfs "direct_reg_access". Input is either
 * "<reg>" (selects the register subsequent reads will access) or
 * "<reg> <val>" (additionally writes val to reg via the driver).
 */
static ssize_t iio_debugfs_write_reg(struct file *file,
		     const char __user *userbuf, size_t count, loff_t *ppos)
{
	struct iio_dev *indio_dev = file->private_data;
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	unsigned int reg, val;
	char buf[80];
	int ret;

	/* Leave room for the NUL terminator */
	count = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, userbuf, count))
		return -EFAULT;

	buf[count] = 0;

	/* %i accepts decimal, 0x-hex and 0-octal register/value notation */
	ret = sscanf(buf, "%i %i", &reg, &val);

	/* Dispatch on how many fields the user supplied */
	switch (ret) {
	case 1:
		/* Address only: select register for later reads */
		iio_dev_opaque->cached_reg_addr = reg;
		break;
	case 2:
		/* Address + value: select register and write through */
		iio_dev_opaque->cached_reg_addr = reg;
		ret = indio_dev->info->debugfs_reg_access(indio_dev, reg,
							  val, NULL);
		if (ret) {
			dev_err(indio_dev->dev.parent, "%s: write failed\n",
				__func__);
			return ret;
		}
		break;
	default:
		return -EINVAL;
	}

	return count;
}
440 
/* File operations for the per-device "direct_reg_access" debugfs file */
static const struct file_operations iio_debugfs_reg_fops = {
	.open = simple_open,
	.read = iio_debugfs_read_reg,
	.write = iio_debugfs_write_reg,
};
446 
447 static void iio_device_unregister_debugfs(struct iio_dev *indio_dev)
448 {
449 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
450 
451 	debugfs_remove_recursive(iio_dev_opaque->debugfs_dentry);
452 }
453 
454 static void iio_device_register_debugfs(struct iio_dev *indio_dev)
455 {
456 	struct iio_dev_opaque *iio_dev_opaque;
457 
458 	if (indio_dev->info->debugfs_reg_access == NULL)
459 		return;
460 
461 	if (!iio_debugfs_dentry)
462 		return;
463 
464 	iio_dev_opaque = to_iio_dev_opaque(indio_dev);
465 
466 	iio_dev_opaque->debugfs_dentry =
467 		debugfs_create_dir(dev_name(&indio_dev->dev),
468 				   iio_debugfs_dentry);
469 
470 	debugfs_create_file("direct_reg_access", 0644,
471 			    iio_dev_opaque->debugfs_dentry, indio_dev,
472 			    &iio_debugfs_reg_fops);
473 }
#else
/* No-op stubs used when debugfs support is compiled out */
static void iio_device_register_debugfs(struct iio_dev *indio_dev)
{
}

static void iio_device_unregister_debugfs(struct iio_dev *indio_dev)
{
}
#endif /* CONFIG_DEBUG_FS */
483 
484 static ssize_t iio_read_channel_ext_info(struct device *dev,
485 				     struct device_attribute *attr,
486 				     char *buf)
487 {
488 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
489 	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
490 	const struct iio_chan_spec_ext_info *ext_info;
491 
492 	ext_info = &this_attr->c->ext_info[this_attr->address];
493 
494 	return ext_info->read(indio_dev, ext_info->private, this_attr->c, buf);
495 }
496 
497 static ssize_t iio_write_channel_ext_info(struct device *dev,
498 				     struct device_attribute *attr,
499 				     const char *buf, size_t len)
500 {
501 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
502 	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
503 	const struct iio_chan_spec_ext_info *ext_info;
504 
505 	ext_info = &this_attr->c->ext_info[this_attr->address];
506 
507 	return ext_info->write(indio_dev, ext_info->private,
508 			       this_attr->c, buf, len);
509 }
510 
511 ssize_t iio_enum_available_read(struct iio_dev *indio_dev,
512 	uintptr_t priv, const struct iio_chan_spec *chan, char *buf)
513 {
514 	const struct iio_enum *e = (const struct iio_enum *)priv;
515 	unsigned int i;
516 	size_t len = 0;
517 
518 	if (!e->num_items)
519 		return 0;
520 
521 	for (i = 0; i < e->num_items; ++i) {
522 		if (!e->items[i])
523 			continue;
524 		len += sysfs_emit_at(buf, len, "%s ", e->items[i]);
525 	}
526 
527 	/* replace last space with a newline */
528 	buf[len - 1] = '\n';
529 
530 	return len;
531 }
532 EXPORT_SYMBOL_GPL(iio_enum_available_read);
533 
534 ssize_t iio_enum_read(struct iio_dev *indio_dev,
535 	uintptr_t priv, const struct iio_chan_spec *chan, char *buf)
536 {
537 	const struct iio_enum *e = (const struct iio_enum *)priv;
538 	int i;
539 
540 	if (!e->get)
541 		return -EINVAL;
542 
543 	i = e->get(indio_dev, chan);
544 	if (i < 0)
545 		return i;
546 	if (i >= e->num_items || !e->items[i])
547 		return -EINVAL;
548 
549 	return sysfs_emit(buf, "%s\n", e->items[i]);
550 }
551 EXPORT_SYMBOL_GPL(iio_enum_read);
552 
553 ssize_t iio_enum_write(struct iio_dev *indio_dev,
554 	uintptr_t priv, const struct iio_chan_spec *chan, const char *buf,
555 	size_t len)
556 {
557 	const struct iio_enum *e = (const struct iio_enum *)priv;
558 	int ret;
559 
560 	if (!e->set)
561 		return -EINVAL;
562 
563 	ret = __sysfs_match_string(e->items, e->num_items, buf);
564 	if (ret < 0)
565 		return ret;
566 
567 	ret = e->set(indio_dev, chan, ret);
568 	return ret ? ret : len;
569 }
570 EXPORT_SYMBOL_GPL(iio_enum_write);
571 
/* 3x3 identity rotation matrix, the fallback when no mounting matrix is set */
static const struct iio_mount_matrix iio_mount_idmatrix = {
	.rotation = {
		"1", "0", "0",
		"0", "1", "0",
		"0", "0", "1"
	}
};
579 
580 static int iio_setup_mount_idmatrix(const struct device *dev,
581 				    struct iio_mount_matrix *matrix)
582 {
583 	*matrix = iio_mount_idmatrix;
584 	dev_info(dev, "mounting matrix not found: using identity...\n");
585 	return 0;
586 }
587 
588 ssize_t iio_show_mount_matrix(struct iio_dev *indio_dev, uintptr_t priv,
589 			      const struct iio_chan_spec *chan, char *buf)
590 {
591 	const struct iio_mount_matrix *mtx;
592 
593 	mtx = ((iio_get_mount_matrix_t *)priv)(indio_dev, chan);
594 	if (IS_ERR(mtx))
595 		return PTR_ERR(mtx);
596 
597 	if (!mtx)
598 		mtx = &iio_mount_idmatrix;
599 
600 	return sysfs_emit(buf, "%s, %s, %s; %s, %s, %s; %s, %s, %s\n",
601 			  mtx->rotation[0], mtx->rotation[1], mtx->rotation[2],
602 			  mtx->rotation[3], mtx->rotation[4], mtx->rotation[5],
603 			  mtx->rotation[6], mtx->rotation[7], mtx->rotation[8]);
604 }
605 EXPORT_SYMBOL_GPL(iio_show_mount_matrix);
606 
607 /**
608  * iio_read_mount_matrix() - retrieve iio device mounting matrix from
609  *                           device "mount-matrix" property
610  * @dev:	device the mounting matrix property is assigned to
611  * @matrix:	where to store retrieved matrix
612  *
613  * If device is assigned no mounting matrix property, a default 3x3 identity
614  * matrix will be filled in.
615  *
616  * Returns: 0 if success, or a negative error code on failure.
617  */
618 int iio_read_mount_matrix(struct device *dev, struct iio_mount_matrix *matrix)
619 {
620 	size_t len = ARRAY_SIZE(iio_mount_idmatrix.rotation);
621 	int err;
622 
623 	err = device_property_read_string_array(dev, "mount-matrix", matrix->rotation, len);
624 	if (err == len)
625 		return 0;
626 
627 	if (err >= 0)
628 		/* Invalid number of matrix entries. */
629 		return -EINVAL;
630 
631 	if (err != -EINVAL)
632 		/* Invalid matrix declaration format. */
633 		return err;
634 
635 	/* Matrix was not declared at all: fallback to identity. */
636 	return iio_setup_mount_idmatrix(dev, matrix);
637 }
638 EXPORT_SYMBOL(iio_read_mount_matrix);
639 
/*
 * Format a single IIO value (no trailing newline) at @offset into @buf.
 * @type selects the IIO_VAL_* encoding of @vals; @size is the number of
 * entries in @vals (only used for IIO_VAL_INT_MULTIPLE).
 * Returns the number of characters written, 0 for an unknown type.
 */
static ssize_t __iio_format_value(char *buf, size_t offset, unsigned int type,
				  int size, const int *vals)
{
	int tmp0, tmp1;
	s64 tmp2;
	bool scale_db = false;

	switch (type) {
	case IIO_VAL_INT:
		return sysfs_emit_at(buf, offset, "%d", vals[0]);
	case IIO_VAL_INT_PLUS_MICRO_DB:
		scale_db = true;
		fallthrough;
	case IIO_VAL_INT_PLUS_MICRO:
		/*
		 * The sign may live in either part (e.g. 0 + -500000 for
		 * -0.5), so print the '-' explicitly and format the
		 * magnitudes.
		 */
		if (vals[1] < 0)
			return sysfs_emit_at(buf, offset, "-%d.%06u%s",
					     abs(vals[0]), -vals[1],
					     scale_db ? " dB" : "");
		else
			return sysfs_emit_at(buf, offset, "%d.%06u%s", vals[0],
					     vals[1], scale_db ? " dB" : "");
	case IIO_VAL_INT_PLUS_NANO:
		if (vals[1] < 0)
			return sysfs_emit_at(buf, offset, "-%d.%09u",
					     abs(vals[0]), -vals[1]);
		else
			return sysfs_emit_at(buf, offset, "%d.%09u", vals[0],
					     vals[1]);
	case IIO_VAL_FRACTIONAL:
		/* vals[0] / vals[1], rendered with 9 fractional digits */
		tmp2 = div_s64((s64)vals[0] * 1000000000LL, vals[1]);
		tmp1 = vals[1];
		tmp0 = (int)div_s64_rem(tmp2, 1000000000, &tmp1);
		/* Negative values < 1 need an explicit "-0." prefix */
		if ((tmp2 < 0) && (tmp0 == 0))
			return sysfs_emit_at(buf, offset, "-0.%09u", abs(tmp1));
		else
			return sysfs_emit_at(buf, offset, "%d.%09u", tmp0,
					     abs(tmp1));
	case IIO_VAL_FRACTIONAL_LOG2:
		/* vals[0] / 2^vals[1], rendered with 9 fractional digits */
		tmp2 = shift_right((s64)vals[0] * 1000000000LL, vals[1]);
		tmp0 = (int)div_s64_rem(tmp2, 1000000000LL, &tmp1);
		if (tmp0 == 0 && tmp2 < 0)
			return sysfs_emit_at(buf, offset, "-0.%09u", abs(tmp1));
		else
			return sysfs_emit_at(buf, offset, "%d.%09u", tmp0,
					     abs(tmp1));
	case IIO_VAL_INT_MULTIPLE:
	{
		/* Space-separated list of @size integers (trailing space) */
		int i;
		int l = 0;

		for (i = 0; i < size; ++i)
			l += sysfs_emit_at(buf, offset + l, "%d ", vals[i]);
		return l;
	}
	case IIO_VAL_CHAR:
		return sysfs_emit_at(buf, offset, "%c", (char)vals[0]);
	case IIO_VAL_INT_64:
		/* 64-bit value split across vals[1] (high) / vals[0] (low) */
		tmp2 = (s64)((((u64)vals[1]) << 32) | (u32)vals[0]);
		return sysfs_emit_at(buf, offset, "%lld", tmp2);
	default:
		return 0;
	}
}
703 
704 /**
705  * iio_format_value() - Formats a IIO value into its string representation
706  * @buf:	The buffer to which the formatted value gets written
707  *		which is assumed to be big enough (i.e. PAGE_SIZE).
708  * @type:	One of the IIO_VAL_* constants. This decides how the val
709  *		and val2 parameters are formatted.
710  * @size:	Number of IIO value entries contained in vals
711  * @vals:	Pointer to the values, exact meaning depends on the
712  *		type parameter.
713  *
714  * Returns:
715  * 0 by default, a negative number on failure or the total number of characters
716  * written for a type that belongs to the IIO_VAL_* constant.
717  */
718 ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals)
719 {
720 	ssize_t len;
721 
722 	len = __iio_format_value(buf, 0, type, size, vals);
723 	if (len >= PAGE_SIZE - 1)
724 		return -EFBIG;
725 
726 	return len + sysfs_emit_at(buf, len, "\n");
727 }
728 EXPORT_SYMBOL_GPL(iio_format_value);
729 
730 static ssize_t iio_read_channel_label(struct device *dev,
731 				      struct device_attribute *attr,
732 				      char *buf)
733 {
734 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
735 	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
736 
737 	if (indio_dev->info->read_label)
738 		return indio_dev->info->read_label(indio_dev, this_attr->c, buf);
739 
740 	if (this_attr->c->extend_name)
741 		return sysfs_emit(buf, "%s\n", this_attr->c->extend_name);
742 
743 	return -EINVAL;
744 }
745 
746 static ssize_t iio_read_channel_info(struct device *dev,
747 				     struct device_attribute *attr,
748 				     char *buf)
749 {
750 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
751 	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
752 	int vals[INDIO_MAX_RAW_ELEMENTS];
753 	int ret;
754 	int val_len = 2;
755 
756 	if (indio_dev->info->read_raw_multi)
757 		ret = indio_dev->info->read_raw_multi(indio_dev, this_attr->c,
758 							INDIO_MAX_RAW_ELEMENTS,
759 							vals, &val_len,
760 							this_attr->address);
761 	else
762 		ret = indio_dev->info->read_raw(indio_dev, this_attr->c,
763 				    &vals[0], &vals[1], this_attr->address);
764 
765 	if (ret < 0)
766 		return ret;
767 
768 	return iio_format_value(buf, ret, val_len, vals);
769 }
770 
771 static ssize_t iio_format_list(char *buf, const int *vals, int type, int length,
772 			       const char *prefix, const char *suffix)
773 {
774 	ssize_t len;
775 	int stride;
776 	int i;
777 
778 	switch (type) {
779 	case IIO_VAL_INT:
780 		stride = 1;
781 		break;
782 	default:
783 		stride = 2;
784 		break;
785 	}
786 
787 	len = sysfs_emit(buf, prefix);
788 
789 	for (i = 0; i <= length - stride; i += stride) {
790 		if (i != 0) {
791 			len += sysfs_emit_at(buf, len, " ");
792 			if (len >= PAGE_SIZE)
793 				return -EFBIG;
794 		}
795 
796 		len += __iio_format_value(buf, len, type, stride, &vals[i]);
797 		if (len >= PAGE_SIZE)
798 			return -EFBIG;
799 	}
800 
801 	len += sysfs_emit_at(buf, len, "%s\n", suffix);
802 
803 	return len;
804 }
805 
806 static ssize_t iio_format_avail_list(char *buf, const int *vals,
807 				     int type, int length)
808 {
809 
810 	return iio_format_list(buf, vals, type, length, "", "");
811 }
812 
813 static ssize_t iio_format_avail_range(char *buf, const int *vals, int type)
814 {
815 	int length;
816 
817 	/*
818 	 * length refers to the array size , not the number of elements.
819 	 * The purpose is to print the range [min , step ,max] so length should
820 	 * be 3 in case of int, and 6 for other types.
821 	 */
822 	switch (type) {
823 	case IIO_VAL_INT:
824 		length = 3;
825 		break;
826 	default:
827 		length = 6;
828 		break;
829 	}
830 
831 	return iio_format_list(buf, vals, type, length, "[", "]");
832 }
833 
834 static ssize_t iio_read_channel_info_avail(struct device *dev,
835 					   struct device_attribute *attr,
836 					   char *buf)
837 {
838 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
839 	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
840 	const int *vals;
841 	int ret;
842 	int length;
843 	int type;
844 
845 	ret = indio_dev->info->read_avail(indio_dev, this_attr->c,
846 					  &vals, &type, &length,
847 					  this_attr->address);
848 
849 	if (ret < 0)
850 		return ret;
851 	switch (ret) {
852 	case IIO_AVAIL_LIST:
853 		return iio_format_avail_list(buf, vals, type, length);
854 	case IIO_AVAIL_RANGE:
855 		return iio_format_avail_range(buf, vals, type);
856 	default:
857 		return -EINVAL;
858 	}
859 }
860 
/**
 * __iio_str_to_fixpoint() - Parse a fixed-point number from a string
 * @str: The string to parse
 * @fract_mult: Multiplier for the first decimal place, should be a power of 10
 * @integer: The integer part of the number
 * @fract: The fractional part of the number
 * @scale_db: True if this should parse as dB
 *
 * Returns:
 * 0 on success, or a negative error code if the string could not be parsed.
 */
static int __iio_str_to_fixpoint(const char *str, int fract_mult,
				 int *integer, int *fract, bool scale_db)
{
	int i = 0, f = 0;
	bool integer_part = true, negative = false;

	/* fract_mult == 0 means "plain integer": delegate to kstrtoint */
	if (fract_mult == 0) {
		*fract = 0;

		return kstrtoint(str, 0, integer);
	}

	/* Consume an optional leading sign */
	if (str[0] == '-') {
		negative = true;
		str++;
	} else if (str[0] == '+') {
		str++;
	}

	while (*str) {
		if ('0' <= *str && *str <= '9') {
			if (integer_part) {
				i = i * 10 + *str - '0';
			} else {
				/*
				 * Each fractional digit is weighted by the
				 * current multiplier, which shrinks by 10x
				 * per digit; extra digits beyond the
				 * precision are silently dropped.
				 */
				f += fract_mult * (*str - '0');
				fract_mult /= 10;
			}
		} else if (*str == '\n') {
			/* A newline is only valid as the final character */
			if (*(str + 1) == '\0')
				break;
			return -EINVAL;
		} else if (!strncmp(str, " dB", sizeof(" dB") - 1) && scale_db) {
			/* Ignore the dB suffix */
			str += sizeof(" dB") - 1;
			continue;
		} else if (!strncmp(str, "dB", sizeof("dB") - 1) && scale_db) {
			/* Ignore the dB suffix */
			str += sizeof("dB") - 1;
			continue;
		} else if (*str == '.' && integer_part) {
			/* Only one decimal point allowed */
			integer_part = false;
		} else {
			return -EINVAL;
		}
		str++;
	}

	/*
	 * Apply the sign: when the integer part is 0 (e.g. "-0.5") it must
	 * be carried by the fractional part instead.
	 */
	if (negative) {
		if (i)
			i = -i;
		else
			f = -f;
	}

	*integer = i;
	*fract = f;

	return 0;
}
931 
/**
 * iio_str_to_fixpoint() - Parse a fixed-point number from a string
 * @str: The string to parse
 * @fract_mult: Multiplier for the first decimal place, should be a power of 10
 * @integer: The integer part of the number
 * @fract: The fractional part of the number
 *
 * Returns:
 * 0 on success, or a negative error code if the string could not be parsed.
 */
int iio_str_to_fixpoint(const char *str, int fract_mult,
			int *integer, int *fract)
{
	/* Public wrapper: never accepts a dB suffix */
	return __iio_str_to_fixpoint(str, fract_mult, integer, fract, false);
}
EXPORT_SYMBOL_GPL(iio_str_to_fixpoint);
948 
/*
 * Sysfs store handler for channel info attributes: parses the user string
 * according to the format the driver advertises via write_raw_get_fmt()
 * and hands the (integer, fract) pair to write_raw().
 */
static ssize_t iio_write_channel_info(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int ret, fract_mult = 100000;
	int integer, fract = 0;
	bool is_char = false;
	bool scale_db = false;

	/* Assumes decimal - precision based on number of digits */
	if (!indio_dev->info->write_raw)
		return -EINVAL;

	/*
	 * fract_mult is the weight of the first fractional digit:
	 * 10^5 for micro (6 digits), 10^8 for nano (9 digits), 0 for
	 * plain integers. IIO_VAL_INT_PLUS_MICRO is the default when the
	 * driver supplies no write_raw_get_fmt() callback.
	 */
	if (indio_dev->info->write_raw_get_fmt)
		switch (indio_dev->info->write_raw_get_fmt(indio_dev,
			this_attr->c, this_attr->address)) {
		case IIO_VAL_INT:
			fract_mult = 0;
			break;
		case IIO_VAL_INT_PLUS_MICRO_DB:
			scale_db = true;
			fallthrough;
		case IIO_VAL_INT_PLUS_MICRO:
			fract_mult = 100000;
			break;
		case IIO_VAL_INT_PLUS_NANO:
			fract_mult = 100000000;
			break;
		case IIO_VAL_CHAR:
			is_char = true;
			break;
		default:
			return -EINVAL;
		}

	if (is_char) {
		/* Character values bypass the fixed-point parser */
		char ch;

		if (sscanf(buf, "%c", &ch) != 1)
			return -EINVAL;
		integer = ch;
	} else {
		ret = __iio_str_to_fixpoint(buf, fract_mult, &integer, &fract,
					    scale_db);
		if (ret)
			return ret;
	}

	ret = indio_dev->info->write_raw(indio_dev, this_attr->c,
					 integer, fract, this_attr->address);
	if (ret)
		return ret;

	return len;
}
1007 
/*
 * __iio_device_attr_init() - initialize one channel sysfs device attribute
 * @dev_attr: attribute to initialize
 * @postfix: base name suffix (e.g. "raw", "scale")
 * @chan: channel the attribute belongs to
 * @readfunc: show() callback, or NULL for a write-only attribute
 * @writefunc: store() callback, or NULL for a read-only attribute
 * @shared_by: sharing level (separate / by type / by dir / by all)
 *
 * Builds the attribute's full sysfs name from the channel's direction,
 * type, index, modifier and extend_name, then fills in the file mode and
 * callbacks.  The name string is allocated here and must be released with
 * __iio_device_attr_deinit().
 *
 * Returns 0 on success or a negative errno.
 */
static
int __iio_device_attr_init(struct device_attribute *dev_attr,
			   const char *postfix,
			   struct iio_chan_spec const *chan,
			   ssize_t (*readfunc)(struct device *dev,
					       struct device_attribute *attr,
					       char *buf),
			   ssize_t (*writefunc)(struct device *dev,
						struct device_attribute *attr,
						const char *buf,
						size_t len),
			   enum iio_shared_by shared_by)
{
	int ret = 0;
	char *name = NULL;
	char *full_postfix;

	sysfs_attr_init(&dev_attr->attr);

	/* Build up postfix of <extend_name>_<modifier>_postfix */
	if (chan->modified && (shared_by == IIO_SEPARATE)) {
		if (chan->extend_name)
			full_postfix = kasprintf(GFP_KERNEL, "%s_%s_%s",
						 iio_modifier_names[chan->channel2],
						 chan->extend_name,
						 postfix);
		else
			full_postfix = kasprintf(GFP_KERNEL, "%s_%s",
						 iio_modifier_names[chan->channel2],
						 postfix);
	} else {
		/* Shared attributes never carry the per-channel extend_name */
		if (chan->extend_name == NULL || shared_by != IIO_SEPARATE)
			full_postfix = kstrdup(postfix, GFP_KERNEL);
		else
			full_postfix = kasprintf(GFP_KERNEL,
						 "%s_%s",
						 chan->extend_name,
						 postfix);
	}
	if (full_postfix == NULL)
		return -ENOMEM;

	if (chan->differential) { /* Differential can not have modifier */
		switch (shared_by) {
		case IIO_SHARED_BY_ALL:
			name = kasprintf(GFP_KERNEL, "%s", full_postfix);
			break;
		case IIO_SHARED_BY_DIR:
			name = kasprintf(GFP_KERNEL, "%s_%s",
						iio_direction[chan->output],
						full_postfix);
			break;
		case IIO_SHARED_BY_TYPE:
			/* e.g. "in_voltage-voltage_<postfix>" */
			name = kasprintf(GFP_KERNEL, "%s_%s-%s_%s",
					    iio_direction[chan->output],
					    iio_chan_type_name_spec[chan->type],
					    iio_chan_type_name_spec[chan->type],
					    full_postfix);
			break;
		case IIO_SEPARATE:
			if (!chan->indexed) {
				WARN(1, "Differential channels must be indexed\n");
				ret = -EINVAL;
				goto error_free_full_postfix;
			}
			/* e.g. "in_voltage0-voltage1_<postfix>" */
			name = kasprintf(GFP_KERNEL,
					    "%s_%s%d-%s%d_%s",
					    iio_direction[chan->output],
					    iio_chan_type_name_spec[chan->type],
					    chan->channel,
					    iio_chan_type_name_spec[chan->type],
					    chan->channel2,
					    full_postfix);
			break;
		}
	} else { /* Single ended */
		switch (shared_by) {
		case IIO_SHARED_BY_ALL:
			name = kasprintf(GFP_KERNEL, "%s", full_postfix);
			break;
		case IIO_SHARED_BY_DIR:
			name = kasprintf(GFP_KERNEL, "%s_%s",
						iio_direction[chan->output],
						full_postfix);
			break;
		case IIO_SHARED_BY_TYPE:
			name = kasprintf(GFP_KERNEL, "%s_%s_%s",
					    iio_direction[chan->output],
					    iio_chan_type_name_spec[chan->type],
					    full_postfix);
			break;

		case IIO_SEPARATE:
			if (chan->indexed)
				name = kasprintf(GFP_KERNEL, "%s_%s%d_%s",
						    iio_direction[chan->output],
						    iio_chan_type_name_spec[chan->type],
						    chan->channel,
						    full_postfix);
			else
				name = kasprintf(GFP_KERNEL, "%s_%s_%s",
						    iio_direction[chan->output],
						    iio_chan_type_name_spec[chan->type],
						    full_postfix);
			break;
		}
	}
	if (name == NULL) {
		ret = -ENOMEM;
		goto error_free_full_postfix;
	}
	dev_attr->attr.name = name;

	if (readfunc) {
		dev_attr->attr.mode |= 0444;
		dev_attr->show = readfunc;
	}

	if (writefunc) {
		dev_attr->attr.mode |= 0200;
		dev_attr->store = writefunc;
	}

	/* Also reached on success (ret == 0): full_postfix is intermediate */
error_free_full_postfix:
	kfree(full_postfix);

	return ret;
}
1136 
/* Release the name string allocated by __iio_device_attr_init(). */
static void __iio_device_attr_deinit(struct device_attribute *dev_attr)
{
	kfree(dev_attr->attr.name);
}
1141 
/*
 * __iio_add_chan_devattr() - allocate one channel attribute and queue it
 *
 * Allocates an iio_dev_attr, initializes its name/callbacks via
 * __iio_device_attr_init() and appends it to @attr_list.
 *
 * Returns 0 on success, -EBUSY if an attribute with the same name is
 * already on @attr_list (a normal occurrence for shared attributes, so it
 * is only reported for IIO_SEPARATE), or another negative errno.
 */
int __iio_add_chan_devattr(const char *postfix,
			   struct iio_chan_spec const *chan,
			   ssize_t (*readfunc)(struct device *dev,
					       struct device_attribute *attr,
					       char *buf),
			   ssize_t (*writefunc)(struct device *dev,
						struct device_attribute *attr,
						const char *buf,
						size_t len),
			   u64 mask,
			   enum iio_shared_by shared_by,
			   struct device *dev,
			   struct iio_buffer *buffer,
			   struct list_head *attr_list)
{
	int ret;
	struct iio_dev_attr *iio_attr, *t;

	iio_attr = kzalloc(sizeof(*iio_attr), GFP_KERNEL);
	if (iio_attr == NULL)
		return -ENOMEM;
	ret = __iio_device_attr_init(&iio_attr->dev_attr,
				     postfix, chan,
				     readfunc, writefunc, shared_by);
	if (ret)
		goto error_iio_dev_attr_free;
	iio_attr->c = chan;
	iio_attr->address = mask;
	iio_attr->buffer = buffer;
	/* Reject duplicates; shared attributes legitimately collide. */
	list_for_each_entry(t, attr_list, l)
		if (strcmp(t->dev_attr.attr.name,
			   iio_attr->dev_attr.attr.name) == 0) {
			if (shared_by == IIO_SEPARATE)
				dev_err(dev, "tried to double register : %s\n",
					t->dev_attr.attr.name);
			ret = -EBUSY;
			goto error_device_attr_deinit;
		}
	list_add(&iio_attr->l, attr_list);

	return 0;

error_device_attr_deinit:
	__iio_device_attr_deinit(&iio_attr->dev_attr);
error_iio_dev_attr_free:
	kfree(iio_attr);
	return ret;
}
1190 
1191 static int iio_device_add_channel_label(struct iio_dev *indio_dev,
1192 					 struct iio_chan_spec const *chan)
1193 {
1194 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1195 	int ret;
1196 
1197 	if (!indio_dev->info->read_label && !chan->extend_name)
1198 		return 0;
1199 
1200 	ret = __iio_add_chan_devattr("label",
1201 				     chan,
1202 				     &iio_read_channel_label,
1203 				     NULL,
1204 				     0,
1205 				     IIO_SEPARATE,
1206 				     &indio_dev->dev,
1207 				     NULL,
1208 				     &iio_dev_opaque->channel_attr_list);
1209 	if (ret < 0)
1210 		return ret;
1211 
1212 	return 1;
1213 }
1214 
/*
 * iio_device_add_info_mask_type() - add attributes for one info bitmask
 * @indio_dev: the IIO device
 * @chan: channel the mask belongs to
 * @shared_by: sharing level this mask describes
 * @infomask: bitmap of IIO_CHAN_INFO_* items to expose
 *
 * Returns the number of attributes created or a negative errno.
 */
static int iio_device_add_info_mask_type(struct iio_dev *indio_dev,
					 struct iio_chan_spec const *chan,
					 enum iio_shared_by shared_by,
					 const long *infomask)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	int i, ret, attrcount = 0;

	for_each_set_bit(i, infomask, sizeof(*infomask)*8) {
		/* Each set bit must map onto a known postfix string. */
		if (i >= ARRAY_SIZE(iio_chan_info_postfix))
			return -EINVAL;
		ret = __iio_add_chan_devattr(iio_chan_info_postfix[i],
					     chan,
					     &iio_read_channel_info,
					     &iio_write_channel_info,
					     i,
					     shared_by,
					     &indio_dev->dev,
					     NULL,
					     &iio_dev_opaque->channel_attr_list);
		/* A shared attribute may already exist from another channel. */
		if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE))
			continue;
		if (ret < 0)
			return ret;
		attrcount++;
	}

	return attrcount;
}
1244 
/*
 * iio_device_add_info_mask_type_avail() - add "<item>_available" attributes
 * @indio_dev: the IIO device
 * @chan: channel the mask belongs to
 * @shared_by: sharing level this mask describes
 * @infomask: bitmap of IIO_CHAN_INFO_* items with available-values support
 *
 * Like iio_device_add_info_mask_type() but creates the read-only
 * "_available" companion attributes.
 *
 * Returns the number of attributes created or a negative errno.
 */
static int iio_device_add_info_mask_type_avail(struct iio_dev *indio_dev,
					       struct iio_chan_spec const *chan,
					       enum iio_shared_by shared_by,
					       const long *infomask)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	int i, ret, attrcount = 0;
	char *avail_postfix;

	for_each_set_bit(i, infomask, sizeof(*infomask) * 8) {
		/* Each set bit must map onto a known postfix string. */
		if (i >= ARRAY_SIZE(iio_chan_info_postfix))
			return -EINVAL;
		avail_postfix = kasprintf(GFP_KERNEL,
					  "%s_available",
					  iio_chan_info_postfix[i]);
		if (!avail_postfix)
			return -ENOMEM;

		ret = __iio_add_chan_devattr(avail_postfix,
					     chan,
					     &iio_read_channel_info_avail,
					     NULL,
					     i,
					     shared_by,
					     &indio_dev->dev,
					     NULL,
					     &iio_dev_opaque->channel_attr_list);
		/* The attr init duplicated the name; this copy can go. */
		kfree(avail_postfix);
		/* A shared attribute may already exist from another channel. */
		if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE))
			continue;
		if (ret < 0)
			return ret;
		attrcount++;
	}

	return attrcount;
}
1282 
/*
 * iio_device_add_channel_sysfs() - create all sysfs attributes of a channel
 * @indio_dev: the IIO device
 * @chan: channel to create attributes for
 *
 * Walks every info mask (separate / by-type / by-dir / by-all, plus the
 * matching "_available" masks), the optional label, and any driver
 * extended info entries.
 *
 * Returns the total number of attributes added or a negative errno.
 */
static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
					struct iio_chan_spec const *chan)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	int ret, attrcount = 0;
	const struct iio_chan_spec_ext_info *ext_info;

	/* Channels with a negative index get no sysfs representation. */
	if (chan->channel < 0)
		return 0;
	ret = iio_device_add_info_mask_type(indio_dev, chan,
					    IIO_SEPARATE,
					    &chan->info_mask_separate);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
						  IIO_SEPARATE,
						  &chan->info_mask_separate_available);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type(indio_dev, chan,
					    IIO_SHARED_BY_TYPE,
					    &chan->info_mask_shared_by_type);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
						  IIO_SHARED_BY_TYPE,
						  &chan->info_mask_shared_by_type_available);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type(indio_dev, chan,
					    IIO_SHARED_BY_DIR,
					    &chan->info_mask_shared_by_dir);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
						  IIO_SHARED_BY_DIR,
						  &chan->info_mask_shared_by_dir_available);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type(indio_dev, chan,
					    IIO_SHARED_BY_ALL,
					    &chan->info_mask_shared_by_all);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
						  IIO_SHARED_BY_ALL,
						  &chan->info_mask_shared_by_all_available);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_channel_label(indio_dev, chan);
	if (ret < 0)
		return ret;
	attrcount += ret;

	/* Driver-provided extended info attributes, if any. */
	if (chan->ext_info) {
		unsigned int i = 0;

		for (ext_info = chan->ext_info; ext_info->name; ext_info++) {
			ret = __iio_add_chan_devattr(ext_info->name,
					chan,
					ext_info->read ?
					    &iio_read_channel_ext_info : NULL,
					ext_info->write ?
					    &iio_write_channel_ext_info : NULL,
					i,
					ext_info->shared,
					&indio_dev->dev,
					NULL,
					&iio_dev_opaque->channel_attr_list);
			i++;
			/* Shared ext_info may already have been registered. */
			if (ret == -EBUSY && ext_info->shared)
				continue;

			if (ret)
				return ret;

			attrcount++;
		}
	}

	return attrcount;
}
1381 
/**
 * iio_free_chan_devattr_list() - Free a list of IIO device attributes
 * @attr_list: List of IIO device attributes
 *
 * This function frees the memory allocated for each of the IIO device
 * attributes in the list.
 */
void iio_free_chan_devattr_list(struct list_head *attr_list)
{
	struct iio_dev_attr *p, *n;

	list_for_each_entry_safe(p, n, attr_list, l) {
		/* kfree_const: tolerates names that live in .rodata */
		kfree_const(p->dev_attr.attr.name);
		list_del(&p->l);
		kfree(p);
	}
}
1399 
1400 static ssize_t name_show(struct device *dev, struct device_attribute *attr,
1401 			 char *buf)
1402 {
1403 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
1404 
1405 	return sysfs_emit(buf, "%s\n", indio_dev->name);
1406 }
1407 
1408 static DEVICE_ATTR_RO(name);
1409 
1410 static ssize_t label_show(struct device *dev, struct device_attribute *attr,
1411 			  char *buf)
1412 {
1413 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
1414 
1415 	return sysfs_emit(buf, "%s\n", indio_dev->label);
1416 }
1417 
1418 static DEVICE_ATTR_RO(label);
1419 
/*
 * Human-readable clock names, indexed by clockid_t, used by the
 * current_timestamp_clock sysfs attribute (show and string matching).
 */
static const char * const clock_names[] = {
	[CLOCK_REALTIME]	 	= "realtime",
	[CLOCK_MONOTONIC]	 	= "monotonic",
	[CLOCK_PROCESS_CPUTIME_ID]	= "process_cputime_id",
	[CLOCK_THREAD_CPUTIME_ID]	= "thread_cputime_id",
	[CLOCK_MONOTONIC_RAW]	 	= "monotonic_raw",
	[CLOCK_REALTIME_COARSE]	 	= "realtime_coarse",
	[CLOCK_MONOTONIC_COARSE] 	= "monotonic_coarse",
	[CLOCK_BOOTTIME]	 	= "boottime",
	[CLOCK_REALTIME_ALARM]		= "realtime_alarm",
	[CLOCK_BOOTTIME_ALARM]		= "boottime_alarm",
	[CLOCK_SGI_CYCLE]		= "sgi_cycle",
	[CLOCK_TAI]		 	= "tai",
};
1434 
/* sysfs show: print the name of the clock timestamping this device. */
static ssize_t current_timestamp_clock_show(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	const struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	const clockid_t clk = iio_device_get_clock(indio_dev);

	switch (clk) {
	case CLOCK_REALTIME:
	case CLOCK_MONOTONIC:
	case CLOCK_MONOTONIC_RAW:
	case CLOCK_REALTIME_COARSE:
	case CLOCK_MONOTONIC_COARSE:
	case CLOCK_BOOTTIME:
	case CLOCK_TAI:
		break;
	default:
		/*
		 * Only the clocks accepted by the store side should ever be
		 * set, so anything else indicates internal corruption.
		 */
		BUG();
	}

	return sysfs_emit(buf, "%s\n", clock_names[clk]);
}
1457 
1458 static ssize_t current_timestamp_clock_store(struct device *dev,
1459 					     struct device_attribute *attr,
1460 					     const char *buf, size_t len)
1461 {
1462 	clockid_t clk;
1463 	int ret;
1464 
1465 	ret = sysfs_match_string(clock_names, buf);
1466 	if (ret < 0)
1467 		return ret;
1468 	clk = ret;
1469 
1470 	switch (clk) {
1471 	case CLOCK_REALTIME:
1472 	case CLOCK_MONOTONIC:
1473 	case CLOCK_MONOTONIC_RAW:
1474 	case CLOCK_REALTIME_COARSE:
1475 	case CLOCK_MONOTONIC_COARSE:
1476 	case CLOCK_BOOTTIME:
1477 	case CLOCK_TAI:
1478 		break;
1479 	default:
1480 		return -EINVAL;
1481 	}
1482 
1483 	ret = iio_device_set_clock(dev_to_iio_dev(dev), clk);
1484 	if (ret)
1485 		return ret;
1486 
1487 	return len;
1488 }
1489 
/*
 * iio_device_register_sysfs_group() - append a group to the device's groups
 * @indio_dev: device to add the group to
 * @group: attribute group to append
 *
 * Grows the NULL-terminated array of attribute groups by one entry.  On
 * allocation failure the old array remains valid and untouched.
 *
 * Returns 0 on success or -ENOMEM.
 */
int iio_device_register_sysfs_group(struct iio_dev *indio_dev,
				    const struct attribute_group *group)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	const struct attribute_group **new, **old = iio_dev_opaque->groups;
	unsigned int cnt = iio_dev_opaque->groupcounter;

	/* cnt + 2: one slot for the new group, one for the terminator. */
	new = krealloc_array(old, cnt + 2, sizeof(*new), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	new[iio_dev_opaque->groupcounter++] = group;
	new[iio_dev_opaque->groupcounter] = NULL;

	iio_dev_opaque->groups = new;

	return 0;
}
1508 
/* Declared after its show/store so the macro can find both callbacks. */
static DEVICE_ATTR_RW(current_timestamp_clock);
1510 
/*
 * iio_device_register_sysfs() - build and register the channel attr group
 * @indio_dev: device being registered
 *
 * Counts the driver's own attributes plus everything generated per
 * channel, allocates one flat attribute array, fills it, and registers it
 * as a device attribute group.
 *
 * Returns 0 on success or a negative errno.
 */
static int iio_device_register_sysfs(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	int i, ret = 0, attrcount, attrn, attrcount_orig = 0;
	struct iio_dev_attr *p;
	struct attribute **attr, *clk = NULL;

	/* First count elements in any existing group */
	if (indio_dev->info->attrs) {
		attr = indio_dev->info->attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	/*
	 * New channel registration method - relies on the fact a group does
	 * not need to be initialized if its name is NULL.
	 */
	if (indio_dev->channels)
		for (i = 0; i < indio_dev->num_channels; i++) {
			const struct iio_chan_spec *chan =
				&indio_dev->channels[i];

			/* A timestamp channel implies the clock attribute. */
			if (chan->type == IIO_TIMESTAMP)
				clk = &dev_attr_current_timestamp_clock.attr;

			ret = iio_device_add_channel_sysfs(indio_dev, chan);
			if (ret < 0)
				goto error_clear_attrs;
			attrcount += ret;
		}

	/* Events are timestamped too, so they also need the attribute. */
	if (iio_dev_opaque->event_interface)
		clk = &dev_attr_current_timestamp_clock.attr;

	if (indio_dev->name)
		attrcount++;
	if (indio_dev->label)
		attrcount++;
	if (clk)
		attrcount++;

	/* +1 leaves a NULL terminator at the end of the array. */
	iio_dev_opaque->chan_attr_group.attrs =
		kcalloc(attrcount + 1,
			sizeof(iio_dev_opaque->chan_attr_group.attrs[0]),
			GFP_KERNEL);
	if (iio_dev_opaque->chan_attr_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_clear_attrs;
	}
	/* Copy across original attributes, and point to original binary attributes */
	if (indio_dev->info->attrs) {
		memcpy(iio_dev_opaque->chan_attr_group.attrs,
		       indio_dev->info->attrs->attrs,
		       sizeof(iio_dev_opaque->chan_attr_group.attrs[0])
		       *attrcount_orig);
		iio_dev_opaque->chan_attr_group.is_visible =
			indio_dev->info->attrs->is_visible;
		iio_dev_opaque->chan_attr_group.bin_attrs =
			indio_dev->info->attrs->bin_attrs;
	}
	attrn = attrcount_orig;
	/* Add all elements from the list. */
	list_for_each_entry(p, &iio_dev_opaque->channel_attr_list, l)
		iio_dev_opaque->chan_attr_group.attrs[attrn++] = &p->dev_attr.attr;
	if (indio_dev->name)
		iio_dev_opaque->chan_attr_group.attrs[attrn++] = &dev_attr_name.attr;
	if (indio_dev->label)
		iio_dev_opaque->chan_attr_group.attrs[attrn++] = &dev_attr_label.attr;
	if (clk)
		iio_dev_opaque->chan_attr_group.attrs[attrn++] = clk;

	ret = iio_device_register_sysfs_group(indio_dev,
					      &iio_dev_opaque->chan_attr_group);
	if (ret)
		goto error_free_chan_attrs;

	return 0;

error_free_chan_attrs:
	kfree(iio_dev_opaque->chan_attr_group.attrs);
	iio_dev_opaque->chan_attr_group.attrs = NULL;
error_clear_attrs:
	iio_free_chan_devattr_list(&iio_dev_opaque->channel_attr_list);

	return ret;
}
1598 
/* Tear down everything iio_device_register_sysfs() set up. */
static void iio_device_unregister_sysfs(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	iio_free_chan_devattr_list(&iio_dev_opaque->channel_attr_list);
	kfree(iio_dev_opaque->chan_attr_group.attrs);
	iio_dev_opaque->chan_attr_group.attrs = NULL;
	kfree(iio_dev_opaque->groups);
	iio_dev_opaque->groups = NULL;
}
1609 
/*
 * struct device release callback: runs when the last reference to the
 * device is dropped.  Undoes registration state and frees the combined
 * iio_dev_opaque allocation made by iio_device_alloc().
 */
static void iio_dev_release(struct device *device)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(device);
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	if (indio_dev->modes & INDIO_ALL_TRIGGERED_MODES)
		iio_device_unregister_trigger_consumer(indio_dev);
	iio_device_unregister_eventset(indio_dev);
	iio_device_unregister_sysfs(indio_dev);

	iio_device_detach_buffers(indio_dev);

	lockdep_unregister_key(&iio_dev_opaque->mlock_key);

	ida_free(&iio_ida, iio_dev_opaque->id);
	kfree(iio_dev_opaque);
}
1627 
/* Device type shared by all IIO devices; wires in the release handler. */
const struct device_type iio_device_type = {
	.name = "iio_device",
	.release = iio_dev_release,
};
1632 
/**
 * iio_device_alloc() - allocate an iio_dev from a driver
 * @parent:		Parent device.
 * @sizeof_priv:	Space to allocate for private structure.
 *
 * Returns:
 * Pointer to allocated iio_dev on success, NULL on failure.
 */
struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv)
{
	struct iio_dev_opaque *iio_dev_opaque;
	struct iio_dev *indio_dev;
	size_t alloc_size;

	/*
	 * Private data trails the opaque struct, padded to IIO_DMA_MINALIGN
	 * so that it starts on a suitably aligned boundary.
	 */
	if (sizeof_priv)
		alloc_size = ALIGN(sizeof(*iio_dev_opaque), IIO_DMA_MINALIGN) + sizeof_priv;
	else
		alloc_size = sizeof(*iio_dev_opaque);

	iio_dev_opaque = kzalloc(alloc_size, GFP_KERNEL);
	if (!iio_dev_opaque)
		return NULL;

	indio_dev = &iio_dev_opaque->indio_dev;

	if (sizeof_priv)
		indio_dev->priv = (char *)iio_dev_opaque +
			ALIGN(sizeof(*iio_dev_opaque), IIO_DMA_MINALIGN);

	indio_dev->dev.parent = parent;
	indio_dev->dev.type = &iio_device_type;
	indio_dev->dev.bus = &iio_bus_type;
	device_initialize(&indio_dev->dev);
	mutex_init(&iio_dev_opaque->mlock);
	mutex_init(&iio_dev_opaque->info_exist_lock);
	INIT_LIST_HEAD(&iio_dev_opaque->channel_attr_list);

	iio_dev_opaque->id = ida_alloc(&iio_ida, GFP_KERNEL);
	if (iio_dev_opaque->id < 0) {
		/* cannot use a dev_err as the name isn't available */
		pr_err("failed to get device id\n");
		kfree(iio_dev_opaque);
		return NULL;
	}

	if (dev_set_name(&indio_dev->dev, "iio:device%d", iio_dev_opaque->id)) {
		ida_free(&iio_ida, iio_dev_opaque->id);
		kfree(iio_dev_opaque);
		return NULL;
	}

	INIT_LIST_HEAD(&iio_dev_opaque->buffer_list);
	INIT_LIST_HEAD(&iio_dev_opaque->ioctl_handlers);

	/* Give each device's mlock its own lockdep class. */
	lockdep_register_key(&iio_dev_opaque->mlock_key);
	lockdep_set_class(&iio_dev_opaque->mlock, &iio_dev_opaque->mlock_key);

	return indio_dev;
}
EXPORT_SYMBOL(iio_device_alloc);
1693 
1694 /**
1695  * iio_device_free() - free an iio_dev from a driver
1696  * @dev:		the iio_dev associated with the device
1697  */
1698 void iio_device_free(struct iio_dev *dev)
1699 {
1700 	if (dev)
1701 		put_device(&dev->dev);
1702 }
1703 EXPORT_SYMBOL(iio_device_free);
1704 
/* devm action callback: release the iio_dev on driver detach. */
static void devm_iio_device_release(void *iio_dev)
{
	iio_device_free(iio_dev);
}
1709 
1710 /**
1711  * devm_iio_device_alloc - Resource-managed iio_device_alloc()
1712  * @parent:		Device to allocate iio_dev for, and parent for this IIO device
1713  * @sizeof_priv:	Space to allocate for private structure.
1714  *
1715  * Managed iio_device_alloc. iio_dev allocated with this function is
1716  * automatically freed on driver detach.
1717  *
1718  * Returns:
1719  * Pointer to allocated iio_dev on success, NULL on failure.
1720  */
1721 struct iio_dev *devm_iio_device_alloc(struct device *parent, int sizeof_priv)
1722 {
1723 	struct iio_dev *iio_dev;
1724 	int ret;
1725 
1726 	iio_dev = iio_device_alloc(parent, sizeof_priv);
1727 	if (!iio_dev)
1728 		return NULL;
1729 
1730 	ret = devm_add_action_or_reset(parent, devm_iio_device_release,
1731 				       iio_dev);
1732 	if (ret)
1733 		return NULL;
1734 
1735 	return iio_dev;
1736 }
1737 EXPORT_SYMBOL_GPL(devm_iio_device_alloc);
1738 
/**
 * iio_chrdev_open() - chrdev file open for buffer access and ioctls
 * @inode:	Inode structure for identifying the device in the file system
 * @filp:	File structure for iio device used to keep and later access
 *		private data
 *
 * Returns: 0 on success or -EBUSY if the device is already opened
 */
static int iio_chrdev_open(struct inode *inode, struct file *filp)
{
	struct iio_dev_opaque *iio_dev_opaque =
		container_of(inode->i_cdev, struct iio_dev_opaque, chrdev);
	struct iio_dev *indio_dev = &iio_dev_opaque->indio_dev;
	struct iio_dev_buffer_pair *ib;

	/* Only one opener of the chrdev at a time. */
	if (test_and_set_bit(IIO_BUSY_BIT_POS, &iio_dev_opaque->flags))
		return -EBUSY;

	/* Hold a reference for the lifetime of the file handle. */
	iio_device_get(indio_dev);

	ib = kmalloc(sizeof(*ib), GFP_KERNEL);
	if (!ib) {
		/* Roll back the reference and the busy bit. */
		iio_device_put(indio_dev);
		clear_bit(IIO_BUSY_BIT_POS, &iio_dev_opaque->flags);
		return -ENOMEM;
	}

	/* Pair the device with its (legacy) buffer for the file ops. */
	ib->indio_dev = indio_dev;
	ib->buffer = indio_dev->buffer;

	filp->private_data = ib;

	return 0;
}
1773 
1774 /**
1775  * iio_chrdev_release() - chrdev file close buffer access and ioctls
1776  * @inode:	Inode structure pointer for the char device
1777  * @filp:	File structure pointer for the char device
1778  *
1779  * Returns: 0 for successful release.
1780  */
1781 static int iio_chrdev_release(struct inode *inode, struct file *filp)
1782 {
1783 	struct iio_dev_buffer_pair *ib = filp->private_data;
1784 	struct iio_dev_opaque *iio_dev_opaque =
1785 		container_of(inode->i_cdev, struct iio_dev_opaque, chrdev);
1786 	struct iio_dev *indio_dev = &iio_dev_opaque->indio_dev;
1787 
1788 	kfree(ib);
1789 	clear_bit(IIO_BUSY_BIT_POS, &iio_dev_opaque->flags);
1790 	iio_device_put(indio_dev);
1791 
1792 	return 0;
1793 }
1794 
/* Append @h to the tail of the device's chain of ioctl handlers. */
void iio_device_ioctl_handler_register(struct iio_dev *indio_dev,
				       struct iio_ioctl_handler *h)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	list_add_tail(&h->entry, &iio_dev_opaque->ioctl_handlers);
}
1802 
/* Remove a handler added with iio_device_ioctl_handler_register(). */
void iio_device_ioctl_handler_unregister(struct iio_ioctl_handler *h)
{
	list_del(&h->entry);
}
1807 
/*
 * iio_ioctl() - dispatch an ioctl to the registered handler chain
 *
 * Offers @cmd to each registered handler in turn until one claims it.
 * Returns the handler's result, -ENODEV if the device has been removed
 * or if no handler claims the command.
 */
static long iio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct iio_dev_buffer_pair *ib = filp->private_data;
	struct iio_dev *indio_dev = ib->indio_dev;
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_ioctl_handler *h;
	int ret;

	/* Lock auto-released on any return path below. */
	guard(mutex)(&iio_dev_opaque->info_exist_lock);
	/*
	 * The NULL check here is required to prevent crashing when a device
	 * is being removed while userspace would still have open file handles
	 * to try to access this device.
	 */
	if (!indio_dev->info)
		return -ENODEV;

	list_for_each_entry(h, &iio_dev_opaque->ioctl_handlers, entry) {
		ret = h->ioctl(indio_dev, filp, cmd, arg);
		if (ret != IIO_IOCTL_UNHANDLED)
			return ret;
	}

	return -ENODEV;
}
1833 
/* chrdev file ops used when the device has attached buffers. */
static const struct file_operations iio_buffer_fileops = {
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
	.read = iio_buffer_read_outer_addr,
	.write = iio_buffer_write_outer_addr,
	.poll = iio_buffer_poll_addr,
	.unlocked_ioctl = iio_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.open = iio_chrdev_open,
	.release = iio_chrdev_release,
};
1845 
/* chrdev file ops for event-only devices: ioctls but no read/write/poll. */
static const struct file_operations iio_event_fileops = {
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
	.unlocked_ioctl = iio_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.open = iio_chrdev_open,
	.release = iio_chrdev_release,
};
1854 
1855 static int iio_check_unique_scan_index(struct iio_dev *indio_dev)
1856 {
1857 	int i, j;
1858 	const struct iio_chan_spec *channels = indio_dev->channels;
1859 
1860 	if (!(indio_dev->modes & INDIO_ALL_BUFFER_MODES))
1861 		return 0;
1862 
1863 	for (i = 0; i < indio_dev->num_channels - 1; i++) {
1864 		if (channels[i].scan_index < 0)
1865 			continue;
1866 		for (j = i + 1; j < indio_dev->num_channels; j++)
1867 			if (channels[i].scan_index == channels[j].scan_index) {
1868 				dev_err(&indio_dev->dev,
1869 					"Duplicate scan index %d\n",
1870 					channels[i].scan_index);
1871 				return -EINVAL;
1872 			}
1873 	}
1874 
1875 	return 0;
1876 }
1877 
1878 static int iio_check_extended_name(const struct iio_dev *indio_dev)
1879 {
1880 	unsigned int i;
1881 
1882 	if (!indio_dev->info->read_label)
1883 		return 0;
1884 
1885 	for (i = 0; i < indio_dev->num_channels; i++) {
1886 		if (indio_dev->channels[i].extend_name) {
1887 			dev_err(&indio_dev->dev,
1888 				"Cannot use labels and extend_name at the same time\n");
1889 			return -EINVAL;
1890 		}
1891 	}
1892 
1893 	return 0;
1894 }
1895 
/* Empty setup ops substituted when a buffer-mode driver provides none. */
static const struct iio_buffer_setup_ops noop_ring_setup_ops;
1897 
/*
 * Warn at registration time about available_scan_masks arrays that cannot
 * work as intended: multi-long masks (termination detection is unsafe),
 * empty masks, and masks that are subsets of earlier entries and hence
 * unreachable.  Diagnostics only; never fails registration.
 */
static void iio_sanity_check_avail_scan_masks(struct iio_dev *indio_dev)
{
	unsigned int num_masks, masklength, longs_per_mask;
	const unsigned long *av_masks;
	int i;

	av_masks = indio_dev->available_scan_masks;
	masklength = indio_dev->masklength;
	longs_per_mask = BITS_TO_LONGS(masklength);

	/*
	 * The code determining how many available_scan_masks is in the array
	 * will be assuming the end of masks when first long with all bits
	 * zeroed is encountered. This is incorrect for masks where mask
	 * consists of more than one long, and where some of the available masks
	 * has long worth of bits zeroed (but has subsequent bit(s) set). This
	 * is a safety measure against bug where array of masks is terminated by
	 * a single zero while mask width is greater than width of a long.
	 */
	if (longs_per_mask > 1)
		dev_warn(indio_dev->dev.parent,
			 "multi long available scan masks not fully supported\n");

	if (bitmap_empty(av_masks, masklength))
		dev_warn(indio_dev->dev.parent, "empty scan mask\n");

	/* Count entries; a fully-zero first long terminates the array. */
	for (num_masks = 0; *av_masks; num_masks++)
		av_masks += longs_per_mask;

	if (num_masks < 2)
		return;

	av_masks = indio_dev->available_scan_masks;

	/*
	 * Go through all the masks from first to one before the last, and see
	 * that no mask found later from the available_scan_masks array is a
	 * subset of mask found earlier. If this happens, then the mask found
	 * later will never get used because scanning the array is stopped when
	 * the first suitable mask is found. Drivers should order the array of
	 * available masks in the order of preference (presumably the least
	 * costy to access masks first).
	 */
	for (i = 0; i < num_masks - 1; i++) {
		const unsigned long *mask1;
		int j;

		mask1 = av_masks + i * longs_per_mask;
		for (j = i + 1; j < num_masks; j++) {
			const unsigned long *mask2;

			mask2 = av_masks + j * longs_per_mask;
			if (bitmap_subset(mask2, mask1, masklength))
				dev_warn(indio_dev->dev.parent,
					 "available_scan_mask %d subset of %d. Never used\n",
					 j, i);
		}
	}
}
1957 
/*
 * __iio_device_register() - register a device with the IIO subsystem
 * @indio_dev: device to register (must have ->info set)
 * @this_mod: module registering the device; becomes the chrdev owner
 *
 * Validates channel configuration, creates buffer/sysfs/event interfaces
 * and finally adds the character device.  All steps are unwound in
 * reverse order on failure.
 *
 * Returns 0 on success or a negative errno.
 */
int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct fwnode_handle *fwnode = NULL;
	int ret;

	if (!indio_dev->info)
		return -EINVAL;

	iio_dev_opaque->driver_module = this_mod;

	/* If the calling driver did not initialize firmware node, do it here */
	if (dev_fwnode(&indio_dev->dev))
		fwnode = dev_fwnode(&indio_dev->dev);
	/* The default dummy IIO device has no parent */
	else if (indio_dev->dev.parent)
		fwnode = dev_fwnode(indio_dev->dev.parent);
	device_set_node(&indio_dev->dev, fwnode);

	/* Optional "label" property; absence is not an error. */
	fwnode_property_read_string(fwnode, "label", &indio_dev->label);

	ret = iio_check_unique_scan_index(indio_dev);
	if (ret < 0)
		return ret;

	ret = iio_check_extended_name(indio_dev);
	if (ret < 0)
		return ret;

	iio_device_register_debugfs(indio_dev);

	ret = iio_buffers_alloc_sysfs_and_mask(indio_dev);
	if (ret) {
		dev_err(indio_dev->dev.parent,
			"Failed to create buffer sysfs interfaces\n");
		goto error_unreg_debugfs;
	}

	if (indio_dev->available_scan_masks)
		iio_sanity_check_avail_scan_masks(indio_dev);

	ret = iio_device_register_sysfs(indio_dev);
	if (ret) {
		dev_err(indio_dev->dev.parent,
			"Failed to register sysfs interfaces\n");
		goto error_buffer_free_sysfs;
	}
	ret = iio_device_register_eventset(indio_dev);
	if (ret) {
		dev_err(indio_dev->dev.parent,
			"Failed to register event set\n");
		goto error_free_sysfs;
	}
	if (indio_dev->modes & INDIO_ALL_TRIGGERED_MODES)
		iio_device_register_trigger_consumer(indio_dev);

	/* Buffered devices with no setup ops get the no-op placeholder. */
	if ((indio_dev->modes & INDIO_ALL_BUFFER_MODES) &&
		indio_dev->setup_ops == NULL)
		indio_dev->setup_ops = &noop_ring_setup_ops;

	/* Pick file ops based on what the device actually provides. */
	if (iio_dev_opaque->attached_buffers_cnt)
		cdev_init(&iio_dev_opaque->chrdev, &iio_buffer_fileops);
	else if (iio_dev_opaque->event_interface)
		cdev_init(&iio_dev_opaque->chrdev, &iio_event_fileops);

	if (iio_dev_opaque->attached_buffers_cnt || iio_dev_opaque->event_interface) {
		indio_dev->dev.devt = MKDEV(MAJOR(iio_devt), iio_dev_opaque->id);
		iio_dev_opaque->chrdev.owner = this_mod;
	}

	/* assign device groups now; they should be all registered now */
	indio_dev->dev.groups = iio_dev_opaque->groups;

	ret = cdev_device_add(&iio_dev_opaque->chrdev, &indio_dev->dev);
	if (ret < 0)
		goto error_unreg_eventset;

	return 0;

error_unreg_eventset:
	iio_device_unregister_eventset(indio_dev);
error_free_sysfs:
	iio_device_unregister_sysfs(indio_dev);
error_buffer_free_sysfs:
	iio_buffers_free_sysfs_and_mask(indio_dev);
error_unreg_debugfs:
	iio_device_unregister_debugfs(indio_dev);
	return ret;
}
EXPORT_SYMBOL(__iio_device_register);
2048 
/**
 * iio_device_unregister() - unregister a device from the IIO subsystem
 * @indio_dev:		Device structure representing the device.
 */
void iio_device_unregister(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	cdev_device_del(&iio_dev_opaque->chrdev, &indio_dev->dev);

	/*
	 * Clear ->info under info_exist_lock so in-flight ioctls (which
	 * check it under the same lock) see the device as gone.
	 */
	scoped_guard(mutex, &iio_dev_opaque->info_exist_lock) {
		iio_device_unregister_debugfs(indio_dev);

		iio_disable_all_buffers(indio_dev);

		indio_dev->info = NULL;

		/* Wake sleepers so they can observe the removal. */
		iio_device_wakeup_eventset(indio_dev);
		iio_buffer_wakeup_poll(indio_dev);
	}

	iio_buffers_free_sysfs_and_mask(indio_dev);
}
EXPORT_SYMBOL(iio_device_unregister);
2073 
/*
 * devres action callback: adapts iio_device_unregister() to the
 * void (*)(void *) signature expected by devm_add_action_or_reset().
 */
static void devm_iio_device_unreg(void *indio_dev)
{
	iio_device_unregister(indio_dev);
}
2078 
2079 int __devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev,
2080 			       struct module *this_mod)
2081 {
2082 	int ret;
2083 
2084 	ret = __iio_device_register(indio_dev, this_mod);
2085 	if (ret)
2086 		return ret;
2087 
2088 	return devm_add_action_or_reset(dev, devm_iio_device_unreg, indio_dev);
2089 }
2090 EXPORT_SYMBOL_GPL(__devm_iio_device_register);
2091 
2092 /**
2093  * iio_device_claim_direct_mode - Keep device in direct mode
2094  * @indio_dev:	the iio_dev associated with the device
2095  *
2096  * If the device is in direct mode it is guaranteed to stay
2097  * that way until iio_device_release_direct_mode() is called.
2098  *
2099  * Use with iio_device_release_direct_mode()
2100  *
2101  * Returns: 0 on success, -EBUSY on failure.
2102  */
2103 int iio_device_claim_direct_mode(struct iio_dev *indio_dev)
2104 {
2105 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
2106 
2107 	mutex_lock(&iio_dev_opaque->mlock);
2108 
2109 	if (iio_buffer_enabled(indio_dev)) {
2110 		mutex_unlock(&iio_dev_opaque->mlock);
2111 		return -EBUSY;
2112 	}
2113 	return 0;
2114 }
2115 EXPORT_SYMBOL_GPL(iio_device_claim_direct_mode);
2116 
2117 /**
2118  * iio_device_release_direct_mode - releases claim on direct mode
2119  * @indio_dev:	the iio_dev associated with the device
2120  *
2121  * Release the claim. Device is no longer guaranteed to stay
2122  * in direct mode.
2123  *
2124  * Use with iio_device_claim_direct_mode()
2125  */
2126 void iio_device_release_direct_mode(struct iio_dev *indio_dev)
2127 {
2128 	mutex_unlock(&to_iio_dev_opaque(indio_dev)->mlock);
2129 }
2130 EXPORT_SYMBOL_GPL(iio_device_release_direct_mode);
2131 
2132 /**
2133  * iio_device_claim_buffer_mode - Keep device in buffer mode
2134  * @indio_dev:	the iio_dev associated with the device
2135  *
2136  * If the device is in buffer mode it is guaranteed to stay
2137  * that way until iio_device_release_buffer_mode() is called.
2138  *
2139  * Use with iio_device_release_buffer_mode().
2140  *
2141  * Returns: 0 on success, -EBUSY on failure.
2142  */
2143 int iio_device_claim_buffer_mode(struct iio_dev *indio_dev)
2144 {
2145 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
2146 
2147 	mutex_lock(&iio_dev_opaque->mlock);
2148 
2149 	if (iio_buffer_enabled(indio_dev))
2150 		return 0;
2151 
2152 	mutex_unlock(&iio_dev_opaque->mlock);
2153 	return -EBUSY;
2154 }
2155 EXPORT_SYMBOL_GPL(iio_device_claim_buffer_mode);
2156 
2157 /**
2158  * iio_device_release_buffer_mode - releases claim on buffer mode
2159  * @indio_dev:	the iio_dev associated with the device
2160  *
2161  * Release the claim. Device is no longer guaranteed to stay
2162  * in buffer mode.
2163  *
2164  * Use with iio_device_claim_buffer_mode().
2165  */
2166 void iio_device_release_buffer_mode(struct iio_dev *indio_dev)
2167 {
2168 	mutex_unlock(&to_iio_dev_opaque(indio_dev)->mlock);
2169 }
2170 EXPORT_SYMBOL_GPL(iio_device_release_buffer_mode);
2171 
2172 /**
2173  * iio_device_get_current_mode() - helper function providing read-only access to
2174  *				   the opaque @currentmode variable
2175  * @indio_dev:			   IIO device structure for device
2176  */
2177 int iio_device_get_current_mode(struct iio_dev *indio_dev)
2178 {
2179 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
2180 
2181 	return iio_dev_opaque->currentmode;
2182 }
2183 EXPORT_SYMBOL_GPL(iio_device_get_current_mode);
2184 
/* Initialize the IIO core early so IIO drivers can register against it. */
subsys_initcall(iio_init);
module_exit(iio_exit);

MODULE_AUTHOR("Jonathan Cameron <jic23@kernel.org>");
MODULE_DESCRIPTION("Industrial I/O core");
MODULE_LICENSE("GPL");
2191