xref: /linux/drivers/iio/industrialio-core.c (revision 83bd89291f5cc866f60d32c34e268896c7ba8a3d)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * The industrial I/O core
4  *
5  * Copyright (c) 2008 Jonathan Cameron
6  *
7  * Based on elements of hwmon and input subsystems.
8  */
9 
10 #define pr_fmt(fmt) "iio-core: " fmt
11 
12 #include <linux/anon_inodes.h>
13 #include <linux/cdev.h>
14 #include <linux/cleanup.h>
15 #include <linux/debugfs.h>
16 #include <linux/device.h>
17 #include <linux/err.h>
18 #include <linux/fs.h>
19 #include <linux/idr.h>
20 #include <linux/kdev_t.h>
21 #include <linux/kernel.h>
22 #include <linux/module.h>
23 #include <linux/mutex.h>
24 #include <linux/poll.h>
25 #include <linux/property.h>
26 #include <linux/sched.h>
27 #include <linux/slab.h>
28 #include <linux/wait.h>
29 #include <linux/wordpart.h>
30 
31 #include <linux/iio/buffer.h>
32 #include <linux/iio/buffer_impl.h>
33 #include <linux/iio/events.h>
34 #include <linux/iio/iio-opaque.h>
35 #include <linux/iio/iio.h>
36 #include <linux/iio/sysfs.h>
37 
38 #include "iio_core.h"
39 #include "iio_core_trigger.h"
40 
/* IDA to assign each registered device a unique id */
static DEFINE_IDA(iio_ida);

/* First char device number of the region reserved below for IIO devices. */
static dev_t iio_devt;

/* Size of the char device region reserved at init time. */
#define IIO_DEV_MAX 256
const struct bus_type iio_bus_type = {
	.name = "iio",
};
EXPORT_SYMBOL(iio_bus_type);

/* Root "iio" debugfs directory; NULL when debugfs is unavailable. */
static struct dentry *iio_debugfs_dentry;
53 
/* Channel direction prefix used when building sysfs names ("in_*"/"out_*"). */
static const char * const iio_direction[] = {
	[0] = "in",
	[1] = "out",
};
58 
/* sysfs name fragment for each enum iio_chan_type, e.g. "in_voltage_raw". */
static const char * const iio_chan_type_name_spec[] = {
	[IIO_VOLTAGE] = "voltage",
	[IIO_CURRENT] = "current",
	[IIO_POWER] = "power",
	[IIO_ACCEL] = "accel",
	[IIO_ANGL_VEL] = "anglvel",
	[IIO_MAGN] = "magn",
	[IIO_LIGHT] = "illuminance",
	[IIO_INTENSITY] = "intensity",
	[IIO_PROXIMITY] = "proximity",
	[IIO_TEMP] = "temp",
	[IIO_INCLI] = "incli",
	[IIO_ROT] = "rot",
	[IIO_ANGL] = "angl",
	[IIO_TIMESTAMP] = "timestamp",
	[IIO_CAPACITANCE] = "capacitance",
	[IIO_ALTVOLTAGE] = "altvoltage",
	[IIO_CCT] = "cct",
	[IIO_PRESSURE] = "pressure",
	[IIO_HUMIDITYRELATIVE] = "humidityrelative",
	[IIO_ACTIVITY] = "activity",
	[IIO_STEPS] = "steps",
	[IIO_ENERGY] = "energy",
	[IIO_DISTANCE] = "distance",
	[IIO_VELOCITY] = "velocity",
	[IIO_CONCENTRATION] = "concentration",
	[IIO_RESISTANCE] = "resistance",
	[IIO_PH] = "ph",
	[IIO_UVINDEX] = "uvindex",
	[IIO_ELECTRICALCONDUCTIVITY] = "electricalconductivity",
	[IIO_COUNT] = "count",
	[IIO_INDEX] = "index",
	[IIO_GRAVITY]  = "gravity",
	[IIO_POSITIONRELATIVE]  = "positionrelative",
	[IIO_PHASE] = "phase",
	[IIO_MASSCONCENTRATION] = "massconcentration",
	[IIO_DELTA_ANGL] = "deltaangl",
	[IIO_DELTA_VELOCITY] = "deltavelocity",
	[IIO_COLORTEMP] = "colortemp",
	[IIO_CHROMATICITY] = "chromaticity",
	[IIO_ATTENTION] = "attention",
	[IIO_ALTCURRENT] = "altcurrent",
};
102 
/* sysfs name fragment for each enum iio_modifier, e.g. "in_accel_x_raw". */
static const char * const iio_modifier_names[] = {
	[IIO_MOD_X] = "x",
	[IIO_MOD_Y] = "y",
	[IIO_MOD_Z] = "z",
	[IIO_MOD_X_AND_Y] = "x&y",
	[IIO_MOD_X_AND_Z] = "x&z",
	[IIO_MOD_Y_AND_Z] = "y&z",
	[IIO_MOD_X_AND_Y_AND_Z] = "x&y&z",
	[IIO_MOD_X_OR_Y] = "x|y",
	[IIO_MOD_X_OR_Z] = "x|z",
	[IIO_MOD_Y_OR_Z] = "y|z",
	[IIO_MOD_X_OR_Y_OR_Z] = "x|y|z",
	[IIO_MOD_ROOT_SUM_SQUARED_X_Y] = "sqrt(x^2+y^2)",
	[IIO_MOD_SUM_SQUARED_X_Y_Z] = "x^2+y^2+z^2",
	[IIO_MOD_LIGHT_BOTH] = "both",
	[IIO_MOD_LIGHT_IR] = "ir",
	[IIO_MOD_LIGHT_CLEAR] = "clear",
	[IIO_MOD_LIGHT_RED] = "red",
	[IIO_MOD_LIGHT_GREEN] = "green",
	[IIO_MOD_LIGHT_BLUE] = "blue",
	[IIO_MOD_LIGHT_UV] = "uv",
	[IIO_MOD_LIGHT_UVA] = "uva",
	[IIO_MOD_LIGHT_UVB] = "uvb",
	[IIO_MOD_LIGHT_DUV] = "duv",
	[IIO_MOD_QUATERNION] = "quaternion",
	[IIO_MOD_TEMP_AMBIENT] = "ambient",
	[IIO_MOD_TEMP_OBJECT] = "object",
	[IIO_MOD_NORTH_MAGN] = "from_north_magnetic",
	[IIO_MOD_NORTH_TRUE] = "from_north_true",
	[IIO_MOD_NORTH_MAGN_TILT_COMP] = "from_north_magnetic_tilt_comp",
	[IIO_MOD_NORTH_TRUE_TILT_COMP] = "from_north_true_tilt_comp",
	[IIO_MOD_RUNNING] = "running",
	[IIO_MOD_JOGGING] = "jogging",
	[IIO_MOD_WALKING] = "walking",
	[IIO_MOD_STILL] = "still",
	[IIO_MOD_ROOT_SUM_SQUARED_X_Y_Z] = "sqrt(x^2+y^2+z^2)",
	[IIO_MOD_I] = "i",
	[IIO_MOD_Q] = "q",
	[IIO_MOD_CO2] = "co2",
	[IIO_MOD_VOC] = "voc",
	[IIO_MOD_PM1] = "pm1",
	[IIO_MOD_PM2P5] = "pm2p5",
	[IIO_MOD_PM4] = "pm4",
	[IIO_MOD_PM10] = "pm10",
	[IIO_MOD_ETHANOL] = "ethanol",
	[IIO_MOD_H2] = "h2",
	[IIO_MOD_O2] = "o2",
	[IIO_MOD_LINEAR_X] = "linear_x",
	[IIO_MOD_LINEAR_Y] = "linear_y",
	[IIO_MOD_LINEAR_Z] = "linear_z",
	[IIO_MOD_PITCH] = "pitch",
	[IIO_MOD_YAW] = "yaw",
	[IIO_MOD_ROLL] = "roll",
	[IIO_MOD_RMS] = "rms",
	[IIO_MOD_ACTIVE] = "active",
	[IIO_MOD_REACTIVE] = "reactive",
	[IIO_MOD_APPARENT] = "apparent",
};
161 
/* relies on pairs of these shared then separate */
/* sysfs attribute postfix for each enum iio_chan_info_enum value. */
static const char * const iio_chan_info_postfix[] = {
	[IIO_CHAN_INFO_RAW] = "raw",
	[IIO_CHAN_INFO_PROCESSED] = "input",
	[IIO_CHAN_INFO_SCALE] = "scale",
	[IIO_CHAN_INFO_OFFSET] = "offset",
	[IIO_CHAN_INFO_CALIBSCALE] = "calibscale",
	[IIO_CHAN_INFO_CALIBBIAS] = "calibbias",
	[IIO_CHAN_INFO_PEAK] = "peak_raw",
	[IIO_CHAN_INFO_PEAK_SCALE] = "peak_scale",
	[IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW] = "quadrature_correction_raw",
	[IIO_CHAN_INFO_AVERAGE_RAW] = "mean_raw",
	[IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY]
	= "filter_low_pass_3db_frequency",
	[IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY]
	= "filter_high_pass_3db_frequency",
	[IIO_CHAN_INFO_SAMP_FREQ] = "sampling_frequency",
	[IIO_CHAN_INFO_FREQUENCY] = "frequency",
	[IIO_CHAN_INFO_PHASE] = "phase",
	[IIO_CHAN_INFO_HARDWAREGAIN] = "hardwaregain",
	[IIO_CHAN_INFO_HYSTERESIS] = "hysteresis",
	[IIO_CHAN_INFO_HYSTERESIS_RELATIVE] = "hysteresis_relative",
	[IIO_CHAN_INFO_INT_TIME] = "integration_time",
	[IIO_CHAN_INFO_ENABLE] = "en",
	[IIO_CHAN_INFO_CALIBHEIGHT] = "calibheight",
	[IIO_CHAN_INFO_CALIBWEIGHT] = "calibweight",
	[IIO_CHAN_INFO_DEBOUNCE_COUNT] = "debounce_count",
	[IIO_CHAN_INFO_DEBOUNCE_TIME] = "debounce_time",
	[IIO_CHAN_INFO_CALIBEMISSIVITY] = "calibemissivity",
	[IIO_CHAN_INFO_OVERSAMPLING_RATIO] = "oversampling_ratio",
	[IIO_CHAN_INFO_THERMOCOUPLE_TYPE] = "thermocouple_type",
	[IIO_CHAN_INFO_CALIBAMBIENT] = "calibambient",
	[IIO_CHAN_INFO_ZEROPOINT] = "zeropoint",
	[IIO_CHAN_INFO_TROUGH] = "trough_raw",
	[IIO_CHAN_INFO_CONVDELAY] = "convdelay",
	[IIO_CHAN_INFO_POWERFACTOR] = "powerfactor",
};
199 /**
200  * iio_device_id() - query the unique ID for the device
201  * @indio_dev:		Device structure whose ID is being queried
202  *
203  * The IIO device ID is a unique index used for example for the naming
204  * of the character device /dev/iio\:device[ID].
205  *
206  * Returns: Unique ID for the device.
207  */
iio_device_id(struct iio_dev * indio_dev)208 int iio_device_id(struct iio_dev *indio_dev)
209 {
210 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
211 
212 	return iio_dev_opaque->id;
213 }
214 EXPORT_SYMBOL_GPL(iio_device_id);
215 
216 /**
217  * iio_buffer_enabled() - helper function to test if the buffer is enabled
218  * @indio_dev:		IIO device structure for device
219  *
220  * Returns: True, if the buffer is enabled.
221  */
iio_buffer_enabled(struct iio_dev * indio_dev)222 bool iio_buffer_enabled(struct iio_dev *indio_dev)
223 {
224 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
225 
226 	return iio_dev_opaque->currentmode & INDIO_ALL_BUFFER_MODES;
227 }
228 EXPORT_SYMBOL_GPL(iio_buffer_enabled);
229 
230 #if defined(CONFIG_DEBUG_FS)
231 /*
232  * There's also a CONFIG_DEBUG_FS guard in include/linux/iio/iio.h for
233  * iio_get_debugfs_dentry() to make it inline if CONFIG_DEBUG_FS is undefined
234  */
iio_get_debugfs_dentry(struct iio_dev * indio_dev)235 struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev)
236 {
237 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
238 
239 	return iio_dev_opaque->debugfs_dentry;
240 }
241 EXPORT_SYMBOL_GPL(iio_get_debugfs_dentry);
242 #endif
243 
244 /**
245  * iio_find_channel_from_si() - get channel from its scan index
246  * @indio_dev:		device
247  * @si:			scan index to match
248  *
249  * Returns:
250  * Constant pointer to iio_chan_spec, if scan index matches, NULL on failure.
251  */
252 const struct iio_chan_spec
iio_find_channel_from_si(struct iio_dev * indio_dev,int si)253 *iio_find_channel_from_si(struct iio_dev *indio_dev, int si)
254 {
255 	int i;
256 
257 	for (i = 0; i < indio_dev->num_channels; i++)
258 		if (indio_dev->channels[i].scan_index == si)
259 			return &indio_dev->channels[i];
260 	return NULL;
261 }
262 
263 /* This turns up an awful lot */
iio_read_const_attr(struct device * dev,struct device_attribute * attr,char * buf)264 ssize_t iio_read_const_attr(struct device *dev,
265 			    struct device_attribute *attr,
266 			    char *buf)
267 {
268 	return sysfs_emit(buf, "%s\n", to_iio_const_attr(attr)->string);
269 }
270 EXPORT_SYMBOL(iio_read_const_attr);
271 
272 /**
273  * iio_device_set_clock() - Set current timestamping clock for the device
274  * @indio_dev: IIO device structure containing the device
275  * @clock_id: timestamping clock POSIX identifier to set.
276  *
277  * Returns: 0 on success, or a negative error code.
278  */
int iio_device_set_clock(struct iio_dev *indio_dev, clockid_t clock_id)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	const struct iio_event_interface *ev_int = iio_dev_opaque->event_interface;
	int ret;

	ret = mutex_lock_interruptible(&iio_dev_opaque->mlock);
	if (ret)
		return ret;

	/* The clock must not change while events or buffers are in use. */
	if ((ev_int && iio_event_enabled(ev_int)) ||
	    iio_buffer_enabled(indio_dev))
		ret = -EBUSY;
	else
		iio_dev_opaque->clock_id = clock_id;

	mutex_unlock(&iio_dev_opaque->mlock);

	return ret;
}
EXPORT_SYMBOL(iio_device_set_clock);
299 
300 /**
301  * iio_device_get_clock() - Retrieve current timestamping clock for the device
302  * @indio_dev: IIO device structure containing the device
303  *
304  * Returns: Clock ID of the current timestamping clock for the device.
305  */
iio_device_get_clock(const struct iio_dev * indio_dev)306 clockid_t iio_device_get_clock(const struct iio_dev *indio_dev)
307 {
308 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
309 
310 	return iio_dev_opaque->clock_id;
311 }
312 EXPORT_SYMBOL(iio_device_get_clock);
313 
314 /**
315  * iio_get_time_ns() - utility function to get a time stamp for events etc
316  * @indio_dev: device
317  *
318  * Returns: Timestamp of the event in nanoseconds.
319  */
s64 iio_get_time_ns(const struct iio_dev *indio_dev)
{
	struct timespec64 tp;

	/* Dispatch on the clock chosen via iio_device_set_clock(). */
	switch (iio_device_get_clock(indio_dev)) {
	case CLOCK_REALTIME:
		return ktime_get_real_ns();
	case CLOCK_MONOTONIC:
		return ktime_get_ns();
	case CLOCK_MONOTONIC_RAW:
		return ktime_get_raw_ns();
	case CLOCK_REALTIME_COARSE:
		return ktime_to_ns(ktime_get_coarse_real());
	case CLOCK_MONOTONIC_COARSE:
		/* No *_ns helper for this clock: go through a timespec64. */
		ktime_get_coarse_ts64(&tp);
		return timespec64_to_ns(&tp);
	case CLOCK_BOOTTIME:
		return ktime_get_boottime_ns();
	case CLOCK_TAI:
		return ktime_get_clocktai_ns();
	default:
		/* iio_device_set_clock() only stores the clocks above. */
		BUG();
	}
}
EXPORT_SYMBOL(iio_get_time_ns);
345 
iio_init(void)346 static int __init iio_init(void)
347 {
348 	int ret;
349 
350 	/* Register sysfs bus */
351 	ret  = bus_register(&iio_bus_type);
352 	if (ret < 0) {
353 		pr_err("could not register bus type\n");
354 		goto error_nothing;
355 	}
356 
357 	ret = alloc_chrdev_region(&iio_devt, 0, IIO_DEV_MAX, "iio");
358 	if (ret < 0) {
359 		pr_err("failed to allocate char dev region\n");
360 		goto error_unregister_bus_type;
361 	}
362 
363 	iio_debugfs_dentry = debugfs_create_dir("iio", NULL);
364 
365 	return 0;
366 
367 error_unregister_bus_type:
368 	bus_unregister(&iio_bus_type);
369 error_nothing:
370 	return ret;
371 }
372 
static void __exit iio_exit(void)
{
	/* Tear down in reverse of iio_init(); iio_devt is 0 if never set. */
	if (iio_devt)
		unregister_chrdev_region(iio_devt, IIO_DEV_MAX);
	bus_unregister(&iio_bus_type);
	debugfs_remove(iio_debugfs_dentry);
}
380 
381 #if defined(CONFIG_DEBUG_FS)
/*
 * Read handler for <debugfs>/iio/.../direct_reg_access.  On the first read
 * fetch the register at the cached address via the driver's
 * debugfs_reg_access() callback and format it as hex; follow-up reads
 * (non-zero *ppos) just drain the cached formatted buffer.
 */
static ssize_t iio_debugfs_read_reg(struct file *file, char __user *userbuf,
			      size_t count, loff_t *ppos)
{
	struct iio_dev *indio_dev = file->private_data;
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	unsigned int val = 0;
	int ret;

	/* Continuation read: serve from the buffer filled previously. */
	if (*ppos > 0)
		return simple_read_from_buffer(userbuf, count, ppos,
					       iio_dev_opaque->read_buf,
					       iio_dev_opaque->read_buf_len);

	/* writeval == 0 with a non-NULL readval selects a register read. */
	ret = indio_dev->info->debugfs_reg_access(indio_dev,
						  iio_dev_opaque->cached_reg_addr,
						  0, &val);
	if (ret) {
		dev_err(indio_dev->dev.parent, "%s: read failed\n", __func__);
		return ret;
	}

	iio_dev_opaque->read_buf_len = snprintf(iio_dev_opaque->read_buf,
						sizeof(iio_dev_opaque->read_buf),
						"0x%X\n", val);

	return simple_read_from_buffer(userbuf, count, ppos,
				       iio_dev_opaque->read_buf,
				       iio_dev_opaque->read_buf_len);
}
411 
/*
 * Write handler for direct_reg_access.  Accepts "<reg>" to set the address
 * used by subsequent reads, or "<reg> <val>" to also write @val to @reg via
 * the driver's debugfs_reg_access() callback.
 */
static ssize_t iio_debugfs_write_reg(struct file *file,
		     const char __user *userbuf, size_t count, loff_t *ppos)
{
	struct iio_dev *indio_dev = file->private_data;
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	unsigned int reg, val;
	char buf[80];
	int ret;

	/* Leave room for the terminating NUL appended below. */
	if (count >= sizeof(buf))
		return -EINVAL;

	ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, userbuf,
				     count);
	if (ret < 0)
		return ret;

	buf[ret] = '\0';

	/* %i accepts decimal, 0x-prefixed hex and 0-prefixed octal. */
	ret = sscanf(buf, "%i %i", &reg, &val);

	switch (ret) {
	case 1:
		/* Address only: cache it for the next read. */
		iio_dev_opaque->cached_reg_addr = reg;
		break;
	case 2:
		iio_dev_opaque->cached_reg_addr = reg;
		ret = indio_dev->info->debugfs_reg_access(indio_dev, reg,
							  val, NULL);
		if (ret) {
			dev_err(indio_dev->dev.parent, "%s: write failed\n",
				__func__);
			return ret;
		}
		break;
	default:
		return -EINVAL;
	}

	return count;
}
453 
/* File operations backing the per-device direct_reg_access debugfs file. */
static const struct file_operations iio_debugfs_reg_fops = {
	.open = simple_open,
	.read = iio_debugfs_read_reg,
	.write = iio_debugfs_write_reg,
};
459 
iio_device_unregister_debugfs(struct iio_dev * indio_dev)460 static void iio_device_unregister_debugfs(struct iio_dev *indio_dev)
461 {
462 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
463 
464 	debugfs_remove_recursive(iio_dev_opaque->debugfs_dentry);
465 }
466 
iio_device_register_debugfs(struct iio_dev * indio_dev)467 static void iio_device_register_debugfs(struct iio_dev *indio_dev)
468 {
469 	struct iio_dev_opaque *iio_dev_opaque;
470 
471 	if (indio_dev->info->debugfs_reg_access == NULL)
472 		return;
473 
474 	if (!iio_debugfs_dentry)
475 		return;
476 
477 	iio_dev_opaque = to_iio_dev_opaque(indio_dev);
478 
479 	iio_dev_opaque->debugfs_dentry =
480 		debugfs_create_dir(dev_name(&indio_dev->dev),
481 				   iio_debugfs_dentry);
482 
483 	debugfs_create_file("direct_reg_access", 0644,
484 			    iio_dev_opaque->debugfs_dentry, indio_dev,
485 			    &iio_debugfs_reg_fops);
486 }
487 #else
/* No-op stubs used when CONFIG_DEBUG_FS is disabled. */
static void iio_device_register_debugfs(struct iio_dev *indio_dev)
{
}

static void iio_device_unregister_debugfs(struct iio_dev *indio_dev)
{
}
#endif /* CONFIG_DEBUG_FS */
496 
iio_read_channel_ext_info(struct device * dev,struct device_attribute * attr,char * buf)497 static ssize_t iio_read_channel_ext_info(struct device *dev,
498 				     struct device_attribute *attr,
499 				     char *buf)
500 {
501 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
502 	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
503 	const struct iio_chan_spec_ext_info *ext_info;
504 
505 	ext_info = &this_attr->c->ext_info[this_attr->address];
506 
507 	return ext_info->read(indio_dev, ext_info->private, this_attr->c, buf);
508 }
509 
iio_write_channel_ext_info(struct device * dev,struct device_attribute * attr,const char * buf,size_t len)510 static ssize_t iio_write_channel_ext_info(struct device *dev,
511 				     struct device_attribute *attr,
512 				     const char *buf, size_t len)
513 {
514 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
515 	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
516 	const struct iio_chan_spec_ext_info *ext_info;
517 
518 	ext_info = &this_attr->c->ext_info[this_attr->address];
519 
520 	return ext_info->write(indio_dev, ext_info->private,
521 			       this_attr->c, buf, len);
522 }
523 
iio_enum_available_read(struct iio_dev * indio_dev,uintptr_t priv,const struct iio_chan_spec * chan,char * buf)524 ssize_t iio_enum_available_read(struct iio_dev *indio_dev,
525 	uintptr_t priv, const struct iio_chan_spec *chan, char *buf)
526 {
527 	const struct iio_enum *e = (const struct iio_enum *)priv;
528 	unsigned int i;
529 	size_t len = 0;
530 
531 	if (!e->num_items)
532 		return 0;
533 
534 	for (i = 0; i < e->num_items; ++i) {
535 		if (!e->items[i])
536 			continue;
537 		len += sysfs_emit_at(buf, len, "%s ", e->items[i]);
538 	}
539 
540 	/* replace last space with a newline */
541 	buf[len - 1] = '\n';
542 
543 	return len;
544 }
545 EXPORT_SYMBOL_GPL(iio_enum_available_read);
546 
iio_enum_read(struct iio_dev * indio_dev,uintptr_t priv,const struct iio_chan_spec * chan,char * buf)547 ssize_t iio_enum_read(struct iio_dev *indio_dev,
548 	uintptr_t priv, const struct iio_chan_spec *chan, char *buf)
549 {
550 	const struct iio_enum *e = (const struct iio_enum *)priv;
551 	int i;
552 
553 	if (!e->get)
554 		return -EINVAL;
555 
556 	i = e->get(indio_dev, chan);
557 	if (i < 0)
558 		return i;
559 	if (i >= e->num_items || !e->items[i])
560 		return -EINVAL;
561 
562 	return sysfs_emit(buf, "%s\n", e->items[i]);
563 }
564 EXPORT_SYMBOL_GPL(iio_enum_read);
565 
iio_enum_write(struct iio_dev * indio_dev,uintptr_t priv,const struct iio_chan_spec * chan,const char * buf,size_t len)566 ssize_t iio_enum_write(struct iio_dev *indio_dev,
567 	uintptr_t priv, const struct iio_chan_spec *chan, const char *buf,
568 	size_t len)
569 {
570 	const struct iio_enum *e = (const struct iio_enum *)priv;
571 	int ret;
572 
573 	if (!e->set)
574 		return -EINVAL;
575 
576 	ret = __sysfs_match_string(e->items, e->num_items, buf);
577 	if (ret < 0)
578 		return ret;
579 
580 	ret = e->set(indio_dev, chan, ret);
581 	return ret ? ret : len;
582 }
583 EXPORT_SYMBOL_GPL(iio_enum_write);
584 
/* 3x3 identity rotation used when no mounting matrix is provided. */
static const struct iio_mount_matrix iio_mount_idmatrix = {
	.rotation = {
		"1", "0", "0",
		"0", "1", "0",
		"0", "0", "1"
	}
};
592 
/* Copy the identity matrix into @matrix and log that the fallback is used. */
static int iio_setup_mount_idmatrix(const struct device *dev,
				    struct iio_mount_matrix *matrix)
{
	*matrix = iio_mount_idmatrix;
	dev_info(dev, "mounting matrix not found: using identity...\n");
	return 0;
}
600 
iio_show_mount_matrix(struct iio_dev * indio_dev,uintptr_t priv,const struct iio_chan_spec * chan,char * buf)601 ssize_t iio_show_mount_matrix(struct iio_dev *indio_dev, uintptr_t priv,
602 			      const struct iio_chan_spec *chan, char *buf)
603 {
604 	const struct iio_mount_matrix *mtx;
605 
606 	mtx = ((iio_get_mount_matrix_t *)priv)(indio_dev, chan);
607 	if (IS_ERR(mtx))
608 		return PTR_ERR(mtx);
609 
610 	if (!mtx)
611 		mtx = &iio_mount_idmatrix;
612 
613 	return sysfs_emit(buf, "%s, %s, %s; %s, %s, %s; %s, %s, %s\n",
614 			  mtx->rotation[0], mtx->rotation[1], mtx->rotation[2],
615 			  mtx->rotation[3], mtx->rotation[4], mtx->rotation[5],
616 			  mtx->rotation[6], mtx->rotation[7], mtx->rotation[8]);
617 }
618 EXPORT_SYMBOL_GPL(iio_show_mount_matrix);
619 
620 /**
621  * iio_read_mount_matrix() - retrieve iio device mounting matrix from
622  *                           device "mount-matrix" property
623  * @dev:	device the mounting matrix property is assigned to
624  * @matrix:	where to store retrieved matrix
625  *
626  * If device is assigned no mounting matrix property, a default 3x3 identity
627  * matrix will be filled in.
628  *
629  * Returns: 0 if success, or a negative error code on failure.
630  */
iio_read_mount_matrix(struct device * dev,struct iio_mount_matrix * matrix)631 int iio_read_mount_matrix(struct device *dev, struct iio_mount_matrix *matrix)
632 {
633 	size_t len = ARRAY_SIZE(iio_mount_idmatrix.rotation);
634 	int err;
635 
636 	err = device_property_read_string_array(dev, "mount-matrix", matrix->rotation, len);
637 	if (err == len)
638 		return 0;
639 
640 	if (err >= 0)
641 		/* Invalid number of matrix entries. */
642 		return -EINVAL;
643 
644 	if (err != -EINVAL)
645 		/* Invalid matrix declaration format. */
646 		return err;
647 
648 	/* Matrix was not declared at all: fallback to identity. */
649 	return iio_setup_mount_idmatrix(dev, matrix);
650 }
651 EXPORT_SYMBOL(iio_read_mount_matrix);
652 
/*
 * Format one IIO value (or @size values for IIO_VAL_INT_MULTIPLE) into @buf
 * at @offset according to @type.  Shared by iio_format_value() and
 * iio_format_list().  Returns the number of characters written, or 0 for an
 * unknown @type.
 */
static ssize_t __iio_format_value(char *buf, size_t offset, unsigned int type,
				  int size, const int *vals)
{
	int tmp0, tmp1;
	s64 tmp2;
	bool scale_db = false;

	switch (type) {
	case IIO_VAL_INT:
		return sysfs_emit_at(buf, offset, "%d", vals[0]);
	case IIO_VAL_INT_PLUS_MICRO_DB:
		scale_db = true;
		fallthrough;
	case IIO_VAL_INT_PLUS_MICRO:
		/* vals[1] carries the sign when the integer part is zero. */
		if (vals[1] < 0)
			return sysfs_emit_at(buf, offset, "-%d.%06u%s",
					     abs(vals[0]), -vals[1],
					     scale_db ? " dB" : "");
		else
			return sysfs_emit_at(buf, offset, "%d.%06u%s", vals[0],
					     vals[1], scale_db ? " dB" : "");
	case IIO_VAL_INT_PLUS_NANO:
		if (vals[1] < 0)
			return sysfs_emit_at(buf, offset, "-%d.%09u",
					     abs(vals[0]), -vals[1]);
		else
			return sysfs_emit_at(buf, offset, "%d.%09u", vals[0],
					     vals[1]);
	case IIO_VAL_FRACTIONAL:
		/* vals[0] / vals[1], printed with nine fractional digits. */
		tmp2 = div_s64((s64)vals[0] * 1000000000LL, vals[1]);
		tmp0 = (int)div_s64_rem(tmp2, 1000000000, &tmp1);
		/* Keep the '-' for values in (-1, 0): tmp0 alone loses it. */
		if ((tmp2 < 0) && (tmp0 == 0))
			return sysfs_emit_at(buf, offset, "-0.%09u", abs(tmp1));
		else
			return sysfs_emit_at(buf, offset, "%d.%09u", tmp0,
					     abs(tmp1));
	case IIO_VAL_FRACTIONAL_LOG2:
		/* vals[0] / 2^vals[1] */
		tmp2 = shift_right((s64)vals[0] * 1000000000LL, vals[1]);
		tmp0 = (int)div_s64_rem(tmp2, 1000000000LL, &tmp1);
		if (tmp0 == 0 && tmp2 < 0)
			return sysfs_emit_at(buf, offset, "-0.%09u", abs(tmp1));
		else
			return sysfs_emit_at(buf, offset, "%d.%09u", tmp0,
					     abs(tmp1));
	case IIO_VAL_INT_MULTIPLE:
	{
		int i;
		int l = 0;

		/* Note: leaves a trailing space after the last value. */
		for (i = 0; i < size; ++i)
			l += sysfs_emit_at(buf, offset + l, "%d ", vals[i]);
		return l;
	}
	case IIO_VAL_CHAR:
		return sysfs_emit_at(buf, offset, "%c", (char)vals[0]);
	case IIO_VAL_INT_64:
		/* 64-bit value split across vals[1]:vals[0] (high:low). */
		tmp2 = (s64)((((u64)vals[1]) << 32) | (u32)vals[0]);
		return sysfs_emit_at(buf, offset, "%lld", tmp2);
	default:
		return 0;
	}
}
715 
716 /**
717  * iio_format_value() - Formats a IIO value into its string representation
718  * @buf:	The buffer to which the formatted value gets written
719  *		which is assumed to be big enough (i.e. PAGE_SIZE).
720  * @type:	One of the IIO_VAL_* constants. This decides how the val
721  *		and val2 parameters are formatted.
722  * @size:	Number of IIO value entries contained in vals
723  * @vals:	Pointer to the values, exact meaning depends on the
724  *		type parameter.
725  *
726  * Returns:
727  * 0 by default, a negative number on failure or the total number of characters
728  * written for a type that belongs to the IIO_VAL_* constant.
729  */
iio_format_value(char * buf,unsigned int type,int size,int * vals)730 ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals)
731 {
732 	ssize_t len;
733 
734 	len = __iio_format_value(buf, 0, type, size, vals);
735 	if (len >= PAGE_SIZE - 1)
736 		return -EFBIG;
737 
738 	return len + sysfs_emit_at(buf, len, "\n");
739 }
740 EXPORT_SYMBOL_GPL(iio_format_value);
741 
do_iio_read_channel_label(struct iio_dev * indio_dev,const struct iio_chan_spec * c,char * buf)742 ssize_t do_iio_read_channel_label(struct iio_dev *indio_dev,
743 				  const struct iio_chan_spec *c,
744 				  char *buf)
745 {
746 	if (indio_dev->info->read_label)
747 		return indio_dev->info->read_label(indio_dev, c, buf);
748 
749 	if (c->extend_name)
750 		return sysfs_emit(buf, "%s\n", c->extend_name);
751 
752 	return -EINVAL;
753 }
754 
iio_read_channel_label(struct device * dev,struct device_attribute * attr,char * buf)755 static ssize_t iio_read_channel_label(struct device *dev,
756 				      struct device_attribute *attr,
757 				      char *buf)
758 {
759 	return do_iio_read_channel_label(dev_to_iio_dev(dev),
760 					 to_iio_dev_attr(attr)->c, buf);
761 }
762 
/* sysfs "show" for value attributes: read from the driver and format. */
static ssize_t iio_read_channel_info(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int vals[INDIO_MAX_RAW_ELEMENTS];
	int ret;
	int val_len = 2;

	/* Prefer the multi-value callback when the driver provides one. */
	if (indio_dev->info->read_raw_multi)
		ret = indio_dev->info->read_raw_multi(indio_dev, this_attr->c,
							INDIO_MAX_RAW_ELEMENTS,
							vals, &val_len,
							this_attr->address);
	else if (indio_dev->info->read_raw)
		ret = indio_dev->info->read_raw(indio_dev, this_attr->c,
				    &vals[0], &vals[1], this_attr->address);
	else
		return -EINVAL;

	if (ret < 0)
		return ret;

	/* On success, ret holds the IIO_VAL_* type describing vals[]. */
	return iio_format_value(buf, ret, val_len, vals);
}
789 
iio_format_list(char * buf,const int * vals,int type,int length,const char * prefix,const char * suffix)790 static ssize_t iio_format_list(char *buf, const int *vals, int type, int length,
791 			       const char *prefix, const char *suffix)
792 {
793 	ssize_t len;
794 	int stride;
795 	int i;
796 
797 	switch (type) {
798 	case IIO_VAL_INT:
799 	case IIO_VAL_CHAR:
800 		stride = 1;
801 		break;
802 	default:
803 		stride = 2;
804 		break;
805 	}
806 
807 	len = sysfs_emit(buf, prefix);
808 
809 	for (i = 0; i <= length - stride; i += stride) {
810 		if (i != 0) {
811 			len += sysfs_emit_at(buf, len, " ");
812 			if (len >= PAGE_SIZE)
813 				return -EFBIG;
814 		}
815 
816 		len += __iio_format_value(buf, len, type, stride, &vals[i]);
817 		if (len >= PAGE_SIZE)
818 			return -EFBIG;
819 	}
820 
821 	len += sysfs_emit_at(buf, len, "%s\n", suffix);
822 
823 	return len;
824 }
825 
/* Emit a bare space-separated list of the available values. */
static ssize_t iio_format_avail_list(char *buf, const int *vals,
				     int type, int length)
{

	return iio_format_list(buf, vals, type, length, "", "");
}
832 
iio_format_avail_range(char * buf,const int * vals,int type)833 static ssize_t iio_format_avail_range(char *buf, const int *vals, int type)
834 {
835 	int length;
836 
837 	/*
838 	 * length refers to the array size , not the number of elements.
839 	 * The purpose is to print the range [min , step ,max] so length should
840 	 * be 3 in case of int, and 6 for other types.
841 	 */
842 	switch (type) {
843 	case IIO_VAL_INT:
844 		length = 3;
845 		break;
846 	default:
847 		length = 6;
848 		break;
849 	}
850 
851 	return iio_format_list(buf, vals, type, length, "[", "]");
852 }
853 
/* sysfs "show" for *_available attributes: a list or a [min step max] range. */
static ssize_t iio_read_channel_info_avail(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	const int *vals;
	int ret;
	int length;
	int type;

	if (!indio_dev->info->read_avail)
		return -EINVAL;

	ret = indio_dev->info->read_avail(indio_dev, this_attr->c,
					  &vals, &type, &length,
					  this_attr->address);

	if (ret < 0)
		return ret;
	/* ret distinguishes an explicit list from a min/step/max range. */
	switch (ret) {
	case IIO_AVAIL_LIST:
		return iio_format_avail_list(buf, vals, type, length);
	case IIO_AVAIL_RANGE:
		return iio_format_avail_range(buf, vals, type);
	default:
		return -EINVAL;
	}
}
883 
884 /**
885  * __iio_str_to_fixpoint() - Parse a fixed-point number from a string
886  * @str: The string to parse
887  * @fract_mult: Multiplier for the first decimal place, should be a power of 10
888  * @integer: The integer part of the number
889  * @fract: The fractional part of the number
890  * @scale_db: True if this should parse as dB
891  *
892  * Returns:
893  * 0 on success, or a negative error code if the string could not be parsed.
894  */
static int __iio_str_to_fixpoint(const char *str, int fract_mult,
				 int *integer, int *fract, bool scale_db)
{
	int i = 0, f = 0;
	bool integer_part = true, negative = false;

	/* fract_mult == 0 means a pure integer is expected. */
	if (fract_mult == 0) {
		*fract = 0;

		return kstrtoint(str, 0, integer);
	}

	if (str[0] == '-') {
		negative = true;
		str++;
	} else if (str[0] == '+') {
		str++;
	}

	while (*str) {
		if ('0' <= *str && *str <= '9') {
			if (integer_part) {
				i = i * 10 + *str - '0';
			} else {
				f += fract_mult * (*str - '0');
				fract_mult /= 10;
			}
		} else if (*str == '\n') {
			/* Only a single trailing newline is tolerated. */
			if (*(str + 1) == '\0')
				break;
			return -EINVAL;
		} else if (!strncmp(str, " dB", sizeof(" dB") - 1) && scale_db) {
			/* Ignore the dB suffix */
			str += sizeof(" dB") - 1;
			continue;
		} else if (!strncmp(str, "dB", sizeof("dB") - 1) && scale_db) {
			/* Ignore the dB suffix */
			str += sizeof("dB") - 1;
			continue;
		} else if (*str == '.' && integer_part) {
			integer_part = false;
		} else {
			return -EINVAL;
		}
		str++;
	}

	/*
	 * A leading '-' negates the integer part, or the fractional part
	 * when the integer part is zero (e.g. "-0.5").
	 */
	if (negative) {
		if (i)
			i = -i;
		else
			f = -f;
	}

	*integer = i;
	*fract = f;

	return 0;
}
954 
955 /**
956  * iio_str_to_fixpoint() - Parse a fixed-point number from a string
957  * @str: The string to parse
958  * @fract_mult: Multiplier for the first decimal place, should be a power of 10
959  * @integer: The integer part of the number
960  * @fract: The fractional part of the number
961  *
962  * Returns:
963  * 0 on success, or a negative error code if the string could not be parsed.
964  */
int iio_str_to_fixpoint(const char *str, int fract_mult,
			int *integer, int *fract)
{
	/* Public wrapper: no dB-suffix handling. */
	return __iio_str_to_fixpoint(str, fract_mult, integer, fract, false);
}
EXPORT_SYMBOL_GPL(iio_str_to_fixpoint);
971 
/*
 * sysfs store() for per-channel info attributes: parse the user string in
 * the format requested by the driver and forward it to write_raw().
 */
static ssize_t iio_write_channel_info(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int ret, fract_mult = 100000;
	int integer, fract = 0;
	long long integer64;
	bool is_char = false;
	bool scale_db = false;
	bool is_64bit = false;

	/* Assumes decimal - precision based on number of digits */
	if (!indio_dev->info->write_raw)
		return -EINVAL;

	/*
	 * Ask the driver which value format it expects for this attribute;
	 * without a write_raw_get_fmt() callback the default is
	 * INT_PLUS_MICRO parsing (fract_mult == 100000).
	 */
	if (indio_dev->info->write_raw_get_fmt)
		switch (indio_dev->info->write_raw_get_fmt(indio_dev,
			this_attr->c, this_attr->address)) {
		case IIO_VAL_INT:
			fract_mult = 0;
			break;
		case IIO_VAL_INT_PLUS_MICRO_DB:
			scale_db = true;
			fallthrough;
		case IIO_VAL_INT_PLUS_MICRO:
			fract_mult = 100000;
			break;
		case IIO_VAL_INT_PLUS_NANO:
			fract_mult = 100000000;
			break;
		case IIO_VAL_CHAR:
			is_char = true;
			break;
		case IIO_VAL_INT_64:
			is_64bit = true;
			break;
		default:
			return -EINVAL;
		}

	if (is_char) {
		/* A single character is passed to the driver as an int. */
		char ch;

		if (sscanf(buf, "%c", &ch) != 1)
			return -EINVAL;
		integer = ch;
	} else if (is_64bit) {
		/*
		 * 64-bit values are split over write_raw()'s two int
		 * arguments: high half in @fract, low half in @integer.
		 */
		ret = kstrtoll(buf, 0, &integer64);
		if (ret)
			return ret;

		fract = upper_32_bits(integer64);
		integer = lower_32_bits(integer64);
	} else {
		ret = __iio_str_to_fixpoint(buf, fract_mult, &integer, &fract,
					    scale_db);
		if (ret)
			return ret;
	}

	ret = indio_dev->info->write_raw(indio_dev, this_attr->c,
					 integer, fract, this_attr->address);
	if (ret)
		return ret;

	/* Whole input consumed on success. */
	return len;
}
1042 
/*
 * __iio_device_attr_init() - Initialize a device_attribute for a channel
 * @dev_attr: Attribute to initialize
 * @postfix: Final name component (e.g. "raw", "scale")
 * @chan: Channel description the attribute belongs to
 * @readfunc: show() callback, or NULL for a write-only attribute
 * @writefunc: store() callback, or NULL for a read-only attribute
 * @shared_by: Sharing level; determines how much of the channel
 *	description (direction, type, index, modifier, extend_name) is
 *	encoded into the sysfs name
 *
 * The generated name is a freshly allocated string stored in
 * dev_attr->attr.name; the caller owns it (freed via
 * __iio_device_attr_deinit() or iio_free_chan_devattr_list()).
 *
 * Returns: 0 on success, -ENOMEM on allocation failure, -EINVAL for an
 * unindexed differential channel.
 */
static
int __iio_device_attr_init(struct device_attribute *dev_attr,
			   const char *postfix,
			   struct iio_chan_spec const *chan,
			   ssize_t (*readfunc)(struct device *dev,
					       struct device_attribute *attr,
					       char *buf),
			   ssize_t (*writefunc)(struct device *dev,
						struct device_attribute *attr,
						const char *buf,
						size_t len),
			   enum iio_shared_by shared_by)
{
	int ret = 0;
	char *name = NULL;
	char *full_postfix;

	sysfs_attr_init(&dev_attr->attr);

	/* Build up postfix of <extend_name>_<modifier>_postfix */
	if (chan->modified && (shared_by == IIO_SEPARATE)) {
		if (chan->extend_name)
			full_postfix = kasprintf(GFP_KERNEL, "%s_%s_%s",
						 iio_modifier_names[chan->channel2],
						 chan->extend_name,
						 postfix);
		else
			full_postfix = kasprintf(GFP_KERNEL, "%s_%s",
						 iio_modifier_names[chan->channel2],
						 postfix);
	} else {
		/* Shared attributes never carry the per-channel extend_name. */
		if (chan->extend_name == NULL || shared_by != IIO_SEPARATE)
			full_postfix = kstrdup(postfix, GFP_KERNEL);
		else
			full_postfix = kasprintf(GFP_KERNEL,
						 "%s_%s",
						 chan->extend_name,
						 postfix);
	}
	if (full_postfix == NULL)
		return -ENOMEM;

	if (chan->differential) { /* Differential can not have modifier */
		switch (shared_by) {
		case IIO_SHARED_BY_ALL:
			name = kasprintf(GFP_KERNEL, "%s", full_postfix);
			break;
		case IIO_SHARED_BY_DIR:
			name = kasprintf(GFP_KERNEL, "%s_%s",
						iio_direction[chan->output],
						full_postfix);
			break;
		case IIO_SHARED_BY_TYPE:
			/* e.g. in_voltage-voltage_scale */
			name = kasprintf(GFP_KERNEL, "%s_%s-%s_%s",
					    iio_direction[chan->output],
					    iio_chan_type_name_spec[chan->type],
					    iio_chan_type_name_spec[chan->type],
					    full_postfix);
			break;
		case IIO_SEPARATE:
			if (!chan->indexed) {
				WARN(1, "Differential channels must be indexed\n");
				ret = -EINVAL;
				goto error_free_full_postfix;
			}
			/* e.g. in_voltage0-voltage1_raw */
			name = kasprintf(GFP_KERNEL,
					    "%s_%s%d-%s%d_%s",
					    iio_direction[chan->output],
					    iio_chan_type_name_spec[chan->type],
					    chan->channel,
					    iio_chan_type_name_spec[chan->type],
					    chan->channel2,
					    full_postfix);
			break;
		}
	} else { /* Single ended */
		switch (shared_by) {
		case IIO_SHARED_BY_ALL:
			name = kasprintf(GFP_KERNEL, "%s", full_postfix);
			break;
		case IIO_SHARED_BY_DIR:
			name = kasprintf(GFP_KERNEL, "%s_%s",
						iio_direction[chan->output],
						full_postfix);
			break;
		case IIO_SHARED_BY_TYPE:
			name = kasprintf(GFP_KERNEL, "%s_%s_%s",
					    iio_direction[chan->output],
					    iio_chan_type_name_spec[chan->type],
					    full_postfix);
			break;

		case IIO_SEPARATE:
			if (chan->indexed)
				name = kasprintf(GFP_KERNEL, "%s_%s%d_%s",
						    iio_direction[chan->output],
						    iio_chan_type_name_spec[chan->type],
						    chan->channel,
						    full_postfix);
			else
				name = kasprintf(GFP_KERNEL, "%s_%s_%s",
						    iio_direction[chan->output],
						    iio_chan_type_name_spec[chan->type],
						    full_postfix);
			break;
		}
	}
	if (name == NULL) {
		ret = -ENOMEM;
		goto error_free_full_postfix;
	}
	dev_attr->attr.name = name;

	/* Permissions follow the supplied callbacks: 0444 read, 0200 write. */
	if (readfunc) {
		dev_attr->attr.mode |= 0444;
		dev_attr->show = readfunc;
	}

	if (writefunc) {
		dev_attr->attr.mode |= 0200;
		dev_attr->store = writefunc;
	}

error_free_full_postfix:
	kfree(full_postfix);

	return ret;
}
1171 
/* Release the attribute name allocated by __iio_device_attr_init(). */
static void __iio_device_attr_deinit(struct device_attribute *dev_attr)
{
	kfree(dev_attr->attr.name);
}
1176 
/*
 * __iio_add_chan_devattr() - Allocate, name and register a channel attribute
 * @postfix: Attribute name postfix
 * @chan: Channel the attribute describes
 * @readfunc: show() callback, or NULL
 * @writefunc: store() callback, or NULL
 * @mask: Stored in the attribute's address field (info enum or ext_info
 *	index)
 * @shared_by: Sharing level used for name generation
 * @dev: Device, used here only for error logging
 * @buffer: Associated buffer, or NULL for non-buffer attributes
 * @attr_list: List the new attribute is appended to
 *
 * Returns: 0 on success, -EBUSY if an attribute of the same name is
 * already on @attr_list (expected for shared attributes), or another
 * negative error code.
 */
int __iio_add_chan_devattr(const char *postfix,
			   struct iio_chan_spec const *chan,
			   ssize_t (*readfunc)(struct device *dev,
					       struct device_attribute *attr,
					       char *buf),
			   ssize_t (*writefunc)(struct device *dev,
						struct device_attribute *attr,
						const char *buf,
						size_t len),
			   u64 mask,
			   enum iio_shared_by shared_by,
			   struct device *dev,
			   struct iio_buffer *buffer,
			   struct list_head *attr_list)
{
	int ret;
	struct iio_dev_attr *iio_attr, *t;

	iio_attr = kzalloc(sizeof(*iio_attr), GFP_KERNEL);
	if (iio_attr == NULL)
		return -ENOMEM;
	ret = __iio_device_attr_init(&iio_attr->dev_attr,
				     postfix, chan,
				     readfunc, writefunc, shared_by);
	if (ret)
		goto error_iio_dev_attr_free;
	iio_attr->c = chan;
	iio_attr->address = mask;
	iio_attr->buffer = buffer;
	/*
	 * Duplicate names are only a real error for IIO_SEPARATE
	 * attributes; shared ones legitimately collide and callers treat
	 * the resulting -EBUSY as "already present".
	 */
	list_for_each_entry(t, attr_list, l)
		if (strcmp(t->dev_attr.attr.name,
			   iio_attr->dev_attr.attr.name) == 0) {
			if (shared_by == IIO_SEPARATE)
				dev_err(dev, "tried to double register : %s\n",
					t->dev_attr.attr.name);
			ret = -EBUSY;
			goto error_device_attr_deinit;
		}
	list_add(&iio_attr->l, attr_list);

	return 0;

error_device_attr_deinit:
	__iio_device_attr_deinit(&iio_attr->dev_attr);
error_iio_dev_attr_free:
	kfree(iio_attr);
	return ret;
}
1225 
iio_device_add_channel_label(struct iio_dev * indio_dev,struct iio_chan_spec const * chan)1226 static int iio_device_add_channel_label(struct iio_dev *indio_dev,
1227 					 struct iio_chan_spec const *chan)
1228 {
1229 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1230 	int ret;
1231 
1232 	if (!indio_dev->info->read_label && !chan->extend_name)
1233 		return 0;
1234 
1235 	ret = __iio_add_chan_devattr("label",
1236 				     chan,
1237 				     &iio_read_channel_label,
1238 				     NULL,
1239 				     0,
1240 				     IIO_SEPARATE,
1241 				     &indio_dev->dev,
1242 				     NULL,
1243 				     &iio_dev_opaque->channel_attr_list);
1244 	if (ret < 0)
1245 		return ret;
1246 
1247 	return 1;
1248 }
1249 
iio_device_add_info_mask_type(struct iio_dev * indio_dev,struct iio_chan_spec const * chan,enum iio_shared_by shared_by,const unsigned long * infomask)1250 static int iio_device_add_info_mask_type(struct iio_dev *indio_dev,
1251 					 struct iio_chan_spec const *chan,
1252 					 enum iio_shared_by shared_by,
1253 					 const unsigned long *infomask)
1254 {
1255 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1256 	int i, ret, attrcount = 0;
1257 
1258 	for_each_set_bit(i, infomask, sizeof(*infomask)*8) {
1259 		if (i >= ARRAY_SIZE(iio_chan_info_postfix))
1260 			return -EINVAL;
1261 		ret = __iio_add_chan_devattr(iio_chan_info_postfix[i],
1262 					     chan,
1263 					     &iio_read_channel_info,
1264 					     &iio_write_channel_info,
1265 					     i,
1266 					     shared_by,
1267 					     &indio_dev->dev,
1268 					     NULL,
1269 					     &iio_dev_opaque->channel_attr_list);
1270 		if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE))
1271 			continue;
1272 		if (ret < 0)
1273 			return ret;
1274 		attrcount++;
1275 	}
1276 
1277 	return attrcount;
1278 }
1279 
iio_device_add_info_mask_type_avail(struct iio_dev * indio_dev,struct iio_chan_spec const * chan,enum iio_shared_by shared_by,const unsigned long * infomask)1280 static int iio_device_add_info_mask_type_avail(struct iio_dev *indio_dev,
1281 					       struct iio_chan_spec const *chan,
1282 					       enum iio_shared_by shared_by,
1283 					       const unsigned long *infomask)
1284 {
1285 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1286 	int i, ret, attrcount = 0;
1287 	char *avail_postfix;
1288 
1289 	for_each_set_bit(i, infomask, sizeof(*infomask) * 8) {
1290 		if (i >= ARRAY_SIZE(iio_chan_info_postfix))
1291 			return -EINVAL;
1292 		avail_postfix = kasprintf(GFP_KERNEL,
1293 					  "%s_available",
1294 					  iio_chan_info_postfix[i]);
1295 		if (!avail_postfix)
1296 			return -ENOMEM;
1297 
1298 		ret = __iio_add_chan_devattr(avail_postfix,
1299 					     chan,
1300 					     &iio_read_channel_info_avail,
1301 					     NULL,
1302 					     i,
1303 					     shared_by,
1304 					     &indio_dev->dev,
1305 					     NULL,
1306 					     &iio_dev_opaque->channel_attr_list);
1307 		kfree(avail_postfix);
1308 		if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE))
1309 			continue;
1310 		if (ret < 0)
1311 			return ret;
1312 		attrcount++;
1313 	}
1314 
1315 	return attrcount;
1316 }
1317 
iio_device_add_channel_sysfs(struct iio_dev * indio_dev,struct iio_chan_spec const * chan)1318 static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
1319 					struct iio_chan_spec const *chan)
1320 {
1321 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1322 	int ret, attrcount = 0;
1323 	const struct iio_chan_spec_ext_info *ext_info;
1324 
1325 	if (chan->channel < 0)
1326 		return 0;
1327 	ret = iio_device_add_info_mask_type(indio_dev, chan,
1328 					    IIO_SEPARATE,
1329 					    &chan->info_mask_separate);
1330 	if (ret < 0)
1331 		return ret;
1332 	attrcount += ret;
1333 
1334 	ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
1335 						  IIO_SEPARATE,
1336 						  &chan->info_mask_separate_available);
1337 	if (ret < 0)
1338 		return ret;
1339 	attrcount += ret;
1340 
1341 	ret = iio_device_add_info_mask_type(indio_dev, chan,
1342 					    IIO_SHARED_BY_TYPE,
1343 					    &chan->info_mask_shared_by_type);
1344 	if (ret < 0)
1345 		return ret;
1346 	attrcount += ret;
1347 
1348 	ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
1349 						  IIO_SHARED_BY_TYPE,
1350 						  &chan->info_mask_shared_by_type_available);
1351 	if (ret < 0)
1352 		return ret;
1353 	attrcount += ret;
1354 
1355 	ret = iio_device_add_info_mask_type(indio_dev, chan,
1356 					    IIO_SHARED_BY_DIR,
1357 					    &chan->info_mask_shared_by_dir);
1358 	if (ret < 0)
1359 		return ret;
1360 	attrcount += ret;
1361 
1362 	ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
1363 						  IIO_SHARED_BY_DIR,
1364 						  &chan->info_mask_shared_by_dir_available);
1365 	if (ret < 0)
1366 		return ret;
1367 	attrcount += ret;
1368 
1369 	ret = iio_device_add_info_mask_type(indio_dev, chan,
1370 					    IIO_SHARED_BY_ALL,
1371 					    &chan->info_mask_shared_by_all);
1372 	if (ret < 0)
1373 		return ret;
1374 	attrcount += ret;
1375 
1376 	ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
1377 						  IIO_SHARED_BY_ALL,
1378 						  &chan->info_mask_shared_by_all_available);
1379 	if (ret < 0)
1380 		return ret;
1381 	attrcount += ret;
1382 
1383 	ret = iio_device_add_channel_label(indio_dev, chan);
1384 	if (ret < 0)
1385 		return ret;
1386 	attrcount += ret;
1387 
1388 	if (chan->ext_info) {
1389 		unsigned int i = 0;
1390 
1391 		for (ext_info = chan->ext_info; ext_info->name; ext_info++) {
1392 			ret = __iio_add_chan_devattr(ext_info->name,
1393 					chan,
1394 					ext_info->read ?
1395 					    &iio_read_channel_ext_info : NULL,
1396 					ext_info->write ?
1397 					    &iio_write_channel_ext_info : NULL,
1398 					i,
1399 					ext_info->shared,
1400 					&indio_dev->dev,
1401 					NULL,
1402 					&iio_dev_opaque->channel_attr_list);
1403 			i++;
1404 			if (ret == -EBUSY && ext_info->shared)
1405 				continue;
1406 
1407 			if (ret)
1408 				return ret;
1409 
1410 			attrcount++;
1411 		}
1412 	}
1413 
1414 	return attrcount;
1415 }
1416 
1417 /**
1418  * iio_free_chan_devattr_list() - Free a list of IIO device attributes
1419  * @attr_list: List of IIO device attributes
1420  *
1421  * This function frees the memory allocated for each of the IIO device
1422  * attributes in the list.
1423  */
iio_free_chan_devattr_list(struct list_head * attr_list)1424 void iio_free_chan_devattr_list(struct list_head *attr_list)
1425 {
1426 	struct iio_dev_attr *p, *n;
1427 
1428 	list_for_each_entry_safe(p, n, attr_list, l) {
1429 		kfree_const(p->dev_attr.attr.name);
1430 		list_del(&p->l);
1431 		kfree(p);
1432 	}
1433 }
1434 
name_show(struct device * dev,struct device_attribute * attr,char * buf)1435 static ssize_t name_show(struct device *dev, struct device_attribute *attr,
1436 			 char *buf)
1437 {
1438 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
1439 
1440 	return sysfs_emit(buf, "%s\n", indio_dev->name);
1441 }
1442 
1443 static DEVICE_ATTR_RO(name);
1444 
label_show(struct device * dev,struct device_attribute * attr,char * buf)1445 static ssize_t label_show(struct device *dev, struct device_attribute *attr,
1446 			  char *buf)
1447 {
1448 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
1449 
1450 	return sysfs_emit(buf, "%s\n", indio_dev->label);
1451 }
1452 
1453 static DEVICE_ATTR_RO(label);
1454 
/*
 * Human-readable names for the POSIX clocks, indexed by clockid_t; used
 * by the current_timestamp_clock sysfs attribute.
 */
static const char * const clock_names[] = {
	[CLOCK_REALTIME]		= "realtime",
	[CLOCK_MONOTONIC]		= "monotonic",
	[CLOCK_PROCESS_CPUTIME_ID]	= "process_cputime_id",
	[CLOCK_THREAD_CPUTIME_ID]	= "thread_cputime_id",
	[CLOCK_MONOTONIC_RAW]		= "monotonic_raw",
	[CLOCK_REALTIME_COARSE]		= "realtime_coarse",
	[CLOCK_MONOTONIC_COARSE]	= "monotonic_coarse",
	[CLOCK_BOOTTIME]		= "boottime",
	[CLOCK_REALTIME_ALARM]		= "realtime_alarm",
	[CLOCK_BOOTTIME_ALARM]		= "boottime_alarm",
	[CLOCK_SGI_CYCLE]		= "sgi_cycle",
	[CLOCK_TAI]			= "tai",
};
1469 
/* sysfs: report the POSIX clock currently used for timestamps. */
static ssize_t current_timestamp_clock_show(struct device *dev,
					    struct device_attribute *attr,
					    char *buf)
{
	const struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	const clockid_t clk = iio_device_get_clock(indio_dev);

	/*
	 * Only the clocks accepted by current_timestamp_clock_store() can
	 * ever be set; anything else indicates internal corruption.
	 */
	switch (clk) {
	case CLOCK_REALTIME:
	case CLOCK_MONOTONIC:
	case CLOCK_MONOTONIC_RAW:
	case CLOCK_REALTIME_COARSE:
	case CLOCK_MONOTONIC_COARSE:
	case CLOCK_BOOTTIME:
	case CLOCK_TAI:
		break;
	default:
		BUG();
	}

	return sysfs_emit(buf, "%s\n", clock_names[clk]);
}
1492 
current_timestamp_clock_store(struct device * dev,struct device_attribute * attr,const char * buf,size_t len)1493 static ssize_t current_timestamp_clock_store(struct device *dev,
1494 					     struct device_attribute *attr,
1495 					     const char *buf, size_t len)
1496 {
1497 	clockid_t clk;
1498 	int ret;
1499 
1500 	ret = sysfs_match_string(clock_names, buf);
1501 	if (ret < 0)
1502 		return ret;
1503 	clk = ret;
1504 
1505 	switch (clk) {
1506 	case CLOCK_REALTIME:
1507 	case CLOCK_MONOTONIC:
1508 	case CLOCK_MONOTONIC_RAW:
1509 	case CLOCK_REALTIME_COARSE:
1510 	case CLOCK_MONOTONIC_COARSE:
1511 	case CLOCK_BOOTTIME:
1512 	case CLOCK_TAI:
1513 		break;
1514 	default:
1515 		return -EINVAL;
1516 	}
1517 
1518 	ret = iio_device_set_clock(dev_to_iio_dev(dev), clk);
1519 	if (ret)
1520 		return ret;
1521 
1522 	return len;
1523 }
1524 
iio_device_register_sysfs_group(struct iio_dev * indio_dev,const struct attribute_group * group)1525 int iio_device_register_sysfs_group(struct iio_dev *indio_dev,
1526 				    const struct attribute_group *group)
1527 {
1528 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1529 	const struct attribute_group **new, **old = iio_dev_opaque->groups;
1530 	unsigned int cnt = iio_dev_opaque->groupcounter;
1531 
1532 	new = krealloc_array(old, cnt + 2, sizeof(*new), GFP_KERNEL);
1533 	if (!new)
1534 		return -ENOMEM;
1535 
1536 	new[iio_dev_opaque->groupcounter++] = group;
1537 	new[iio_dev_opaque->groupcounter] = NULL;
1538 
1539 	iio_dev_opaque->groups = new;
1540 
1541 	return 0;
1542 }
1543 
/* "current_timestamp_clock" attribute backed by the show/store pair above. */
static DEVICE_ATTR_RW(current_timestamp_clock);
1545 
/*
 * iio_device_register_sysfs() - Build the device's main attribute group
 * @indio_dev: Device to register attributes for
 *
 * Two-phase construction: first count all attributes (driver supplied +
 * per-channel + name/label/clock), then allocate a NULL-terminated array
 * and fill it in the same order. The count and fill phases must stay in
 * lockstep.
 *
 * Returns: 0 on success, negative error code on failure.
 */
static int iio_device_register_sysfs(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	int i, ret = 0, attrcount, attrn, attrcount_orig = 0;
	struct iio_dev_attr *p;
	struct attribute **attr, *clk = NULL;

	/* First count elements in any existing group */
	if (indio_dev->info->attrs) {
		attr = indio_dev->info->attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	/*
	 * New channel registration method - relies on the fact a group does
	 * not need to be initialized if its name is NULL.
	 */
	if (indio_dev->channels)
		for (i = 0; i < indio_dev->num_channels; i++) {
			const struct iio_chan_spec *chan =
				&indio_dev->channels[i];

			/* A timestamp channel implies the clock selector. */
			if (chan->type == IIO_TIMESTAMP)
				clk = &dev_attr_current_timestamp_clock.attr;

			ret = iio_device_add_channel_sysfs(indio_dev, chan);
			if (ret < 0)
				goto error_clear_attrs;
			attrcount += ret;
		}

	/* Timestamped events also need the clock selector. */
	if (iio_dev_opaque->event_interface)
		clk = &dev_attr_current_timestamp_clock.attr;

	if (indio_dev->name)
		attrcount++;
	if (indio_dev->label)
		attrcount++;
	if (clk)
		attrcount++;

	/* +1 for the NULL terminator sysfs expects. */
	iio_dev_opaque->chan_attr_group.attrs =
		kcalloc(attrcount + 1,
			sizeof(iio_dev_opaque->chan_attr_group.attrs[0]),
			GFP_KERNEL);
	if (iio_dev_opaque->chan_attr_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_clear_attrs;
	}
	/* Copy across original attributes, and point to original binary attributes */
	if (indio_dev->info->attrs) {
		memcpy(iio_dev_opaque->chan_attr_group.attrs,
		       indio_dev->info->attrs->attrs,
		       sizeof(iio_dev_opaque->chan_attr_group.attrs[0])
		       *attrcount_orig);
		iio_dev_opaque->chan_attr_group.is_visible =
			indio_dev->info->attrs->is_visible;
		iio_dev_opaque->chan_attr_group.bin_attrs =
			indio_dev->info->attrs->bin_attrs;
	}
	attrn = attrcount_orig;
	/* Add all elements from the list. */
	list_for_each_entry(p, &iio_dev_opaque->channel_attr_list, l)
		iio_dev_opaque->chan_attr_group.attrs[attrn++] = &p->dev_attr.attr;
	if (indio_dev->name)
		iio_dev_opaque->chan_attr_group.attrs[attrn++] = &dev_attr_name.attr;
	if (indio_dev->label)
		iio_dev_opaque->chan_attr_group.attrs[attrn++] = &dev_attr_label.attr;
	if (clk)
		iio_dev_opaque->chan_attr_group.attrs[attrn++] = clk;

	ret = iio_device_register_sysfs_group(indio_dev,
					      &iio_dev_opaque->chan_attr_group);
	if (ret)
		goto error_free_chan_attrs;

	return 0;

error_free_chan_attrs:
	kfree(iio_dev_opaque->chan_attr_group.attrs);
	iio_dev_opaque->chan_attr_group.attrs = NULL;
error_clear_attrs:
	iio_free_chan_devattr_list(&iio_dev_opaque->channel_attr_list);

	return ret;
}
1633 
iio_device_unregister_sysfs(struct iio_dev * indio_dev)1634 static void iio_device_unregister_sysfs(struct iio_dev *indio_dev)
1635 {
1636 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1637 
1638 	iio_free_chan_devattr_list(&iio_dev_opaque->channel_attr_list);
1639 	kfree(iio_dev_opaque->chan_attr_group.attrs);
1640 	iio_dev_opaque->chan_attr_group.attrs = NULL;
1641 	kfree(iio_dev_opaque->groups);
1642 	iio_dev_opaque->groups = NULL;
1643 }
1644 
/*
 * Final device release callback: runs when the last reference is dropped
 * and undoes everything set up by registration/allocation.
 */
static void iio_dev_release(struct device *device)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(device);
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	if (indio_dev->modes & INDIO_ALL_TRIGGERED_MODES)
		iio_device_unregister_trigger_consumer(indio_dev);
	iio_device_unregister_eventset(indio_dev);
	iio_device_unregister_sysfs(indio_dev);

	iio_device_detach_buffers(indio_dev);

	mutex_destroy(&iio_dev_opaque->info_exist_lock);
	mutex_destroy(&iio_dev_opaque->mlock);

	lockdep_unregister_key(&iio_dev_opaque->mlock_key);

	/* Return the device id and free the containing allocation. */
	ida_free(&iio_ida, iio_dev_opaque->id);
	kfree(iio_dev_opaque);
}
1665 
/* Device type shared by all IIO devices; hooks the final teardown. */
const struct device_type iio_device_type = {
	.name = "iio_device",
	.release = iio_dev_release,
};
1670 
/**
 * iio_device_alloc() - allocate an iio_dev from a driver
 * @parent:		Parent device.
 * @sizeof_priv:	Space to allocate for private structure.
 *
 * Returns:
 * Pointer to allocated iio_dev on success, NULL on failure.
 */
struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv)
{
	struct iio_dev_opaque *iio_dev_opaque;
	struct iio_dev *indio_dev;
	size_t alloc_size;

	/*
	 * The private data directly follows the opaque struct, aligned to
	 * IIO_DMA_MINALIGN so drivers can place DMA-safe buffers there.
	 */
	if (sizeof_priv)
		alloc_size = ALIGN(sizeof(*iio_dev_opaque), IIO_DMA_MINALIGN) + sizeof_priv;
	else
		alloc_size = sizeof(*iio_dev_opaque);

	iio_dev_opaque = kzalloc(alloc_size, GFP_KERNEL);
	if (!iio_dev_opaque)
		return NULL;

	indio_dev = &iio_dev_opaque->indio_dev;

	if (sizeof_priv)
		ACCESS_PRIVATE(indio_dev, priv) = (char *)iio_dev_opaque +
			ALIGN(sizeof(*iio_dev_opaque), IIO_DMA_MINALIGN);

	INIT_LIST_HEAD(&iio_dev_opaque->channel_attr_list);

	iio_dev_opaque->id = ida_alloc(&iio_ida, GFP_KERNEL);
	if (iio_dev_opaque->id < 0) {
		/* cannot use a dev_err as the name isn't available */
		pr_err("failed to get device id\n");
		kfree(iio_dev_opaque);
		return NULL;
	}

	if (dev_set_name(&indio_dev->dev, "iio:device%d", iio_dev_opaque->id)) {
		ida_free(&iio_ida, iio_dev_opaque->id);
		kfree(iio_dev_opaque);
		return NULL;
	}

	INIT_LIST_HEAD(&iio_dev_opaque->buffer_list);
	INIT_LIST_HEAD(&iio_dev_opaque->ioctl_handlers);

	lockdep_register_key(&iio_dev_opaque->mlock_key);

	/* Per-device lock class so lockdep can tell instances apart. */
	mutex_init_with_key(&iio_dev_opaque->mlock, &iio_dev_opaque->mlock_key);
	mutex_init(&iio_dev_opaque->info_exist_lock);

	indio_dev->dev.parent = parent;
	indio_dev->dev.type = &iio_device_type;
	indio_dev->dev.bus = &iio_bus_type;
	device_initialize(&indio_dev->dev);

	return indio_dev;
}
EXPORT_SYMBOL(iio_device_alloc);
1732 
1733 /**
1734  * iio_device_free() - free an iio_dev from a driver
1735  * @dev:		the iio_dev associated with the device
1736  */
iio_device_free(struct iio_dev * dev)1737 void iio_device_free(struct iio_dev *dev)
1738 {
1739 	if (dev)
1740 		put_device(&dev->dev);
1741 }
1742 EXPORT_SYMBOL(iio_device_free);
1743 
/* devm action callback: drop the reference taken by iio_device_alloc(). */
static void devm_iio_device_release(void *iio_dev)
{
	iio_device_free(iio_dev);
}
1748 
1749 /**
1750  * devm_iio_device_alloc - Resource-managed iio_device_alloc()
1751  * @parent:		Device to allocate iio_dev for, and parent for this IIO device
1752  * @sizeof_priv:	Space to allocate for private structure.
1753  *
1754  * Managed iio_device_alloc. iio_dev allocated with this function is
1755  * automatically freed on driver detach.
1756  *
1757  * Returns:
1758  * Pointer to allocated iio_dev on success, NULL on failure.
1759  */
devm_iio_device_alloc(struct device * parent,int sizeof_priv)1760 struct iio_dev *devm_iio_device_alloc(struct device *parent, int sizeof_priv)
1761 {
1762 	struct iio_dev *iio_dev;
1763 	int ret;
1764 
1765 	iio_dev = iio_device_alloc(parent, sizeof_priv);
1766 	if (!iio_dev)
1767 		return NULL;
1768 
1769 	ret = devm_add_action_or_reset(parent, devm_iio_device_release,
1770 				       iio_dev);
1771 	if (ret)
1772 		return NULL;
1773 
1774 	return iio_dev;
1775 }
1776 EXPORT_SYMBOL_GPL(devm_iio_device_alloc);
1777 
1778 /**
1779  * iio_chrdev_open() - chrdev file open for buffer access and ioctls
1780  * @inode:	Inode structure for identifying the device in the file system
1781  * @filp:	File structure for iio device used to keep and later access
1782  *		private data
1783  *
1784  * Returns: 0 on success or -EBUSY if the device is already opened
1785  */
iio_chrdev_open(struct inode * inode,struct file * filp)1786 static int iio_chrdev_open(struct inode *inode, struct file *filp)
1787 {
1788 	struct iio_dev_opaque *iio_dev_opaque =
1789 		container_of(inode->i_cdev, struct iio_dev_opaque, chrdev);
1790 	struct iio_dev *indio_dev = &iio_dev_opaque->indio_dev;
1791 	struct iio_dev_buffer_pair *ib;
1792 
1793 	if (test_and_set_bit(IIO_BUSY_BIT_POS, &iio_dev_opaque->flags))
1794 		return -EBUSY;
1795 
1796 	iio_device_get(indio_dev);
1797 
1798 	ib = kmalloc(sizeof(*ib), GFP_KERNEL);
1799 	if (!ib) {
1800 		iio_device_put(indio_dev);
1801 		clear_bit(IIO_BUSY_BIT_POS, &iio_dev_opaque->flags);
1802 		return -ENOMEM;
1803 	}
1804 
1805 	ib->indio_dev = indio_dev;
1806 	ib->buffer = indio_dev->buffer;
1807 
1808 	filp->private_data = ib;
1809 
1810 	return 0;
1811 }
1812 
1813 /**
1814  * iio_chrdev_release() - chrdev file close buffer access and ioctls
1815  * @inode:	Inode structure pointer for the char device
1816  * @filp:	File structure pointer for the char device
1817  *
1818  * Returns: 0 for successful release.
1819  */
iio_chrdev_release(struct inode * inode,struct file * filp)1820 static int iio_chrdev_release(struct inode *inode, struct file *filp)
1821 {
1822 	struct iio_dev_buffer_pair *ib = filp->private_data;
1823 	struct iio_dev_opaque *iio_dev_opaque =
1824 		container_of(inode->i_cdev, struct iio_dev_opaque, chrdev);
1825 	struct iio_dev *indio_dev = &iio_dev_opaque->indio_dev;
1826 
1827 	kfree(ib);
1828 	clear_bit(IIO_BUSY_BIT_POS, &iio_dev_opaque->flags);
1829 	iio_device_put(indio_dev);
1830 
1831 	return 0;
1832 }
1833 
iio_device_ioctl_handler_register(struct iio_dev * indio_dev,struct iio_ioctl_handler * h)1834 void iio_device_ioctl_handler_register(struct iio_dev *indio_dev,
1835 				       struct iio_ioctl_handler *h)
1836 {
1837 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1838 
1839 	list_add_tail(&h->entry, &iio_dev_opaque->ioctl_handlers);
1840 }
1841 
/* Remove a previously registered ioctl handler from its dispatch list. */
void iio_device_ioctl_handler_unregister(struct iio_ioctl_handler *h)
{
	list_del(&h->entry);
}
1846 
/*
 * Common ioctl dispatcher shared by the buffer and event file operations:
 * walk the registered handlers until one claims the command.
 */
static long iio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct iio_dev_buffer_pair *ib = filp->private_data;
	struct iio_dev *indio_dev = ib->indio_dev;
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_ioctl_handler *h;
	int ret;

	guard(mutex)(&iio_dev_opaque->info_exist_lock);
	/*
	 * The NULL check here is required to prevent crashing when a device
	 * is being removed while userspace would still have open file handles
	 * to try to access this device.
	 */
	if (!indio_dev->info)
		return -ENODEV;

	list_for_each_entry(h, &iio_dev_opaque->ioctl_handlers, entry) {
		ret = h->ioctl(indio_dev, filp, cmd, arg);
		if (ret != IIO_IOCTL_UNHANDLED)
			return ret;
	}

	/* No handler recognized the command. */
	return -ENODEV;
}
1872 
/*
 * File operations for the buffer character device: buffered read/write/
 * poll plus the common ioctl dispatcher.
 */
static const struct file_operations iio_buffer_fileops = {
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
	.read = iio_buffer_read_outer_addr,
	.write = iio_buffer_write_outer_addr,
	.poll = iio_buffer_poll_addr,
	.unlocked_ioctl = iio_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.open = iio_chrdev_open,
	.release = iio_chrdev_release,
};
1884 
/*
 * Like iio_buffer_fileops but without the buffer read/write/poll hooks:
 * ioctl-only access to the character device.
 */
static const struct file_operations iio_event_fileops = {
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
	.unlocked_ioctl = iio_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.open = iio_chrdev_open,
	.release = iio_chrdev_release,
};
1893 
iio_check_unique_scan_index(struct iio_dev * indio_dev)1894 static int iio_check_unique_scan_index(struct iio_dev *indio_dev)
1895 {
1896 	int i, j;
1897 	const struct iio_chan_spec *channels = indio_dev->channels;
1898 
1899 	if (!(indio_dev->modes & INDIO_ALL_BUFFER_MODES))
1900 		return 0;
1901 
1902 	for (i = 0; i < indio_dev->num_channels - 1; i++) {
1903 		if (channels[i].scan_index < 0)
1904 			continue;
1905 		for (j = i + 1; j < indio_dev->num_channels; j++)
1906 			if (channels[i].scan_index == channels[j].scan_index) {
1907 				dev_err(&indio_dev->dev,
1908 					"Duplicate scan index %d\n",
1909 					channels[i].scan_index);
1910 				return -EINVAL;
1911 			}
1912 	}
1913 
1914 	return 0;
1915 }
1916 
iio_check_extended_name(const struct iio_dev * indio_dev)1917 static int iio_check_extended_name(const struct iio_dev *indio_dev)
1918 {
1919 	unsigned int i;
1920 
1921 	if (!indio_dev->info->read_label)
1922 		return 0;
1923 
1924 	for (i = 0; i < indio_dev->num_channels; i++) {
1925 		if (indio_dev->channels[i].extend_name) {
1926 			dev_err(&indio_dev->dev,
1927 				"Cannot use labels and extend_name at the same time\n");
1928 			return -EINVAL;
1929 		}
1930 	}
1931 
1932 	return 0;
1933 }
1934 
1935 static const struct iio_buffer_setup_ops noop_ring_setup_ops;
1936 
iio_sanity_check_avail_scan_masks(struct iio_dev * indio_dev)1937 static void iio_sanity_check_avail_scan_masks(struct iio_dev *indio_dev)
1938 {
1939 	unsigned int num_masks, masklength, longs_per_mask;
1940 	const unsigned long *av_masks;
1941 	int i;
1942 
1943 	av_masks = indio_dev->available_scan_masks;
1944 	masklength = iio_get_masklength(indio_dev);
1945 	longs_per_mask = BITS_TO_LONGS(masklength);
1946 
1947 	/*
1948 	 * The code determining how many available_scan_masks is in the array
1949 	 * will be assuming the end of masks when first long with all bits
1950 	 * zeroed is encountered. This is incorrect for masks where mask
1951 	 * consists of more than one long, and where some of the available masks
1952 	 * has long worth of bits zeroed (but has subsequent bit(s) set). This
1953 	 * is a safety measure against bug where array of masks is terminated by
1954 	 * a single zero while mask width is greater than width of a long.
1955 	 */
1956 	if (longs_per_mask > 1)
1957 		dev_warn(indio_dev->dev.parent,
1958 			 "multi long available scan masks not fully supported\n");
1959 
1960 	if (bitmap_empty(av_masks, masklength))
1961 		dev_warn(indio_dev->dev.parent, "empty scan mask\n");
1962 
1963 	for (num_masks = 0; *av_masks; num_masks++)
1964 		av_masks += longs_per_mask;
1965 
1966 	if (num_masks < 2)
1967 		return;
1968 
1969 	av_masks = indio_dev->available_scan_masks;
1970 
1971 	/*
1972 	 * Go through all the masks from first to one before the last, and see
1973 	 * that no mask found later from the available_scan_masks array is a
1974 	 * subset of mask found earlier. If this happens, then the mask found
1975 	 * later will never get used because scanning the array is stopped when
1976 	 * the first suitable mask is found. Drivers should order the array of
1977 	 * available masks in the order of preference (presumably the least
1978 	 * costy to access masks first).
1979 	 */
1980 	for (i = 0; i < num_masks - 1; i++) {
1981 		const unsigned long *mask1;
1982 		int j;
1983 
1984 		mask1 = av_masks + i * longs_per_mask;
1985 		for (j = i + 1; j < num_masks; j++) {
1986 			const unsigned long *mask2;
1987 
1988 			mask2 = av_masks + j * longs_per_mask;
1989 			if (bitmap_subset(mask2, mask1, masklength))
1990 				dev_warn(indio_dev->dev.parent,
1991 					 "available_scan_mask %d subset of %d. Never used\n",
1992 					 j, i);
1993 		}
1994 	}
1995 }
1996 
1997 /**
1998  * iio_active_scan_mask_index - Get index of the active scan mask inside the
1999  * available scan masks array
2000  * @indio_dev: the IIO device containing the active and available scan masks
2001  *
2002  * Returns: the index or -EINVAL if  active_scan_mask is not set
2003  */
iio_active_scan_mask_index(struct iio_dev * indio_dev)2004 int iio_active_scan_mask_index(struct iio_dev *indio_dev)
2005 
2006 {
2007 	const unsigned long *av_masks;
2008 	unsigned int masklength = iio_get_masklength(indio_dev);
2009 	int i = 0;
2010 
2011 	if (!indio_dev->active_scan_mask)
2012 		return -EINVAL;
2013 
2014 	/*
2015 	 * As in iio_scan_mask_match and iio_sanity_check_avail_scan_masks,
2016 	 * the condition here do not handle multi-long masks correctly.
2017 	 * It only checks the first long to be zero, and will use such mask
2018 	 * as a terminator even if there was bits set after the first long.
2019 	 *
2020 	 * This should be fine since the available_scan_mask has already been
2021 	 * sanity tested using iio_sanity_check_avail_scan_masks.
2022 	 *
2023 	 * See iio_scan_mask_match and iio_sanity_check_avail_scan_masks for
2024 	 * more details
2025 	 */
2026 	av_masks = indio_dev->available_scan_masks;
2027 	while (*av_masks) {
2028 		if (indio_dev->active_scan_mask == av_masks)
2029 			return i;
2030 		av_masks += BITS_TO_LONGS(masklength);
2031 		i++;
2032 	}
2033 
2034 	dev_warn(indio_dev->dev.parent,
2035 		 "active scan mask is not part of the available scan masks\n");
2036 	return -EINVAL;
2037 }
2038 EXPORT_SYMBOL_GPL(iio_active_scan_mask_index);
2039 
/**
 * __iio_device_register() - register a device with the IIO subsystem
 * @indio_dev:	device structure, filled in by the driver
 * @this_mod:	module that owns the device (becomes the chrdev owner)
 *
 * Validates the channel description, sets up debugfs/sysfs/eventset and,
 * when the device has buffers or events, exposes a character device.
 * NOTE(review): normally reached via the iio_device_register() wrapper,
 * which supplies THIS_MODULE — confirm against include/linux/iio/iio.h.
 *
 * Returns: 0 on success, a negative errno otherwise. On failure all
 * partially-registered interfaces are torn down via the goto chain below.
 */
int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct fwnode_handle *fwnode = NULL;
	int ret;

	/* A device without info callbacks cannot service any request */
	if (!indio_dev->info)
		return -EINVAL;

	iio_dev_opaque->driver_module = this_mod;

	/* If the calling driver did not initialize firmware node, do it here */
	if (dev_fwnode(&indio_dev->dev))
		fwnode = dev_fwnode(&indio_dev->dev);
	/* The default dummy IIO device has no parent */
	else if (indio_dev->dev.parent)
		fwnode = dev_fwnode(indio_dev->dev.parent);
	device_set_node(&indio_dev->dev, fwnode);

	/* Optional firmware-provided label; absence is not an error */
	fwnode_property_read_string(fwnode, "label", &indio_dev->label);

	ret = iio_check_unique_scan_index(indio_dev);
	if (ret < 0)
		return ret;

	ret = iio_check_extended_name(indio_dev);
	if (ret < 0)
		return ret;

	iio_device_register_debugfs(indio_dev);

	ret = iio_buffers_alloc_sysfs_and_mask(indio_dev);
	if (ret) {
		dev_err(indio_dev->dev.parent,
			"Failed to create buffer sysfs interfaces\n");
		goto error_unreg_debugfs;
	}

	/* Warning-only checks; never fails registration */
	if (indio_dev->available_scan_masks)
		iio_sanity_check_avail_scan_masks(indio_dev);

	ret = iio_device_register_sysfs(indio_dev);
	if (ret) {
		dev_err(indio_dev->dev.parent,
			"Failed to register sysfs interfaces\n");
		goto error_buffer_free_sysfs;
	}
	ret = iio_device_register_eventset(indio_dev);
	if (ret) {
		dev_err(indio_dev->dev.parent,
			"Failed to register event set\n");
		goto error_free_sysfs;
	}
	if (indio_dev->modes & INDIO_ALL_TRIGGERED_MODES)
		iio_device_register_trigger_consumer(indio_dev);

	/* Buffer-capable drivers may omit setup_ops; install a no-op set */
	if ((indio_dev->modes & INDIO_ALL_BUFFER_MODES) &&
		indio_dev->setup_ops == NULL)
		indio_dev->setup_ops = &noop_ring_setup_ops;

	/* Buffer fileops take precedence when both buffers and events exist */
	if (iio_dev_opaque->attached_buffers_cnt)
		cdev_init(&iio_dev_opaque->chrdev, &iio_buffer_fileops);
	else if (iio_dev_opaque->event_interface)
		cdev_init(&iio_dev_opaque->chrdev, &iio_event_fileops);

	/* Only create a /dev node if there is something for userspace to use */
	if (iio_dev_opaque->attached_buffers_cnt || iio_dev_opaque->event_interface) {
		indio_dev->dev.devt = MKDEV(MAJOR(iio_devt), iio_dev_opaque->id);
		iio_dev_opaque->chrdev.owner = this_mod;
	}

	/* assign device groups now; they should be all registered now */
	indio_dev->dev.groups = iio_dev_opaque->groups;

	ret = cdev_device_add(&iio_dev_opaque->chrdev, &indio_dev->dev);
	if (ret < 0)
		goto error_unreg_eventset;

	return 0;

	/* Unwind in strict reverse order of the setup above */
error_unreg_eventset:
	iio_device_unregister_eventset(indio_dev);
error_free_sysfs:
	iio_device_unregister_sysfs(indio_dev);
error_buffer_free_sysfs:
	iio_buffers_free_sysfs_and_mask(indio_dev);
error_unreg_debugfs:
	iio_device_unregister_debugfs(indio_dev);
	return ret;
}
EXPORT_SYMBOL(__iio_device_register);
2130 
/**
 * iio_device_unregister() - unregister a device from the IIO subsystem
 * @indio_dev:		Device structure representing the device.
 */
void iio_device_unregister(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	/* Remove the chrdev first so no new file handles can be opened */
	cdev_device_del(&iio_dev_opaque->chrdev, &indio_dev->dev);

	scoped_guard(mutex, &iio_dev_opaque->info_exist_lock) {
		iio_device_unregister_debugfs(indio_dev);

		iio_disable_all_buffers(indio_dev);

		/*
		 * Clearing info under info_exist_lock makes already-open file
		 * handles fail subsequent operations with -ENODEV (see the
		 * NULL check in iio_ioctl) instead of crashing.
		 */
		indio_dev->info = NULL;

		/* Unblock anyone sleeping on events or buffer poll */
		iio_device_wakeup_eventset(indio_dev);
		iio_buffer_wakeup_poll(indio_dev);
	}

	/* Done outside the lock; no handle can reach the buffers anymore */
	iio_buffers_free_sysfs_and_mask(indio_dev);
}
EXPORT_SYMBOL(iio_device_unregister);
2155 
/* devm_add_action_or_reset() callback: unregister a managed IIO device */
static void devm_iio_device_unreg(void *indio_dev)
{
	iio_device_unregister(indio_dev);
}
2160 
__devm_iio_device_register(struct device * dev,struct iio_dev * indio_dev,struct module * this_mod)2161 int __devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev,
2162 			       struct module *this_mod)
2163 {
2164 	int ret;
2165 
2166 	ret = __iio_device_register(indio_dev, this_mod);
2167 	if (ret)
2168 		return ret;
2169 
2170 	return devm_add_action_or_reset(dev, devm_iio_device_unreg, indio_dev);
2171 }
2172 EXPORT_SYMBOL_GPL(__devm_iio_device_register);
2173 
2174 /**
2175  * __iio_device_claim_direct - Keep device in direct mode
2176  * @indio_dev:	the iio_dev associated with the device
2177  *
2178  * If the device is in direct mode it is guaranteed to stay
2179  * that way until __iio_device_release_direct() is called.
2180  *
2181  * Use with __iio_device_release_direct().
2182  *
2183  * Drivers should only call iio_device_claim_direct().
2184  *
2185  * Returns: true on success, false on failure.
2186  */
__iio_device_claim_direct(struct iio_dev * indio_dev)2187 bool __iio_device_claim_direct(struct iio_dev *indio_dev)
2188 {
2189 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
2190 
2191 	mutex_lock(&iio_dev_opaque->mlock);
2192 
2193 	if (iio_buffer_enabled(indio_dev)) {
2194 		mutex_unlock(&iio_dev_opaque->mlock);
2195 		return false;
2196 	}
2197 	return true;
2198 }
2199 EXPORT_SYMBOL_GPL(__iio_device_claim_direct);
2200 
2201 /**
2202  * __iio_device_release_direct - releases claim on direct mode
2203  * @indio_dev:	the iio_dev associated with the device
2204  *
2205  * Release the claim. Device is no longer guaranteed to stay
2206  * in direct mode.
2207  *
2208  * Drivers should only call iio_device_release_direct().
2209  *
2210  * Use with __iio_device_claim_direct()
2211  */
__iio_device_release_direct(struct iio_dev * indio_dev)2212 void __iio_device_release_direct(struct iio_dev *indio_dev)
2213 {
2214 	mutex_unlock(&to_iio_dev_opaque(indio_dev)->mlock);
2215 }
2216 EXPORT_SYMBOL_GPL(__iio_device_release_direct);
2217 
2218 /**
2219  * iio_device_claim_buffer_mode - Keep device in buffer mode
2220  * @indio_dev:	the iio_dev associated with the device
2221  *
2222  * If the device is in buffer mode it is guaranteed to stay
2223  * that way until iio_device_release_buffer_mode() is called.
2224  *
2225  * Use with iio_device_release_buffer_mode().
2226  *
2227  * Returns: 0 on success, -EBUSY on failure.
2228  */
iio_device_claim_buffer_mode(struct iio_dev * indio_dev)2229 int iio_device_claim_buffer_mode(struct iio_dev *indio_dev)
2230 {
2231 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
2232 
2233 	mutex_lock(&iio_dev_opaque->mlock);
2234 
2235 	if (iio_buffer_enabled(indio_dev))
2236 		return 0;
2237 
2238 	mutex_unlock(&iio_dev_opaque->mlock);
2239 	return -EBUSY;
2240 }
2241 EXPORT_SYMBOL_GPL(iio_device_claim_buffer_mode);
2242 
2243 /**
2244  * iio_device_release_buffer_mode - releases claim on buffer mode
2245  * @indio_dev:	the iio_dev associated with the device
2246  *
2247  * Release the claim. Device is no longer guaranteed to stay
2248  * in buffer mode.
2249  *
2250  * Use with iio_device_claim_buffer_mode().
2251  */
iio_device_release_buffer_mode(struct iio_dev * indio_dev)2252 void iio_device_release_buffer_mode(struct iio_dev *indio_dev)
2253 {
2254 	mutex_unlock(&to_iio_dev_opaque(indio_dev)->mlock);
2255 }
2256 EXPORT_SYMBOL_GPL(iio_device_release_buffer_mode);
2257 
2258 /**
2259  * iio_device_get_current_mode() - helper function providing read-only access to
2260  *				   the opaque @currentmode variable
2261  * @indio_dev:			   IIO device structure for device
2262  */
iio_device_get_current_mode(struct iio_dev * indio_dev)2263 int iio_device_get_current_mode(struct iio_dev *indio_dev)
2264 {
2265 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
2266 
2267 	return iio_dev_opaque->currentmode;
2268 }
2269 EXPORT_SYMBOL_GPL(iio_device_get_current_mode);
2270 
/* Core init runs at subsys_initcall time, before IIO driver initcalls */
subsys_initcall(iio_init);
module_exit(iio_exit);

MODULE_AUTHOR("Jonathan Cameron <jic23@kernel.org>");
MODULE_DESCRIPTION("Industrial I/O core");
MODULE_LICENSE("GPL");
2277