1 // SPDX-License-Identifier: GPL-2.0-only 2 /* The industrial I/O core 3 * 4 * Copyright (c) 2008 Jonathan Cameron 5 * 6 * Based on elements of hwmon and input subsystems. 7 */ 8 9 #define pr_fmt(fmt) "iio-core: " fmt 10 11 #include <linux/kernel.h> 12 #include <linux/module.h> 13 #include <linux/idr.h> 14 #include <linux/kdev_t.h> 15 #include <linux/err.h> 16 #include <linux/device.h> 17 #include <linux/fs.h> 18 #include <linux/poll.h> 19 #include <linux/property.h> 20 #include <linux/sched.h> 21 #include <linux/wait.h> 22 #include <linux/cdev.h> 23 #include <linux/slab.h> 24 #include <linux/anon_inodes.h> 25 #include <linux/debugfs.h> 26 #include <linux/mutex.h> 27 #include <linux/iio/iio.h> 28 #include <linux/iio/iio-opaque.h> 29 #include "iio_core.h" 30 #include "iio_core_trigger.h" 31 #include <linux/iio/sysfs.h> 32 #include <linux/iio/events.h> 33 #include <linux/iio/buffer.h> 34 #include <linux/iio/buffer_impl.h> 35 36 /* IDA to assign each registered device a unique id */ 37 static DEFINE_IDA(iio_ida); 38 39 static dev_t iio_devt; 40 41 #define IIO_DEV_MAX 256 42 struct bus_type iio_bus_type = { 43 .name = "iio", 44 }; 45 EXPORT_SYMBOL(iio_bus_type); 46 47 static struct dentry *iio_debugfs_dentry; 48 49 static const char * const iio_direction[] = { 50 [0] = "in", 51 [1] = "out", 52 }; 53 54 static const char * const iio_chan_type_name_spec[] = { 55 [IIO_VOLTAGE] = "voltage", 56 [IIO_CURRENT] = "current", 57 [IIO_POWER] = "power", 58 [IIO_ACCEL] = "accel", 59 [IIO_ANGL_VEL] = "anglvel", 60 [IIO_MAGN] = "magn", 61 [IIO_LIGHT] = "illuminance", 62 [IIO_INTENSITY] = "intensity", 63 [IIO_PROXIMITY] = "proximity", 64 [IIO_TEMP] = "temp", 65 [IIO_INCLI] = "incli", 66 [IIO_ROT] = "rot", 67 [IIO_ANGL] = "angl", 68 [IIO_TIMESTAMP] = "timestamp", 69 [IIO_CAPACITANCE] = "capacitance", 70 [IIO_ALTVOLTAGE] = "altvoltage", 71 [IIO_CCT] = "cct", 72 [IIO_PRESSURE] = "pressure", 73 [IIO_HUMIDITYRELATIVE] = "humidityrelative", 74 [IIO_ACTIVITY] = "activity", 75 [IIO_STEPS] = "steps", 76 [IIO_ENERGY] = "energy", 77 [IIO_DISTANCE] = "distance", 78 [IIO_VELOCITY] = "velocity", 79 [IIO_CONCENTRATION] = "concentration", 80 [IIO_RESISTANCE] = "resistance", 81 [IIO_PH] = "ph", 82 [IIO_UVINDEX] = "uvindex", 83 [IIO_ELECTRICALCONDUCTIVITY] = "electricalconductivity", 84 [IIO_COUNT] = "count", 85 [IIO_INDEX] = "index", 86 [IIO_GRAVITY] = "gravity", 87 [IIO_POSITIONRELATIVE] = "positionrelative", 88 [IIO_PHASE] = "phase", 89 [IIO_MASSCONCENTRATION] = "massconcentration", 90 }; 91 92 static const char * const iio_modifier_names[] = { 93 [IIO_MOD_X] = "x", 94 [IIO_MOD_Y] = "y", 95 [IIO_MOD_Z] = "z", 96 [IIO_MOD_X_AND_Y] = "x&y", 97 [IIO_MOD_X_AND_Z] = "x&z", 98 [IIO_MOD_Y_AND_Z] = "y&z", 99 [IIO_MOD_X_AND_Y_AND_Z] = "x&y&z", 100 [IIO_MOD_X_OR_Y] = "x|y", 101 [IIO_MOD_X_OR_Z] = "x|z", 102 [IIO_MOD_Y_OR_Z] = "y|z", 103 [IIO_MOD_X_OR_Y_OR_Z] = "x|y|z", 104 [IIO_MOD_ROOT_SUM_SQUARED_X_Y] = "sqrt(x^2+y^2)", 105 [IIO_MOD_SUM_SQUARED_X_Y_Z] = "x^2+y^2+z^2", 106 [IIO_MOD_LIGHT_BOTH] = "both", 107 [IIO_MOD_LIGHT_IR] = "ir", 108 [IIO_MOD_LIGHT_CLEAR] = "clear", 109 [IIO_MOD_LIGHT_RED] = "red", 110 [IIO_MOD_LIGHT_GREEN] = "green", 111 [IIO_MOD_LIGHT_BLUE] = "blue", 112 [IIO_MOD_LIGHT_UV] = "uv", 113 [IIO_MOD_LIGHT_DUV] = "duv", 114 [IIO_MOD_QUATERNION] = "quaternion", 115 [IIO_MOD_TEMP_AMBIENT] = "ambient", 116 [IIO_MOD_TEMP_OBJECT] = "object", 117 [IIO_MOD_NORTH_MAGN] = "from_north_magnetic", 118 [IIO_MOD_NORTH_TRUE] = "from_north_true", 119 [IIO_MOD_NORTH_MAGN_TILT_COMP] = "from_north_magnetic_tilt_comp", 120 [IIO_MOD_NORTH_TRUE_TILT_COMP] = "from_north_true_tilt_comp", 121 [IIO_MOD_RUNNING] = "running", 122 [IIO_MOD_JOGGING] = "jogging", 123 [IIO_MOD_WALKING] = "walking", 124 [IIO_MOD_STILL] = "still", 125 [IIO_MOD_ROOT_SUM_SQUARED_X_Y_Z] = "sqrt(x^2+y^2+z^2)", 126 [IIO_MOD_I] = "i", 127 [IIO_MOD_Q] = "q", 128 [IIO_MOD_CO2] = "co2", 129 [IIO_MOD_VOC] = "voc", 130 [IIO_MOD_PM1] = "pm1", 131 [IIO_MOD_PM2P5] = "pm2p5", 132 [IIO_MOD_PM4] = "pm4", 133 [IIO_MOD_PM10] = "pm10", 134 [IIO_MOD_ETHANOL] = "ethanol", 135 [IIO_MOD_H2] = "h2", 136 [IIO_MOD_O2] = "o2", 137 }; 138 139 /* relies on pairs of these shared then separate */ 140 static const char * const iio_chan_info_postfix[] = { 141 [IIO_CHAN_INFO_RAW] = "raw", 142 [IIO_CHAN_INFO_PROCESSED] = "input", 143 [IIO_CHAN_INFO_SCALE] = "scale", 144 [IIO_CHAN_INFO_OFFSET] = "offset", 145 [IIO_CHAN_INFO_CALIBSCALE] = "calibscale", 146 [IIO_CHAN_INFO_CALIBBIAS] = "calibbias", 147 [IIO_CHAN_INFO_PEAK] = "peak_raw", 148 [IIO_CHAN_INFO_PEAK_SCALE] = "peak_scale", 149 [IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW] = "quadrature_correction_raw", 150 [IIO_CHAN_INFO_AVERAGE_RAW] = "mean_raw", 151 [IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY] 152 = "filter_low_pass_3db_frequency", 153 [IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY] 154 = "filter_high_pass_3db_frequency", 155 [IIO_CHAN_INFO_SAMP_FREQ] = "sampling_frequency", 156 [IIO_CHAN_INFO_FREQUENCY] = "frequency", 157 [IIO_CHAN_INFO_PHASE] = "phase", 158 [IIO_CHAN_INFO_HARDWAREGAIN] = "hardwaregain", 159 [IIO_CHAN_INFO_HYSTERESIS] = "hysteresis", 160 [IIO_CHAN_INFO_HYSTERESIS_RELATIVE] = "hysteresis_relative", 161 [IIO_CHAN_INFO_INT_TIME] = "integration_time", 162 [IIO_CHAN_INFO_ENABLE] = "en", 163 [IIO_CHAN_INFO_CALIBHEIGHT] = "calibheight", 164 [IIO_CHAN_INFO_CALIBWEIGHT] = "calibweight", 165 [IIO_CHAN_INFO_DEBOUNCE_COUNT] = "debounce_count", 166 [IIO_CHAN_INFO_DEBOUNCE_TIME] = "debounce_time", 167 [IIO_CHAN_INFO_CALIBEMISSIVITY] = "calibemissivity", 168 [IIO_CHAN_INFO_OVERSAMPLING_RATIO] = "oversampling_ratio", 169 [IIO_CHAN_INFO_THERMOCOUPLE_TYPE] = "thermocouple_type", 170 [IIO_CHAN_INFO_CALIBAMBIENT] = "calibambient", 171 [IIO_CHAN_INFO_ZEROPOINT] = "zeropoint", 172 }; 173 /** 174 * iio_device_id() - query the unique ID for the device 175 * @indio_dev: Device structure whose ID is being queried 176 * 177 * The IIO device ID is a unique index used for example for the naming 178 * of the character device /dev/iio\:device[ID] 179 */ 180 int iio_device_id(struct iio_dev *indio_dev) 181 { 182 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 183 184 return iio_dev_opaque->id; 185 } 186 EXPORT_SYMBOL_GPL(iio_device_id); 187 188 /** 189 * iio_buffer_enabled() - helper function to test if the buffer is enabled 190 * @indio_dev: IIO device structure for device 191 */ 192 bool iio_buffer_enabled(struct iio_dev *indio_dev) 193 { 194 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 195 196 return iio_dev_opaque->currentmode 197 & (INDIO_BUFFER_TRIGGERED | INDIO_BUFFER_HARDWARE | 198 INDIO_BUFFER_SOFTWARE); 199 } 200 EXPORT_SYMBOL_GPL(iio_buffer_enabled); 201 202 /** 203 * iio_sysfs_match_string_with_gaps - matches given string in an array with gaps 204 * @array: array of strings 205 * @n: number of strings in the array 206 * @str: string to match with 207 * 208 * Returns index of @str in the @array or -EINVAL, similar to match_string(). 209 * Uses sysfs_streq instead of strcmp for matching. 210 * 211 * This routine will look for a string in an array of strings. 212 * The search will continue until the element is found or the n-th element 213 * is reached, regardless of any NULL elements in the array. 214 */ 215 static int iio_sysfs_match_string_with_gaps(const char * const *array, size_t n, 216 const char *str) 217 { 218 const char *item; 219 int index; 220 221 for (index = 0; index < n; index++) { 222 item = array[index]; 223 if (!item) 224 continue; 225 if (sysfs_streq(item, str)) 226 return index; 227 } 228 229 return -EINVAL; 230 } 231 232 #if defined(CONFIG_DEBUG_FS) 233 /* 234 * There's also a CONFIG_DEBUG_FS guard in include/linux/iio/iio.h for 235 * iio_get_debugfs_dentry() to make it inline if CONFIG_DEBUG_FS is undefined 236 */ 237 struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev) 238 { 239 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 240 241 return iio_dev_opaque->debugfs_dentry; 242 } 243 EXPORT_SYMBOL_GPL(iio_get_debugfs_dentry); 244 #endif 245 246 /** 247 * iio_find_channel_from_si() - get channel from its scan index 248 * @indio_dev: device 249 * @si: scan index to match 250 */ 251 const struct iio_chan_spec 252 *iio_find_channel_from_si(struct iio_dev *indio_dev, int si) 253 { 254 int i; 255 256 for (i = 0; i < indio_dev->num_channels; i++) 257 if (indio_dev->channels[i].scan_index == si) 258 return &indio_dev->channels[i]; 259 return NULL; 260 } 261 262 /* This turns up an awful lot */ 263 ssize_t iio_read_const_attr(struct device *dev, 264 struct device_attribute *attr, 265 char *buf) 266 { 267 return sysfs_emit(buf, "%s\n", to_iio_const_attr(attr)->string); 268 } 269 EXPORT_SYMBOL(iio_read_const_attr); 270 271 /** 272 * iio_device_set_clock() - Set current timestamping clock for the device 273 * @indio_dev: IIO device structure containing the device 274 * @clock_id: timestamping clock posix identifier to set. 275 */ 276 int iio_device_set_clock(struct iio_dev *indio_dev, clockid_t clock_id) 277 { 278 int ret; 279 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 280 const struct iio_event_interface *ev_int = iio_dev_opaque->event_interface; 281 282 ret = mutex_lock_interruptible(&indio_dev->mlock); 283 if (ret) 284 return ret; 285 if ((ev_int && iio_event_enabled(ev_int)) || 286 iio_buffer_enabled(indio_dev)) { 287 mutex_unlock(&indio_dev->mlock); 288 return -EBUSY; 289 } 290 iio_dev_opaque->clock_id = clock_id; 291 mutex_unlock(&indio_dev->mlock); 292 293 return 0; 294 } 295 EXPORT_SYMBOL(iio_device_set_clock); 296 297 /** 298 * iio_device_get_clock() - Retrieve current timestamping clock for the device 299 * @indio_dev: IIO device structure containing the device 300 */ 301 clockid_t iio_device_get_clock(const struct iio_dev *indio_dev) 302 { 303 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 304 305 return iio_dev_opaque->clock_id; 306 } 307 EXPORT_SYMBOL(iio_device_get_clock); 308 309 /** 310 * iio_get_time_ns() - utility function to get a time stamp for events etc 311 * @indio_dev: device 312 */ 313 s64 iio_get_time_ns(const struct iio_dev *indio_dev) 314 { 315 struct timespec64 tp; 316 317 switch (iio_device_get_clock(indio_dev)) { 318 case CLOCK_REALTIME: 319 return ktime_get_real_ns(); 320 case CLOCK_MONOTONIC: 321 return ktime_get_ns(); 322 case CLOCK_MONOTONIC_RAW: 323 return ktime_get_raw_ns(); 324 case CLOCK_REALTIME_COARSE: 325 return ktime_to_ns(ktime_get_coarse_real()); 326 case CLOCK_MONOTONIC_COARSE: 327 ktime_get_coarse_ts64(&tp); 328 return timespec64_to_ns(&tp); 329 case CLOCK_BOOTTIME: 330 return ktime_get_boottime_ns(); 331 case CLOCK_TAI: 332 return ktime_get_clocktai_ns(); 333 default: 334 BUG(); 335 } 336 } 337 EXPORT_SYMBOL(iio_get_time_ns); 338 339 static int __init iio_init(void) 340 { 341 int ret; 342 343 /* Register sysfs bus */ 344 ret = bus_register(&iio_bus_type); 345 if (ret < 0) { 346 pr_err("could not register bus type\n"); 347 goto error_nothing; 348 } 349 350 ret = alloc_chrdev_region(&iio_devt, 0, IIO_DEV_MAX, "iio"); 351 if (ret < 0) { 352 pr_err("failed to allocate char dev region\n"); 353 goto error_unregister_bus_type; 354 } 355 356 iio_debugfs_dentry = debugfs_create_dir("iio", NULL); 357 358 return 0; 359 360 error_unregister_bus_type: 361 bus_unregister(&iio_bus_type); 362 error_nothing: 363 return ret; 364 } 365 366 static void __exit iio_exit(void) 367 { 368 if (iio_devt) 369 unregister_chrdev_region(iio_devt, IIO_DEV_MAX); 370 bus_unregister(&iio_bus_type); 371 debugfs_remove(iio_debugfs_dentry); 372 } 373 374 #if defined(CONFIG_DEBUG_FS) 375 static ssize_t iio_debugfs_read_reg(struct file *file, char __user *userbuf, 376 size_t count, loff_t *ppos) 377 { 378 struct iio_dev *indio_dev = file->private_data; 379 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 380 unsigned int val = 0; 381 int ret; 382 383 if (*ppos > 0) 384 return simple_read_from_buffer(userbuf, count, ppos, 385 iio_dev_opaque->read_buf, 386 iio_dev_opaque->read_buf_len); 387 388 ret = indio_dev->info->debugfs_reg_access(indio_dev, 389 iio_dev_opaque->cached_reg_addr, 390 0, &val); 391 if (ret) { 392 dev_err(indio_dev->dev.parent, "%s: read failed\n", __func__); 393 return ret; 394 } 395 396 iio_dev_opaque->read_buf_len = snprintf(iio_dev_opaque->read_buf, 397 sizeof(iio_dev_opaque->read_buf), 398 "0x%X\n", val); 399 400 return simple_read_from_buffer(userbuf, count, ppos, 401 iio_dev_opaque->read_buf, 402 iio_dev_opaque->read_buf_len); 403 } 404 405 static ssize_t iio_debugfs_write_reg(struct file *file, 406 const char __user *userbuf, size_t count, loff_t *ppos) 407 { 408 struct iio_dev *indio_dev = file->private_data; 409 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 410 unsigned int reg, val; 411 char buf[80]; 412 int ret; 413 414 count = min_t(size_t, count, (sizeof(buf)-1)); 415 if (copy_from_user(buf, userbuf, count)) 416 return -EFAULT; 417 418 buf[count] = 0; 419 420 ret = sscanf(buf, "%i %i", ®, &val); 421 422 switch (ret) { 423 case 1: 424 iio_dev_opaque->cached_reg_addr = reg; 425 break; 426 case 2: 427 iio_dev_opaque->cached_reg_addr = reg; 428 ret = indio_dev->info->debugfs_reg_access(indio_dev, reg, 429 val, NULL); 430 if (ret) { 431 dev_err(indio_dev->dev.parent, "%s: write failed\n", 432 __func__); 433 return ret; 434 } 435 break; 436 default: 437 return -EINVAL; 438 } 439 440 return count; 441 } 442 443 static const struct file_operations iio_debugfs_reg_fops = { 444 .open = simple_open, 445 .read = iio_debugfs_read_reg, 446 .write = iio_debugfs_write_reg, 447 }; 448 449 static void iio_device_unregister_debugfs(struct iio_dev *indio_dev) 450 { 451 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 452 453 debugfs_remove_recursive(iio_dev_opaque->debugfs_dentry); 454 } 455 456 static void iio_device_register_debugfs(struct iio_dev *indio_dev) 457 { 458 struct iio_dev_opaque *iio_dev_opaque; 459 460 if (indio_dev->info->debugfs_reg_access == NULL) 461 return; 462 463 if (!iio_debugfs_dentry) 464 return; 465 466 iio_dev_opaque = to_iio_dev_opaque(indio_dev); 467 468 iio_dev_opaque->debugfs_dentry = 469 debugfs_create_dir(dev_name(&indio_dev->dev), 470 iio_debugfs_dentry); 471 472 debugfs_create_file("direct_reg_access", 0644, 473 iio_dev_opaque->debugfs_dentry, indio_dev, 474 &iio_debugfs_reg_fops); 475 } 476 #else 477 static void iio_device_register_debugfs(struct iio_dev *indio_dev) 478 { 479 } 480 481 static void iio_device_unregister_debugfs(struct iio_dev *indio_dev) 482 { 483 } 484 #endif /* CONFIG_DEBUG_FS */ 485 486 static ssize_t iio_read_channel_ext_info(struct device *dev, 487 struct device_attribute *attr, 488 char *buf) 489 { 490 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 491 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); 492 const struct iio_chan_spec_ext_info *ext_info; 493 494 ext_info = &this_attr->c->ext_info[this_attr->address]; 495 496 return ext_info->read(indio_dev, ext_info->private, this_attr->c, buf); 497 } 498 499 static ssize_t iio_write_channel_ext_info(struct device *dev, 500 struct device_attribute *attr, 501 const char *buf, 502 size_t len) 503 { 504 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 505 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); 506 const struct iio_chan_spec_ext_info *ext_info; 507 508 ext_info = &this_attr->c->ext_info[this_attr->address]; 509 510 return ext_info->write(indio_dev, ext_info->private, 511 this_attr->c, buf, len); 512 } 513 514 ssize_t iio_enum_available_read(struct iio_dev *indio_dev, 515 uintptr_t priv, const struct iio_chan_spec *chan, char *buf) 516 { 517 const struct iio_enum *e = (const struct iio_enum *)priv; 518 unsigned int i; 519 size_t len = 0; 520 521 if (!e->num_items) 522 return 0; 523 524 for (i = 0; i < e->num_items; ++i) { 525 if (!e->items[i]) 526 continue; 527 len += sysfs_emit_at(buf, len, "%s ", e->items[i]); 528 } 529 530 /* replace last space with a newline */ 531 buf[len - 1] = '\n'; 532 533 return len; 534 } 535 EXPORT_SYMBOL_GPL(iio_enum_available_read); 536 537 ssize_t iio_enum_read(struct iio_dev *indio_dev, 538 uintptr_t priv, const struct iio_chan_spec *chan, char *buf) 539 { 540 const struct iio_enum *e = (const struct iio_enum *)priv; 541 int i; 542 543 if (!e->get) 544 return -EINVAL; 545 546 i = e->get(indio_dev, chan); 547 if (i < 0) 548 return i; 549 else if (i >= e->num_items || !e->items[i]) 550 return -EINVAL; 551 552 return sysfs_emit(buf, "%s\n", e->items[i]); 553 } 554 EXPORT_SYMBOL_GPL(iio_enum_read); 555 556 ssize_t iio_enum_write(struct iio_dev *indio_dev, 557 uintptr_t priv, const struct iio_chan_spec *chan, const char *buf, 558 size_t len) 559 { 560 const struct iio_enum *e = (const struct iio_enum *)priv; 561 int ret; 562 563 if (!e->set) 564 return -EINVAL; 565 566 ret = iio_sysfs_match_string_with_gaps(e->items, e->num_items, buf); 567 if (ret < 0) 568 return ret; 569 570 ret = e->set(indio_dev, chan, ret); 571 return ret ? ret : len; 572 } 573 EXPORT_SYMBOL_GPL(iio_enum_write); 574 575 static const struct iio_mount_matrix iio_mount_idmatrix = { 576 .rotation = { 577 "1", "0", "0", 578 "0", "1", "0", 579 "0", "0", "1" 580 } 581 }; 582 583 static int iio_setup_mount_idmatrix(const struct device *dev, 584 struct iio_mount_matrix *matrix) 585 { 586 *matrix = iio_mount_idmatrix; 587 dev_info(dev, "mounting matrix not found: using identity...\n"); 588 return 0; 589 } 590 591 ssize_t iio_show_mount_matrix(struct iio_dev *indio_dev, uintptr_t priv, 592 const struct iio_chan_spec *chan, char *buf) 593 { 594 const struct iio_mount_matrix *mtx = ((iio_get_mount_matrix_t *) 595 priv)(indio_dev, chan); 596 597 if (IS_ERR(mtx)) 598 return PTR_ERR(mtx); 599 600 if (!mtx) 601 mtx = &iio_mount_idmatrix; 602 603 return sysfs_emit(buf, "%s, %s, %s; %s, %s, %s; %s, %s, %s\n", 604 mtx->rotation[0], mtx->rotation[1], mtx->rotation[2], 605 mtx->rotation[3], mtx->rotation[4], mtx->rotation[5], 606 mtx->rotation[6], mtx->rotation[7], mtx->rotation[8]); 607 } 608 EXPORT_SYMBOL_GPL(iio_show_mount_matrix); 609 610 /** 611 * iio_read_mount_matrix() - retrieve iio device mounting matrix from 612 * device "mount-matrix" property 613 * @dev: device the mounting matrix property is assigned to 614 * @matrix: where to store retrieved matrix 615 * 616 * If device is assigned no mounting matrix property, a default 3x3 identity 617 * matrix will be filled in. 618 * 619 * Return: 0 if success, or a negative error code on failure. 620 */ 621 int iio_read_mount_matrix(struct device *dev, struct iio_mount_matrix *matrix) 622 { 623 size_t len = ARRAY_SIZE(iio_mount_idmatrix.rotation); 624 int err; 625 626 err = device_property_read_string_array(dev, "mount-matrix", matrix->rotation, len); 627 if (err == len) 628 return 0; 629 630 if (err >= 0) 631 /* Invalid number of matrix entries. */ 632 return -EINVAL; 633 634 if (err != -EINVAL) 635 /* Invalid matrix declaration format. */ 636 return err; 637 638 /* Matrix was not declared at all: fallback to identity. */ 639 return iio_setup_mount_idmatrix(dev, matrix); 640 } 641 EXPORT_SYMBOL(iio_read_mount_matrix); 642 643 static ssize_t __iio_format_value(char *buf, size_t offset, unsigned int type, 644 int size, const int *vals) 645 { 646 int tmp0, tmp1; 647 s64 tmp2; 648 bool scale_db = false; 649 650 switch (type) { 651 case IIO_VAL_INT: 652 return sysfs_emit_at(buf, offset, "%d", vals[0]); 653 case IIO_VAL_INT_PLUS_MICRO_DB: 654 scale_db = true; 655 fallthrough; 656 case IIO_VAL_INT_PLUS_MICRO: 657 if (vals[1] < 0) 658 return sysfs_emit_at(buf, offset, "-%d.%06u%s", 659 abs(vals[0]), -vals[1], 660 scale_db ? " dB" : ""); 661 else 662 return sysfs_emit_at(buf, offset, "%d.%06u%s", vals[0], 663 vals[1], scale_db ? " dB" : ""); 664 case IIO_VAL_INT_PLUS_NANO: 665 if (vals[1] < 0) 666 return sysfs_emit_at(buf, offset, "-%d.%09u", 667 abs(vals[0]), -vals[1]); 668 else 669 return sysfs_emit_at(buf, offset, "%d.%09u", vals[0], 670 vals[1]); 671 case IIO_VAL_FRACTIONAL: 672 tmp2 = div_s64((s64)vals[0] * 1000000000LL, vals[1]); 673 tmp1 = vals[1]; 674 tmp0 = (int)div_s64_rem(tmp2, 1000000000, &tmp1); 675 if ((tmp2 < 0) && (tmp0 == 0)) 676 return sysfs_emit_at(buf, offset, "-0.%09u", abs(tmp1)); 677 else 678 return sysfs_emit_at(buf, offset, "%d.%09u", tmp0, 679 abs(tmp1)); 680 case IIO_VAL_FRACTIONAL_LOG2: 681 tmp2 = shift_right((s64)vals[0] * 1000000000LL, vals[1]); 682 tmp0 = (int)div_s64_rem(tmp2, 1000000000LL, &tmp1); 683 if (tmp0 == 0 && tmp2 < 0) 684 return sysfs_emit_at(buf, offset, "-0.%09u", abs(tmp1)); 685 else 686 return sysfs_emit_at(buf, offset, "%d.%09u", tmp0, 687 abs(tmp1)); 688 case IIO_VAL_INT_MULTIPLE: 689 { 690 int i; 691 int l = 0; 692 693 for (i = 0; i < size; ++i) 694 l += sysfs_emit_at(buf, offset + l, "%d ", vals[i]); 695 return l; 696 } 697 case IIO_VAL_CHAR: 698 return sysfs_emit_at(buf, offset, "%c", (char)vals[0]); 699 case IIO_VAL_INT_64: 700 tmp2 = (s64)((((u64)vals[1]) << 32) | (u32)vals[0]); 701 return sysfs_emit_at(buf, offset, "%lld", tmp2); 702 default: 703 return 0; 704 } 705 } 706 707 /** 708 * iio_format_value() - Formats a IIO value into its string representation 709 * @buf: The buffer to which the formatted value gets written 710 * which is assumed to be big enough (i.e. PAGE_SIZE). 711 * @type: One of the IIO_VAL_* constants. This decides how the val 712 * and val2 parameters are formatted. 713 * @size: Number of IIO value entries contained in vals 714 * @vals: Pointer to the values, exact meaning depends on the 715 * type parameter. 716 * 717 * Return: 0 by default, a negative number on failure or the 718 * total number of characters written for a type that belongs 719 * to the IIO_VAL_* constant. 720 */ 721 ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals) 722 { 723 ssize_t len; 724 725 len = __iio_format_value(buf, 0, type, size, vals); 726 if (len >= PAGE_SIZE - 1) 727 return -EFBIG; 728 729 return len + sysfs_emit_at(buf, len, "\n"); 730 } 731 EXPORT_SYMBOL_GPL(iio_format_value); 732 733 static ssize_t iio_read_channel_label(struct device *dev, 734 struct device_attribute *attr, 735 char *buf) 736 { 737 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 738 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); 739 740 if (indio_dev->info->read_label) 741 return indio_dev->info->read_label(indio_dev, this_attr->c, buf); 742 743 if (this_attr->c->extend_name) 744 return sysfs_emit(buf, "%s\n", this_attr->c->extend_name); 745 746 return -EINVAL; 747 } 748 749 static ssize_t iio_read_channel_info(struct device *dev, 750 struct device_attribute *attr, 751 char *buf) 752 { 753 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 754 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); 755 int vals[INDIO_MAX_RAW_ELEMENTS]; 756 int ret; 757 int val_len = 2; 758 759 if (indio_dev->info->read_raw_multi) 760 ret = indio_dev->info->read_raw_multi(indio_dev, this_attr->c, 761 INDIO_MAX_RAW_ELEMENTS, 762 vals, &val_len, 763 this_attr->address); 764 else 765 ret = indio_dev->info->read_raw(indio_dev, this_attr->c, 766 &vals[0], &vals[1], this_attr->address); 767 768 if (ret < 0) 769 return ret; 770 771 return iio_format_value(buf, ret, val_len, vals); 772 } 773 774 static ssize_t iio_format_list(char *buf, const int *vals, int type, int length, 775 const char *prefix, const char *suffix) 776 { 777 ssize_t len; 778 int stride; 779 int i; 780 781 switch (type) { 782 case IIO_VAL_INT: 783 stride = 1; 784 break; 785 default: 786 stride = 2; 787 break; 788 } 789 790 len = sysfs_emit(buf, prefix); 791 792 for (i = 0; i <= length - stride; i += stride) { 793 if (i != 0) { 794 len += sysfs_emit_at(buf, len, " "); 795 if (len >= PAGE_SIZE) 796 return -EFBIG; 797 } 798 799 len += __iio_format_value(buf, len, type, stride, &vals[i]); 800 if (len >= PAGE_SIZE) 801 return -EFBIG; 802 } 803 804 len += sysfs_emit_at(buf, len, "%s\n", suffix); 805 806 return len; 807 } 808 809 static ssize_t iio_format_avail_list(char *buf, const int *vals, 810 int type, int length) 811 { 812 813 return iio_format_list(buf, vals, type, length, "", ""); 814 } 815 816 static ssize_t iio_format_avail_range(char *buf, const int *vals, int type) 817 { 818 int length; 819 820 /* 821 * length refers to the array size , not the number of elements. 822 * The purpose is to print the range [min , step ,max] so length should 823 * be 3 in case of int, and 6 for other types. 824 */ 825 switch (type) { 826 case IIO_VAL_INT: 827 length = 3; 828 break; 829 default: 830 length = 6; 831 break; 832 } 833 834 return iio_format_list(buf, vals, type, length, "[", "]"); 835 } 836 837 static ssize_t iio_read_channel_info_avail(struct device *dev, 838 struct device_attribute *attr, 839 char *buf) 840 { 841 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 842 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); 843 const int *vals; 844 int ret; 845 int length; 846 int type; 847 848 ret = indio_dev->info->read_avail(indio_dev, this_attr->c, 849 &vals, &type, &length, 850 this_attr->address); 851 852 if (ret < 0) 853 return ret; 854 switch (ret) { 855 case IIO_AVAIL_LIST: 856 return iio_format_avail_list(buf, vals, type, length); 857 case IIO_AVAIL_RANGE: 858 return iio_format_avail_range(buf, vals, type); 859 default: 860 return -EINVAL; 861 } 862 } 863 864 /** 865 * __iio_str_to_fixpoint() - Parse a fixed-point number from a string 866 * @str: The string to parse 867 * @fract_mult: Multiplier for the first decimal place, should be a power of 10 868 * @integer: The integer part of the number 869 * @fract: The fractional part of the number 870 * @scale_db: True if this should parse as dB 871 * 872 * Returns 0 on success, or a negative error code if the string could not be 873 * parsed. 874 */ 875 static int __iio_str_to_fixpoint(const char *str, int fract_mult, 876 int *integer, int *fract, bool scale_db) 877 { 878 int i = 0, f = 0; 879 bool integer_part = true, negative = false; 880 881 if (fract_mult == 0) { 882 *fract = 0; 883 884 return kstrtoint(str, 0, integer); 885 } 886 887 if (str[0] == '-') { 888 negative = true; 889 str++; 890 } else if (str[0] == '+') { 891 str++; 892 } 893 894 while (*str) { 895 if ('0' <= *str && *str <= '9') { 896 if (integer_part) { 897 i = i * 10 + *str - '0'; 898 } else { 899 f += fract_mult * (*str - '0'); 900 fract_mult /= 10; 901 } 902 } else if (*str == '\n') { 903 if (*(str + 1) == '\0') 904 break; 905 return -EINVAL; 906 } else if (!strncmp(str, " dB", sizeof(" dB") - 1) && scale_db) { 907 /* Ignore the dB suffix */ 908 str += sizeof(" dB") - 1; 909 continue; 910 } else if (!strncmp(str, "dB", sizeof("dB") - 1) && scale_db) { 911 /* Ignore the dB suffix */ 912 str += sizeof("dB") - 1; 913 continue; 914 } else if (*str == '.' && integer_part) { 915 integer_part = false; 916 } else { 917 return -EINVAL; 918 } 919 str++; 920 } 921 922 if (negative) { 923 if (i) 924 i = -i; 925 else 926 f = -f; 927 } 928 929 *integer = i; 930 *fract = f; 931 932 return 0; 933 } 934 935 /** 936 * iio_str_to_fixpoint() - Parse a fixed-point number from a string 937 * @str: The string to parse 938 * @fract_mult: Multiplier for the first decimal place, should be a power of 10 939 * @integer: The integer part of the number 940 * @fract: The fractional part of the number 941 * 942 * Returns 0 on success, or a negative error code if the string could not be 943 * parsed. 944 */ 945 int iio_str_to_fixpoint(const char *str, int fract_mult, 946 int *integer, int *fract) 947 { 948 return __iio_str_to_fixpoint(str, fract_mult, integer, fract, false); 949 } 950 EXPORT_SYMBOL_GPL(iio_str_to_fixpoint); 951 952 static ssize_t iio_write_channel_info(struct device *dev, 953 struct device_attribute *attr, 954 const char *buf, 955 size_t len) 956 { 957 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 958 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); 959 int ret, fract_mult = 100000; 960 int integer, fract = 0; 961 bool is_char = false; 962 bool scale_db = false; 963 964 /* Assumes decimal - precision based on number of digits */ 965 if (!indio_dev->info->write_raw) 966 return -EINVAL; 967 968 if (indio_dev->info->write_raw_get_fmt) 969 switch (indio_dev->info->write_raw_get_fmt(indio_dev, 970 this_attr->c, this_attr->address)) { 971 case IIO_VAL_INT: 972 fract_mult = 0; 973 break; 974 case IIO_VAL_INT_PLUS_MICRO_DB: 975 scale_db = true; 976 fallthrough; 977 case IIO_VAL_INT_PLUS_MICRO: 978 fract_mult = 100000; 979 break; 980 case IIO_VAL_INT_PLUS_NANO: 981 fract_mult = 100000000; 982 break; 983 case IIO_VAL_CHAR: 984 is_char = true; 985 break; 986 default: 987 return -EINVAL; 988 } 989 990 if (is_char) { 991 char ch; 992 993 if (sscanf(buf, "%c", &ch) != 1) 994 return -EINVAL; 995 integer = ch; 996 } else { 997 ret = __iio_str_to_fixpoint(buf, fract_mult, &integer, &fract, 998 scale_db); 999 if (ret) 1000 return ret; 1001 } 1002 1003 ret = indio_dev->info->write_raw(indio_dev, this_attr->c, 1004 integer, fract, this_attr->address); 1005 if (ret) 1006 return ret; 1007 1008 return len; 1009 } 1010 1011 static 1012 int __iio_device_attr_init(struct device_attribute *dev_attr, 1013 const char *postfix, 1014 struct iio_chan_spec const *chan, 1015 ssize_t (*readfunc)(struct device *dev, 1016 struct device_attribute *attr, 1017 char *buf), 1018 ssize_t (*writefunc)(struct device *dev, 1019 struct device_attribute *attr, 1020 const char *buf, 1021 size_t len), 1022 enum iio_shared_by shared_by) 1023 { 1024 int ret = 0; 1025 char *name = NULL; 1026 char *full_postfix; 1027 1028 sysfs_attr_init(&dev_attr->attr); 1029 1030 /* Build up postfix of <extend_name>_<modifier>_postfix */ 1031 if (chan->modified && (shared_by == IIO_SEPARATE)) { 1032 if (chan->extend_name) 1033 full_postfix = kasprintf(GFP_KERNEL, "%s_%s_%s", 1034 iio_modifier_names[chan 1035 ->channel2], 1036 chan->extend_name, 1037 postfix); 1038 else 1039 full_postfix = kasprintf(GFP_KERNEL, "%s_%s", 1040 iio_modifier_names[chan 1041 ->channel2], 1042 postfix); 1043 } else { 1044 if (chan->extend_name == NULL || shared_by != IIO_SEPARATE) 1045 full_postfix = kstrdup(postfix, GFP_KERNEL); 1046 else 1047 full_postfix = kasprintf(GFP_KERNEL, 1048 "%s_%s", 1049 chan->extend_name, 1050 postfix); 1051 } 1052 if (full_postfix == NULL) 1053 return -ENOMEM; 1054 1055 if (chan->differential) { /* Differential can not have modifier */ 1056 switch (shared_by) { 1057 case IIO_SHARED_BY_ALL: 1058 name = kasprintf(GFP_KERNEL, "%s", full_postfix); 1059 break; 1060 case IIO_SHARED_BY_DIR: 1061 name = kasprintf(GFP_KERNEL, "%s_%s", 1062 iio_direction[chan->output], 1063 full_postfix); 1064 break; 1065 case IIO_SHARED_BY_TYPE: 1066 name = kasprintf(GFP_KERNEL, "%s_%s-%s_%s", 1067 iio_direction[chan->output], 1068 iio_chan_type_name_spec[chan->type], 1069 iio_chan_type_name_spec[chan->type], 1070 full_postfix); 1071 break; 1072 case IIO_SEPARATE: 1073 if (!chan->indexed) { 1074 WARN(1, "Differential channels must be indexed\n"); 1075 ret = -EINVAL; 1076 goto error_free_full_postfix; 1077 } 1078 name = kasprintf(GFP_KERNEL, 1079 "%s_%s%d-%s%d_%s", 1080 iio_direction[chan->output], 1081 iio_chan_type_name_spec[chan->type], 1082 chan->channel, 1083 iio_chan_type_name_spec[chan->type], 1084 chan->channel2, 1085 full_postfix); 1086 break; 1087 } 1088 } else { /* Single ended */ 1089 switch (shared_by) { 1090 case IIO_SHARED_BY_ALL: 1091 name = kasprintf(GFP_KERNEL, "%s", full_postfix); 1092 break; 1093 case IIO_SHARED_BY_DIR: 1094 name = kasprintf(GFP_KERNEL, "%s_%s", 1095 iio_direction[chan->output], 1096 full_postfix); 1097 break; 1098 case IIO_SHARED_BY_TYPE: 1099 name = kasprintf(GFP_KERNEL, "%s_%s_%s", 1100 iio_direction[chan->output], 1101 iio_chan_type_name_spec[chan->type], 1102 full_postfix); 1103 break; 1104 1105 case IIO_SEPARATE: 1106 if (chan->indexed) 1107 name = kasprintf(GFP_KERNEL, "%s_%s%d_%s", 1108 iio_direction[chan->output], 1109 iio_chan_type_name_spec[chan->type], 1110 chan->channel, 1111 full_postfix); 1112 else 1113 name = kasprintf(GFP_KERNEL, "%s_%s_%s", 1114 iio_direction[chan->output], 1115 iio_chan_type_name_spec[chan->type], 1116 full_postfix); 1117 break; 1118 } 1119 } 1120 if (name == NULL) { 1121 ret = -ENOMEM; 1122 goto error_free_full_postfix; 1123 } 1124 dev_attr->attr.name = name; 1125 1126 if (readfunc) { 1127 dev_attr->attr.mode |= 0444; 1128 dev_attr->show = readfunc; 1129 } 1130 1131 if (writefunc) { 1132 dev_attr->attr.mode |= 0200; 1133 dev_attr->store = writefunc; 1134 } 1135 1136 error_free_full_postfix: 1137 kfree(full_postfix); 1138 1139 return ret; 1140 } 1141 1142 static void __iio_device_attr_deinit(struct device_attribute *dev_attr) 1143 { 1144 kfree(dev_attr->attr.name); 1145 } 1146 1147 int __iio_add_chan_devattr(const char *postfix, 1148 struct iio_chan_spec const *chan, 1149 ssize_t (*readfunc)(struct device *dev, 1150 struct device_attribute *attr, 1151 char *buf), 1152 ssize_t (*writefunc)(struct device *dev, 1153 struct device_attribute *attr, 1154 const char *buf, 1155 size_t len), 1156 u64 mask, 1157 enum iio_shared_by shared_by, 1158 struct device *dev, 1159 struct iio_buffer *buffer, 1160 struct list_head *attr_list) 1161 { 1162 int ret; 1163 struct iio_dev_attr *iio_attr, *t; 1164 1165 iio_attr = kzalloc(sizeof(*iio_attr), GFP_KERNEL); 1166 if (iio_attr == NULL) 1167 return -ENOMEM; 1168 ret = __iio_device_attr_init(&iio_attr->dev_attr, 1169 postfix, chan, 1170 readfunc, writefunc, shared_by); 1171 if (ret) 1172 goto error_iio_dev_attr_free; 1173 iio_attr->c = chan; 1174 iio_attr->address = mask; 1175 iio_attr->buffer = buffer; 1176 list_for_each_entry(t, attr_list, l) 1177 if (strcmp(t->dev_attr.attr.name, 1178 iio_attr->dev_attr.attr.name) == 0) { 1179 if (shared_by == IIO_SEPARATE) 1180 dev_err(dev, "tried to double register : %s\n", 1181 t->dev_attr.attr.name); 1182 ret = -EBUSY; 1183 goto error_device_attr_deinit; 1184 } 1185 list_add(&iio_attr->l, attr_list); 1186 1187 return 0; 1188 1189 error_device_attr_deinit: 1190 __iio_device_attr_deinit(&iio_attr->dev_attr); 1191 error_iio_dev_attr_free: 1192 kfree(iio_attr); 1193 return ret; 1194 } 1195 1196 static int iio_device_add_channel_label(struct iio_dev *indio_dev, 1197 struct iio_chan_spec const *chan) 1198 { 1199 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 1200 int ret; 1201 1202 if (!indio_dev->info->read_label && !chan->extend_name) 1203 return 0; 1204 1205 ret = __iio_add_chan_devattr("label", 1206 chan, 1207 &iio_read_channel_label, 1208 NULL, 1209 0, 1210 IIO_SEPARATE, 1211 &indio_dev->dev, 1212 NULL, 1213 &iio_dev_opaque->channel_attr_list); 1214 if (ret < 0) 1215 return ret; 1216 1217 return 1; 1218 } 1219 1220 static int iio_device_add_info_mask_type(struct iio_dev *indio_dev, 1221 struct iio_chan_spec const *chan, 1222 enum iio_shared_by shared_by, 1223 const long *infomask) 1224 { 1225 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 1226 int i, ret, attrcount = 0; 1227 1228 for_each_set_bit(i, infomask, sizeof(*infomask)*8) { 1229 if (i >= ARRAY_SIZE(iio_chan_info_postfix)) 1230 return -EINVAL; 1231 ret = __iio_add_chan_devattr(iio_chan_info_postfix[i], 1232 chan, 1233 &iio_read_channel_info, 1234 &iio_write_channel_info, 1235 i, 1236 shared_by, 1237 &indio_dev->dev, 1238 NULL, 1239 &iio_dev_opaque->channel_attr_list); 1240 if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE)) 1241 continue; 1242 else if (ret < 0) 1243 return ret; 1244 attrcount++; 1245 } 1246 1247 return attrcount; 1248 } 1249 1250 static int iio_device_add_info_mask_type_avail(struct iio_dev *indio_dev, 1251 struct iio_chan_spec const *chan, 1252 enum iio_shared_by shared_by, 1253 const long *infomask) 1254 { 1255 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 1256 int i, ret, attrcount = 0; 1257 char *avail_postfix; 1258 1259 for_each_set_bit(i, infomask, sizeof(*infomask) * 8) { 1260 if (i >= ARRAY_SIZE(iio_chan_info_postfix)) 1261 return -EINVAL; 1262 avail_postfix = kasprintf(GFP_KERNEL, 1263 "%s_available", 1264 iio_chan_info_postfix[i]); 1265 if (!avail_postfix) 1266 return -ENOMEM; 1267 1268 ret = __iio_add_chan_devattr(avail_postfix, 1269 chan, 1270 &iio_read_channel_info_avail, 1271 NULL, 1272 i, 1273 shared_by, 1274 &indio_dev->dev, 1275 NULL, 1276 &iio_dev_opaque->channel_attr_list); 1277 kfree(avail_postfix); 1278 if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE)) 1279 continue; 1280 else if (ret < 0) 1281 return ret; 1282 attrcount++; 1283 } 1284 1285 return attrcount; 1286 } 1287 1288 static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev, 1289 struct iio_chan_spec const *chan) 1290 { 1291 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 1292 int ret, attrcount = 0; 1293 const struct iio_chan_spec_ext_info *ext_info; 1294 1295 if (chan->channel < 0) 1296 return 0; 1297 ret = iio_device_add_info_mask_type(indio_dev, chan, 1298 IIO_SEPARATE, 1299 &chan->info_mask_separate); 1300 if (ret < 0) 1301 return ret; 1302 attrcount += ret; 1303 1304 ret = iio_device_add_info_mask_type_avail(indio_dev, chan, 1305 IIO_SEPARATE, 1306 &chan->info_mask_separate_available); 1307 if (ret < 0) 1308 return ret; 1309 attrcount += ret; 1310 1311 ret = iio_device_add_info_mask_type(indio_dev, chan, 1312 IIO_SHARED_BY_TYPE, 1313 &chan->info_mask_shared_by_type); 1314 if (ret < 0) 1315 return ret; 1316 attrcount += ret; 1317 1318 ret = iio_device_add_info_mask_type_avail(indio_dev, chan, 1319 IIO_SHARED_BY_TYPE, 1320 &chan->info_mask_shared_by_type_available); 1321 if (ret < 0) 1322 return ret; 1323 attrcount += ret; 1324 1325 ret = iio_device_add_info_mask_type(indio_dev, chan, 1326 IIO_SHARED_BY_DIR, 1327 &chan->info_mask_shared_by_dir); 1328 if (ret < 0) 1329 return ret; 1330 attrcount += ret; 1331 1332 ret = iio_device_add_info_mask_type_avail(indio_dev, chan, 1333 IIO_SHARED_BY_DIR, 1334 &chan->info_mask_shared_by_dir_available); 1335 if (ret < 0) 1336 return ret; 1337 attrcount += ret; 1338 1339 ret = iio_device_add_info_mask_type(indio_dev, chan, 1340 IIO_SHARED_BY_ALL, 1341 &chan->info_mask_shared_by_all); 1342 if (ret < 0) 1343 return ret; 1344 attrcount += ret; 1345 1346 ret = iio_device_add_info_mask_type_avail(indio_dev, chan, 1347 IIO_SHARED_BY_ALL, 1348 &chan->info_mask_shared_by_all_available); 1349 if (ret < 0) 1350 return ret; 1351 attrcount += ret; 1352 1353 ret = iio_device_add_channel_label(indio_dev, chan); 1354 if (ret < 0) 1355 return ret; 1356 attrcount += ret; 1357 1358 if (chan->ext_info) { 1359 unsigned int i = 0; 1360 1361 for (ext_info = chan->ext_info; ext_info->name; ext_info++) { 1362 ret = __iio_add_chan_devattr(ext_info->name, 1363 chan, 1364 ext_info->read ? 1365 &iio_read_channel_ext_info : NULL, 1366 ext_info->write ? 1367 &iio_write_channel_ext_info : NULL, 1368 i, 1369 ext_info->shared, 1370 &indio_dev->dev, 1371 NULL, 1372 &iio_dev_opaque->channel_attr_list); 1373 i++; 1374 if (ret == -EBUSY && ext_info->shared) 1375 continue; 1376 1377 if (ret) 1378 return ret; 1379 1380 attrcount++; 1381 } 1382 } 1383 1384 return attrcount; 1385 } 1386 1387 /** 1388 * iio_free_chan_devattr_list() - Free a list of IIO device attributes 1389 * @attr_list: List of IIO device attributes 1390 * 1391 * This function frees the memory allocated for each of the IIO device 1392 * attributes in the list. 1393 */ 1394 void iio_free_chan_devattr_list(struct list_head *attr_list) 1395 { 1396 struct iio_dev_attr *p, *n; 1397 1398 list_for_each_entry_safe(p, n, attr_list, l) { 1399 kfree_const(p->dev_attr.attr.name); 1400 list_del(&p->l); 1401 kfree(p); 1402 } 1403 } 1404 1405 static ssize_t name_show(struct device *dev, struct device_attribute *attr, 1406 char *buf) 1407 { 1408 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 1409 1410 return sysfs_emit(buf, "%s\n", indio_dev->name); 1411 } 1412 1413 static DEVICE_ATTR_RO(name); 1414 1415 static ssize_t label_show(struct device *dev, struct device_attribute *attr, 1416 char *buf) 1417 { 1418 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 1419 1420 return sysfs_emit(buf, "%s\n", indio_dev->label); 1421 } 1422 1423 static DEVICE_ATTR_RO(label); 1424 1425 static ssize_t current_timestamp_clock_show(struct device *dev, 1426 struct device_attribute *attr, 1427 char *buf) 1428 { 1429 const struct iio_dev *indio_dev = dev_to_iio_dev(dev); 1430 const clockid_t clk = iio_device_get_clock(indio_dev); 1431 const char *name; 1432 ssize_t sz; 1433 1434 switch (clk) { 1435 case CLOCK_REALTIME: 1436 name = "realtime\n"; 1437 sz = sizeof("realtime\n"); 1438 break; 1439 case CLOCK_MONOTONIC: 1440 name = "monotonic\n"; 1441 sz = sizeof("monotonic\n"); 1442 break; 1443 case CLOCK_MONOTONIC_RAW: 1444 name = "monotonic_raw\n"; 1445 sz = sizeof("monotonic_raw\n"); 1446 break; 1447 case CLOCK_REALTIME_COARSE: 1448 name = "realtime_coarse\n"; 1449 sz = sizeof("realtime_coarse\n"); 1450 break; 1451 case CLOCK_MONOTONIC_COARSE: 1452 name = "monotonic_coarse\n"; 1453 sz = sizeof("monotonic_coarse\n"); 1454 break; 1455 case CLOCK_BOOTTIME: 1456 name = "boottime\n"; 1457 sz = sizeof("boottime\n"); 1458 break; 1459 case CLOCK_TAI: 1460 name = "tai\n"; 1461 sz = sizeof("tai\n"); 1462 break; 1463 default: 1464 BUG(); 1465 } 1466 1467 memcpy(buf, name, sz); 1468 return sz; 1469 } 1470 1471 static ssize_t current_timestamp_clock_store(struct device *dev, 1472 struct device_attribute *attr, 1473 const char *buf, size_t len) 1474 { 1475 clockid_t clk; 1476 int ret; 1477 1478 if (sysfs_streq(buf, "realtime")) 1479 clk = CLOCK_REALTIME; 1480 else if (sysfs_streq(buf, "monotonic")) 1481 clk = CLOCK_MONOTONIC; 1482 else if (sysfs_streq(buf, "monotonic_raw")) 1483 clk = CLOCK_MONOTONIC_RAW; 1484 else if (sysfs_streq(buf, "realtime_coarse")) 1485 clk = CLOCK_REALTIME_COARSE; 1486 else if (sysfs_streq(buf, "monotonic_coarse")) 1487 clk = CLOCK_MONOTONIC_COARSE; 1488 else if (sysfs_streq(buf, "boottime")) 1489 clk = CLOCK_BOOTTIME; 1490 else if (sysfs_streq(buf, "tai")) 1491 clk = CLOCK_TAI; 1492 else 1493 return -EINVAL; 1494 1495 ret = iio_device_set_clock(dev_to_iio_dev(dev), clk); 1496 if (ret) 1497 return ret; 1498 1499 return len; 1500 } 1501 1502 int iio_device_register_sysfs_group(struct iio_dev *indio_dev, 1503 const struct attribute_group *group) 1504 { 1505 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 1506 const struct attribute_group **new, **old = iio_dev_opaque->groups; 1507 unsigned int cnt = iio_dev_opaque->groupcounter; 1508 1509 new = krealloc(old, sizeof(*new) * (cnt + 2), GFP_KERNEL); 1510 if (!new) 1511 return -ENOMEM; 1512 1513 new[iio_dev_opaque->groupcounter++] = group; 1514 new[iio_dev_opaque->groupcounter] = NULL; 1515 1516 iio_dev_opaque->groups = new; 1517 1518 return 0; 1519 } 1520 1521 static DEVICE_ATTR_RW(current_timestamp_clock); 1522 1523 static int iio_device_register_sysfs(struct iio_dev *indio_dev) 1524 { 1525 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 1526 int i, ret = 0, attrcount, attrn, attrcount_orig = 0; 1527 struct iio_dev_attr *p; 1528 struct attribute **attr, *clk = NULL; 1529 1530 /* First count elements in any existing group */ 1531 if (indio_dev->info->attrs) { 1532 attr = indio_dev->info->attrs->attrs; 1533 while (*attr++ != NULL) 1534 attrcount_orig++; 1535 } 1536 attrcount = attrcount_orig; 1537 /* 1538 * New channel registration method - relies on the fact a group does 1539 * not need to be initialized if its name is NULL. 1540 */ 1541 if (indio_dev->channels) 1542 for (i = 0; i < indio_dev->num_channels; i++) { 1543 const struct iio_chan_spec *chan = 1544 &indio_dev->channels[i]; 1545 1546 if (chan->type == IIO_TIMESTAMP) 1547 clk = &dev_attr_current_timestamp_clock.attr; 1548 1549 ret = iio_device_add_channel_sysfs(indio_dev, chan); 1550 if (ret < 0) 1551 goto error_clear_attrs; 1552 attrcount += ret; 1553 } 1554 1555 if (iio_dev_opaque->event_interface) 1556 clk = &dev_attr_current_timestamp_clock.attr; 1557 1558 if (indio_dev->name) 1559 attrcount++; 1560 if (indio_dev->label) 1561 attrcount++; 1562 if (clk) 1563 attrcount++; 1564 1565 iio_dev_opaque->chan_attr_group.attrs = 1566 kcalloc(attrcount + 1, 1567 sizeof(iio_dev_opaque->chan_attr_group.attrs[0]), 1568 GFP_KERNEL); 1569 if (iio_dev_opaque->chan_attr_group.attrs == NULL) { 1570 ret = -ENOMEM; 1571 goto error_clear_attrs; 1572 } 1573 /* Copy across original attributes */ 1574 if (indio_dev->info->attrs) { 1575 memcpy(iio_dev_opaque->chan_attr_group.attrs, 1576 indio_dev->info->attrs->attrs, 1577 sizeof(iio_dev_opaque->chan_attr_group.attrs[0]) 1578 *attrcount_orig); 1579 iio_dev_opaque->chan_attr_group.is_visible = 1580 indio_dev->info->attrs->is_visible; 1581 } 1582 attrn = attrcount_orig; 1583 /* Add all elements from the list. */ 1584 list_for_each_entry(p, &iio_dev_opaque->channel_attr_list, l) 1585 iio_dev_opaque->chan_attr_group.attrs[attrn++] = &p->dev_attr.attr; 1586 if (indio_dev->name) 1587 iio_dev_opaque->chan_attr_group.attrs[attrn++] = &dev_attr_name.attr; 1588 if (indio_dev->label) 1589 iio_dev_opaque->chan_attr_group.attrs[attrn++] = &dev_attr_label.attr; 1590 if (clk) 1591 iio_dev_opaque->chan_attr_group.attrs[attrn++] = clk; 1592 1593 ret = iio_device_register_sysfs_group(indio_dev, 1594 &iio_dev_opaque->chan_attr_group); 1595 if (ret) 1596 goto error_clear_attrs; 1597 1598 return 0; 1599 1600 error_clear_attrs: 1601 iio_free_chan_devattr_list(&iio_dev_opaque->channel_attr_list); 1602 1603 return ret; 1604 } 1605 1606 static void iio_device_unregister_sysfs(struct iio_dev *indio_dev) 1607 { 1608 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 1609 1610 iio_free_chan_devattr_list(&iio_dev_opaque->channel_attr_list); 1611 kfree(iio_dev_opaque->chan_attr_group.attrs); 1612 iio_dev_opaque->chan_attr_group.attrs = NULL; 1613 kfree(iio_dev_opaque->groups); 1614 iio_dev_opaque->groups = NULL; 1615 } 1616 1617 static void iio_dev_release(struct device *device) 1618 { 1619 struct iio_dev *indio_dev = dev_to_iio_dev(device); 1620 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 1621 1622 if (indio_dev->modes & INDIO_ALL_TRIGGERED_MODES) 1623 iio_device_unregister_trigger_consumer(indio_dev); 1624 iio_device_unregister_eventset(indio_dev); 1625 iio_device_unregister_sysfs(indio_dev); 1626 1627 iio_device_detach_buffers(indio_dev); 1628 1629 lockdep_unregister_key(&iio_dev_opaque->mlock_key); 1630 1631 ida_free(&iio_ida, iio_dev_opaque->id); 1632 kfree(iio_dev_opaque); 1633 } 1634 1635 const struct device_type iio_device_type = { 1636 .name = "iio_device", 1637 .release = iio_dev_release, 1638 }; 1639 1640 /** 1641 * iio_device_alloc() - allocate an iio_dev from a driver 1642 * @parent: Parent device. 1643 * @sizeof_priv: Space to allocate for private structure. 1644 **/ 1645 struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv) 1646 { 1647 struct iio_dev_opaque *iio_dev_opaque; 1648 struct iio_dev *indio_dev; 1649 size_t alloc_size; 1650 1651 alloc_size = sizeof(struct iio_dev_opaque); 1652 if (sizeof_priv) { 1653 alloc_size = ALIGN(alloc_size, IIO_DMA_MINALIGN); 1654 alloc_size += sizeof_priv; 1655 } 1656 1657 iio_dev_opaque = kzalloc(alloc_size, GFP_KERNEL); 1658 if (!iio_dev_opaque) 1659 return NULL; 1660 1661 indio_dev = &iio_dev_opaque->indio_dev; 1662 indio_dev->priv = (char *)iio_dev_opaque + 1663 ALIGN(sizeof(struct iio_dev_opaque), IIO_DMA_MINALIGN); 1664 1665 indio_dev->dev.parent = parent; 1666 indio_dev->dev.type = &iio_device_type; 1667 indio_dev->dev.bus = &iio_bus_type; 1668 device_initialize(&indio_dev->dev); 1669 mutex_init(&indio_dev->mlock); 1670 mutex_init(&iio_dev_opaque->info_exist_lock); 1671 INIT_LIST_HEAD(&iio_dev_opaque->channel_attr_list); 1672 1673 iio_dev_opaque->id = ida_alloc(&iio_ida, GFP_KERNEL); 1674 if (iio_dev_opaque->id < 0) { 1675 /* cannot use a dev_err as the name isn't available */ 1676 pr_err("failed to get device id\n"); 1677 kfree(iio_dev_opaque); 1678 return NULL; 1679 } 1680 1681 if (dev_set_name(&indio_dev->dev, "iio:device%d", iio_dev_opaque->id)) { 1682 ida_free(&iio_ida, iio_dev_opaque->id); 1683 kfree(iio_dev_opaque); 1684 return NULL; 1685 } 1686 1687 INIT_LIST_HEAD(&iio_dev_opaque->buffer_list); 1688 INIT_LIST_HEAD(&iio_dev_opaque->ioctl_handlers); 1689 1690 lockdep_register_key(&iio_dev_opaque->mlock_key); 1691 lockdep_set_class(&indio_dev->mlock, &iio_dev_opaque->mlock_key); 1692 1693 return indio_dev; 1694 } 1695 EXPORT_SYMBOL(iio_device_alloc); 1696 1697 /** 1698 * iio_device_free() - free an iio_dev from a driver 1699 * @dev: the iio_dev associated with the device 1700 **/ 1701 void iio_device_free(struct iio_dev *dev) 1702 { 1703 if (dev) 1704 put_device(&dev->dev); 1705 } 1706 EXPORT_SYMBOL(iio_device_free); 1707 1708 static void devm_iio_device_release(void *iio_dev) 1709 { 1710 iio_device_free(iio_dev); 1711 } 1712 1713 /** 1714 * devm_iio_device_alloc - Resource-managed iio_device_alloc() 1715 * @parent: Device to allocate iio_dev for, and parent for this IIO device 1716 * @sizeof_priv: Space to allocate for private structure. 1717 * 1718 * Managed iio_device_alloc. iio_dev allocated with this function is 1719 * automatically freed on driver detach. 1720 * 1721 * RETURNS: 1722 * Pointer to allocated iio_dev on success, NULL on failure. 1723 */ 1724 struct iio_dev *devm_iio_device_alloc(struct device *parent, int sizeof_priv) 1725 { 1726 struct iio_dev *iio_dev; 1727 int ret; 1728 1729 iio_dev = iio_device_alloc(parent, sizeof_priv); 1730 if (!iio_dev) 1731 return NULL; 1732 1733 ret = devm_add_action_or_reset(parent, devm_iio_device_release, 1734 iio_dev); 1735 if (ret) 1736 return NULL; 1737 1738 return iio_dev; 1739 } 1740 EXPORT_SYMBOL_GPL(devm_iio_device_alloc); 1741 1742 /** 1743 * iio_chrdev_open() - chrdev file open for buffer access and ioctls 1744 * @inode: Inode structure for identifying the device in the file system 1745 * @filp: File structure for iio device used to keep and later access 1746 * private data 1747 * 1748 * Return: 0 on success or -EBUSY if the device is already opened 1749 **/ 1750 static int iio_chrdev_open(struct inode *inode, struct file *filp) 1751 { 1752 struct iio_dev_opaque *iio_dev_opaque = 1753 container_of(inode->i_cdev, struct iio_dev_opaque, chrdev); 1754 struct iio_dev *indio_dev = &iio_dev_opaque->indio_dev; 1755 struct iio_dev_buffer_pair *ib; 1756 1757 if (test_and_set_bit(IIO_BUSY_BIT_POS, &iio_dev_opaque->flags)) 1758 return -EBUSY; 1759 1760 iio_device_get(indio_dev); 1761 1762 ib = kmalloc(sizeof(*ib), GFP_KERNEL); 1763 if (!ib) { 1764 iio_device_put(indio_dev); 1765 clear_bit(IIO_BUSY_BIT_POS, &iio_dev_opaque->flags); 1766 return -ENOMEM; 1767 } 1768 1769 ib->indio_dev = indio_dev; 1770 ib->buffer = indio_dev->buffer; 1771 1772 filp->private_data = ib; 1773 1774 return 0; 1775 } 1776 1777 /** 1778 * iio_chrdev_release() - chrdev file close buffer access and ioctls 1779 * @inode: Inode structure pointer for the char device 1780 * @filp: File structure pointer for the char device 1781 * 1782 * Return: 0 for successful release 1783 */ 1784 static int iio_chrdev_release(struct inode *inode, struct file *filp) 1785 { 1786 struct iio_dev_buffer_pair *ib = filp->private_data; 1787 struct iio_dev_opaque *iio_dev_opaque = 1788 container_of(inode->i_cdev, struct iio_dev_opaque, chrdev); 1789 struct iio_dev *indio_dev = &iio_dev_opaque->indio_dev; 1790 1791 kfree(ib); 1792 clear_bit(IIO_BUSY_BIT_POS, &iio_dev_opaque->flags); 1793 iio_device_put(indio_dev); 1794 1795 return 0; 1796 } 1797 1798 void iio_device_ioctl_handler_register(struct iio_dev *indio_dev, 1799 struct iio_ioctl_handler *h) 1800 { 1801 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 1802 1803 list_add_tail(&h->entry, &iio_dev_opaque->ioctl_handlers); 1804 } 1805 1806 void iio_device_ioctl_handler_unregister(struct iio_ioctl_handler *h) 1807 { 1808 list_del(&h->entry); 1809 } 1810 1811 static long iio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) 1812 { 1813 struct iio_dev_buffer_pair *ib = filp->private_data; 1814 struct iio_dev *indio_dev = ib->indio_dev; 1815 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 1816 struct iio_ioctl_handler *h; 1817 int ret = -ENODEV; 1818 1819 mutex_lock(&iio_dev_opaque->info_exist_lock); 1820 1821 /** 1822 * The NULL check here is required to prevent crashing when a device 1823 * is being removed while userspace would still have open file handles 1824 * to try to access this device. 1825 */ 1826 if (!indio_dev->info) 1827 goto out_unlock; 1828 1829 list_for_each_entry(h, &iio_dev_opaque->ioctl_handlers, entry) { 1830 ret = h->ioctl(indio_dev, filp, cmd, arg); 1831 if (ret != IIO_IOCTL_UNHANDLED) 1832 break; 1833 } 1834 1835 if (ret == IIO_IOCTL_UNHANDLED) 1836 ret = -ENODEV; 1837 1838 out_unlock: 1839 mutex_unlock(&iio_dev_opaque->info_exist_lock); 1840 1841 return ret; 1842 } 1843 1844 static const struct file_operations iio_buffer_fileops = { 1845 .owner = THIS_MODULE, 1846 .llseek = noop_llseek, 1847 .read = iio_buffer_read_outer_addr, 1848 .write = iio_buffer_write_outer_addr, 1849 .poll = iio_buffer_poll_addr, 1850 .unlocked_ioctl = iio_ioctl, 1851 .compat_ioctl = compat_ptr_ioctl, 1852 .open = iio_chrdev_open, 1853 .release = iio_chrdev_release, 1854 }; 1855 1856 static const struct file_operations iio_event_fileops = { 1857 .owner = THIS_MODULE, 1858 .llseek = noop_llseek, 1859 .unlocked_ioctl = iio_ioctl, 1860 .compat_ioctl = compat_ptr_ioctl, 1861 .open = iio_chrdev_open, 1862 .release = iio_chrdev_release, 1863 }; 1864 1865 static int iio_check_unique_scan_index(struct iio_dev *indio_dev) 1866 { 1867 int i, j; 1868 const struct iio_chan_spec *channels = indio_dev->channels; 1869 1870 if (!(indio_dev->modes & INDIO_ALL_BUFFER_MODES)) 1871 return 0; 1872 1873 for (i = 0; i < indio_dev->num_channels - 1; i++) { 1874 if (channels[i].scan_index < 0) 1875 continue; 1876 for (j = i + 1; j < indio_dev->num_channels; j++) 1877 if (channels[i].scan_index == channels[j].scan_index) { 1878 dev_err(&indio_dev->dev, 1879 "Duplicate scan index %d\n", 1880 channels[i].scan_index); 1881 return -EINVAL; 1882 } 1883 } 1884 1885 return 0; 1886 } 1887 1888 static int iio_check_extended_name(const struct iio_dev *indio_dev) 1889 { 1890 unsigned int i; 1891 1892 if (!indio_dev->info->read_label) 1893 return 0; 1894 1895 for (i = 0; i < indio_dev->num_channels; i++) { 1896 if (indio_dev->channels[i].extend_name) { 1897 dev_err(&indio_dev->dev, 1898 "Cannot use labels and extend_name at the same time\n"); 1899 return -EINVAL; 1900 } 1901 } 1902 1903 return 0; 1904 } 1905 1906 static const struct iio_buffer_setup_ops noop_ring_setup_ops; 1907 1908 int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod) 1909 { 1910 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 1911 struct fwnode_handle *fwnode; 1912 int ret; 1913 1914 if (!indio_dev->info) 1915 return -EINVAL; 1916 1917 iio_dev_opaque->driver_module = this_mod; 1918 1919 /* If the calling driver did not initialize firmware node, do it here */ 1920 if (dev_fwnode(&indio_dev->dev)) 1921 fwnode = dev_fwnode(&indio_dev->dev); 1922 else 1923 fwnode = dev_fwnode(indio_dev->dev.parent); 1924 device_set_node(&indio_dev->dev, fwnode); 1925 1926 fwnode_property_read_string(fwnode, "label", &indio_dev->label); 1927 1928 ret = iio_check_unique_scan_index(indio_dev); 1929 if (ret < 0) 1930 return ret; 1931 1932 ret = iio_check_extended_name(indio_dev); 1933 if (ret < 0) 1934 return ret; 1935 1936 iio_device_register_debugfs(indio_dev); 1937 1938 ret = iio_buffers_alloc_sysfs_and_mask(indio_dev); 1939 if (ret) { 1940 dev_err(indio_dev->dev.parent, 1941 "Failed to create buffer sysfs interfaces\n"); 1942 goto error_unreg_debugfs; 1943 } 1944 1945 ret = iio_device_register_sysfs(indio_dev); 1946 if (ret) { 1947 dev_err(indio_dev->dev.parent, 1948 "Failed to register sysfs interfaces\n"); 1949 goto error_buffer_free_sysfs; 1950 } 1951 ret = iio_device_register_eventset(indio_dev); 1952 if (ret) { 1953 dev_err(indio_dev->dev.parent, 1954 "Failed to register event set\n"); 1955 goto error_free_sysfs; 1956 } 1957 if (indio_dev->modes & INDIO_ALL_TRIGGERED_MODES) 1958 iio_device_register_trigger_consumer(indio_dev); 1959 1960 if ((indio_dev->modes & INDIO_ALL_BUFFER_MODES) && 1961 indio_dev->setup_ops == NULL) 1962 indio_dev->setup_ops = &noop_ring_setup_ops; 1963 1964 if (iio_dev_opaque->attached_buffers_cnt) 1965 cdev_init(&iio_dev_opaque->chrdev, &iio_buffer_fileops); 1966 else if (iio_dev_opaque->event_interface) 1967 cdev_init(&iio_dev_opaque->chrdev, &iio_event_fileops); 1968 1969 if (iio_dev_opaque->attached_buffers_cnt || iio_dev_opaque->event_interface) { 1970 indio_dev->dev.devt = MKDEV(MAJOR(iio_devt), iio_dev_opaque->id); 1971 iio_dev_opaque->chrdev.owner = this_mod; 1972 } 1973 1974 /* assign device groups now; they should be all registered now */ 1975 indio_dev->dev.groups = iio_dev_opaque->groups; 1976 1977 ret = cdev_device_add(&iio_dev_opaque->chrdev, &indio_dev->dev); 1978 if (ret < 0) 1979 goto error_unreg_eventset; 1980 1981 return 0; 1982 1983 error_unreg_eventset: 1984 iio_device_unregister_eventset(indio_dev); 1985 error_free_sysfs: 1986 iio_device_unregister_sysfs(indio_dev); 1987 error_buffer_free_sysfs: 1988 iio_buffers_free_sysfs_and_mask(indio_dev); 1989 error_unreg_debugfs: 1990 iio_device_unregister_debugfs(indio_dev); 1991 return ret; 1992 } 1993 EXPORT_SYMBOL(__iio_device_register); 1994 1995 /** 1996 * iio_device_unregister() - unregister a device from the IIO subsystem 1997 * @indio_dev: Device structure representing the device. 1998 **/ 1999 void iio_device_unregister(struct iio_dev *indio_dev) 2000 { 2001 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 2002 2003 cdev_device_del(&iio_dev_opaque->chrdev, &indio_dev->dev); 2004 2005 mutex_lock(&iio_dev_opaque->info_exist_lock); 2006 2007 iio_device_unregister_debugfs(indio_dev); 2008 2009 iio_disable_all_buffers(indio_dev); 2010 2011 indio_dev->info = NULL; 2012 2013 iio_device_wakeup_eventset(indio_dev); 2014 iio_buffer_wakeup_poll(indio_dev); 2015 2016 mutex_unlock(&iio_dev_opaque->info_exist_lock); 2017 2018 iio_buffers_free_sysfs_and_mask(indio_dev); 2019 } 2020 EXPORT_SYMBOL(iio_device_unregister); 2021 2022 static void devm_iio_device_unreg(void *indio_dev) 2023 { 2024 iio_device_unregister(indio_dev); 2025 } 2026 2027 int __devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev, 2028 struct module *this_mod) 2029 { 2030 int ret; 2031 2032 ret = __iio_device_register(indio_dev, this_mod); 2033 if (ret) 2034 return ret; 2035 2036 return devm_add_action_or_reset(dev, devm_iio_device_unreg, indio_dev); 2037 } 2038 EXPORT_SYMBOL_GPL(__devm_iio_device_register); 2039 2040 /** 2041 * iio_device_claim_direct_mode - Keep device in direct mode 2042 * @indio_dev: the iio_dev associated with the device 2043 * 2044 * If the device is in direct mode it is guaranteed to stay 2045 * that way until iio_device_release_direct_mode() is called. 2046 * 2047 * Use with iio_device_release_direct_mode() 2048 * 2049 * Returns: 0 on success, -EBUSY on failure 2050 */ 2051 int iio_device_claim_direct_mode(struct iio_dev *indio_dev) 2052 { 2053 mutex_lock(&indio_dev->mlock); 2054 2055 if (iio_buffer_enabled(indio_dev)) { 2056 mutex_unlock(&indio_dev->mlock); 2057 return -EBUSY; 2058 } 2059 return 0; 2060 } 2061 EXPORT_SYMBOL_GPL(iio_device_claim_direct_mode); 2062 2063 /** 2064 * iio_device_release_direct_mode - releases claim on direct mode 2065 * @indio_dev: the iio_dev associated with the device 2066 * 2067 * Release the claim. Device is no longer guaranteed to stay 2068 * in direct mode. 2069 * 2070 * Use with iio_device_claim_direct_mode() 2071 */ 2072 void iio_device_release_direct_mode(struct iio_dev *indio_dev) 2073 { 2074 mutex_unlock(&indio_dev->mlock); 2075 } 2076 EXPORT_SYMBOL_GPL(iio_device_release_direct_mode); 2077 2078 /** 2079 * iio_device_get_current_mode() - helper function providing read-only access to 2080 * the opaque @currentmode variable 2081 * @indio_dev: IIO device structure for device 2082 */ 2083 int iio_device_get_current_mode(struct iio_dev *indio_dev) 2084 { 2085 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 2086 2087 return iio_dev_opaque->currentmode; 2088 } 2089 EXPORT_SYMBOL_GPL(iio_device_get_current_mode); 2090 2091 subsys_initcall(iio_init); 2092 module_exit(iio_exit); 2093 2094 MODULE_AUTHOR("Jonathan Cameron <jic23@kernel.org>"); 2095 MODULE_DESCRIPTION("Industrial I/O core"); 2096 MODULE_LICENSE("GPL"); 2097