// SPDX-License-Identifier: GPL-2.0-only
/*
 * The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * Based on elements of hwmon and input subsystems.
 */

#define pr_fmt(fmt) "iio-core: " fmt

#include <linux/anon_inodes.h>
#include <linux/cdev.h>
#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/kdev_t.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/poll.h>
#include <linux/property.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/wait.h>

#include <linux/iio/buffer.h>
#include <linux/iio/buffer_impl.h>
#include <linux/iio/events.h>
#include <linux/iio/iio-opaque.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>

#include "iio_core.h"
#include "iio_core_trigger.h"

/* IDA to assign each registered device a unique id */
static DEFINE_IDA(iio_ida);

/* Base of the char device region allocated in iio_init(); 0 until then */
static dev_t iio_devt;

/* Size of the char device region reserved for IIO devices */
#define IIO_DEV_MAX 256
struct bus_type iio_bus_type = {
	.name = "iio",
};
EXPORT_SYMBOL(iio_bus_type);

/* Root "iio" debugfs directory, created in iio_init() */
static struct dentry *iio_debugfs_dentry;

/* Indexed by struct iio_chan_spec.output: 0 = input, 1 = output */
static const char * const iio_direction[] = {
	[0] = "in",
	[1] = "out",
};

/*
 * Sysfs name fragment for each channel type; indexed by chan->type
 * when building attribute names in __iio_device_attr_init().
 */
static const char * const iio_chan_type_name_spec[] = {
	[IIO_VOLTAGE] = "voltage",
	[IIO_CURRENT] = "current",
	[IIO_POWER] = "power",
	[IIO_ACCEL] = "accel",
	[IIO_ANGL_VEL] = "anglvel",
	[IIO_MAGN] = "magn",
	[IIO_LIGHT] = "illuminance",
	[IIO_INTENSITY] = "intensity",
	[IIO_PROXIMITY] = "proximity",
	[IIO_TEMP] = "temp",
	[IIO_INCLI] = "incli",
	[IIO_ROT] = "rot",
	[IIO_ANGL] = "angl",
	[IIO_TIMESTAMP] = "timestamp",
	[IIO_CAPACITANCE] = "capacitance",
	[IIO_ALTVOLTAGE] = "altvoltage",
	[IIO_CCT] = "cct",
	[IIO_PRESSURE] = "pressure",
	[IIO_HUMIDITYRELATIVE] = "humidityrelative",
	[IIO_ACTIVITY] = "activity",
	[IIO_STEPS] = "steps",
	[IIO_ENERGY] = "energy",
	[IIO_DISTANCE] = "distance",
	[IIO_VELOCITY] = "velocity",
	[IIO_CONCENTRATION] = "concentration",
	[IIO_RESISTANCE] = "resistance",
	[IIO_PH] = "ph",
	[IIO_UVINDEX] = "uvindex",
	[IIO_ELECTRICALCONDUCTIVITY] = "electricalconductivity",
	[IIO_COUNT] = "count",
	[IIO_INDEX] = "index",
	[IIO_GRAVITY] = "gravity",
	[IIO_POSITIONRELATIVE] = "positionrelative",
	[IIO_PHASE] = "phase",
	[IIO_MASSCONCENTRATION] = "massconcentration",
	[IIO_DELTA_ANGL] = "deltaangl",
	[IIO_DELTA_VELOCITY] = "deltavelocity",
	[IIO_COLORTEMP] = "colortemp",
	[IIO_CHROMATICITY] = "chromaticity",
};

/*
 * Sysfs name fragment for each channel modifier; indexed by chan->channel2
 * when chan->modified is set (see __iio_device_attr_init()).
 */
static const char * const iio_modifier_names[] = {
	[IIO_MOD_X] = "x",
	[IIO_MOD_Y] = "y",
	[IIO_MOD_Z] = "z",
	[IIO_MOD_X_AND_Y] = "x&y",
	[IIO_MOD_X_AND_Z] = "x&z",
	[IIO_MOD_Y_AND_Z] = "y&z",
	[IIO_MOD_X_AND_Y_AND_Z] = "x&y&z",
	[IIO_MOD_X_OR_Y] = "x|y",
	[IIO_MOD_X_OR_Z] = "x|z",
	[IIO_MOD_Y_OR_Z] = "y|z",
	[IIO_MOD_X_OR_Y_OR_Z] = "x|y|z",
	[IIO_MOD_ROOT_SUM_SQUARED_X_Y] = "sqrt(x^2+y^2)",
	[IIO_MOD_SUM_SQUARED_X_Y_Z] = "x^2+y^2+z^2",
	[IIO_MOD_LIGHT_BOTH] = "both",
	[IIO_MOD_LIGHT_IR] = "ir",
	[IIO_MOD_LIGHT_CLEAR] = "clear",
	[IIO_MOD_LIGHT_RED] = "red",
	[IIO_MOD_LIGHT_GREEN] = "green",
	[IIO_MOD_LIGHT_BLUE] = "blue",
	[IIO_MOD_LIGHT_UV] = "uv",
	[IIO_MOD_LIGHT_UVA] = "uva",
	[IIO_MOD_LIGHT_UVB] = "uvb",
	[IIO_MOD_LIGHT_DUV] = "duv",
	[IIO_MOD_QUATERNION] = "quaternion",
	[IIO_MOD_TEMP_AMBIENT] = "ambient",
	[IIO_MOD_TEMP_OBJECT] = "object",
	[IIO_MOD_NORTH_MAGN] = "from_north_magnetic",
	[IIO_MOD_NORTH_TRUE] = "from_north_true",
	[IIO_MOD_NORTH_MAGN_TILT_COMP] = "from_north_magnetic_tilt_comp",
	[IIO_MOD_NORTH_TRUE_TILT_COMP] = "from_north_true_tilt_comp",
	[IIO_MOD_RUNNING] = "running",
	[IIO_MOD_JOGGING] = "jogging",
	[IIO_MOD_WALKING] = "walking",
	[IIO_MOD_STILL] = "still",
	[IIO_MOD_ROOT_SUM_SQUARED_X_Y_Z] = "sqrt(x^2+y^2+z^2)",
	[IIO_MOD_I] = "i",
	[IIO_MOD_Q] = "q",
	[IIO_MOD_CO2] = "co2",
	[IIO_MOD_VOC] = "voc",
	[IIO_MOD_PM1] = "pm1",
	[IIO_MOD_PM2P5] = "pm2p5",
	[IIO_MOD_PM4] = "pm4",
	[IIO_MOD_PM10] = "pm10",
	[IIO_MOD_ETHANOL] = "ethanol",
	[IIO_MOD_H2] = "h2",
	[IIO_MOD_O2] = "o2",
	[IIO_MOD_LINEAR_X] = "linear_x",
	[IIO_MOD_LINEAR_Y] = "linear_y",
	[IIO_MOD_LINEAR_Z] = "linear_z",
	[IIO_MOD_PITCH] = "pitch",
	[IIO_MOD_YAW] = "yaw",
	[IIO_MOD_ROLL] = "roll",
};

/* relies on pairs of these shared then separate */
static const char * const iio_chan_info_postfix[] = {
	[IIO_CHAN_INFO_RAW] = "raw",
	[IIO_CHAN_INFO_PROCESSED] = "input",
	[IIO_CHAN_INFO_SCALE] = "scale",
	[IIO_CHAN_INFO_OFFSET] = "offset",
	[IIO_CHAN_INFO_CALIBSCALE] = "calibscale",
	[IIO_CHAN_INFO_CALIBBIAS] = "calibbias",
	[IIO_CHAN_INFO_PEAK] = "peak_raw",
	[IIO_CHAN_INFO_PEAK_SCALE] = "peak_scale",
	[IIO_CHAN_INFO_QUADRATURE_CORRECTION_RAW] = "quadrature_correction_raw",
	[IIO_CHAN_INFO_AVERAGE_RAW] = "mean_raw",
	[IIO_CHAN_INFO_LOW_PASS_FILTER_3DB_FREQUENCY]
	= "filter_low_pass_3db_frequency",
	[IIO_CHAN_INFO_HIGH_PASS_FILTER_3DB_FREQUENCY]
	= "filter_high_pass_3db_frequency",
	[IIO_CHAN_INFO_SAMP_FREQ] = "sampling_frequency",
	[IIO_CHAN_INFO_FREQUENCY] = "frequency",
	[IIO_CHAN_INFO_PHASE] = "phase",
	[IIO_CHAN_INFO_HARDWAREGAIN] = "hardwaregain",
	[IIO_CHAN_INFO_HYSTERESIS] = "hysteresis",
	[IIO_CHAN_INFO_HYSTERESIS_RELATIVE] = "hysteresis_relative",
	[IIO_CHAN_INFO_INT_TIME] = "integration_time",
	[IIO_CHAN_INFO_ENABLE] = "en",
	[IIO_CHAN_INFO_CALIBHEIGHT] = "calibheight",
	[IIO_CHAN_INFO_CALIBWEIGHT] = "calibweight",
	[IIO_CHAN_INFO_DEBOUNCE_COUNT] = "debounce_count",
	[IIO_CHAN_INFO_DEBOUNCE_TIME] = "debounce_time",
	[IIO_CHAN_INFO_CALIBEMISSIVITY] = "calibemissivity",
	[IIO_CHAN_INFO_OVERSAMPLING_RATIO] =
					     "oversampling_ratio",
	[IIO_CHAN_INFO_THERMOCOUPLE_TYPE] = "thermocouple_type",
	[IIO_CHAN_INFO_CALIBAMBIENT] = "calibambient",
	[IIO_CHAN_INFO_ZEROPOINT] = "zeropoint",
	[IIO_CHAN_INFO_TROUGH] = "trough_raw",
};

/**
 * iio_device_id() - query the unique ID for the device
 * @indio_dev: Device structure whose ID is being queried
 *
 * The IIO device ID is a unique index used for example for the naming
 * of the character device /dev/iio\:device[ID].
 *
 * Returns: Unique ID for the device.
 */
int iio_device_id(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	return iio_dev_opaque->id;
}
EXPORT_SYMBOL_GPL(iio_device_id);

/**
 * iio_buffer_enabled() - helper function to test if the buffer is enabled
 * @indio_dev: IIO device structure for device
 *
 * Returns: True, if the buffer is enabled.
 */
bool iio_buffer_enabled(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	/* Any of the three buffered modes counts as "enabled" */
	return iio_dev_opaque->currentmode &
	       (INDIO_BUFFER_HARDWARE | INDIO_BUFFER_SOFTWARE |
		INDIO_BUFFER_TRIGGERED);
}
EXPORT_SYMBOL_GPL(iio_buffer_enabled);

#if defined(CONFIG_DEBUG_FS)
/*
 * There's also a CONFIG_DEBUG_FS guard in include/linux/iio/iio.h for
 * iio_get_debugfs_dentry() to make it inline if CONFIG_DEBUG_FS is undefined
 */
struct dentry *iio_get_debugfs_dentry(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	return iio_dev_opaque->debugfs_dentry;
}
EXPORT_SYMBOL_GPL(iio_get_debugfs_dentry);
#endif

/**
 * iio_find_channel_from_si() - get channel from its scan index
 * @indio_dev: device
 * @si: scan index to match
 *
 * Returns:
 * Constant pointer to iio_chan_spec, if scan index matches, NULL on failure.
243 */ 244 const struct iio_chan_spec 245 *iio_find_channel_from_si(struct iio_dev *indio_dev, int si) 246 { 247 int i; 248 249 for (i = 0; i < indio_dev->num_channels; i++) 250 if (indio_dev->channels[i].scan_index == si) 251 return &indio_dev->channels[i]; 252 return NULL; 253 } 254 255 /* This turns up an awful lot */ 256 ssize_t iio_read_const_attr(struct device *dev, 257 struct device_attribute *attr, 258 char *buf) 259 { 260 return sysfs_emit(buf, "%s\n", to_iio_const_attr(attr)->string); 261 } 262 EXPORT_SYMBOL(iio_read_const_attr); 263 264 /** 265 * iio_device_set_clock() - Set current timestamping clock for the device 266 * @indio_dev: IIO device structure containing the device 267 * @clock_id: timestamping clock POSIX identifier to set. 268 * 269 * Returns: 0 on success, or a negative error code. 270 */ 271 int iio_device_set_clock(struct iio_dev *indio_dev, clockid_t clock_id) 272 { 273 int ret; 274 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 275 const struct iio_event_interface *ev_int = iio_dev_opaque->event_interface; 276 277 ret = mutex_lock_interruptible(&iio_dev_opaque->mlock); 278 if (ret) 279 return ret; 280 if ((ev_int && iio_event_enabled(ev_int)) || 281 iio_buffer_enabled(indio_dev)) { 282 mutex_unlock(&iio_dev_opaque->mlock); 283 return -EBUSY; 284 } 285 iio_dev_opaque->clock_id = clock_id; 286 mutex_unlock(&iio_dev_opaque->mlock); 287 288 return 0; 289 } 290 EXPORT_SYMBOL(iio_device_set_clock); 291 292 /** 293 * iio_device_get_clock() - Retrieve current timestamping clock for the device 294 * @indio_dev: IIO device structure containing the device 295 * 296 * Returns: Clock ID of the current timestamping clock for the device. 
 */
clockid_t iio_device_get_clock(const struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	return iio_dev_opaque->clock_id;
}
EXPORT_SYMBOL(iio_device_get_clock);

/**
 * iio_get_time_ns() - utility function to get a time stamp for events etc
 * @indio_dev: device
 *
 * Returns: Timestamp of the event in nanoseconds.
 */
s64 iio_get_time_ns(const struct iio_dev *indio_dev)
{
	struct timespec64 tp;

	switch (iio_device_get_clock(indio_dev)) {
	case CLOCK_REALTIME:
		return ktime_get_real_ns();
	case CLOCK_MONOTONIC:
		return ktime_get_ns();
	case CLOCK_MONOTONIC_RAW:
		return ktime_get_raw_ns();
	case CLOCK_REALTIME_COARSE:
		return ktime_to_ns(ktime_get_coarse_real());
	case CLOCK_MONOTONIC_COARSE:
		ktime_get_coarse_ts64(&tp);
		return timespec64_to_ns(&tp);
	case CLOCK_BOOTTIME:
		return ktime_get_boottime_ns();
	case CLOCK_TAI:
		return ktime_get_clocktai_ns();
	default:
		/*
		 * NOTE(review): presumably an unsupported clock id is rejected
		 * before it can be stored — confirm against the sysfs
		 * current_timestamp_clock store path; this code cannot tell.
		 */
		BUG();
	}
}
EXPORT_SYMBOL(iio_get_time_ns);

static int __init iio_init(void)
{
	int ret;

	/* Register sysfs bus */
	ret = bus_register(&iio_bus_type);
	if (ret < 0) {
		pr_err("could not register bus type\n");
		goto error_nothing;
	}

	ret = alloc_chrdev_region(&iio_devt, 0, IIO_DEV_MAX, "iio");
	if (ret < 0) {
		pr_err("failed to allocate char dev region\n");
		goto error_unregister_bus_type;
	}

	/* debugfs_create_dir() failure is not fatal; users check the dentry */
	iio_debugfs_dentry = debugfs_create_dir("iio", NULL);

	return 0;

error_unregister_bus_type:
	bus_unregister(&iio_bus_type);
error_nothing:
	return ret;
}

static void __exit iio_exit(void)
{
	/* iio_devt is only non-zero if alloc_chrdev_region() succeeded */
	if (iio_devt)
		unregister_chrdev_region(iio_devt, IIO_DEV_MAX);
	bus_unregister(&iio_bus_type);
	debugfs_remove(iio_debugfs_dentry);
}

#if defined(CONFIG_DEBUG_FS)
static ssize_t iio_debugfs_read_reg(struct file *file,
char __user *userbuf, 375 size_t count, loff_t *ppos) 376 { 377 struct iio_dev *indio_dev = file->private_data; 378 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 379 unsigned int val = 0; 380 int ret; 381 382 if (*ppos > 0) 383 return simple_read_from_buffer(userbuf, count, ppos, 384 iio_dev_opaque->read_buf, 385 iio_dev_opaque->read_buf_len); 386 387 ret = indio_dev->info->debugfs_reg_access(indio_dev, 388 iio_dev_opaque->cached_reg_addr, 389 0, &val); 390 if (ret) { 391 dev_err(indio_dev->dev.parent, "%s: read failed\n", __func__); 392 return ret; 393 } 394 395 iio_dev_opaque->read_buf_len = snprintf(iio_dev_opaque->read_buf, 396 sizeof(iio_dev_opaque->read_buf), 397 "0x%X\n", val); 398 399 return simple_read_from_buffer(userbuf, count, ppos, 400 iio_dev_opaque->read_buf, 401 iio_dev_opaque->read_buf_len); 402 } 403 404 static ssize_t iio_debugfs_write_reg(struct file *file, 405 const char __user *userbuf, size_t count, loff_t *ppos) 406 { 407 struct iio_dev *indio_dev = file->private_data; 408 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 409 unsigned int reg, val; 410 char buf[80]; 411 int ret; 412 413 count = min(count, sizeof(buf) - 1); 414 if (copy_from_user(buf, userbuf, count)) 415 return -EFAULT; 416 417 buf[count] = 0; 418 419 ret = sscanf(buf, "%i %i", ®, &val); 420 421 switch (ret) { 422 case 1: 423 iio_dev_opaque->cached_reg_addr = reg; 424 break; 425 case 2: 426 iio_dev_opaque->cached_reg_addr = reg; 427 ret = indio_dev->info->debugfs_reg_access(indio_dev, reg, 428 val, NULL); 429 if (ret) { 430 dev_err(indio_dev->dev.parent, "%s: write failed\n", 431 __func__); 432 return ret; 433 } 434 break; 435 default: 436 return -EINVAL; 437 } 438 439 return count; 440 } 441 442 static const struct file_operations iio_debugfs_reg_fops = { 443 .open = simple_open, 444 .read = iio_debugfs_read_reg, 445 .write = iio_debugfs_write_reg, 446 }; 447 448 static void iio_device_unregister_debugfs(struct iio_dev *indio_dev) 
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	debugfs_remove_recursive(iio_dev_opaque->debugfs_dentry);
}

static void iio_device_register_debugfs(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque;

	/* debugfs only makes sense for devices exposing register access */
	if (indio_dev->info->debugfs_reg_access == NULL)
		return;

	/* Root "iio" directory was not created; nothing to hang entries off */
	if (!iio_debugfs_dentry)
		return;

	iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	iio_dev_opaque->debugfs_dentry =
		debugfs_create_dir(dev_name(&indio_dev->dev),
				   iio_debugfs_dentry);

	debugfs_create_file("direct_reg_access", 0644,
			    iio_dev_opaque->debugfs_dentry, indio_dev,
			    &iio_debugfs_reg_fops);
}
#else
static void iio_device_register_debugfs(struct iio_dev *indio_dev)
{
}

static void iio_device_unregister_debugfs(struct iio_dev *indio_dev)
{
}
#endif /* CONFIG_DEBUG_FS */

/* Show hook for per-channel ext_info attributes: dispatch to the channel's
 * ext_info[] entry selected by the attribute's address. */
static ssize_t iio_read_channel_ext_info(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	const struct iio_chan_spec_ext_info *ext_info;

	ext_info = &this_attr->c->ext_info[this_attr->address];

	return ext_info->read(indio_dev, ext_info->private, this_attr->c, buf);
}

/* Store hook counterpart of iio_read_channel_ext_info() */
static ssize_t iio_write_channel_ext_info(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf, size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	const struct iio_chan_spec_ext_info *ext_info;

	ext_info = &this_attr->c->ext_info[this_attr->address];

	return ext_info->write(indio_dev, ext_info->private,
			       this_attr->c, buf, len);
}

ssize_t iio_enum_available_read(struct iio_dev *indio_dev,
	uintptr_t priv, const struct iio_chan_spec *chan, char *buf)
{
	const struct iio_enum *e = (const
struct iio_enum *)priv; 516 unsigned int i; 517 size_t len = 0; 518 519 if (!e->num_items) 520 return 0; 521 522 for (i = 0; i < e->num_items; ++i) { 523 if (!e->items[i]) 524 continue; 525 len += sysfs_emit_at(buf, len, "%s ", e->items[i]); 526 } 527 528 /* replace last space with a newline */ 529 buf[len - 1] = '\n'; 530 531 return len; 532 } 533 EXPORT_SYMBOL_GPL(iio_enum_available_read); 534 535 ssize_t iio_enum_read(struct iio_dev *indio_dev, 536 uintptr_t priv, const struct iio_chan_spec *chan, char *buf) 537 { 538 const struct iio_enum *e = (const struct iio_enum *)priv; 539 int i; 540 541 if (!e->get) 542 return -EINVAL; 543 544 i = e->get(indio_dev, chan); 545 if (i < 0) 546 return i; 547 if (i >= e->num_items || !e->items[i]) 548 return -EINVAL; 549 550 return sysfs_emit(buf, "%s\n", e->items[i]); 551 } 552 EXPORT_SYMBOL_GPL(iio_enum_read); 553 554 ssize_t iio_enum_write(struct iio_dev *indio_dev, 555 uintptr_t priv, const struct iio_chan_spec *chan, const char *buf, 556 size_t len) 557 { 558 const struct iio_enum *e = (const struct iio_enum *)priv; 559 int ret; 560 561 if (!e->set) 562 return -EINVAL; 563 564 ret = __sysfs_match_string(e->items, e->num_items, buf); 565 if (ret < 0) 566 return ret; 567 568 ret = e->set(indio_dev, chan, ret); 569 return ret ? 
		     ret : len;
}
EXPORT_SYMBOL_GPL(iio_enum_write);

/* 3x3 identity used when no "mount-matrix" property is present */
static const struct iio_mount_matrix iio_mount_idmatrix = {
	.rotation = {
		"1", "0", "0",
		"0", "1", "0",
		"0", "0", "1"
	}
};

static int iio_setup_mount_idmatrix(const struct device *dev,
				    struct iio_mount_matrix *matrix)
{
	*matrix = iio_mount_idmatrix;
	dev_info(dev, "mounting matrix not found: using identity...\n");
	return 0;
}

ssize_t iio_show_mount_matrix(struct iio_dev *indio_dev, uintptr_t priv,
			      const struct iio_chan_spec *chan, char *buf)
{
	const struct iio_mount_matrix *mtx;

	/* priv carries the driver's matrix-lookup callback */
	mtx = ((iio_get_mount_matrix_t *)priv)(indio_dev, chan);
	if (IS_ERR(mtx))
		return PTR_ERR(mtx);

	if (!mtx)
		mtx = &iio_mount_idmatrix;

	return sysfs_emit(buf, "%s, %s, %s; %s, %s, %s; %s, %s, %s\n",
			  mtx->rotation[0], mtx->rotation[1], mtx->rotation[2],
			  mtx->rotation[3], mtx->rotation[4], mtx->rotation[5],
			  mtx->rotation[6], mtx->rotation[7], mtx->rotation[8]);
}
EXPORT_SYMBOL_GPL(iio_show_mount_matrix);

/**
 * iio_read_mount_matrix() - retrieve iio device mounting matrix from
 *                           device "mount-matrix" property
 * @dev:	device the mounting matrix property is assigned to
 * @matrix:	where to store retrieved matrix
 *
 * If device is assigned no mounting matrix property, a default 3x3 identity
 * matrix will be filled in.
 *
 * Returns: 0 if success, or a negative error code on failure.
 */
int iio_read_mount_matrix(struct device *dev, struct iio_mount_matrix *matrix)
{
	size_t len = ARRAY_SIZE(iio_mount_idmatrix.rotation);
	int err;

	/* >= 0 return is the number of strings actually read */
	err = device_property_read_string_array(dev, "mount-matrix", matrix->rotation, len);
	if (err == len)
		return 0;

	if (err >= 0)
		/* Invalid number of matrix entries. */
		return -EINVAL;

	if (err != -EINVAL)
		/* Invalid matrix declaration format.
 */
		return err;

	/* Matrix was not declared at all: fallback to identity. */
	return iio_setup_mount_idmatrix(dev, matrix);
}
EXPORT_SYMBOL(iio_read_mount_matrix);

/*
 * Format a single IIO value (or IIO_VAL_INT_MULTIPLE group) into buf at
 * the given offset, without a trailing newline. Returns the number of
 * characters emitted, or 0 for an unknown type.
 */
static ssize_t __iio_format_value(char *buf, size_t offset, unsigned int type,
				  int size, const int *vals)
{
	int tmp0, tmp1;
	s64 tmp2;
	bool scale_db = false;

	switch (type) {
	case IIO_VAL_INT:
		return sysfs_emit_at(buf, offset, "%d", vals[0]);
	case IIO_VAL_INT_PLUS_MICRO_DB:
		scale_db = true;
		fallthrough;
	case IIO_VAL_INT_PLUS_MICRO:
		/* Negative fractional part needs an explicit leading '-' */
		if (vals[1] < 0)
			return sysfs_emit_at(buf, offset, "-%d.%06u%s",
					     abs(vals[0]), -vals[1],
					     scale_db ? " dB" : "");
		else
			return sysfs_emit_at(buf, offset, "%d.%06u%s", vals[0],
					     vals[1], scale_db ? " dB" : "");
	case IIO_VAL_INT_PLUS_NANO:
		if (vals[1] < 0)
			return sysfs_emit_at(buf, offset, "-%d.%09u",
					     abs(vals[0]), -vals[1]);
		else
			return sysfs_emit_at(buf, offset, "%d.%09u", vals[0],
					     vals[1]);
	case IIO_VAL_FRACTIONAL:
		/* vals[0]/vals[1] scaled to nano precision */
		tmp2 = div_s64((s64)vals[0] * 1000000000LL, vals[1]);
		tmp1 = vals[1];
		tmp0 = (int)div_s64_rem(tmp2, 1000000000, &tmp1);
		/* "-0.x" cannot be produced by %d, handle it explicitly */
		if ((tmp2 < 0) && (tmp0 == 0))
			return sysfs_emit_at(buf, offset, "-0.%09u", abs(tmp1));
		else
			return sysfs_emit_at(buf, offset, "%d.%09u", tmp0,
					     abs(tmp1));
	case IIO_VAL_FRACTIONAL_LOG2:
		/* vals[0] / 2^vals[1], scaled to nano precision */
		tmp2 = shift_right((s64)vals[0] * 1000000000LL, vals[1]);
		tmp0 = (int)div_s64_rem(tmp2, 1000000000LL, &tmp1);
		if (tmp0 == 0 && tmp2 < 0)
			return sysfs_emit_at(buf, offset, "-0.%09u", abs(tmp1));
		else
			return sysfs_emit_at(buf, offset, "%d.%09u", tmp0,
					     abs(tmp1));
	case IIO_VAL_INT_MULTIPLE:
	{
		int i;
		int l = 0;

		for (i = 0; i < size; ++i)
			l += sysfs_emit_at(buf, offset + l, "%d ", vals[i]);
		return l;
	}
	case IIO_VAL_CHAR:
		return sysfs_emit_at(buf, offset, "%c", (char)vals[0]);
	case IIO_VAL_INT_64:
		/* 64-bit value split across vals[0] (low) and vals[1] (high) */
		tmp2 =
		       (s64)((((u64)vals[1]) << 32) | (u32)vals[0]);
		return sysfs_emit_at(buf, offset, "%lld", tmp2);
	default:
		return 0;
	}
}

/**
 * iio_format_value() - Formats a IIO value into its string representation
 * @buf:	The buffer to which the formatted value gets written
 *		which is assumed to be big enough (i.e. PAGE_SIZE).
 * @type:	One of the IIO_VAL_* constants. This decides how the val
 *		and val2 parameters are formatted.
 * @size:	Number of IIO value entries contained in vals
 * @vals:	Pointer to the values, exact meaning depends on the
 *		type parameter.
 *
 * Returns:
 * 0 by default, a negative number on failure or the total number of characters
 * written for a type that belongs to the IIO_VAL_* constant.
 */
ssize_t iio_format_value(char *buf, unsigned int type, int size, int *vals)
{
	ssize_t len;

	len = __iio_format_value(buf, 0, type, size, vals);
	/* Leave room for the trailing newline */
	if (len >= PAGE_SIZE - 1)
		return -EFBIG;

	return len + sysfs_emit_at(buf, len, "\n");
}
EXPORT_SYMBOL_GPL(iio_format_value);

/*
 * Show hook for the per-channel "label" attribute: prefer the driver's
 * read_label() callback, fall back to the channel's extend_name.
 */
static ssize_t iio_read_channel_label(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);

	if (indio_dev->info->read_label)
		return indio_dev->info->read_label(indio_dev, this_attr->c, buf);

	if (this_attr->c->extend_name)
		return sysfs_emit(buf, "%s\n", this_attr->c->extend_name);

	return -EINVAL;
}

/* Show hook for channel info attributes (raw, scale, offset, ...) */
static ssize_t iio_read_channel_info(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int vals[INDIO_MAX_RAW_ELEMENTS];
	int ret;
	int val_len = 2;

	if (indio_dev->info->read_raw_multi)
		ret = indio_dev->info->read_raw_multi(indio_dev, this_attr->c,
INDIO_MAX_RAW_ELEMENTS, 760 vals, &val_len, 761 this_attr->address); 762 else 763 ret = indio_dev->info->read_raw(indio_dev, this_attr->c, 764 &vals[0], &vals[1], this_attr->address); 765 766 if (ret < 0) 767 return ret; 768 769 return iio_format_value(buf, ret, val_len, vals); 770 } 771 772 static ssize_t iio_format_list(char *buf, const int *vals, int type, int length, 773 const char *prefix, const char *suffix) 774 { 775 ssize_t len; 776 int stride; 777 int i; 778 779 switch (type) { 780 case IIO_VAL_INT: 781 stride = 1; 782 break; 783 default: 784 stride = 2; 785 break; 786 } 787 788 len = sysfs_emit(buf, prefix); 789 790 for (i = 0; i <= length - stride; i += stride) { 791 if (i != 0) { 792 len += sysfs_emit_at(buf, len, " "); 793 if (len >= PAGE_SIZE) 794 return -EFBIG; 795 } 796 797 len += __iio_format_value(buf, len, type, stride, &vals[i]); 798 if (len >= PAGE_SIZE) 799 return -EFBIG; 800 } 801 802 len += sysfs_emit_at(buf, len, "%s\n", suffix); 803 804 return len; 805 } 806 807 static ssize_t iio_format_avail_list(char *buf, const int *vals, 808 int type, int length) 809 { 810 811 return iio_format_list(buf, vals, type, length, "", ""); 812 } 813 814 static ssize_t iio_format_avail_range(char *buf, const int *vals, int type) 815 { 816 int length; 817 818 /* 819 * length refers to the array size , not the number of elements. 820 * The purpose is to print the range [min , step ,max] so length should 821 * be 3 in case of int, and 6 for other types. 
	 */
	switch (type) {
	case IIO_VAL_INT:
		length = 3;
		break;
	default:
		length = 6;
		break;
	}

	return iio_format_list(buf, vals, type, length, "[", "]");
}

/* Show hook for *_available attributes; dispatches on the driver's
 * IIO_AVAIL_LIST / IIO_AVAIL_RANGE return value. */
static ssize_t iio_read_channel_info_avail(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	const int *vals;
	int ret;
	int length;
	int type;

	ret = indio_dev->info->read_avail(indio_dev, this_attr->c,
					  &vals, &type, &length,
					  this_attr->address);

	if (ret < 0)
		return ret;
	switch (ret) {
	case IIO_AVAIL_LIST:
		return iio_format_avail_list(buf, vals, type, length);
	case IIO_AVAIL_RANGE:
		return iio_format_avail_range(buf, vals, type);
	default:
		return -EINVAL;
	}
}

/**
 * __iio_str_to_fixpoint() - Parse a fixed-point number from a string
 * @str: The string to parse
 * @fract_mult: Multiplier for the first decimal place, should be a power of 10
 * @integer: The integer part of the number
 * @fract: The fractional part of the number
 * @scale_db: True if this should parse as dB
 *
 * Returns:
 * 0 on success, or a negative error code if the string could not be parsed.
 */
static int __iio_str_to_fixpoint(const char *str, int fract_mult,
				 int *integer, int *fract, bool scale_db)
{
	int i = 0, f = 0;
	bool integer_part = true, negative = false;

	/* fract_mult == 0 means the value is a plain integer */
	if (fract_mult == 0) {
		*fract = 0;

		return kstrtoint(str, 0, integer);
	}

	if (str[0] == '-') {
		negative = true;
		str++;
	} else if (str[0] == '+') {
		str++;
	}

	while (*str) {
		if ('0' <= *str && *str <= '9') {
			if (integer_part) {
				i = i * 10 + *str - '0';
			} else {
				/* Each further digit weighs ten times less */
				f += fract_mult * (*str - '0');
				fract_mult /= 10;
			}
		} else if (*str == '\n') {
			/* A newline is only valid as the final character */
			if (*(str + 1) == '\0')
				break;
			return -EINVAL;
		} else if (!strncmp(str, " dB", sizeof(" dB") - 1) && scale_db) {
			/* Ignore the dB suffix */
			str += sizeof(" dB") - 1;
			continue;
		} else if (!strncmp(str, "dB", sizeof("dB") - 1) && scale_db) {
			/* Ignore the dB suffix */
			str += sizeof("dB") - 1;
			continue;
		} else if (*str == '.' && integer_part) {
			integer_part = false;
		} else {
			return -EINVAL;
		}
		str++;
	}

	/*
	 * Carry the sign on the fractional part when the integer part is 0,
	 * so "-0.5" round-trips correctly.
	 */
	if (negative) {
		if (i)
			i = -i;
		else
			f = -f;
	}

	*integer = i;
	*fract = f;

	return 0;
}

/**
 * iio_str_to_fixpoint() - Parse a fixed-point number from a string
 * @str: The string to parse
 * @fract_mult: Multiplier for the first decimal place, should be a power of 10
 * @integer: The integer part of the number
 * @fract: The fractional part of the number
 *
 * Returns:
 * 0 on success, or a negative error code if the string could not be parsed.
 */
int iio_str_to_fixpoint(const char *str, int fract_mult,
			int *integer, int *fract)
{
	return __iio_str_to_fixpoint(str, fract_mult, integer, fract, false);
}
EXPORT_SYMBOL_GPL(iio_str_to_fixpoint);

/* Store hook for channel info attributes: parse the user string according
 * to the format the driver advertises via write_raw_get_fmt(). */
static ssize_t iio_write_channel_info(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf,
				      size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	int ret, fract_mult = 100000;
	int integer, fract = 0;
	bool is_char = false;
	bool scale_db = false;

	/* Assumes decimal - precision based on number of digits */
	if (!indio_dev->info->write_raw)
		return -EINVAL;

	/* Without write_raw_get_fmt() the IIO_VAL_INT_PLUS_MICRO default holds */
	if (indio_dev->info->write_raw_get_fmt)
		switch (indio_dev->info->write_raw_get_fmt(indio_dev,
			this_attr->c, this_attr->address)) {
		case IIO_VAL_INT:
			fract_mult = 0;
			break;
		case IIO_VAL_INT_PLUS_MICRO_DB:
			scale_db = true;
			fallthrough;
		case IIO_VAL_INT_PLUS_MICRO:
			fract_mult = 100000;
			break;
		case IIO_VAL_INT_PLUS_NANO:
			fract_mult = 100000000;
			break;
		case IIO_VAL_CHAR:
			is_char = true;
			break;
		default:
			return -EINVAL;
		}

	if (is_char) {
		char ch;

		if (sscanf(buf, "%c", &ch) != 1)
			return -EINVAL;
		integer = ch;
	} else {
		ret = __iio_str_to_fixpoint(buf, fract_mult, &integer, &fract,
					    scale_db);
		if (ret)
			return ret;
	}

	ret = indio_dev->info->write_raw(indio_dev, this_attr->c,
					 integer, fract, this_attr->address);
	if (ret)
		return ret;

	return len;
}

/* Build the sysfs attribute name for a channel attribute and wire up the
 * show/store callbacks; the name is freed by __iio_device_attr_deinit(). */
static
int __iio_device_attr_init(struct device_attribute *dev_attr,
			   const char *postfix,
			   struct iio_chan_spec const *chan,
			   ssize_t (*readfunc)(struct device *dev,
					       struct device_attribute *attr,
					       char *buf),
			   ssize_t (*writefunc)(struct device *dev,
						struct device_attribute *attr,
						const char *buf,
						size_t len),
			   enum iio_shared_by shared_by)
{
	int ret = 0;
	char *name = NULL;
	char *full_postfix;

	sysfs_attr_init(&dev_attr->attr);

	/* Build up postfix of <extend_name>_<modifier>_postfix */
	if (chan->modified && (shared_by == IIO_SEPARATE)) {
		if (chan->extend_name)
			full_postfix = kasprintf(GFP_KERNEL, "%s_%s_%s",
						 iio_modifier_names[chan->channel2],
						 chan->extend_name,
						 postfix);
		else
			full_postfix = kasprintf(GFP_KERNEL, "%s_%s",
						 iio_modifier_names[chan->channel2],
						 postfix);
	} else {
		if (chan->extend_name == NULL || shared_by != IIO_SEPARATE)
			full_postfix = kstrdup(postfix, GFP_KERNEL);
		else
			full_postfix = kasprintf(GFP_KERNEL,
						 "%s_%s",
						 chan->extend_name,
						 postfix);
	}
	if (full_postfix == NULL)
		return -ENOMEM;

	if (chan->differential) { /* Differential can not have modifier */
		switch (shared_by) {
		case IIO_SHARED_BY_ALL:
			name = kasprintf(GFP_KERNEL, "%s", full_postfix);
			break;
		case IIO_SHARED_BY_DIR:
			name = kasprintf(GFP_KERNEL, "%s_%s",
					 iio_direction[chan->output],
					 full_postfix);
			break;
		case IIO_SHARED_BY_TYPE:
			/* e.g. in_voltage-voltage_<postfix> */
			name = kasprintf(GFP_KERNEL, "%s_%s-%s_%s",
					 iio_direction[chan->output],
					 iio_chan_type_name_spec[chan->type],
					 iio_chan_type_name_spec[chan->type],
					 full_postfix);
			break;
		case IIO_SEPARATE:
			if (!chan->indexed) {
				WARN(1, "Differential channels must be indexed\n");
				ret = -EINVAL;
				goto error_free_full_postfix;
			}
			/* e.g. in_voltage0-voltage1_<postfix> */
			name = kasprintf(GFP_KERNEL,
					 "%s_%s%d-%s%d_%s",
					 iio_direction[chan->output],
					 iio_chan_type_name_spec[chan->type],
					 chan->channel,
					 iio_chan_type_name_spec[chan->type],
					 chan->channel2,
					 full_postfix);
			break;
		}
	} else { /* Single ended */
		switch (shared_by) {
		case IIO_SHARED_BY_ALL:
			name = kasprintf(GFP_KERNEL, "%s",
					 full_postfix);
			break;
		case IIO_SHARED_BY_DIR:
			name = kasprintf(GFP_KERNEL, "%s_%s",
					 iio_direction[chan->output],
					 full_postfix);
			break;
		case IIO_SHARED_BY_TYPE:
			name = kasprintf(GFP_KERNEL, "%s_%s_%s",
					 iio_direction[chan->output],
					 iio_chan_type_name_spec[chan->type],
					 full_postfix);
			break;

		case IIO_SEPARATE:
			if (chan->indexed)
				name = kasprintf(GFP_KERNEL, "%s_%s%d_%s",
						 iio_direction[chan->output],
						 iio_chan_type_name_spec[chan->type],
						 chan->channel,
						 full_postfix);
			else
				name = kasprintf(GFP_KERNEL, "%s_%s_%s",
						 iio_direction[chan->output],
						 iio_chan_type_name_spec[chan->type],
						 full_postfix);
			break;
		}
	}
	if (name == NULL) {
		ret = -ENOMEM;
		goto error_free_full_postfix;
	}
	dev_attr->attr.name = name;

	if (readfunc) {
		dev_attr->attr.mode |= 0444;
		dev_attr->show = readfunc;
	}

	if (writefunc) {
		dev_attr->attr.mode |= 0200;
		dev_attr->store = writefunc;
	}

error_free_full_postfix:
	kfree(full_postfix);

	return ret;
}

static void __iio_device_attr_deinit(struct device_attribute *dev_attr)
{
	/* Frees the name allocated by __iio_device_attr_init() */
	kfree(dev_attr->attr.name);
}

/* Allocate an iio_dev_attr, build its name, reject duplicates already on
 * attr_list, then link it onto attr_list. Returns 0 or negative errno. */
int __iio_add_chan_devattr(const char *postfix,
			   struct iio_chan_spec const *chan,
			   ssize_t (*readfunc)(struct device *dev,
					       struct device_attribute *attr,
					       char *buf),
			   ssize_t (*writefunc)(struct device *dev,
						struct device_attribute *attr,
						const char *buf,
						size_t len),
			   u64 mask,
			   enum iio_shared_by shared_by,
			   struct device *dev,
			   struct iio_buffer *buffer,
			   struct list_head *attr_list)
{
	int ret;
	struct iio_dev_attr *iio_attr, *t;

	iio_attr = kzalloc(sizeof(*iio_attr), GFP_KERNEL);
	if (iio_attr == NULL)
		return -ENOMEM;
	ret = __iio_device_attr_init(&iio_attr->dev_attr,
				     postfix, chan,
readfunc, writefunc, shared_by); 1167 if (ret) 1168 goto error_iio_dev_attr_free; 1169 iio_attr->c = chan; 1170 iio_attr->address = mask; 1171 iio_attr->buffer = buffer; 1172 list_for_each_entry(t, attr_list, l) 1173 if (strcmp(t->dev_attr.attr.name, 1174 iio_attr->dev_attr.attr.name) == 0) { 1175 if (shared_by == IIO_SEPARATE) 1176 dev_err(dev, "tried to double register : %s\n", 1177 t->dev_attr.attr.name); 1178 ret = -EBUSY; 1179 goto error_device_attr_deinit; 1180 } 1181 list_add(&iio_attr->l, attr_list); 1182 1183 return 0; 1184 1185 error_device_attr_deinit: 1186 __iio_device_attr_deinit(&iio_attr->dev_attr); 1187 error_iio_dev_attr_free: 1188 kfree(iio_attr); 1189 return ret; 1190 } 1191 1192 static int iio_device_add_channel_label(struct iio_dev *indio_dev, 1193 struct iio_chan_spec const *chan) 1194 { 1195 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 1196 int ret; 1197 1198 if (!indio_dev->info->read_label && !chan->extend_name) 1199 return 0; 1200 1201 ret = __iio_add_chan_devattr("label", 1202 chan, 1203 &iio_read_channel_label, 1204 NULL, 1205 0, 1206 IIO_SEPARATE, 1207 &indio_dev->dev, 1208 NULL, 1209 &iio_dev_opaque->channel_attr_list); 1210 if (ret < 0) 1211 return ret; 1212 1213 return 1; 1214 } 1215 1216 static int iio_device_add_info_mask_type(struct iio_dev *indio_dev, 1217 struct iio_chan_spec const *chan, 1218 enum iio_shared_by shared_by, 1219 const long *infomask) 1220 { 1221 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 1222 int i, ret, attrcount = 0; 1223 1224 for_each_set_bit(i, infomask, sizeof(*infomask)*8) { 1225 if (i >= ARRAY_SIZE(iio_chan_info_postfix)) 1226 return -EINVAL; 1227 ret = __iio_add_chan_devattr(iio_chan_info_postfix[i], 1228 chan, 1229 &iio_read_channel_info, 1230 &iio_write_channel_info, 1231 i, 1232 shared_by, 1233 &indio_dev->dev, 1234 NULL, 1235 &iio_dev_opaque->channel_attr_list); 1236 if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE)) 1237 continue; 1238 if (ret < 0) 
return ret;
		attrcount++;
	}

	return attrcount;
}

/*
 * Like iio_device_add_info_mask_type(), but creates the matching
 * "<postfix>_available" attributes (read-only). Returns the number of
 * attributes added, or a negative error code.
 */
static int iio_device_add_info_mask_type_avail(struct iio_dev *indio_dev,
					       struct iio_chan_spec const *chan,
					       enum iio_shared_by shared_by,
					       const long *infomask)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	int i, ret, attrcount = 0;
	char *avail_postfix;

	for_each_set_bit(i, infomask, sizeof(*infomask) * 8) {
		if (i >= ARRAY_SIZE(iio_chan_info_postfix))
			return -EINVAL;
		avail_postfix = kasprintf(GFP_KERNEL,
					  "%s_available",
					  iio_chan_info_postfix[i]);
		if (!avail_postfix)
			return -ENOMEM;

		ret = __iio_add_chan_devattr(avail_postfix,
					     chan,
					     &iio_read_channel_info_avail,
					     NULL,
					     i,
					     shared_by,
					     &indio_dev->dev,
					     NULL,
					     &iio_dev_opaque->channel_attr_list);
		/* __iio_add_chan_devattr() copied the name; free ours. */
		kfree(avail_postfix);
		if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE))
			continue;
		if (ret < 0)
			return ret;
		attrcount++;
	}

	return attrcount;
}

/*
 * Create all sysfs attributes for one channel: every info_mask_* level,
 * the matching *_available attributes, the label and any ext_info
 * entries. Returns the total number of attributes created, or a
 * negative error code.
 */
static int iio_device_add_channel_sysfs(struct iio_dev *indio_dev,
					struct iio_chan_spec const *chan)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	int ret, attrcount = 0;
	const struct iio_chan_spec_ext_info *ext_info;

	/* Negative channel numbers mark channels without sysfs files. */
	if (chan->channel < 0)
		return 0;
	ret = iio_device_add_info_mask_type(indio_dev, chan,
					    IIO_SEPARATE,
					    &chan->info_mask_separate);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
						  IIO_SEPARATE,
						  &chan->info_mask_separate_available);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type(indio_dev, chan,
					    IIO_SHARED_BY_TYPE,
					    &chan->info_mask_shared_by_type);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
						  IIO_SHARED_BY_TYPE,
						  &chan->info_mask_shared_by_type_available);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type(indio_dev, chan,
					    IIO_SHARED_BY_DIR,
					    &chan->info_mask_shared_by_dir);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
						  IIO_SHARED_BY_DIR,
						  &chan->info_mask_shared_by_dir_available);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type(indio_dev, chan,
					    IIO_SHARED_BY_ALL,
					    &chan->info_mask_shared_by_all);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_info_mask_type_avail(indio_dev, chan,
						  IIO_SHARED_BY_ALL,
						  &chan->info_mask_shared_by_all_available);
	if (ret < 0)
		return ret;
	attrcount += ret;

	ret = iio_device_add_channel_label(indio_dev, chan);
	if (ret < 0)
		return ret;
	attrcount += ret;

	if (chan->ext_info) {
		unsigned int i = 0;

		for (ext_info = chan->ext_info; ext_info->name; ext_info++) {
			ret = __iio_add_chan_devattr(ext_info->name,
						     chan,
						     ext_info->read ?
							 &iio_read_channel_ext_info : NULL,
						     ext_info->write ?
							 &iio_write_channel_ext_info : NULL,
						     i,
						     ext_info->shared,
						     &indio_dev->dev,
						     NULL,
						     &iio_dev_opaque->channel_attr_list);
			/* i indexes the ext_info entry, even when skipped. */
			i++;
			if (ret == -EBUSY && ext_info->shared)
				continue;

			if (ret)
				return ret;

			attrcount++;
		}
	}

	return attrcount;
}

/**
 * iio_free_chan_devattr_list() - Free a list of IIO device attributes
 * @attr_list: List of IIO device attributes
 *
 * This function frees the memory allocated for each of the IIO device
 * attributes in the list.
1389 */ 1390 void iio_free_chan_devattr_list(struct list_head *attr_list) 1391 { 1392 struct iio_dev_attr *p, *n; 1393 1394 list_for_each_entry_safe(p, n, attr_list, l) { 1395 kfree_const(p->dev_attr.attr.name); 1396 list_del(&p->l); 1397 kfree(p); 1398 } 1399 } 1400 1401 static ssize_t name_show(struct device *dev, struct device_attribute *attr, 1402 char *buf) 1403 { 1404 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 1405 1406 return sysfs_emit(buf, "%s\n", indio_dev->name); 1407 } 1408 1409 static DEVICE_ATTR_RO(name); 1410 1411 static ssize_t label_show(struct device *dev, struct device_attribute *attr, 1412 char *buf) 1413 { 1414 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 1415 1416 return sysfs_emit(buf, "%s\n", indio_dev->label); 1417 } 1418 1419 static DEVICE_ATTR_RO(label); 1420 1421 static const char * const clock_names[] = { 1422 [CLOCK_REALTIME] = "realtime", 1423 [CLOCK_MONOTONIC] = "monotonic", 1424 [CLOCK_PROCESS_CPUTIME_ID] = "process_cputime_id", 1425 [CLOCK_THREAD_CPUTIME_ID] = "thread_cputime_id", 1426 [CLOCK_MONOTONIC_RAW] = "monotonic_raw", 1427 [CLOCK_REALTIME_COARSE] = "realtime_coarse", 1428 [CLOCK_MONOTONIC_COARSE] = "monotonic_coarse", 1429 [CLOCK_BOOTTIME] = "boottime", 1430 [CLOCK_REALTIME_ALARM] = "realtime_alarm", 1431 [CLOCK_BOOTTIME_ALARM] = "boottime_alarm", 1432 [CLOCK_SGI_CYCLE] = "sgi_cycle", 1433 [CLOCK_TAI] = "tai", 1434 }; 1435 1436 static ssize_t current_timestamp_clock_show(struct device *dev, 1437 struct device_attribute *attr, 1438 char *buf) 1439 { 1440 const struct iio_dev *indio_dev = dev_to_iio_dev(dev); 1441 const clockid_t clk = iio_device_get_clock(indio_dev); 1442 1443 switch (clk) { 1444 case CLOCK_REALTIME: 1445 case CLOCK_MONOTONIC: 1446 case CLOCK_MONOTONIC_RAW: 1447 case CLOCK_REALTIME_COARSE: 1448 case CLOCK_MONOTONIC_COARSE: 1449 case CLOCK_BOOTTIME: 1450 case CLOCK_TAI: 1451 break; 1452 default: 1453 BUG(); 1454 } 1455 1456 return sysfs_emit(buf, "%s\n", clock_names[clk]); 1457 } 1458 1459 static 
ssize_t current_timestamp_clock_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t len)
{
	clockid_t clk;
	int ret;

	/* Map the written string onto a clockid via clock_names[]. */
	ret = sysfs_match_string(clock_names, buf);
	if (ret < 0)
		return ret;
	clk = ret;

	/* Only a subset of the named clocks is valid for timestamping. */
	switch (clk) {
	case CLOCK_REALTIME:
	case CLOCK_MONOTONIC:
	case CLOCK_MONOTONIC_RAW:
	case CLOCK_REALTIME_COARSE:
	case CLOCK_MONOTONIC_COARSE:
	case CLOCK_BOOTTIME:
	case CLOCK_TAI:
		break;
	default:
		return -EINVAL;
	}

	ret = iio_device_set_clock(dev_to_iio_dev(dev), clk);
	if (ret)
		return ret;

	return len;
}

/*
 * Append @group to the NULL-terminated array of attribute groups that
 * will be handed to the driver core at registration time.
 */
int iio_device_register_sysfs_group(struct iio_dev *indio_dev,
				    const struct attribute_group *group)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	const struct attribute_group **new, **old = iio_dev_opaque->groups;
	unsigned int cnt = iio_dev_opaque->groupcounter;

	/* +2: one slot for the new group, one for the NULL terminator. */
	new = krealloc_array(old, cnt + 2, sizeof(*new), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	new[iio_dev_opaque->groupcounter++] = group;
	new[iio_dev_opaque->groupcounter] = NULL;

	iio_dev_opaque->groups = new;

	return 0;
}

static DEVICE_ATTR_RW(current_timestamp_clock);

/*
 * Build the device's main attribute group: the driver's own attributes
 * plus every per-channel attribute, name, label and (when a timestamp
 * channel or event interface exists) the timestamp clock attribute.
 * The attribute count computed here must match the fill loop below.
 */
static int iio_device_register_sysfs(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	int i, ret = 0, attrcount, attrn, attrcount_orig = 0;
	struct iio_dev_attr *p;
	struct attribute **attr, *clk = NULL;

	/* First count elements in any existing group */
	if (indio_dev->info->attrs) {
		attr = indio_dev->info->attrs->attrs;
		while (*attr++ != NULL)
			attrcount_orig++;
	}
	attrcount = attrcount_orig;
	/*
	 * New channel registration method - relies on the fact a group does
	 * not need to be initialized if its name is NULL.
	 */
	if (indio_dev->channels)
		for (i = 0; i < indio_dev->num_channels; i++) {
			const struct iio_chan_spec *chan =
				&indio_dev->channels[i];

			if (chan->type == IIO_TIMESTAMP)
				clk = &dev_attr_current_timestamp_clock.attr;

			ret = iio_device_add_channel_sysfs(indio_dev, chan);
			if (ret < 0)
				goto error_clear_attrs;
			attrcount += ret;
		}

	if (iio_dev_opaque->event_interface)
		clk = &dev_attr_current_timestamp_clock.attr;

	if (indio_dev->name)
		attrcount++;
	if (indio_dev->label)
		attrcount++;
	if (clk)
		attrcount++;

	/* +1 for the NULL terminator of the attribute array. */
	iio_dev_opaque->chan_attr_group.attrs =
		kcalloc(attrcount + 1,
			sizeof(iio_dev_opaque->chan_attr_group.attrs[0]),
			GFP_KERNEL);
	if (iio_dev_opaque->chan_attr_group.attrs == NULL) {
		ret = -ENOMEM;
		goto error_clear_attrs;
	}
	/* Copy across original attributes, and point to original binary attributes */
	if (indio_dev->info->attrs) {
		memcpy(iio_dev_opaque->chan_attr_group.attrs,
		       indio_dev->info->attrs->attrs,
		       sizeof(iio_dev_opaque->chan_attr_group.attrs[0])
		       *attrcount_orig);
		iio_dev_opaque->chan_attr_group.is_visible =
			indio_dev->info->attrs->is_visible;
		iio_dev_opaque->chan_attr_group.bin_attrs =
			indio_dev->info->attrs->bin_attrs;
	}
	attrn = attrcount_orig;
	/* Add all elements from the list.
*/ 1575 list_for_each_entry(p, &iio_dev_opaque->channel_attr_list, l) 1576 iio_dev_opaque->chan_attr_group.attrs[attrn++] = &p->dev_attr.attr; 1577 if (indio_dev->name) 1578 iio_dev_opaque->chan_attr_group.attrs[attrn++] = &dev_attr_name.attr; 1579 if (indio_dev->label) 1580 iio_dev_opaque->chan_attr_group.attrs[attrn++] = &dev_attr_label.attr; 1581 if (clk) 1582 iio_dev_opaque->chan_attr_group.attrs[attrn++] = clk; 1583 1584 ret = iio_device_register_sysfs_group(indio_dev, 1585 &iio_dev_opaque->chan_attr_group); 1586 if (ret) 1587 goto error_clear_attrs; 1588 1589 return 0; 1590 1591 error_clear_attrs: 1592 iio_free_chan_devattr_list(&iio_dev_opaque->channel_attr_list); 1593 1594 return ret; 1595 } 1596 1597 static void iio_device_unregister_sysfs(struct iio_dev *indio_dev) 1598 { 1599 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 1600 1601 iio_free_chan_devattr_list(&iio_dev_opaque->channel_attr_list); 1602 kfree(iio_dev_opaque->chan_attr_group.attrs); 1603 iio_dev_opaque->chan_attr_group.attrs = NULL; 1604 kfree(iio_dev_opaque->groups); 1605 iio_dev_opaque->groups = NULL; 1606 } 1607 1608 static void iio_dev_release(struct device *device) 1609 { 1610 struct iio_dev *indio_dev = dev_to_iio_dev(device); 1611 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 1612 1613 if (indio_dev->modes & INDIO_ALL_TRIGGERED_MODES) 1614 iio_device_unregister_trigger_consumer(indio_dev); 1615 iio_device_unregister_eventset(indio_dev); 1616 iio_device_unregister_sysfs(indio_dev); 1617 1618 iio_device_detach_buffers(indio_dev); 1619 1620 lockdep_unregister_key(&iio_dev_opaque->mlock_key); 1621 1622 ida_free(&iio_ida, iio_dev_opaque->id); 1623 kfree(iio_dev_opaque); 1624 } 1625 1626 const struct device_type iio_device_type = { 1627 .name = "iio_device", 1628 .release = iio_dev_release, 1629 }; 1630 1631 /** 1632 * iio_device_alloc() - allocate an iio_dev from a driver 1633 * @parent: Parent device. 
1634 * @sizeof_priv: Space to allocate for private structure. 1635 * 1636 * Returns: 1637 * Pointer to allocated iio_dev on success, NULL on failure. 1638 */ 1639 struct iio_dev *iio_device_alloc(struct device *parent, int sizeof_priv) 1640 { 1641 struct iio_dev_opaque *iio_dev_opaque; 1642 struct iio_dev *indio_dev; 1643 size_t alloc_size; 1644 1645 alloc_size = sizeof(struct iio_dev_opaque); 1646 if (sizeof_priv) { 1647 alloc_size = ALIGN(alloc_size, IIO_DMA_MINALIGN); 1648 alloc_size += sizeof_priv; 1649 } 1650 1651 iio_dev_opaque = kzalloc(alloc_size, GFP_KERNEL); 1652 if (!iio_dev_opaque) 1653 return NULL; 1654 1655 indio_dev = &iio_dev_opaque->indio_dev; 1656 indio_dev->priv = (char *)iio_dev_opaque + 1657 ALIGN(sizeof(struct iio_dev_opaque), IIO_DMA_MINALIGN); 1658 1659 indio_dev->dev.parent = parent; 1660 indio_dev->dev.type = &iio_device_type; 1661 indio_dev->dev.bus = &iio_bus_type; 1662 device_initialize(&indio_dev->dev); 1663 mutex_init(&iio_dev_opaque->mlock); 1664 mutex_init(&iio_dev_opaque->info_exist_lock); 1665 INIT_LIST_HEAD(&iio_dev_opaque->channel_attr_list); 1666 1667 iio_dev_opaque->id = ida_alloc(&iio_ida, GFP_KERNEL); 1668 if (iio_dev_opaque->id < 0) { 1669 /* cannot use a dev_err as the name isn't available */ 1670 pr_err("failed to get device id\n"); 1671 kfree(iio_dev_opaque); 1672 return NULL; 1673 } 1674 1675 if (dev_set_name(&indio_dev->dev, "iio:device%d", iio_dev_opaque->id)) { 1676 ida_free(&iio_ida, iio_dev_opaque->id); 1677 kfree(iio_dev_opaque); 1678 return NULL; 1679 } 1680 1681 INIT_LIST_HEAD(&iio_dev_opaque->buffer_list); 1682 INIT_LIST_HEAD(&iio_dev_opaque->ioctl_handlers); 1683 1684 lockdep_register_key(&iio_dev_opaque->mlock_key); 1685 lockdep_set_class(&iio_dev_opaque->mlock, &iio_dev_opaque->mlock_key); 1686 1687 return indio_dev; 1688 } 1689 EXPORT_SYMBOL(iio_device_alloc); 1690 1691 /** 1692 * iio_device_free() - free an iio_dev from a driver 1693 * @dev: the iio_dev associated with the device 1694 */ 1695 void 
iio_device_free(struct iio_dev *dev) 1696 { 1697 if (dev) 1698 put_device(&dev->dev); 1699 } 1700 EXPORT_SYMBOL(iio_device_free); 1701 1702 static void devm_iio_device_release(void *iio_dev) 1703 { 1704 iio_device_free(iio_dev); 1705 } 1706 1707 /** 1708 * devm_iio_device_alloc - Resource-managed iio_device_alloc() 1709 * @parent: Device to allocate iio_dev for, and parent for this IIO device 1710 * @sizeof_priv: Space to allocate for private structure. 1711 * 1712 * Managed iio_device_alloc. iio_dev allocated with this function is 1713 * automatically freed on driver detach. 1714 * 1715 * Returns: 1716 * Pointer to allocated iio_dev on success, NULL on failure. 1717 */ 1718 struct iio_dev *devm_iio_device_alloc(struct device *parent, int sizeof_priv) 1719 { 1720 struct iio_dev *iio_dev; 1721 int ret; 1722 1723 iio_dev = iio_device_alloc(parent, sizeof_priv); 1724 if (!iio_dev) 1725 return NULL; 1726 1727 ret = devm_add_action_or_reset(parent, devm_iio_device_release, 1728 iio_dev); 1729 if (ret) 1730 return NULL; 1731 1732 return iio_dev; 1733 } 1734 EXPORT_SYMBOL_GPL(devm_iio_device_alloc); 1735 1736 /** 1737 * iio_chrdev_open() - chrdev file open for buffer access and ioctls 1738 * @inode: Inode structure for identifying the device in the file system 1739 * @filp: File structure for iio device used to keep and later access 1740 * private data 1741 * 1742 * Returns: 0 on success or -EBUSY if the device is already opened 1743 */ 1744 static int iio_chrdev_open(struct inode *inode, struct file *filp) 1745 { 1746 struct iio_dev_opaque *iio_dev_opaque = 1747 container_of(inode->i_cdev, struct iio_dev_opaque, chrdev); 1748 struct iio_dev *indio_dev = &iio_dev_opaque->indio_dev; 1749 struct iio_dev_buffer_pair *ib; 1750 1751 if (test_and_set_bit(IIO_BUSY_BIT_POS, &iio_dev_opaque->flags)) 1752 return -EBUSY; 1753 1754 iio_device_get(indio_dev); 1755 1756 ib = kmalloc(sizeof(*ib), GFP_KERNEL); 1757 if (!ib) { 1758 iio_device_put(indio_dev); 1759 
clear_bit(IIO_BUSY_BIT_POS, &iio_dev_opaque->flags);
		return -ENOMEM;
	}

	ib->indio_dev = indio_dev;
	ib->buffer = indio_dev->buffer;

	filp->private_data = ib;

	return 0;
}

/**
 * iio_chrdev_release() - chrdev file close buffer access and ioctls
 * @inode: Inode structure pointer for the char device
 * @filp: File structure pointer for the char device
 *
 * Returns: 0 for successful release.
 */
static int iio_chrdev_release(struct inode *inode, struct file *filp)
{
	struct iio_dev_buffer_pair *ib = filp->private_data;
	struct iio_dev_opaque *iio_dev_opaque =
		container_of(inode->i_cdev, struct iio_dev_opaque, chrdev);
	struct iio_dev *indio_dev = &iio_dev_opaque->indio_dev;

	/* Mirror iio_chrdev_open(): free pair, clear busy bit, drop ref. */
	kfree(ib);
	clear_bit(IIO_BUSY_BIT_POS, &iio_dev_opaque->flags);
	iio_device_put(indio_dev);

	return 0;
}

/* Register an ioctl handler to be consulted by iio_ioctl(). */
void iio_device_ioctl_handler_register(struct iio_dev *indio_dev,
				       struct iio_ioctl_handler *h)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	list_add_tail(&h->entry, &iio_dev_opaque->ioctl_handlers);
}

void iio_device_ioctl_handler_unregister(struct iio_ioctl_handler *h)
{
	list_del(&h->entry);
}

/*
 * Dispatch an ioctl to the first registered handler that claims it;
 * returns -ENODEV when no handler takes it or the device is going away.
 */
static long iio_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	struct iio_dev_buffer_pair *ib = filp->private_data;
	struct iio_dev *indio_dev = ib->indio_dev;
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_ioctl_handler *h;
	int ret = -ENODEV;

	mutex_lock(&iio_dev_opaque->info_exist_lock);

	/*
	 * The NULL check here is required to prevent crashing when a device
	 * is being removed while userspace would still have open file handles
	 * to try to access this device.
*/
	if (!indio_dev->info)
		goto out_unlock;

	list_for_each_entry(h, &iio_dev_opaque->ioctl_handlers, entry) {
		ret = h->ioctl(indio_dev, filp, cmd, arg);
		if (ret != IIO_IOCTL_UNHANDLED)
			break;
	}

	if (ret == IIO_IOCTL_UNHANDLED)
		ret = -ENODEV;

out_unlock:
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}

/* fops for devices with at least one attached buffer. */
static const struct file_operations iio_buffer_fileops = {
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
	.read = iio_buffer_read_outer_addr,
	.write = iio_buffer_write_outer_addr,
	.poll = iio_buffer_poll_addr,
	.unlocked_ioctl = iio_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.open = iio_chrdev_open,
	.release = iio_chrdev_release,
};

/* fops for devices with an event interface but no buffer. */
static const struct file_operations iio_event_fileops = {
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
	.unlocked_ioctl = iio_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.open = iio_chrdev_open,
	.release = iio_chrdev_release,
};

/*
 * Reject registration when two buffer-capable channels share a
 * non-negative scan_index. Returns 0 or -EINVAL.
 */
static int iio_check_unique_scan_index(struct iio_dev *indio_dev)
{
	int i, j;
	const struct iio_chan_spec *channels = indio_dev->channels;

	/* Scan indices only matter for buffer-capable devices. */
	if (!(indio_dev->modes & INDIO_ALL_BUFFER_MODES))
		return 0;

	for (i = 0; i < indio_dev->num_channels - 1; i++) {
		if (channels[i].scan_index < 0)
			continue;
		for (j = i + 1; j < indio_dev->num_channels; j++)
			if (channels[i].scan_index == channels[j].scan_index) {
				dev_err(&indio_dev->dev,
					"Duplicate scan index %d\n",
					channels[i].scan_index);
				return -EINVAL;
			}
	}

	return 0;
}

/* read_label and per-channel extend_name are mutually exclusive. */
static int iio_check_extended_name(const struct iio_dev *indio_dev)
{
	unsigned int i;

	if (!indio_dev->info->read_label)
		return 0;

	for (i = 0; i < indio_dev->num_channels; i++) {
		if (indio_dev->channels[i].extend_name) {
			dev_err(&indio_dev->dev,
				"Cannot use labels and extend_name at the same time\n");
			return -EINVAL;
		}
	}

	return 0;
}

static const struct iio_buffer_setup_ops noop_ring_setup_ops;

/*
 * Warn (do not fail) about suspicious available_scan_masks arrays:
 * multi-long masks, an empty first mask, or masks shadowed by an
 * earlier subset entry.
 */
static void iio_sanity_check_avail_scan_masks(struct iio_dev *indio_dev)
{
	unsigned int num_masks, masklength, longs_per_mask;
	const unsigned long *av_masks;
	int i;

	av_masks = indio_dev->available_scan_masks;
	masklength = indio_dev->masklength;
	longs_per_mask = BITS_TO_LONGS(masklength);

	/*
	 * The code determining how many available_scan_masks is in the array
	 * will be assuming the end of masks when first long with all bits
	 * zeroed is encountered. This is incorrect for masks where mask
	 * consists of more than one long, and where some of the available masks
	 * has long worth of bits zeroed (but has subsequent bit(s) set). This
	 * is a safety measure against bug where array of masks is terminated by
	 * a single zero while mask width is greater than width of a long.
	 */
	if (longs_per_mask > 1)
		dev_warn(indio_dev->dev.parent,
			 "multi long available scan masks not fully supported\n");

	if (bitmap_empty(av_masks, masklength))
		dev_warn(indio_dev->dev.parent, "empty scan mask\n");

	/* Array is terminated by a mask whose first long is zero. */
	for (num_masks = 0; *av_masks; num_masks++)
		av_masks += longs_per_mask;

	if (num_masks < 2)
		return;

	av_masks = indio_dev->available_scan_masks;

	/*
	 * Go through all the masks from first to one before the last, and see
	 * that no mask found later from the available_scan_masks array is a
	 * subset of mask found earlier. If this happens, then the mask found
	 * later will never get used because scanning the array is stopped when
	 * the first suitable mask is found. Drivers should order the array of
	 * available masks in the order of preference (presumably the least
	 * costly to access masks first).
*/
	for (i = 0; i < num_masks - 1; i++) {
		const unsigned long *mask1;
		int j;

		mask1 = av_masks + i * longs_per_mask;
		for (j = i + 1; j < num_masks; j++) {
			const unsigned long *mask2;

			mask2 = av_masks + j * longs_per_mask;
			if (bitmap_subset(mask2, mask1, masklength))
				dev_warn(indio_dev->dev.parent,
					 "available_scan_mask %d subset of %d. Never used\n",
					 j, i);
		}
	}
}

/*
 * __iio_device_register() - register a device with the IIO subsystem
 * @indio_dev: fully initialized device from iio_device_alloc()
 * @this_mod: module owning the device (becomes the chrdev owner)
 *
 * Validates the channel specs, creates the sysfs/eventset/buffer
 * interfaces and finally exposes the character device. On any failure
 * everything created so far is unwound in reverse order.
 */
int __iio_device_register(struct iio_dev *indio_dev, struct module *this_mod)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct fwnode_handle *fwnode = NULL;
	int ret;

	if (!indio_dev->info)
		return -EINVAL;

	iio_dev_opaque->driver_module = this_mod;

	/* If the calling driver did not initialize firmware node, do it here */
	if (dev_fwnode(&indio_dev->dev))
		fwnode = dev_fwnode(&indio_dev->dev);
	/* The default dummy IIO device has no parent */
	else if (indio_dev->dev.parent)
		fwnode = dev_fwnode(indio_dev->dev.parent);
	device_set_node(&indio_dev->dev, fwnode);

	/* Optional firmware-provided label; left untouched on failure. */
	fwnode_property_read_string(fwnode, "label", &indio_dev->label);

	ret = iio_check_unique_scan_index(indio_dev);
	if (ret < 0)
		return ret;

	ret = iio_check_extended_name(indio_dev);
	if (ret < 0)
		return ret;

	iio_device_register_debugfs(indio_dev);

	ret = iio_buffers_alloc_sysfs_and_mask(indio_dev);
	if (ret) {
		dev_err(indio_dev->dev.parent,
			"Failed to create buffer sysfs interfaces\n");
		goto error_unreg_debugfs;
	}

	if (indio_dev->available_scan_masks)
		iio_sanity_check_avail_scan_masks(indio_dev);

	ret = iio_device_register_sysfs(indio_dev);
	if (ret) {
		dev_err(indio_dev->dev.parent,
			"Failed to register sysfs interfaces\n");
		goto error_buffer_free_sysfs;
	}
	ret = iio_device_register_eventset(indio_dev);
	if (ret) {
		dev_err(indio_dev->dev.parent,
			"Failed to register event set\n");
		goto error_free_sysfs;
	}
	if (indio_dev->modes & INDIO_ALL_TRIGGERED_MODES)
		iio_device_register_trigger_consumer(indio_dev);

	if ((indio_dev->modes & INDIO_ALL_BUFFER_MODES) &&
	    indio_dev->setup_ops == NULL)
		indio_dev->setup_ops = &noop_ring_setup_ops;

	/* The chrdev is only needed for buffer and/or event access. */
	if (iio_dev_opaque->attached_buffers_cnt)
		cdev_init(&iio_dev_opaque->chrdev, &iio_buffer_fileops);
	else if (iio_dev_opaque->event_interface)
		cdev_init(&iio_dev_opaque->chrdev, &iio_event_fileops);

	if (iio_dev_opaque->attached_buffers_cnt || iio_dev_opaque->event_interface) {
		indio_dev->dev.devt = MKDEV(MAJOR(iio_devt), iio_dev_opaque->id);
		iio_dev_opaque->chrdev.owner = this_mod;
	}

	/* assign device groups now; they should be all registered now */
	indio_dev->dev.groups = iio_dev_opaque->groups;

	ret = cdev_device_add(&iio_dev_opaque->chrdev, &indio_dev->dev);
	if (ret < 0)
		goto error_unreg_eventset;

	return 0;

error_unreg_eventset:
	iio_device_unregister_eventset(indio_dev);
error_free_sysfs:
	iio_device_unregister_sysfs(indio_dev);
error_buffer_free_sysfs:
	iio_buffers_free_sysfs_and_mask(indio_dev);
error_unreg_debugfs:
	iio_device_unregister_debugfs(indio_dev);
	return ret;
}
EXPORT_SYMBOL(__iio_device_register);

/**
 * iio_device_unregister() - unregister a device from the IIO subsystem
 * @indio_dev: Device structure representing the device.
*/
void iio_device_unregister(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	cdev_device_del(&iio_dev_opaque->chrdev, &indio_dev->dev);

	/*
	 * Clearing ->info under info_exist_lock tells iio_ioctl() and the
	 * other file operations that the device is going away.
	 */
	mutex_lock(&iio_dev_opaque->info_exist_lock);

	iio_device_unregister_debugfs(indio_dev);

	iio_disable_all_buffers(indio_dev);

	indio_dev->info = NULL;

	/* Wake any sleepers so they can notice the device is gone. */
	iio_device_wakeup_eventset(indio_dev);
	iio_buffer_wakeup_poll(indio_dev);

	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	iio_buffers_free_sysfs_and_mask(indio_dev);
}
EXPORT_SYMBOL(iio_device_unregister);

/* devm action callback: forwards to iio_device_unregister(). */
static void devm_iio_device_unreg(void *indio_dev)
{
	iio_device_unregister(indio_dev);
}

/*
 * Resource-managed __iio_device_register(): the device is unregistered
 * automatically when @dev detaches. Returns 0 or a negative error code.
 */
int __devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev,
			       struct module *this_mod)
{
	int ret;

	ret = __iio_device_register(indio_dev, this_mod);
	if (ret)
		return ret;

	return devm_add_action_or_reset(dev, devm_iio_device_unreg, indio_dev);
}
EXPORT_SYMBOL_GPL(__devm_iio_device_register);

/**
 * iio_device_claim_direct_mode - Keep device in direct mode
 * @indio_dev: the iio_dev associated with the device
 *
 * If the device is in direct mode it is guaranteed to stay
 * that way until iio_device_release_direct_mode() is called.
 *
 * Use with iio_device_release_direct_mode()
 *
 * Returns: 0 on success, -EBUSY on failure.
*/
int iio_device_claim_direct_mode(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	/* On success mlock is held until iio_device_release_direct_mode(). */
	mutex_lock(&iio_dev_opaque->mlock);

	if (iio_buffer_enabled(indio_dev)) {
		mutex_unlock(&iio_dev_opaque->mlock);
		return -EBUSY;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(iio_device_claim_direct_mode);

/**
 * iio_device_release_direct_mode - releases claim on direct mode
 * @indio_dev: the iio_dev associated with the device
 *
 * Release the claim. Device is no longer guaranteed to stay
 * in direct mode.
 *
 * Use with iio_device_claim_direct_mode()
 */
void iio_device_release_direct_mode(struct iio_dev *indio_dev)
{
	mutex_unlock(&to_iio_dev_opaque(indio_dev)->mlock);
}
EXPORT_SYMBOL_GPL(iio_device_release_direct_mode);

/**
 * iio_device_claim_buffer_mode - Keep device in buffer mode
 * @indio_dev: the iio_dev associated with the device
 *
 * If the device is in buffer mode it is guaranteed to stay
 * that way until iio_device_release_buffer_mode() is called.
 *
 * Use with iio_device_release_buffer_mode().
 *
 * Returns: 0 on success, -EBUSY on failure.
 */
int iio_device_claim_buffer_mode(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	/* On success mlock is held until iio_device_release_buffer_mode(). */
	mutex_lock(&iio_dev_opaque->mlock);

	if (iio_buffer_enabled(indio_dev))
		return 0;

	mutex_unlock(&iio_dev_opaque->mlock);
	return -EBUSY;
}
EXPORT_SYMBOL_GPL(iio_device_claim_buffer_mode);

/**
 * iio_device_release_buffer_mode - releases claim on buffer mode
 * @indio_dev: the iio_dev associated with the device
 *
 * Release the claim. Device is no longer guaranteed to stay
 * in buffer mode.
 *
 * Use with iio_device_claim_buffer_mode().
 */
void iio_device_release_buffer_mode(struct iio_dev *indio_dev)
{
	mutex_unlock(&to_iio_dev_opaque(indio_dev)->mlock);
}
EXPORT_SYMBOL_GPL(iio_device_release_buffer_mode);

/**
 * iio_device_get_current_mode() - helper function providing read-only access to
 *				   the opaque @currentmode variable
 * @indio_dev: IIO device structure for device
 */
int iio_device_get_current_mode(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	return iio_dev_opaque->currentmode;
}
EXPORT_SYMBOL_GPL(iio_device_get_current_mode);

subsys_initcall(iio_init);
module_exit(iio_exit);

MODULE_AUTHOR("Jonathan Cameron <jic23@kernel.org>");
MODULE_DESCRIPTION("Industrial I/O core");
MODULE_LICENSE("GPL");