1 // SPDX-License-Identifier: GPL-2.0-only 2 /* The industrial I/O core 3 * 4 * Copyright (c) 2008 Jonathan Cameron 5 * 6 * Handling of buffer allocation / resizing. 7 * 8 * Things to look at here. 9 * - Better memory allocation techniques? 10 * - Alternative access techniques? 11 */ 12 #include <linux/atomic.h> 13 #include <linux/anon_inodes.h> 14 #include <linux/cleanup.h> 15 #include <linux/kernel.h> 16 #include <linux/export.h> 17 #include <linux/device.h> 18 #include <linux/dma-buf.h> 19 #include <linux/dma-fence.h> 20 #include <linux/dma-resv.h> 21 #include <linux/file.h> 22 #include <linux/fs.h> 23 #include <linux/cdev.h> 24 #include <linux/slab.h> 25 #include <linux/mm.h> 26 #include <linux/poll.h> 27 #include <linux/sched/signal.h> 28 29 #include <linux/iio/iio.h> 30 #include <linux/iio/iio-opaque.h> 31 #include "iio_core.h" 32 #include "iio_core_trigger.h" 33 #include <linux/iio/sysfs.h> 34 #include <linux/iio/buffer.h> 35 #include <linux/iio/buffer_impl.h> 36 37 #define DMABUF_ENQUEUE_TIMEOUT_MS 5000 38 39 MODULE_IMPORT_NS(DMA_BUF); 40 41 struct iio_dmabuf_priv { 42 struct list_head entry; 43 struct kref ref; 44 45 struct iio_buffer *buffer; 46 struct iio_dma_buffer_block *block; 47 48 u64 context; 49 50 /* Spinlock used for locking the dma_fence */ 51 spinlock_t lock; 52 53 struct dma_buf_attachment *attach; 54 struct sg_table *sgt; 55 enum dma_data_direction dir; 56 atomic_t seqno; 57 }; 58 59 struct iio_dma_fence { 60 struct dma_fence base; 61 struct iio_dmabuf_priv *priv; 62 struct work_struct work; 63 }; 64 65 static const char * const iio_endian_prefix[] = { 66 [IIO_BE] = "be", 67 [IIO_LE] = "le", 68 }; 69 70 static bool iio_buffer_is_active(struct iio_buffer *buf) 71 { 72 return !list_empty(&buf->buffer_list); 73 } 74 75 static size_t iio_buffer_data_available(struct iio_buffer *buf) 76 { 77 return buf->access->data_available(buf); 78 } 79 80 static int iio_buffer_flush_hwfifo(struct iio_dev *indio_dev, 81 struct iio_buffer *buf, size_t required) 82 { 83 if (!indio_dev->info->hwfifo_flush_to_buffer) 84 return -ENODEV; 85 86 return indio_dev->info->hwfifo_flush_to_buffer(indio_dev, required); 87 } 88 89 static bool iio_buffer_ready(struct iio_dev *indio_dev, struct iio_buffer *buf, 90 size_t to_wait, int to_flush) 91 { 92 size_t avail; 93 int flushed = 0; 94 95 /* wakeup if the device was unregistered */ 96 if (!indio_dev->info) 97 return true; 98 99 /* drain the buffer if it was disabled */ 100 if (!iio_buffer_is_active(buf)) { 101 to_wait = min_t(size_t, to_wait, 1); 102 to_flush = 0; 103 } 104 105 avail = iio_buffer_data_available(buf); 106 107 if (avail >= to_wait) { 108 /* force a flush for non-blocking reads */ 109 if (!to_wait && avail < to_flush) 110 iio_buffer_flush_hwfifo(indio_dev, buf, 111 to_flush - avail); 112 return true; 113 } 114 115 if (to_flush) 116 flushed = iio_buffer_flush_hwfifo(indio_dev, buf, 117 to_wait - avail); 118 if (flushed <= 0) 119 return false; 120 121 if (avail + flushed >= to_wait) 122 return true; 123 124 return false; 125 } 126 127 /** 128 * iio_buffer_read() - chrdev read for buffer access 129 * @filp: File structure pointer for the char device 130 * @buf: Destination buffer for iio buffer read 131 * @n: First n bytes to read 132 * @f_ps: Long offset provided by the user as a seek position 133 * 134 * This function relies on all buffer implementations having an 135 * iio_buffer as their first element. 136 * 137 * Return: negative values corresponding to error codes or ret != 0 138 * for ending the reading activity 139 **/ 140 static ssize_t iio_buffer_read(struct file *filp, char __user *buf, 141 size_t n, loff_t *f_ps) 142 { 143 struct iio_dev_buffer_pair *ib = filp->private_data; 144 struct iio_buffer *rb = ib->buffer; 145 struct iio_dev *indio_dev = ib->indio_dev; 146 DEFINE_WAIT_FUNC(wait, woken_wake_function); 147 size_t datum_size; 148 size_t to_wait; 149 int ret = 0; 150 151 if (!indio_dev->info) 152 return -ENODEV; 153 154 if (!rb || !rb->access->read) 155 return -EINVAL; 156 157 if (rb->direction != IIO_BUFFER_DIRECTION_IN) 158 return -EPERM; 159 160 datum_size = rb->bytes_per_datum; 161 162 /* 163 * If datum_size is 0 there will never be anything to read from the 164 * buffer, so signal end of file now. 165 */ 166 if (!datum_size) 167 return 0; 168 169 if (filp->f_flags & O_NONBLOCK) 170 to_wait = 0; 171 else 172 to_wait = min_t(size_t, n / datum_size, rb->watermark); 173 174 add_wait_queue(&rb->pollq, &wait); 175 do { 176 if (!indio_dev->info) { 177 ret = -ENODEV; 178 break; 179 } 180 181 if (!iio_buffer_ready(indio_dev, rb, to_wait, n / datum_size)) { 182 if (signal_pending(current)) { 183 ret = -ERESTARTSYS; 184 break; 185 } 186 187 wait_woken(&wait, TASK_INTERRUPTIBLE, 188 MAX_SCHEDULE_TIMEOUT); 189 continue; 190 } 191 192 ret = rb->access->read(rb, n, buf); 193 if (ret == 0 && (filp->f_flags & O_NONBLOCK)) 194 ret = -EAGAIN; 195 } while (ret == 0); 196 remove_wait_queue(&rb->pollq, &wait); 197 198 return ret; 199 } 200 201 static size_t iio_buffer_space_available(struct iio_buffer *buf) 202 { 203 if (buf->access->space_available) 204 return buf->access->space_available(buf); 205 206 return SIZE_MAX; 207 } 208 209 static ssize_t iio_buffer_write(struct file *filp, const char __user *buf, 210 size_t n, loff_t *f_ps) 211 { 212 struct iio_dev_buffer_pair *ib = filp->private_data; 213 struct iio_buffer *rb = ib->buffer; 214 struct iio_dev *indio_dev = ib->indio_dev; 215 DEFINE_WAIT_FUNC(wait, woken_wake_function); 216 int ret = 0; 217 size_t written; 218 219 if (!indio_dev->info) 220 return -ENODEV; 221 222 if (!rb || !rb->access->write) 223 return -EINVAL; 224 225 if (rb->direction != IIO_BUFFER_DIRECTION_OUT) 226 return -EPERM; 227 228 written = 0; 229 add_wait_queue(&rb->pollq, &wait); 230 do { 231 if (!indio_dev->info) 232 return -ENODEV; 233 234 if (!iio_buffer_space_available(rb)) { 235 if (signal_pending(current)) { 236 ret = -ERESTARTSYS; 237 break; 238 } 239 240 if (filp->f_flags & O_NONBLOCK) { 241 if (!written) 242 ret = -EAGAIN; 243 break; 244 } 245 246 wait_woken(&wait, TASK_INTERRUPTIBLE, 247 MAX_SCHEDULE_TIMEOUT); 248 continue; 249 } 250 251 ret = rb->access->write(rb, n - written, buf + written); 252 if (ret < 0) 253 break; 254 255 written += ret; 256 257 } while (written != n); 258 remove_wait_queue(&rb->pollq, &wait); 259 260 return ret < 0 ? ret : written; 261 } 262 263 /** 264 * iio_buffer_poll() - poll the buffer to find out if it has data 265 * @filp: File structure pointer for device access 266 * @wait: Poll table structure pointer for which the driver adds 267 * a wait queue 268 * 269 * Return: (EPOLLIN | EPOLLRDNORM) if data is available for reading 270 * or 0 for other cases 271 */ 272 static __poll_t iio_buffer_poll(struct file *filp, 273 struct poll_table_struct *wait) 274 { 275 struct iio_dev_buffer_pair *ib = filp->private_data; 276 struct iio_buffer *rb = ib->buffer; 277 struct iio_dev *indio_dev = ib->indio_dev; 278 279 if (!indio_dev->info || !rb) 280 return 0; 281 282 poll_wait(filp, &rb->pollq, wait); 283 284 switch (rb->direction) { 285 case IIO_BUFFER_DIRECTION_IN: 286 if (iio_buffer_ready(indio_dev, rb, rb->watermark, 0)) 287 return EPOLLIN | EPOLLRDNORM; 288 break; 289 case IIO_BUFFER_DIRECTION_OUT: 290 if (iio_buffer_space_available(rb)) 291 return EPOLLOUT | EPOLLWRNORM; 292 break; 293 } 294 295 return 0; 296 } 297 298 ssize_t iio_buffer_read_wrapper(struct file *filp, char __user *buf, 299 size_t n, loff_t *f_ps) 300 { 301 struct iio_dev_buffer_pair *ib = filp->private_data; 302 struct iio_buffer *rb = ib->buffer; 303 304 /* check if buffer was opened through new API */ 305 if (test_bit(IIO_BUSY_BIT_POS, &rb->flags)) 306 return -EBUSY; 307 308 return iio_buffer_read(filp, buf, n, f_ps); 309 } 310 311 ssize_t iio_buffer_write_wrapper(struct file *filp, const char __user *buf, 312 size_t n, loff_t *f_ps) 313 { 314 struct iio_dev_buffer_pair *ib = filp->private_data; 315 struct iio_buffer *rb = ib->buffer; 316 317 /* check if buffer was opened through new API */ 318 if (test_bit(IIO_BUSY_BIT_POS, &rb->flags)) 319 return -EBUSY; 320 321 return iio_buffer_write(filp, buf, n, f_ps); 322 } 323 324 __poll_t iio_buffer_poll_wrapper(struct file *filp, 325 struct poll_table_struct *wait) 326 { 327 struct iio_dev_buffer_pair *ib = filp->private_data; 328 struct iio_buffer *rb = ib->buffer; 329 330 /* check if buffer was opened through new API */ 331 if (test_bit(IIO_BUSY_BIT_POS, &rb->flags)) 332 return 0; 333 334 return iio_buffer_poll(filp, wait); 335 } 336 337 /** 338 * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue 339 * @indio_dev: The IIO device 340 * 341 * Wakes up the event waitqueue used for poll(). Should usually 342 * be called when the device is unregistered. 343 */ 344 void iio_buffer_wakeup_poll(struct iio_dev *indio_dev) 345 { 346 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 347 struct iio_buffer *buffer; 348 unsigned int i; 349 350 for (i = 0; i < iio_dev_opaque->attached_buffers_cnt; i++) { 351 buffer = iio_dev_opaque->attached_buffers[i]; 352 wake_up(&buffer->pollq); 353 } 354 } 355 356 int iio_pop_from_buffer(struct iio_buffer *buffer, void *data) 357 { 358 if (!buffer || !buffer->access || !buffer->access->remove_from) 359 return -EINVAL; 360 361 return buffer->access->remove_from(buffer, data); 362 } 363 EXPORT_SYMBOL_GPL(iio_pop_from_buffer); 364 365 void iio_buffer_init(struct iio_buffer *buffer) 366 { 367 INIT_LIST_HEAD(&buffer->demux_list); 368 INIT_LIST_HEAD(&buffer->buffer_list); 369 INIT_LIST_HEAD(&buffer->dmabufs); 370 mutex_init(&buffer->dmabufs_mutex); 371 init_waitqueue_head(&buffer->pollq); 372 kref_init(&buffer->ref); 373 if (!buffer->watermark) 374 buffer->watermark = 1; 375 } 376 EXPORT_SYMBOL(iio_buffer_init); 377 378 void iio_device_detach_buffers(struct iio_dev *indio_dev) 379 { 380 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 381 struct iio_buffer *buffer; 382 unsigned int i; 383 384 for (i = 0; i < iio_dev_opaque->attached_buffers_cnt; i++) { 385 buffer = iio_dev_opaque->attached_buffers[i]; 386 iio_buffer_put(buffer); 387 } 388 389 kfree(iio_dev_opaque->attached_buffers); 390 } 391 392 static ssize_t iio_show_scan_index(struct device *dev, 393 struct device_attribute *attr, 394 char *buf) 395 { 396 return sysfs_emit(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index); 397 } 398 399 static ssize_t iio_show_fixed_type(struct device *dev, 400 struct device_attribute *attr, 401 char *buf) 402 { 403 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 404 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); 405 const struct iio_scan_type *scan_type; 406 u8 type; 407 408 scan_type = iio_get_current_scan_type(indio_dev, this_attr->c); 409 if (IS_ERR(scan_type)) 410 return PTR_ERR(scan_type); 411 412 type = scan_type->endianness; 413 414 if (type == IIO_CPU) { 415 #ifdef __LITTLE_ENDIAN 416 type = IIO_LE; 417 #else 418 type = IIO_BE; 419 #endif 420 } 421 if (scan_type->repeat > 1) 422 return sysfs_emit(buf, "%s:%c%d/%dX%d>>%u\n", 423 iio_endian_prefix[type], 424 scan_type->sign, 425 scan_type->realbits, 426 scan_type->storagebits, 427 scan_type->repeat, 428 scan_type->shift); 429 else 430 return sysfs_emit(buf, "%s:%c%d/%d>>%u\n", 431 iio_endian_prefix[type], 432 scan_type->sign, 433 scan_type->realbits, 434 scan_type->storagebits, 435 scan_type->shift); 436 } 437 438 static ssize_t iio_scan_el_show(struct device *dev, 439 struct device_attribute *attr, 440 char *buf) 441 { 442 int ret; 443 struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; 444 445 /* Ensure ret is 0 or 1. */ 446 ret = !!test_bit(to_iio_dev_attr(attr)->address, 447 buffer->scan_mask); 448 449 return sysfs_emit(buf, "%d\n", ret); 450 } 451 452 /* Note NULL used as error indicator as it doesn't make sense. */ 453 static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks, 454 unsigned int masklength, 455 const unsigned long *mask, 456 bool strict) 457 { 458 if (bitmap_empty(mask, masklength)) 459 return NULL; 460 /* 461 * The condition here do not handle multi-long masks correctly. 462 * It only checks the first long to be zero, and will use such mask 463 * as a terminator even if there was bits set after the first long. 464 * 465 * Correct check would require using: 466 * while (!bitmap_empty(av_masks, masklength)) 467 * instead. This is potentially hazardous because the 468 * avaliable_scan_masks is a zero terminated array of longs - and 469 * using the proper bitmap_empty() check for multi-long wide masks 470 * would require the array to be terminated with multiple zero longs - 471 * which is not such an usual pattern. 472 * 473 * As writing of this no multi-long wide masks were found in-tree, so 474 * the simple while (*av_masks) check is working. 475 */ 476 while (*av_masks) { 477 if (strict) { 478 if (bitmap_equal(mask, av_masks, masklength)) 479 return av_masks; 480 } else { 481 if (bitmap_subset(mask, av_masks, masklength)) 482 return av_masks; 483 } 484 av_masks += BITS_TO_LONGS(masklength); 485 } 486 return NULL; 487 } 488 489 static bool iio_validate_scan_mask(struct iio_dev *indio_dev, 490 const unsigned long *mask) 491 { 492 if (!indio_dev->setup_ops->validate_scan_mask) 493 return true; 494 495 return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask); 496 } 497 498 /** 499 * iio_scan_mask_set() - set particular bit in the scan mask 500 * @indio_dev: the iio device 501 * @buffer: the buffer whose scan mask we are interested in 502 * @bit: the bit to be set. 503 * 504 * Note that at this point we have no way of knowing what other 505 * buffers might request, hence this code only verifies that the 506 * individual buffers request is plausible. 507 */ 508 static int iio_scan_mask_set(struct iio_dev *indio_dev, 509 struct iio_buffer *buffer, int bit) 510 { 511 const unsigned long *mask; 512 unsigned long *trialmask; 513 514 if (!indio_dev->masklength) { 515 WARN(1, "Trying to set scanmask prior to registering buffer\n"); 516 return -EINVAL; 517 } 518 519 trialmask = bitmap_alloc(indio_dev->masklength, GFP_KERNEL); 520 if (!trialmask) 521 return -ENOMEM; 522 bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength); 523 set_bit(bit, trialmask); 524 525 if (!iio_validate_scan_mask(indio_dev, trialmask)) 526 goto err_invalid_mask; 527 528 if (indio_dev->available_scan_masks) { 529 mask = iio_scan_mask_match(indio_dev->available_scan_masks, 530 indio_dev->masklength, 531 trialmask, false); 532 if (!mask) 533 goto err_invalid_mask; 534 } 535 bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength); 536 537 bitmap_free(trialmask); 538 539 return 0; 540 541 err_invalid_mask: 542 bitmap_free(trialmask); 543 return -EINVAL; 544 } 545 546 static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit) 547 { 548 clear_bit(bit, buffer->scan_mask); 549 return 0; 550 } 551 552 static int iio_scan_mask_query(struct iio_dev *indio_dev, 553 struct iio_buffer *buffer, int bit) 554 { 555 if (bit > indio_dev->masklength) 556 return -EINVAL; 557 558 if (!buffer->scan_mask) 559 return 0; 560 561 /* Ensure return value is 0 or 1. */ 562 return !!test_bit(bit, buffer->scan_mask); 563 }; 564 565 static ssize_t iio_scan_el_store(struct device *dev, 566 struct device_attribute *attr, 567 const char *buf, 568 size_t len) 569 { 570 int ret; 571 bool state; 572 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 573 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 574 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); 575 struct iio_buffer *buffer = this_attr->buffer; 576 577 ret = kstrtobool(buf, &state); 578 if (ret < 0) 579 return ret; 580 581 guard(mutex)(&iio_dev_opaque->mlock); 582 if (iio_buffer_is_active(buffer)) 583 return -EBUSY; 584 585 ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address); 586 if (ret < 0) 587 return ret; 588 589 if (state && ret) 590 return len; 591 592 if (state) 593 ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address); 594 else 595 ret = iio_scan_mask_clear(buffer, this_attr->address); 596 if (ret) 597 return ret; 598 599 return len; 600 } 601 602 static ssize_t iio_scan_el_ts_show(struct device *dev, 603 struct device_attribute *attr, 604 char *buf) 605 { 606 struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; 607 608 return sysfs_emit(buf, "%d\n", buffer->scan_timestamp); 609 } 610 611 static ssize_t iio_scan_el_ts_store(struct device *dev, 612 struct device_attribute *attr, 613 const char *buf, 614 size_t len) 615 { 616 int ret; 617 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 618 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 619 struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; 620 bool state; 621 622 ret = kstrtobool(buf, &state); 623 if (ret < 0) 624 return ret; 625 626 guard(mutex)(&iio_dev_opaque->mlock); 627 if (iio_buffer_is_active(buffer)) 628 return -EBUSY; 629 630 buffer->scan_timestamp = state; 631 632 return len; 633 } 634 635 static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev, 636 struct iio_buffer *buffer, 637 const struct iio_chan_spec *chan) 638 { 639 int ret, attrcount = 0; 640 641 ret = __iio_add_chan_devattr("index", 642 chan, 643 &iio_show_scan_index, 644 NULL, 645 0, 646 IIO_SEPARATE, 647 &indio_dev->dev, 648 buffer, 649 &buffer->buffer_attr_list); 650 if (ret) 651 return ret; 652 attrcount++; 653 ret = __iio_add_chan_devattr("type", 654 chan, 655 &iio_show_fixed_type, 656 NULL, 657 0, 658 IIO_SEPARATE, 659 &indio_dev->dev, 660 buffer, 661 &buffer->buffer_attr_list); 662 if (ret) 663 return ret; 664 attrcount++; 665 if (chan->type != IIO_TIMESTAMP) 666 ret = __iio_add_chan_devattr("en", 667 chan, 668 &iio_scan_el_show, 669 &iio_scan_el_store, 670 chan->scan_index, 671 IIO_SEPARATE, 672 &indio_dev->dev, 673 buffer, 674 &buffer->buffer_attr_list); 675 else 676 ret = __iio_add_chan_devattr("en", 677 chan, 678 &iio_scan_el_ts_show, 679 &iio_scan_el_ts_store, 680 chan->scan_index, 681 IIO_SEPARATE, 682 &indio_dev->dev, 683 buffer, 684 &buffer->buffer_attr_list); 685 if (ret) 686 return ret; 687 attrcount++; 688 ret = attrcount; 689 return ret; 690 } 691 692 static ssize_t length_show(struct device *dev, struct device_attribute *attr, 693 char *buf) 694 { 695 struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; 696 697 return sysfs_emit(buf, "%d\n", buffer->length); 698 } 699 700 static ssize_t length_store(struct device *dev, struct device_attribute *attr, 701 const char *buf, size_t len) 702 { 703 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 704 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 705 struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; 706 unsigned int val; 707 int ret; 708 709 ret = kstrtouint(buf, 10, &val); 710 if (ret) 711 return ret; 712 713 if (val == buffer->length) 714 return len; 715 716 guard(mutex)(&iio_dev_opaque->mlock); 717 if (iio_buffer_is_active(buffer)) 718 return -EBUSY; 719 720 buffer->access->set_length(buffer, val); 721 722 if (buffer->length && buffer->length < buffer->watermark) 723 buffer->watermark = buffer->length; 724 725 return len; 726 } 727 728 static ssize_t enable_show(struct device *dev, struct device_attribute *attr, 729 char *buf) 730 { 731 struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; 732 733 return sysfs_emit(buf, "%d\n", iio_buffer_is_active(buffer)); 734 } 735 736 static int iio_storage_bytes_for_si(struct iio_dev *indio_dev, 737 unsigned int scan_index) 738 { 739 const struct iio_chan_spec *ch; 740 const struct iio_scan_type *scan_type; 741 unsigned int bytes; 742 743 ch = iio_find_channel_from_si(indio_dev, scan_index); 744 scan_type = iio_get_current_scan_type(indio_dev, ch); 745 if (IS_ERR(scan_type)) 746 return PTR_ERR(scan_type); 747 748 bytes = scan_type->storagebits / 8; 749 750 if (scan_type->repeat > 1) 751 bytes *= scan_type->repeat; 752 753 return bytes; 754 } 755 756 static int iio_storage_bytes_for_timestamp(struct iio_dev *indio_dev) 757 { 758 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 759 760 return iio_storage_bytes_for_si(indio_dev, 761 iio_dev_opaque->scan_index_timestamp); 762 } 763 764 static int iio_compute_scan_bytes(struct iio_dev *indio_dev, 765 const unsigned long *mask, bool timestamp) 766 { 767 unsigned int bytes = 0; 768 int length, i, largest = 0; 769 770 /* How much space will the demuxed element take? */ 771 for_each_set_bit(i, mask, 772 indio_dev->masklength) { 773 length = iio_storage_bytes_for_si(indio_dev, i); 774 if (length < 0) 775 return length; 776 777 bytes = ALIGN(bytes, length); 778 bytes += length; 779 largest = max(largest, length); 780 } 781 782 if (timestamp) { 783 length = iio_storage_bytes_for_timestamp(indio_dev); 784 if (length < 0) 785 return length; 786 787 bytes = ALIGN(bytes, length); 788 bytes += length; 789 largest = max(largest, length); 790 } 791 792 bytes = ALIGN(bytes, largest); 793 return bytes; 794 } 795 796 static void iio_buffer_activate(struct iio_dev *indio_dev, 797 struct iio_buffer *buffer) 798 { 799 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 800 801 iio_buffer_get(buffer); 802 list_add(&buffer->buffer_list, &iio_dev_opaque->buffer_list); 803 } 804 805 static void iio_buffer_deactivate(struct iio_buffer *buffer) 806 { 807 list_del_init(&buffer->buffer_list); 808 wake_up_interruptible(&buffer->pollq); 809 iio_buffer_put(buffer); 810 } 811 812 static void iio_buffer_deactivate_all(struct iio_dev *indio_dev) 813 { 814 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 815 struct iio_buffer *buffer, *_buffer; 816 817 list_for_each_entry_safe(buffer, _buffer, 818 &iio_dev_opaque->buffer_list, buffer_list) 819 iio_buffer_deactivate(buffer); 820 } 821 822 static int iio_buffer_enable(struct iio_buffer *buffer, 823 struct iio_dev *indio_dev) 824 { 825 if (!buffer->access->enable) 826 return 0; 827 return buffer->access->enable(buffer, indio_dev); 828 } 829 830 static int iio_buffer_disable(struct iio_buffer *buffer, 831 struct iio_dev *indio_dev) 832 { 833 if (!buffer->access->disable) 834 return 0; 835 return buffer->access->disable(buffer, indio_dev); 836 } 837 838 static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev, 839 struct iio_buffer *buffer) 840 { 841 unsigned int bytes; 842 843 if (!buffer->access->set_bytes_per_datum) 844 return; 845 846 bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask, 847 buffer->scan_timestamp); 848 849 buffer->access->set_bytes_per_datum(buffer, bytes); 850 } 851 852 static int iio_buffer_request_update(struct iio_dev *indio_dev, 853 struct iio_buffer *buffer) 854 { 855 int ret; 856 857 iio_buffer_update_bytes_per_datum(indio_dev, buffer); 858 if (buffer->access->request_update) { 859 ret = buffer->access->request_update(buffer); 860 if (ret) { 861 dev_dbg(&indio_dev->dev, 862 "Buffer not started: buffer parameter update failed (%d)\n", 863 ret); 864 return ret; 865 } 866 } 867 868 return 0; 869 } 870 871 static void iio_free_scan_mask(struct iio_dev *indio_dev, 872 const unsigned long *mask) 873 { 874 /* If the mask is dynamically allocated free it, otherwise do nothing */ 875 if (!indio_dev->available_scan_masks) 876 bitmap_free(mask); 877 } 878 879 struct iio_device_config { 880 unsigned int mode; 881 unsigned int watermark; 882 const unsigned long *scan_mask; 883 unsigned int scan_bytes; 884 bool scan_timestamp; 885 }; 886 887 static int iio_verify_update(struct iio_dev *indio_dev, 888 struct iio_buffer *insert_buffer, 889 struct iio_buffer *remove_buffer, 890 struct iio_device_config *config) 891 { 892 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 893 unsigned long *compound_mask; 894 const unsigned long *scan_mask; 895 bool strict_scanmask = false; 896 struct iio_buffer *buffer; 897 bool scan_timestamp; 898 unsigned int modes; 899 900 if (insert_buffer && 901 bitmap_empty(insert_buffer->scan_mask, indio_dev->masklength)) { 902 dev_dbg(&indio_dev->dev, 903 "At least one scan element must be enabled first\n"); 904 return -EINVAL; 905 } 906 907 memset(config, 0, sizeof(*config)); 908 config->watermark = ~0; 909 910 /* 911 * If there is just one buffer and we are removing it there is nothing 912 * to verify. 913 */ 914 if (remove_buffer && !insert_buffer && 915 list_is_singular(&iio_dev_opaque->buffer_list)) 916 return 0; 917 918 modes = indio_dev->modes; 919 920 list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) { 921 if (buffer == remove_buffer) 922 continue; 923 modes &= buffer->access->modes; 924 config->watermark = min(config->watermark, buffer->watermark); 925 } 926 927 if (insert_buffer) { 928 modes &= insert_buffer->access->modes; 929 config->watermark = min(config->watermark, 930 insert_buffer->watermark); 931 } 932 933 /* Definitely possible for devices to support both of these. */ 934 if ((modes & INDIO_BUFFER_TRIGGERED) && indio_dev->trig) { 935 config->mode = INDIO_BUFFER_TRIGGERED; 936 } else if (modes & INDIO_BUFFER_HARDWARE) { 937 /* 938 * Keep things simple for now and only allow a single buffer to 939 * be connected in hardware mode. 940 */ 941 if (insert_buffer && !list_empty(&iio_dev_opaque->buffer_list)) 942 return -EINVAL; 943 config->mode = INDIO_BUFFER_HARDWARE; 944 strict_scanmask = true; 945 } else if (modes & INDIO_BUFFER_SOFTWARE) { 946 config->mode = INDIO_BUFFER_SOFTWARE; 947 } else { 948 /* Can only occur on first buffer */ 949 if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) 950 dev_dbg(&indio_dev->dev, "Buffer not started: no trigger\n"); 951 return -EINVAL; 952 } 953 954 /* What scan mask do we actually have? */ 955 compound_mask = bitmap_zalloc(indio_dev->masklength, GFP_KERNEL); 956 if (!compound_mask) 957 return -ENOMEM; 958 959 scan_timestamp = false; 960 961 list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) { 962 if (buffer == remove_buffer) 963 continue; 964 bitmap_or(compound_mask, compound_mask, buffer->scan_mask, 965 indio_dev->masklength); 966 scan_timestamp |= buffer->scan_timestamp; 967 } 968 969 if (insert_buffer) { 970 bitmap_or(compound_mask, compound_mask, 971 insert_buffer->scan_mask, indio_dev->masklength); 972 scan_timestamp |= insert_buffer->scan_timestamp; 973 } 974 975 if (indio_dev->available_scan_masks) { 976 scan_mask = iio_scan_mask_match(indio_dev->available_scan_masks, 977 indio_dev->masklength, 978 compound_mask, 979 strict_scanmask); 980 bitmap_free(compound_mask); 981 if (!scan_mask) 982 return -EINVAL; 983 } else { 984 scan_mask = compound_mask; 985 } 986 987 config->scan_bytes = iio_compute_scan_bytes(indio_dev, 988 scan_mask, scan_timestamp); 989 config->scan_mask = scan_mask; 990 config->scan_timestamp = scan_timestamp; 991 992 return 0; 993 } 994 995 /** 996 * struct iio_demux_table - table describing demux memcpy ops 997 * @from: index to copy from 998 * @to: index to copy to 999 * @length: how many bytes to copy 1000 * @l: list head used for management 1001 */ 1002 struct iio_demux_table { 1003 unsigned int from; 1004 unsigned int to; 1005 unsigned int length; 1006 struct list_head l; 1007 }; 1008 1009 static void iio_buffer_demux_free(struct iio_buffer *buffer) 1010 { 1011 struct iio_demux_table *p, *q; 1012 1013 list_for_each_entry_safe(p, q, &buffer->demux_list, l) { 1014 list_del(&p->l); 1015 kfree(p); 1016 } 1017 } 1018 1019 static int iio_buffer_add_demux(struct iio_buffer *buffer, 1020 struct iio_demux_table **p, unsigned int in_loc, 1021 unsigned int out_loc, 1022 unsigned int length) 1023 { 1024 if (*p && (*p)->from + (*p)->length == in_loc && 1025 (*p)->to + (*p)->length == out_loc) { 1026 (*p)->length += length; 1027 } else { 1028 *p = kmalloc(sizeof(**p), GFP_KERNEL); 1029 if (!(*p)) 1030 return -ENOMEM; 1031 (*p)->from = in_loc; 1032 (*p)->to = out_loc; 1033 (*p)->length = length; 1034 list_add_tail(&(*p)->l, &buffer->demux_list); 1035 } 1036 1037 return 0; 1038 } 1039 1040 static int iio_buffer_update_demux(struct iio_dev *indio_dev, 1041 struct iio_buffer *buffer) 1042 { 1043 int ret, in_ind = -1, out_ind, length; 1044 unsigned int in_loc = 0, out_loc = 0; 1045 struct iio_demux_table *p = NULL; 1046 1047 /* Clear out any old demux */ 1048 iio_buffer_demux_free(buffer); 1049 kfree(buffer->demux_bounce); 1050 buffer->demux_bounce = NULL; 1051 1052 /* First work out which scan mode we will actually have */ 1053 if (bitmap_equal(indio_dev->active_scan_mask, 1054 buffer->scan_mask, 1055 indio_dev->masklength)) 1056 return 0; 1057 1058 /* Now we have the two masks, work from least sig and build up sizes */ 1059 for_each_set_bit(out_ind, 1060 buffer->scan_mask, 1061 indio_dev->masklength) { 1062 in_ind = find_next_bit(indio_dev->active_scan_mask, 1063 indio_dev->masklength, 1064 in_ind + 1); 1065 while (in_ind != out_ind) { 1066 ret = iio_storage_bytes_for_si(indio_dev, in_ind); 1067 if (ret < 0) 1068 goto error_clear_mux_table; 1069 1070 length = ret; 1071 /* Make sure we are aligned */ 1072 in_loc = roundup(in_loc, length) + length; 1073 in_ind = find_next_bit(indio_dev->active_scan_mask, 1074 indio_dev->masklength, 1075 in_ind + 1); 1076 } 1077 ret = iio_storage_bytes_for_si(indio_dev, in_ind); 1078 if (ret < 0) 1079 goto error_clear_mux_table; 1080 1081 length = ret; 1082 out_loc = roundup(out_loc, length); 1083 in_loc = roundup(in_loc, length); 1084 ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length); 1085 if (ret) 1086 goto error_clear_mux_table; 1087 out_loc += length; 1088 in_loc += length; 1089 } 1090 /* Relies on scan_timestamp being last */ 1091 if (buffer->scan_timestamp) { 1092 ret = iio_storage_bytes_for_timestamp(indio_dev); 1093 if (ret < 0) 1094 goto error_clear_mux_table; 1095 1096 length = ret; 1097 out_loc = roundup(out_loc, length); 1098 in_loc = roundup(in_loc, length); 1099 ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length); 1100 if (ret) 1101 goto error_clear_mux_table; 1102 out_loc += length; 1103 } 1104 buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL); 1105 if (!buffer->demux_bounce) { 1106 ret = -ENOMEM; 1107 goto error_clear_mux_table; 1108 } 1109 return 0; 1110 1111 error_clear_mux_table: 1112 iio_buffer_demux_free(buffer); 1113 1114 return ret; 1115 } 1116 1117 static int iio_update_demux(struct iio_dev *indio_dev) 1118 { 1119 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 1120 struct iio_buffer *buffer; 1121 int ret; 1122 1123 list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) { 1124 ret = iio_buffer_update_demux(indio_dev, buffer); 1125 if (ret < 0) 1126 goto error_clear_mux_table; 1127 } 1128 return 0; 1129 1130 error_clear_mux_table: 1131 list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) 1132 iio_buffer_demux_free(buffer); 1133 1134 return ret; 1135 } 1136 1137 static int iio_enable_buffers(struct iio_dev *indio_dev, 1138 struct iio_device_config *config) 1139 { 1140 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 1141 struct iio_buffer *buffer, *tmp = NULL; 1142 int ret; 1143 1144 indio_dev->active_scan_mask = config->scan_mask; 1145 indio_dev->scan_timestamp = config->scan_timestamp; 1146 indio_dev->scan_bytes = config->scan_bytes; 1147 iio_dev_opaque->currentmode = config->mode; 1148 1149 iio_update_demux(indio_dev); 1150 1151 /* Wind up again */ 1152 if (indio_dev->setup_ops->preenable) { 1153 ret = indio_dev->setup_ops->preenable(indio_dev); 1154 if (ret) { 1155 dev_dbg(&indio_dev->dev, 1156 "Buffer not started: buffer preenable failed (%d)\n", ret); 1157 goto err_undo_config; 1158 } 1159 } 1160 1161 if (indio_dev->info->update_scan_mode) { 1162 ret = indio_dev->info 1163 ->update_scan_mode(indio_dev, 1164 indio_dev->active_scan_mask); 1165 if (ret < 0) { 1166 dev_dbg(&indio_dev->dev, 1167 "Buffer not started: update scan mode failed (%d)\n", 1168 ret); 1169 goto err_run_postdisable; 1170 } 1171 } 1172 1173 if (indio_dev->info->hwfifo_set_watermark) 1174 indio_dev->info->hwfifo_set_watermark(indio_dev, 1175 config->watermark); 1176 1177 list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) { 1178 ret = iio_buffer_enable(buffer, indio_dev); 1179 if (ret) { 1180 tmp = buffer; 1181 goto err_disable_buffers; 1182 } 1183 } 1184 1185 if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) { 1186 ret = iio_trigger_attach_poll_func(indio_dev->trig, 1187 indio_dev->pollfunc); 1188 if (ret) 1189 goto err_disable_buffers; 1190 } 1191 1192 if (indio_dev->setup_ops->postenable) { 1193 ret = indio_dev->setup_ops->postenable(indio_dev); 1194 if (ret) { 1195 dev_dbg(&indio_dev->dev, 1196 "Buffer not started: postenable failed (%d)\n", ret); 1197 goto err_detach_pollfunc; 1198 } 1199 } 1200 1201 return 0; 1202 1203 err_detach_pollfunc: 1204 if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) { 1205 iio_trigger_detach_poll_func(indio_dev->trig, 1206 indio_dev->pollfunc); 1207 } 1208 err_disable_buffers: 1209 buffer = list_prepare_entry(tmp, &iio_dev_opaque->buffer_list, buffer_list); 1210 list_for_each_entry_continue_reverse(buffer, &iio_dev_opaque->buffer_list, 1211 buffer_list) 1212 iio_buffer_disable(buffer, indio_dev); 1213 err_run_postdisable: 1214 if (indio_dev->setup_ops->postdisable) 1215 indio_dev->setup_ops->postdisable(indio_dev); 1216 err_undo_config: 1217 iio_dev_opaque->currentmode = INDIO_DIRECT_MODE; 1218 indio_dev->active_scan_mask = NULL; 1219 1220 return ret; 1221 } 1222 1223 static int iio_disable_buffers(struct iio_dev *indio_dev) 1224 { 1225 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 1226 struct iio_buffer *buffer; 1227 int ret = 0; 1228 int ret2; 1229 1230 /* Wind down existing buffers - iff there are any */ 1231 if (list_empty(&iio_dev_opaque->buffer_list)) 1232 return 0; 1233 1234 /* 1235 * If things go wrong at some step in disable we still need to continue 1236 * to perform the other steps, otherwise we leave the device in a 1237 * inconsistent state. We return the error code for the first error we 1238 * encountered. 1239 */ 1240 1241 if (indio_dev->setup_ops->predisable) { 1242 ret2 = indio_dev->setup_ops->predisable(indio_dev); 1243 if (ret2 && !ret) 1244 ret = ret2; 1245 } 1246 1247 if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) { 1248 iio_trigger_detach_poll_func(indio_dev->trig, 1249 indio_dev->pollfunc); 1250 } 1251 1252 list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) { 1253 ret2 = iio_buffer_disable(buffer, indio_dev); 1254 if (ret2 && !ret) 1255 ret = ret2; 1256 } 1257 1258 if (indio_dev->setup_ops->postdisable) { 1259 ret2 = indio_dev->setup_ops->postdisable(indio_dev); 1260 if (ret2 && !ret) 1261 ret = ret2; 1262 } 1263 1264 iio_free_scan_mask(indio_dev, indio_dev->active_scan_mask); 1265 indio_dev->active_scan_mask = NULL; 1266 iio_dev_opaque->currentmode = INDIO_DIRECT_MODE; 1267 1268 return ret; 1269 } 1270 1271 static int __iio_update_buffers(struct iio_dev *indio_dev, 1272 struct iio_buffer *insert_buffer, 1273 struct iio_buffer *remove_buffer) 1274 { 1275 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 1276 struct iio_device_config new_config; 1277 int ret; 1278 1279 ret = iio_verify_update(indio_dev, insert_buffer, remove_buffer, 1280 &new_config); 1281 if (ret) 1282 return ret; 1283 1284 if (insert_buffer) { 1285 ret = iio_buffer_request_update(indio_dev, insert_buffer); 1286 if (ret) 1287 goto err_free_config; 1288 } 1289 1290 ret = iio_disable_buffers(indio_dev); 1291 if (ret) 1292 goto err_deactivate_all; 1293 1294 if (remove_buffer) 1295 iio_buffer_deactivate(remove_buffer); 1296 if (insert_buffer) 1297 iio_buffer_activate(indio_dev, insert_buffer); 1298 1299 /* If no buffers in list, we are done */ 1300 if (list_empty(&iio_dev_opaque->buffer_list)) 1301 return 0; 1302 1303 ret = iio_enable_buffers(indio_dev, &new_config); 1304 if (ret) 1305 goto err_deactivate_all; 1306 1307 return 0; 1308 1309 err_deactivate_all: 1310 /* 1311 * We've already verified that the config is valid earlier. If things go 1312 * wrong in either enable or disable the most likely reason is an IO 1313 * error from the device. In this case there is no good recovery 1314 * strategy. Just make sure to disable everything and leave the device 1315 * in a sane state. With a bit of luck the device might come back to 1316 * life again later and userspace can try again. 1317 */ 1318 iio_buffer_deactivate_all(indio_dev); 1319 1320 err_free_config: 1321 iio_free_scan_mask(indio_dev, new_config.scan_mask); 1322 return ret; 1323 } 1324 1325 int iio_update_buffers(struct iio_dev *indio_dev, 1326 struct iio_buffer *insert_buffer, 1327 struct iio_buffer *remove_buffer) 1328 { 1329 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 1330 1331 if (insert_buffer == remove_buffer) 1332 return 0; 1333 1334 if (insert_buffer && 1335 insert_buffer->direction == IIO_BUFFER_DIRECTION_OUT) 1336 return -EINVAL; 1337 1338 guard(mutex)(&iio_dev_opaque->info_exist_lock); 1339 guard(mutex)(&iio_dev_opaque->mlock); 1340 1341 if (insert_buffer && iio_buffer_is_active(insert_buffer)) 1342 insert_buffer = NULL; 1343 1344 if (remove_buffer && !iio_buffer_is_active(remove_buffer)) 1345 remove_buffer = NULL; 1346 1347 if (!insert_buffer && !remove_buffer) 1348 return 0; 1349 1350 if (!indio_dev->info) 1351 return -ENODEV; 1352 1353 return __iio_update_buffers(indio_dev, insert_buffer, remove_buffer); 1354 } 1355 EXPORT_SYMBOL_GPL(iio_update_buffers); 1356 1357 void iio_disable_all_buffers(struct iio_dev *indio_dev) 1358 { 1359 iio_disable_buffers(indio_dev); 1360 iio_buffer_deactivate_all(indio_dev); 1361 } 1362 1363 static ssize_t enable_store(struct device *dev, struct device_attribute *attr, 1364 const char *buf, size_t len) 1365 { 1366 int ret; 1367 bool requested_state; 1368 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 1369 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 1370 struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; 1371 bool inlist; 1372 1373 ret = kstrtobool(buf, &requested_state); 1374 if (ret < 0) 1375 return ret; 1376 1377 guard(mutex)(&iio_dev_opaque->mlock); 1378 1379 /* Find out if it is in the list */ 1380 inlist = iio_buffer_is_active(buffer); 1381 /* Already in desired state */ 1382 if (inlist == requested_state) 1383 return len; 1384 1385 if (requested_state) 1386 ret = __iio_update_buffers(indio_dev, buffer, NULL); 1387 else 1388 ret = __iio_update_buffers(indio_dev, NULL, buffer); 1389 if (ret) 1390 return ret; 1391 1392 return len; 1393 } 1394 1395 static ssize_t watermark_show(struct device *dev, struct device_attribute *attr, 1396 char *buf) 1397 { 1398 struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; 1399 1400 return sysfs_emit(buf, "%u\n", buffer->watermark); 1401 } 1402 1403 static ssize_t watermark_store(struct device *dev, 1404 struct device_attribute *attr, 1405 const char *buf, size_t len) 1406 { 1407 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 1408 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 1409 struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; 1410 unsigned int val; 1411 int ret; 1412 1413 ret = kstrtouint(buf, 10, &val); 1414 if (ret) 1415 return ret; 1416 if (!val) 1417 return -EINVAL; 1418 1419 guard(mutex)(&iio_dev_opaque->mlock); 1420 1421 if (val > buffer->length) 1422 return -EINVAL; 1423 1424 if (iio_buffer_is_active(buffer)) 1425 return -EBUSY; 1426 1427 buffer->watermark = val; 1428 1429 return len; 1430 } 1431 1432 static ssize_t data_available_show(struct device *dev, 1433 struct device_attribute *attr, char *buf) 1434 { 1435 struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; 1436 1437 return sysfs_emit(buf, "%zu\n", iio_buffer_data_available(buffer)); 1438 } 1439 1440 static ssize_t direction_show(struct device *dev, 1441 struct device_attribute *attr, 1442 char *buf) 1443 { 1444 struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; 1445 1446 switch (buffer->direction) { 1447 case IIO_BUFFER_DIRECTION_IN: 1448 return sysfs_emit(buf, "in\n"); 1449 case IIO_BUFFER_DIRECTION_OUT: 1450 return sysfs_emit(buf, "out\n"); 1451 default: 1452 return -EINVAL; 1453 } 1454 } 1455 1456 static DEVICE_ATTR_RW(length); 1457 static struct device_attribute dev_attr_length_ro = __ATTR_RO(length); 1458 static DEVICE_ATTR_RW(enable); 1459 static DEVICE_ATTR_RW(watermark); 1460 static struct device_attribute dev_attr_watermark_ro = __ATTR_RO(watermark); 1461 static DEVICE_ATTR_RO(data_available); 1462 static DEVICE_ATTR_RO(direction); 1463 1464 /* 1465 * When adding new attributes here, put the at the end, at least until 1466 * the code that handles the length/length_ro & watermark/watermark_ro 1467 * assignments gets cleaned up. Otherwise these can create some weird 1468 * duplicate attributes errors under some setups. 1469 */ 1470 static struct attribute *iio_buffer_attrs[] = { 1471 &dev_attr_length.attr, 1472 &dev_attr_enable.attr, 1473 &dev_attr_watermark.attr, 1474 &dev_attr_data_available.attr, 1475 &dev_attr_direction.attr, 1476 }; 1477 1478 #define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr) 1479 1480 static struct attribute *iio_buffer_wrap_attr(struct iio_buffer *buffer, 1481 struct attribute *attr) 1482 { 1483 struct device_attribute *dattr = to_dev_attr(attr); 1484 struct iio_dev_attr *iio_attr; 1485 1486 iio_attr = kzalloc(sizeof(*iio_attr), GFP_KERNEL); 1487 if (!iio_attr) 1488 return NULL; 1489 1490 iio_attr->buffer = buffer; 1491 memcpy(&iio_attr->dev_attr, dattr, sizeof(iio_attr->dev_attr)); 1492 iio_attr->dev_attr.attr.name = kstrdup_const(attr->name, GFP_KERNEL); 1493 if (!iio_attr->dev_attr.attr.name) { 1494 kfree(iio_attr); 1495 return NULL; 1496 } 1497 1498 sysfs_attr_init(&iio_attr->dev_attr.attr); 1499 1500 list_add(&iio_attr->l, &buffer->buffer_attr_list); 1501 1502 return &iio_attr->dev_attr.attr; 1503 } 1504 1505 static int iio_buffer_register_legacy_sysfs_groups(struct iio_dev *indio_dev, 1506 struct attribute **buffer_attrs, 1507 int buffer_attrcount, 1508 int scan_el_attrcount) 1509 { 1510 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 1511 struct attribute_group *group; 1512 struct attribute **attrs; 1513 int ret; 1514 1515 attrs = kcalloc(buffer_attrcount + 1, sizeof(*attrs), GFP_KERNEL); 1516 if (!attrs) 1517 return -ENOMEM; 1518 1519 memcpy(attrs, buffer_attrs, buffer_attrcount * sizeof(*attrs)); 1520 1521 group = &iio_dev_opaque->legacy_buffer_group; 1522 group->attrs = attrs; 1523 group->name = "buffer"; 1524 1525 ret = iio_device_register_sysfs_group(indio_dev, group); 1526 if (ret) 1527 goto error_free_buffer_attrs; 1528 1529 attrs = kcalloc(scan_el_attrcount + 1, sizeof(*attrs), GFP_KERNEL); 1530 if (!attrs) { 1531 ret = -ENOMEM; 1532 goto error_free_buffer_attrs; 1533 } 1534 1535 memcpy(attrs, &buffer_attrs[buffer_attrcount], 1536 scan_el_attrcount * sizeof(*attrs)); 1537 1538 group = &iio_dev_opaque->legacy_scan_el_group; 1539 group->attrs = attrs; 1540 group->name = "scan_elements"; 1541 1542 ret = iio_device_register_sysfs_group(indio_dev, group); 1543 if (ret) 1544 goto error_free_scan_el_attrs; 1545 1546 return 0; 1547 1548 error_free_scan_el_attrs: 1549 kfree(iio_dev_opaque->legacy_scan_el_group.attrs); 1550 error_free_buffer_attrs: 1551 kfree(iio_dev_opaque->legacy_buffer_group.attrs); 1552 1553 return ret; 1554 } 1555 1556 static void iio_buffer_unregister_legacy_sysfs_groups(struct iio_dev *indio_dev) 1557 { 1558 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 1559 1560 kfree(iio_dev_opaque->legacy_buffer_group.attrs); 1561 kfree(iio_dev_opaque->legacy_scan_el_group.attrs); 1562 } 1563 1564 static void iio_buffer_dmabuf_release(struct kref *ref) 1565 { 1566 struct iio_dmabuf_priv *priv = container_of(ref, struct iio_dmabuf_priv, ref); 1567 struct dma_buf_attachment *attach = priv->attach; 1568 struct iio_buffer *buffer = priv->buffer; 1569 struct dma_buf *dmabuf = attach->dmabuf; 1570 1571 dma_resv_lock(dmabuf->resv, NULL); 1572 dma_buf_unmap_attachment(attach, priv->sgt, priv->dir); 1573 dma_resv_unlock(dmabuf->resv); 1574 1575 buffer->access->detach_dmabuf(buffer, priv->block); 1576 1577 dma_buf_detach(attach->dmabuf, attach); 1578 dma_buf_put(dmabuf); 1579 kfree(priv); 1580 } 1581 1582 static void iio_buffer_dmabuf_get(struct dma_buf_attachment *attach) 1583 { 1584 struct iio_dmabuf_priv *priv = attach->importer_priv; 1585 1586 kref_get(&priv->ref); 1587 } 1588 1589 static void iio_buffer_dmabuf_put(struct dma_buf_attachment *attach) 1590 { 1591 struct iio_dmabuf_priv *priv = attach->importer_priv; 1592 1593 kref_put(&priv->ref, iio_buffer_dmabuf_release); 1594 } 1595 1596 static int iio_buffer_chrdev_release(struct inode *inode, struct file *filep) 1597 { 1598 struct iio_dev_buffer_pair *ib = filep->private_data; 1599 struct iio_dev *indio_dev = ib->indio_dev; 1600 struct iio_buffer *buffer = ib->buffer; 1601 struct iio_dmabuf_priv *priv, *tmp; 1602 1603 wake_up(&buffer->pollq); 1604 1605 guard(mutex)(&buffer->dmabufs_mutex); 1606 1607 /* Close all attached DMABUFs */ 1608 list_for_each_entry_safe(priv, tmp, &buffer->dmabufs, entry) { 1609 list_del_init(&priv->entry); 1610 iio_buffer_dmabuf_put(priv->attach); 1611 } 1612 1613 kfree(ib); 1614 clear_bit(IIO_BUSY_BIT_POS, &buffer->flags); 1615 iio_device_put(indio_dev); 1616 1617 return 0; 1618 } 1619 1620 static int iio_dma_resv_lock(struct dma_buf *dmabuf, bool nonblock) 1621 { 1622 if (!nonblock) 1623 return dma_resv_lock_interruptible(dmabuf->resv, NULL); 1624 1625 if (!dma_resv_trylock(dmabuf->resv)) 1626 return -EBUSY; 1627 1628 return 0; 1629 } 1630 1631 static struct dma_buf_attachment * 1632 iio_buffer_find_attachment(struct iio_dev_buffer_pair *ib, 1633 struct dma_buf *dmabuf, bool nonblock) 1634 { 1635 struct device *dev = ib->indio_dev->dev.parent; 1636 struct iio_buffer *buffer = ib->buffer; 1637 struct dma_buf_attachment *attach = NULL; 1638 struct iio_dmabuf_priv *priv; 1639 1640 guard(mutex)(&buffer->dmabufs_mutex); 1641 1642 list_for_each_entry(priv, &buffer->dmabufs, entry) { 1643 if (priv->attach->dev == dev 1644 && priv->attach->dmabuf == dmabuf) { 1645 attach = priv->attach; 1646 break; 1647 } 1648 } 1649 1650 if (attach) 1651 iio_buffer_dmabuf_get(attach); 1652 1653 return attach ?: ERR_PTR(-EPERM); 1654 } 1655 1656 static int iio_buffer_attach_dmabuf(struct iio_dev_buffer_pair *ib, 1657 int __user *user_fd, bool nonblock) 1658 { 1659 struct iio_dev *indio_dev = ib->indio_dev; 1660 struct iio_buffer *buffer = ib->buffer; 1661 struct dma_buf_attachment *attach; 1662 struct iio_dmabuf_priv *priv, *each; 1663 struct dma_buf *dmabuf; 1664 int err, fd; 1665 1666 if (!buffer->access->attach_dmabuf 1667 || !buffer->access->detach_dmabuf 1668 || !buffer->access->enqueue_dmabuf) 1669 return -EPERM; 1670 1671 if (copy_from_user(&fd, user_fd, sizeof(fd))) 1672 return -EFAULT; 1673 1674 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 1675 if (!priv) 1676 return -ENOMEM; 1677 1678 spin_lock_init(&priv->lock); 1679 priv->context = dma_fence_context_alloc(1); 1680 1681 dmabuf = dma_buf_get(fd); 1682 if (IS_ERR(dmabuf)) { 1683 err = PTR_ERR(dmabuf); 1684 goto err_free_priv; 1685 } 1686 1687 attach = dma_buf_attach(dmabuf, indio_dev->dev.parent); 1688 if (IS_ERR(attach)) { 1689 err = PTR_ERR(attach); 1690 goto err_dmabuf_put; 1691 } 1692 1693 err = iio_dma_resv_lock(dmabuf, nonblock); 1694 if (err) 1695 goto err_dmabuf_detach; 1696 1697 priv->dir = buffer->direction == IIO_BUFFER_DIRECTION_IN 1698 ? DMA_FROM_DEVICE : DMA_TO_DEVICE; 1699 1700 priv->sgt = dma_buf_map_attachment(attach, priv->dir); 1701 if (IS_ERR(priv->sgt)) { 1702 err = PTR_ERR(priv->sgt); 1703 dev_err(&indio_dev->dev, "Unable to map attachment: %d\n", err); 1704 goto err_resv_unlock; 1705 } 1706 1707 kref_init(&priv->ref); 1708 priv->buffer = buffer; 1709 priv->attach = attach; 1710 attach->importer_priv = priv; 1711 1712 priv->block = buffer->access->attach_dmabuf(buffer, attach); 1713 if (IS_ERR(priv->block)) { 1714 err = PTR_ERR(priv->block); 1715 goto err_dmabuf_unmap_attachment; 1716 } 1717 1718 dma_resv_unlock(dmabuf->resv); 1719 1720 mutex_lock(&buffer->dmabufs_mutex); 1721 1722 /* 1723 * Check whether we already have an attachment for this driver/DMABUF 1724 * combo. If we do, refuse to attach. 1725 */ 1726 list_for_each_entry(each, &buffer->dmabufs, entry) { 1727 if (each->attach->dev == indio_dev->dev.parent 1728 && each->attach->dmabuf == dmabuf) { 1729 /* 1730 * We unlocked the reservation object, so going through 1731 * the cleanup code would mean re-locking it first. 1732 * At this stage it is simpler to free the attachment 1733 * using iio_buffer_dma_put(). 1734 */ 1735 mutex_unlock(&buffer->dmabufs_mutex); 1736 iio_buffer_dmabuf_put(attach); 1737 return -EBUSY; 1738 } 1739 } 1740 1741 /* Otherwise, add the new attachment to our dmabufs list. */ 1742 list_add(&priv->entry, &buffer->dmabufs); 1743 mutex_unlock(&buffer->dmabufs_mutex); 1744 1745 return 0; 1746 1747 err_dmabuf_unmap_attachment: 1748 dma_buf_unmap_attachment(attach, priv->sgt, priv->dir); 1749 err_resv_unlock: 1750 dma_resv_unlock(dmabuf->resv); 1751 err_dmabuf_detach: 1752 dma_buf_detach(dmabuf, attach); 1753 err_dmabuf_put: 1754 dma_buf_put(dmabuf); 1755 err_free_priv: 1756 kfree(priv); 1757 1758 return err; 1759 } 1760 1761 static int iio_buffer_detach_dmabuf(struct iio_dev_buffer_pair *ib, 1762 int __user *user_req, bool nonblock) 1763 { 1764 struct iio_buffer *buffer = ib->buffer; 1765 struct iio_dev *indio_dev = ib->indio_dev; 1766 struct iio_dmabuf_priv *priv; 1767 struct dma_buf *dmabuf; 1768 int dmabuf_fd, ret = -EPERM; 1769 1770 if (copy_from_user(&dmabuf_fd, user_req, sizeof(dmabuf_fd))) 1771 return -EFAULT; 1772 1773 dmabuf = dma_buf_get(dmabuf_fd); 1774 if (IS_ERR(dmabuf)) 1775 return PTR_ERR(dmabuf); 1776 1777 guard(mutex)(&buffer->dmabufs_mutex); 1778 1779 list_for_each_entry(priv, &buffer->dmabufs, entry) { 1780 if (priv->attach->dev == indio_dev->dev.parent 1781 && priv->attach->dmabuf == dmabuf) { 1782 list_del(&priv->entry); 1783 1784 /* Unref the reference from iio_buffer_attach_dmabuf() */ 1785 iio_buffer_dmabuf_put(priv->attach); 1786 ret = 0; 1787 break; 1788 } 1789 } 1790 1791 dma_buf_put(dmabuf); 1792 1793 return ret; 1794 } 1795 1796 static const char * 1797 iio_buffer_dma_fence_get_driver_name(struct dma_fence *fence) 1798 { 1799 return "iio"; 1800 } 1801 1802 static void iio_buffer_dma_fence_release(struct dma_fence *fence) 1803 { 1804 struct iio_dma_fence *iio_fence = 1805 container_of(fence, struct iio_dma_fence, base); 1806 1807 kfree(iio_fence); 1808 } 1809 1810 static const struct dma_fence_ops iio_buffer_dma_fence_ops = { 1811 .get_driver_name = iio_buffer_dma_fence_get_driver_name, 1812 .get_timeline_name = iio_buffer_dma_fence_get_driver_name, 1813 .release = iio_buffer_dma_fence_release, 1814 }; 1815 1816 static int iio_buffer_enqueue_dmabuf(struct iio_dev_buffer_pair *ib, 1817 struct iio_dmabuf __user *iio_dmabuf_req, 1818 bool nonblock) 1819 { 1820 struct iio_buffer *buffer = ib->buffer; 1821 struct iio_dmabuf iio_dmabuf; 1822 struct dma_buf_attachment *attach; 1823 struct iio_dmabuf_priv *priv; 1824 struct iio_dma_fence *fence; 1825 struct dma_buf *dmabuf; 1826 unsigned long timeout; 1827 bool cookie, cyclic, dma_to_ram; 1828 long retl; 1829 u32 seqno; 1830 int ret; 1831 1832 if (copy_from_user(&iio_dmabuf, iio_dmabuf_req, sizeof(iio_dmabuf))) 1833 return -EFAULT; 1834 1835 if (iio_dmabuf.flags & ~IIO_BUFFER_DMABUF_SUPPORTED_FLAGS) 1836 return -EINVAL; 1837 1838 cyclic = iio_dmabuf.flags & IIO_BUFFER_DMABUF_CYCLIC; 1839 1840 /* Cyclic flag is only supported on output buffers */ 1841 if (cyclic && buffer->direction != IIO_BUFFER_DIRECTION_OUT) 1842 return -EINVAL; 1843 1844 dmabuf = dma_buf_get(iio_dmabuf.fd); 1845 if (IS_ERR(dmabuf)) 1846 return PTR_ERR(dmabuf); 1847 1848 if (!iio_dmabuf.bytes_used || iio_dmabuf.bytes_used > dmabuf->size) { 1849 ret = -EINVAL; 1850 goto err_dmabuf_put; 1851 } 1852 1853 attach = iio_buffer_find_attachment(ib, dmabuf, nonblock); 1854 if (IS_ERR(attach)) { 1855 ret = PTR_ERR(attach); 1856 goto err_dmabuf_put; 1857 } 1858 1859 priv = attach->importer_priv; 1860 1861 fence = kmalloc(sizeof(*fence), GFP_KERNEL); 1862 if (!fence) { 1863 ret = -ENOMEM; 1864 goto err_attachment_put; 1865 } 1866 1867 fence->priv = priv; 1868 1869 seqno = atomic_add_return(1, &priv->seqno); 1870 1871 /* 1872 * The transfers are guaranteed to be processed in the order they are 1873 * enqueued, so we can use a simple incrementing sequence number for 1874 * the dma_fence. 1875 */ 1876 dma_fence_init(&fence->base, &iio_buffer_dma_fence_ops, 1877 &priv->lock, priv->context, seqno); 1878 1879 ret = iio_dma_resv_lock(dmabuf, nonblock); 1880 if (ret) 1881 goto err_fence_put; 1882 1883 timeout = nonblock ? 0 : msecs_to_jiffies(DMABUF_ENQUEUE_TIMEOUT_MS); 1884 dma_to_ram = buffer->direction == IIO_BUFFER_DIRECTION_IN; 1885 1886 /* Make sure we don't have writers */ 1887 retl = dma_resv_wait_timeout(dmabuf->resv, 1888 dma_resv_usage_rw(dma_to_ram), 1889 true, timeout); 1890 if (retl == 0) 1891 retl = -EBUSY; 1892 if (retl < 0) { 1893 ret = (int)retl; 1894 goto err_resv_unlock; 1895 } 1896 1897 if (buffer->access->lock_queue) 1898 buffer->access->lock_queue(buffer); 1899 1900 ret = dma_resv_reserve_fences(dmabuf->resv, 1); 1901 if (ret) 1902 goto err_queue_unlock; 1903 1904 dma_resv_add_fence(dmabuf->resv, &fence->base, 1905 dma_to_ram ? DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ); 1906 dma_resv_unlock(dmabuf->resv); 1907 1908 cookie = dma_fence_begin_signalling(); 1909 1910 ret = buffer->access->enqueue_dmabuf(buffer, priv->block, &fence->base, 1911 priv->sgt, iio_dmabuf.bytes_used, 1912 cyclic); 1913 if (ret) { 1914 /* 1915 * DMABUF enqueue failed, but we already added the fence. 1916 * Signal the error through the fence completion mechanism. 1917 */ 1918 iio_buffer_signal_dmabuf_done(&fence->base, ret); 1919 } 1920 1921 if (buffer->access->unlock_queue) 1922 buffer->access->unlock_queue(buffer); 1923 1924 dma_fence_end_signalling(cookie); 1925 dma_buf_put(dmabuf); 1926 1927 return ret; 1928 1929 err_queue_unlock: 1930 if (buffer->access->unlock_queue) 1931 buffer->access->unlock_queue(buffer); 1932 err_resv_unlock: 1933 dma_resv_unlock(dmabuf->resv); 1934 err_fence_put: 1935 dma_fence_put(&fence->base); 1936 err_attachment_put: 1937 iio_buffer_dmabuf_put(attach); 1938 err_dmabuf_put: 1939 dma_buf_put(dmabuf); 1940 1941 return ret; 1942 } 1943 1944 static void iio_buffer_cleanup(struct work_struct *work) 1945 { 1946 struct iio_dma_fence *fence = 1947 container_of(work, struct iio_dma_fence, work); 1948 struct iio_dmabuf_priv *priv = fence->priv; 1949 struct dma_buf_attachment *attach = priv->attach; 1950 1951 dma_fence_put(&fence->base); 1952 iio_buffer_dmabuf_put(attach); 1953 } 1954 1955 void iio_buffer_signal_dmabuf_done(struct dma_fence *fence, int ret) 1956 { 1957 struct iio_dma_fence *iio_fence = 1958 container_of(fence, struct iio_dma_fence, base); 1959 bool cookie = dma_fence_begin_signalling(); 1960 1961 /* 1962 * Get a reference to the fence, so that it's not freed as soon as 1963 * it's signaled. 1964 */ 1965 dma_fence_get(fence); 1966 1967 fence->error = ret; 1968 dma_fence_signal(fence); 1969 dma_fence_end_signalling(cookie); 1970 1971 /* 1972 * The fence will be unref'd in iio_buffer_cleanup. 1973 * It can't be done here, as the unref functions might try to lock the 1974 * resv object, which can deadlock. 1975 */ 1976 INIT_WORK(&iio_fence->work, iio_buffer_cleanup); 1977 schedule_work(&iio_fence->work); 1978 } 1979 EXPORT_SYMBOL_GPL(iio_buffer_signal_dmabuf_done); 1980 1981 static long iio_buffer_chrdev_ioctl(struct file *filp, 1982 unsigned int cmd, unsigned long arg) 1983 { 1984 struct iio_dev_buffer_pair *ib = filp->private_data; 1985 void __user *_arg = (void __user *)arg; 1986 bool nonblock = filp->f_flags & O_NONBLOCK; 1987 1988 switch (cmd) { 1989 case IIO_BUFFER_DMABUF_ATTACH_IOCTL: 1990 return iio_buffer_attach_dmabuf(ib, _arg, nonblock); 1991 case IIO_BUFFER_DMABUF_DETACH_IOCTL: 1992 return iio_buffer_detach_dmabuf(ib, _arg, nonblock); 1993 case IIO_BUFFER_DMABUF_ENQUEUE_IOCTL: 1994 return iio_buffer_enqueue_dmabuf(ib, _arg, nonblock); 1995 default: 1996 return -EINVAL; 1997 } 1998 } 1999 2000 static const struct file_operations iio_buffer_chrdev_fileops = { 2001 .owner = THIS_MODULE, 2002 .llseek = noop_llseek, 2003 .read = iio_buffer_read, 2004 .write = iio_buffer_write, 2005 .unlocked_ioctl = iio_buffer_chrdev_ioctl, 2006 .compat_ioctl = compat_ptr_ioctl, 2007 .poll = iio_buffer_poll, 2008 .release = iio_buffer_chrdev_release, 2009 }; 2010 2011 static long iio_device_buffer_getfd(struct iio_dev *indio_dev, unsigned long arg) 2012 { 2013 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 2014 int __user *ival = (int __user *)arg; 2015 struct iio_dev_buffer_pair *ib; 2016 struct iio_buffer *buffer; 2017 int fd, idx, ret; 2018 2019 if (copy_from_user(&idx, ival, sizeof(idx))) 2020 return -EFAULT; 2021 2022 if (idx >= iio_dev_opaque->attached_buffers_cnt) 2023 return -ENODEV; 2024 2025 iio_device_get(indio_dev); 2026 2027 buffer = iio_dev_opaque->attached_buffers[idx]; 2028 2029 if (test_and_set_bit(IIO_BUSY_BIT_POS, &buffer->flags)) { 2030 ret = -EBUSY; 2031 goto error_iio_dev_put; 2032 } 2033 2034 ib = kzalloc(sizeof(*ib), GFP_KERNEL); 2035 if (!ib) { 2036 ret = -ENOMEM; 2037 goto error_clear_busy_bit; 2038 } 2039 2040 ib->indio_dev = indio_dev; 2041 ib->buffer = buffer; 2042 2043 fd = anon_inode_getfd("iio:buffer", &iio_buffer_chrdev_fileops, 2044 ib, O_RDWR | O_CLOEXEC); 2045 if (fd < 0) { 2046 ret = fd; 2047 goto error_free_ib; 2048 } 2049 2050 if (copy_to_user(ival, &fd, sizeof(fd))) { 2051 /* 2052 * "Leak" the fd, as there's not much we can do about this 2053 * anyway. 'fd' might have been closed already, as 2054 * anon_inode_getfd() called fd_install() on it, which made 2055 * it reachable by userland. 2056 * 2057 * Instead of allowing a malicious user to play tricks with 2058 * us, rely on the process exit path to do any necessary 2059 * cleanup, as in releasing the file, if still needed. 2060 */ 2061 return -EFAULT; 2062 } 2063 2064 return 0; 2065 2066 error_free_ib: 2067 kfree(ib); 2068 error_clear_busy_bit: 2069 clear_bit(IIO_BUSY_BIT_POS, &buffer->flags); 2070 error_iio_dev_put: 2071 iio_device_put(indio_dev); 2072 return ret; 2073 } 2074 2075 static long iio_device_buffer_ioctl(struct iio_dev *indio_dev, struct file *filp, 2076 unsigned int cmd, unsigned long arg) 2077 { 2078 switch (cmd) { 2079 case IIO_BUFFER_GET_FD_IOCTL: 2080 return iio_device_buffer_getfd(indio_dev, arg); 2081 default: 2082 return IIO_IOCTL_UNHANDLED; 2083 } 2084 } 2085 2086 static int iio_channel_validate_scan_type(struct device *dev, int ch, 2087 const struct iio_scan_type *scan_type) 2088 { 2089 /* Verify that sample bits fit into storage */ 2090 if (scan_type->storagebits < scan_type->realbits + scan_type->shift) { 2091 dev_err(dev, 2092 "Channel %d storagebits (%d) < shifted realbits (%d + %d)\n", 2093 ch, scan_type->storagebits, 2094 scan_type->realbits, 2095 scan_type->shift); 2096 return -EINVAL; 2097 } 2098 2099 return 0; 2100 } 2101 2102 static int __iio_buffer_alloc_sysfs_and_mask(struct iio_buffer *buffer, 2103 struct iio_dev *indio_dev, 2104 int index) 2105 { 2106 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 2107 struct iio_dev_attr *p; 2108 const struct iio_dev_attr *id_attr; 2109 struct attribute **attr; 2110 int ret, i, attrn, scan_el_attrcount, buffer_attrcount; 2111 const struct iio_chan_spec *channels; 2112 2113 buffer_attrcount = 0; 2114 if (buffer->attrs) { 2115 while (buffer->attrs[buffer_attrcount]) 2116 buffer_attrcount++; 2117 } 2118 buffer_attrcount += ARRAY_SIZE(iio_buffer_attrs); 2119 2120 scan_el_attrcount = 0; 2121 INIT_LIST_HEAD(&buffer->buffer_attr_list); 2122 channels = indio_dev->channels; 2123 if (channels) { 2124 /* new magic */ 2125 for (i = 0; i < indio_dev->num_channels; i++) { 2126 const struct iio_scan_type *scan_type; 2127 2128 if (channels[i].scan_index < 0) 2129 continue; 2130 2131 if (channels[i].has_ext_scan_type) { 2132 int j; 2133 2134 /* 2135 * get_current_scan_type is required when using 2136 * extended scan types. 2137 */ 2138 if (!indio_dev->info->get_current_scan_type) { 2139 ret = -EINVAL; 2140 goto error_cleanup_dynamic; 2141 } 2142 2143 for (j = 0; j < channels[i].num_ext_scan_type; j++) { 2144 scan_type = &channels[i].ext_scan_type[j]; 2145 2146 ret = iio_channel_validate_scan_type( 2147 &indio_dev->dev, i, scan_type); 2148 if (ret) 2149 goto error_cleanup_dynamic; 2150 } 2151 } else { 2152 scan_type = &channels[i].scan_type; 2153 2154 ret = iio_channel_validate_scan_type( 2155 &indio_dev->dev, i, scan_type); 2156 if (ret) 2157 goto error_cleanup_dynamic; 2158 } 2159 2160 ret = iio_buffer_add_channel_sysfs(indio_dev, buffer, 2161 &channels[i]); 2162 if (ret < 0) 2163 goto error_cleanup_dynamic; 2164 scan_el_attrcount += ret; 2165 if (channels[i].type == IIO_TIMESTAMP) 2166 iio_dev_opaque->scan_index_timestamp = 2167 channels[i].scan_index; 2168 } 2169 if (indio_dev->masklength && !buffer->scan_mask) { 2170 buffer->scan_mask = bitmap_zalloc(indio_dev->masklength, 2171 GFP_KERNEL); 2172 if (!buffer->scan_mask) { 2173 ret = -ENOMEM; 2174 goto error_cleanup_dynamic; 2175 } 2176 } 2177 } 2178 2179 attrn = buffer_attrcount + scan_el_attrcount; 2180 attr = kcalloc(attrn + 1, sizeof(*attr), GFP_KERNEL); 2181 if (!attr) { 2182 ret = -ENOMEM; 2183 goto error_free_scan_mask; 2184 } 2185 2186 memcpy(attr, iio_buffer_attrs, sizeof(iio_buffer_attrs)); 2187 if (!buffer->access->set_length) 2188 attr[0] = &dev_attr_length_ro.attr; 2189 2190 if (buffer->access->flags & INDIO_BUFFER_FLAG_FIXED_WATERMARK) 2191 attr[2] = &dev_attr_watermark_ro.attr; 2192 2193 if (buffer->attrs) 2194 for (i = 0, id_attr = buffer->attrs[i]; 2195 (id_attr = buffer->attrs[i]); i++) 2196 attr[ARRAY_SIZE(iio_buffer_attrs) + i] = 2197 (struct attribute *)&id_attr->dev_attr.attr; 2198 2199 buffer->buffer_group.attrs = attr; 2200 2201 for (i = 0; i < buffer_attrcount; i++) { 2202 struct attribute *wrapped; 2203 2204 wrapped = iio_buffer_wrap_attr(buffer, attr[i]); 2205 if (!wrapped) { 2206 ret = -ENOMEM; 2207 goto error_free_buffer_attrs; 2208 } 2209 attr[i] = wrapped; 2210 } 2211 2212 attrn = 0; 2213 list_for_each_entry(p, &buffer->buffer_attr_list, l) 2214 attr[attrn++] = &p->dev_attr.attr; 2215 2216 buffer->buffer_group.name = kasprintf(GFP_KERNEL, "buffer%d", index); 2217 if (!buffer->buffer_group.name) { 2218 ret = -ENOMEM; 2219 goto error_free_buffer_attrs; 2220 } 2221 2222 ret = iio_device_register_sysfs_group(indio_dev, &buffer->buffer_group); 2223 if (ret) 2224 goto error_free_buffer_attr_group_name; 2225 2226 /* we only need to register the legacy groups for the first buffer */ 2227 if (index > 0) 2228 return 0; 2229 2230 ret = iio_buffer_register_legacy_sysfs_groups(indio_dev, attr, 2231 buffer_attrcount, 2232 scan_el_attrcount); 2233 if (ret) 2234 goto error_free_buffer_attr_group_name; 2235 2236 return 0; 2237 2238 error_free_buffer_attr_group_name: 2239 kfree(buffer->buffer_group.name); 2240 error_free_buffer_attrs: 2241 kfree(buffer->buffer_group.attrs); 2242 error_free_scan_mask: 2243 bitmap_free(buffer->scan_mask); 2244 error_cleanup_dynamic: 2245 iio_free_chan_devattr_list(&buffer->buffer_attr_list); 2246 2247 return ret; 2248 } 2249 2250 static void __iio_buffer_free_sysfs_and_mask(struct iio_buffer *buffer, 2251 struct iio_dev *indio_dev, 2252 int index) 2253 { 2254 if (index == 0) 2255 iio_buffer_unregister_legacy_sysfs_groups(indio_dev); 2256 bitmap_free(buffer->scan_mask); 2257 kfree(buffer->buffer_group.name); 2258 kfree(buffer->buffer_group.attrs); 2259 iio_free_chan_devattr_list(&buffer->buffer_attr_list); 2260 } 2261 2262 int iio_buffers_alloc_sysfs_and_mask(struct iio_dev *indio_dev) 2263 { 2264 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 2265 const struct iio_chan_spec *channels; 2266 struct iio_buffer *buffer; 2267 int ret, i, idx; 2268 size_t sz; 2269 2270 channels = indio_dev->channels; 2271 if (channels) { 2272 int ml = 0; 2273 2274 for (i = 0; i < indio_dev->num_channels; i++) 2275 ml = max(ml, channels[i].scan_index + 1); 2276 indio_dev->masklength = ml; 2277 } 2278 2279 if (!iio_dev_opaque->attached_buffers_cnt) 2280 return 0; 2281 2282 for (idx = 0; idx < iio_dev_opaque->attached_buffers_cnt; idx++) { 2283 buffer = iio_dev_opaque->attached_buffers[idx]; 2284 ret = __iio_buffer_alloc_sysfs_and_mask(buffer, indio_dev, idx); 2285 if (ret) 2286 goto error_unwind_sysfs_and_mask; 2287 } 2288 2289 sz = sizeof(*iio_dev_opaque->buffer_ioctl_handler); 2290 iio_dev_opaque->buffer_ioctl_handler = kzalloc(sz, GFP_KERNEL); 2291 if (!iio_dev_opaque->buffer_ioctl_handler) { 2292 ret = -ENOMEM; 2293 goto error_unwind_sysfs_and_mask; 2294 } 2295 2296 iio_dev_opaque->buffer_ioctl_handler->ioctl = iio_device_buffer_ioctl; 2297 iio_device_ioctl_handler_register(indio_dev, 2298 iio_dev_opaque->buffer_ioctl_handler); 2299 2300 return 0; 2301 2302 error_unwind_sysfs_and_mask: 2303 while (idx--) { 2304 buffer = iio_dev_opaque->attached_buffers[idx]; 2305 __iio_buffer_free_sysfs_and_mask(buffer, indio_dev, idx); 2306 } 2307 return ret; 2308 } 2309 2310 void iio_buffers_free_sysfs_and_mask(struct iio_dev *indio_dev) 2311 { 2312 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 2313 struct iio_buffer *buffer; 2314 int i; 2315 2316 if (!iio_dev_opaque->attached_buffers_cnt) 2317 return; 2318 2319 iio_device_ioctl_handler_unregister(iio_dev_opaque->buffer_ioctl_handler); 2320 kfree(iio_dev_opaque->buffer_ioctl_handler); 2321 2322 for (i = iio_dev_opaque->attached_buffers_cnt - 1; i >= 0; i--) { 2323 buffer = iio_dev_opaque->attached_buffers[i]; 2324 __iio_buffer_free_sysfs_and_mask(buffer, indio_dev, i); 2325 } 2326 } 2327 2328 /** 2329 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected 2330 * @indio_dev: the iio device 2331 * @mask: scan mask to be checked 2332 * 2333 * Return true if exactly one bit is set in the scan mask, false otherwise. It 2334 * can be used for devices where only one channel can be active for sampling at 2335 * a time. 2336 */ 2337 bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev, 2338 const unsigned long *mask) 2339 { 2340 return bitmap_weight(mask, indio_dev->masklength) == 1; 2341 } 2342 EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot); 2343 2344 static const void *iio_demux(struct iio_buffer *buffer, 2345 const void *datain) 2346 { 2347 struct iio_demux_table *t; 2348 2349 if (list_empty(&buffer->demux_list)) 2350 return datain; 2351 list_for_each_entry(t, &buffer->demux_list, l) 2352 memcpy(buffer->demux_bounce + t->to, 2353 datain + t->from, t->length); 2354 2355 return buffer->demux_bounce; 2356 } 2357 2358 static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data) 2359 { 2360 const void *dataout = iio_demux(buffer, data); 2361 int ret; 2362 2363 ret = buffer->access->store_to(buffer, dataout); 2364 if (ret) 2365 return ret; 2366 2367 /* 2368 * We can't just test for watermark to decide if we wake the poll queue 2369 * because read may request less samples than the watermark. 2370 */ 2371 wake_up_interruptible_poll(&buffer->pollq, EPOLLIN | EPOLLRDNORM); 2372 return 0; 2373 } 2374 2375 /** 2376 * iio_push_to_buffers() - push to a registered buffer. 2377 * @indio_dev: iio_dev structure for device. 2378 * @data: Full scan. 2379 */ 2380 int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data) 2381 { 2382 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 2383 int ret; 2384 struct iio_buffer *buf; 2385 2386 list_for_each_entry(buf, &iio_dev_opaque->buffer_list, buffer_list) { 2387 ret = iio_push_to_buffer(buf, data); 2388 if (ret < 0) 2389 return ret; 2390 } 2391 2392 return 0; 2393 } 2394 EXPORT_SYMBOL_GPL(iio_push_to_buffers); 2395 2396 /** 2397 * iio_push_to_buffers_with_ts_unaligned() - push to registered buffer, 2398 * no alignment or space requirements. 2399 * @indio_dev: iio_dev structure for device. 2400 * @data: channel data excluding the timestamp. 2401 * @data_sz: size of data. 2402 * @timestamp: timestamp for the sample data. 2403 * 2404 * This special variant of iio_push_to_buffers_with_timestamp() does 2405 * not require space for the timestamp, or 8 byte alignment of data. 2406 * It does however require an allocation on first call and additional 2407 * copies on all calls, so should be avoided if possible. 2408 */ 2409 int iio_push_to_buffers_with_ts_unaligned(struct iio_dev *indio_dev, 2410 const void *data, 2411 size_t data_sz, 2412 int64_t timestamp) 2413 { 2414 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 2415 2416 /* 2417 * Conservative estimate - we can always safely copy the minimum 2418 * of either the data provided or the length of the destination buffer. 2419 * This relaxed limit allows the calling drivers to be lax about 2420 * tracking the size of the data they are pushing, at the cost of 2421 * unnecessary copying of padding. 2422 */ 2423 data_sz = min_t(size_t, indio_dev->scan_bytes, data_sz); 2424 if (iio_dev_opaque->bounce_buffer_size != indio_dev->scan_bytes) { 2425 void *bb; 2426 2427 bb = devm_krealloc(&indio_dev->dev, 2428 iio_dev_opaque->bounce_buffer, 2429 indio_dev->scan_bytes, GFP_KERNEL); 2430 if (!bb) 2431 return -ENOMEM; 2432 iio_dev_opaque->bounce_buffer = bb; 2433 iio_dev_opaque->bounce_buffer_size = indio_dev->scan_bytes; 2434 } 2435 memcpy(iio_dev_opaque->bounce_buffer, data, data_sz); 2436 return iio_push_to_buffers_with_timestamp(indio_dev, 2437 iio_dev_opaque->bounce_buffer, 2438 timestamp); 2439 } 2440 EXPORT_SYMBOL_GPL(iio_push_to_buffers_with_ts_unaligned); 2441 2442 /** 2443 * iio_buffer_release() - Free a buffer's resources 2444 * @ref: Pointer to the kref embedded in the iio_buffer struct 2445 * 2446 * This function is called when the last reference to the buffer has been 2447 * dropped. It will typically free all resources allocated by the buffer. Do not 2448 * call this function manually, always use iio_buffer_put() when done using a 2449 * buffer. 2450 */ 2451 static void iio_buffer_release(struct kref *ref) 2452 { 2453 struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref); 2454 2455 mutex_destroy(&buffer->dmabufs_mutex); 2456 buffer->access->release(buffer); 2457 } 2458 2459 /** 2460 * iio_buffer_get() - Grab a reference to the buffer 2461 * @buffer: The buffer to grab a reference for, may be NULL 2462 * 2463 * Returns the pointer to the buffer that was passed into the function. 2464 */ 2465 struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer) 2466 { 2467 if (buffer) 2468 kref_get(&buffer->ref); 2469 2470 return buffer; 2471 } 2472 EXPORT_SYMBOL_GPL(iio_buffer_get); 2473 2474 /** 2475 * iio_buffer_put() - Release the reference to the buffer 2476 * @buffer: The buffer to release the reference for, may be NULL 2477 */ 2478 void iio_buffer_put(struct iio_buffer *buffer) 2479 { 2480 if (buffer) 2481 kref_put(&buffer->ref, iio_buffer_release); 2482 } 2483 EXPORT_SYMBOL_GPL(iio_buffer_put); 2484 2485 /** 2486 * iio_device_attach_buffer - Attach a buffer to a IIO device 2487 * @indio_dev: The device the buffer should be attached to 2488 * @buffer: The buffer to attach to the device 2489 * 2490 * Return 0 if successful, negative if error. 2491 * 2492 * This function attaches a buffer to a IIO device. The buffer stays attached to 2493 * the device until the device is freed. For legacy reasons, the first attached 2494 * buffer will also be assigned to 'indio_dev->buffer'. 2495 * The array allocated here, will be free'd via the iio_device_detach_buffers() 2496 * call which is handled by the iio_device_free(). 2497 */ 2498 int iio_device_attach_buffer(struct iio_dev *indio_dev, 2499 struct iio_buffer *buffer) 2500 { 2501 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 2502 struct iio_buffer **new, **old = iio_dev_opaque->attached_buffers; 2503 unsigned int cnt = iio_dev_opaque->attached_buffers_cnt; 2504 2505 cnt++; 2506 2507 new = krealloc(old, sizeof(*new) * cnt, GFP_KERNEL); 2508 if (!new) 2509 return -ENOMEM; 2510 iio_dev_opaque->attached_buffers = new; 2511 2512 buffer = iio_buffer_get(buffer); 2513 2514 /* first buffer is legacy; attach it to the IIO device directly */ 2515 if (!indio_dev->buffer) 2516 indio_dev->buffer = buffer; 2517 2518 iio_dev_opaque->attached_buffers[cnt - 1] = buffer; 2519 iio_dev_opaque->attached_buffers_cnt = cnt; 2520 2521 return 0; 2522 } 2523 EXPORT_SYMBOL_GPL(iio_device_attach_buffer); 2524