1 // SPDX-License-Identifier: GPL-2.0-only 2 /* The industrial I/O core 3 * 4 * Copyright (c) 2008 Jonathan Cameron 5 * 6 * Handling of buffer allocation / resizing. 7 * 8 * Things to look at here. 9 * - Better memory allocation techniques? 10 * - Alternative access techniques? 11 */ 12 #include <linux/atomic.h> 13 #include <linux/anon_inodes.h> 14 #include <linux/cleanup.h> 15 #include <linux/kernel.h> 16 #include <linux/export.h> 17 #include <linux/device.h> 18 #include <linux/dma-buf.h> 19 #include <linux/dma-fence.h> 20 #include <linux/dma-resv.h> 21 #include <linux/file.h> 22 #include <linux/fs.h> 23 #include <linux/cdev.h> 24 #include <linux/slab.h> 25 #include <linux/mm.h> 26 #include <linux/poll.h> 27 #include <linux/sched/signal.h> 28 29 #include <linux/iio/iio.h> 30 #include <linux/iio/iio-opaque.h> 31 #include "iio_core.h" 32 #include "iio_core_trigger.h" 33 #include <linux/iio/sysfs.h> 34 #include <linux/iio/buffer.h> 35 #include <linux/iio/buffer_impl.h> 36 37 #define DMABUF_ENQUEUE_TIMEOUT_MS 5000 38 39 MODULE_IMPORT_NS("DMA_BUF"); 40 41 struct iio_dmabuf_priv { 42 struct list_head entry; 43 struct kref ref; 44 45 struct iio_buffer *buffer; 46 struct iio_dma_buffer_block *block; 47 48 u64 context; 49 50 /* Spinlock used for locking the dma_fence */ 51 spinlock_t lock; 52 53 struct dma_buf_attachment *attach; 54 struct sg_table *sgt; 55 enum dma_data_direction dir; 56 atomic_t seqno; 57 }; 58 59 struct iio_dma_fence { 60 struct dma_fence base; 61 struct iio_dmabuf_priv *priv; 62 struct work_struct work; 63 }; 64 65 static const char * const iio_endian_prefix[] = { 66 [IIO_BE] = "be", 67 [IIO_LE] = "le", 68 }; 69 70 static bool iio_buffer_is_active(struct iio_buffer *buf) 71 { 72 return !list_empty(&buf->buffer_list); 73 } 74 75 static size_t iio_buffer_data_available(struct iio_buffer *buf) 76 { 77 return buf->access->data_available(buf); 78 } 79 80 static int iio_buffer_flush_hwfifo(struct iio_dev *indio_dev, 81 struct iio_buffer *buf, size_t required) 82 { 83 if (!indio_dev->info->hwfifo_flush_to_buffer) 84 return -ENODEV; 85 86 return indio_dev->info->hwfifo_flush_to_buffer(indio_dev, required); 87 } 88 89 static bool iio_buffer_ready(struct iio_dev *indio_dev, struct iio_buffer *buf, 90 size_t to_wait, int to_flush) 91 { 92 size_t avail; 93 int flushed = 0; 94 95 /* wakeup if the device was unregistered */ 96 if (!indio_dev->info) 97 return true; 98 99 /* drain the buffer if it was disabled */ 100 if (!iio_buffer_is_active(buf)) { 101 to_wait = min_t(size_t, to_wait, 1); 102 to_flush = 0; 103 } 104 105 avail = iio_buffer_data_available(buf); 106 107 if (avail >= to_wait) { 108 /* force a flush for non-blocking reads */ 109 if (!to_wait && avail < to_flush) 110 iio_buffer_flush_hwfifo(indio_dev, buf, 111 to_flush - avail); 112 return true; 113 } 114 115 if (to_flush) 116 flushed = iio_buffer_flush_hwfifo(indio_dev, buf, 117 to_wait - avail); 118 if (flushed <= 0) 119 return false; 120 121 if (avail + flushed >= to_wait) 122 return true; 123 124 return false; 125 } 126 127 /** 128 * iio_buffer_read() - chrdev read for buffer access 129 * @filp: File structure pointer for the char device 130 * @buf: Destination buffer for iio buffer read 131 * @n: First n bytes to read 132 * @f_ps: Long offset provided by the user as a seek position 133 * 134 * This function relies on all buffer implementations having an 135 * iio_buffer as their first element. 136 * 137 * Return: negative values corresponding to error codes or ret != 0 138 * for ending the reading activity 139 **/ 140 static ssize_t iio_buffer_read(struct file *filp, char __user *buf, 141 size_t n, loff_t *f_ps) 142 { 143 struct iio_dev_buffer_pair *ib = filp->private_data; 144 struct iio_buffer *rb = ib->buffer; 145 struct iio_dev *indio_dev = ib->indio_dev; 146 DEFINE_WAIT_FUNC(wait, woken_wake_function); 147 size_t datum_size; 148 size_t to_wait; 149 int ret = 0; 150 151 if (!indio_dev->info) 152 return -ENODEV; 153 154 if (!rb || !rb->access->read) 155 return -EINVAL; 156 157 if (rb->direction != IIO_BUFFER_DIRECTION_IN) 158 return -EPERM; 159 160 datum_size = rb->bytes_per_datum; 161 162 /* 163 * If datum_size is 0 there will never be anything to read from the 164 * buffer, so signal end of file now. 165 */ 166 if (!datum_size) 167 return 0; 168 169 if (filp->f_flags & O_NONBLOCK) 170 to_wait = 0; 171 else 172 to_wait = min_t(size_t, n / datum_size, rb->watermark); 173 174 add_wait_queue(&rb->pollq, &wait); 175 do { 176 if (!indio_dev->info) { 177 ret = -ENODEV; 178 break; 179 } 180 181 if (!iio_buffer_ready(indio_dev, rb, to_wait, n / datum_size)) { 182 if (signal_pending(current)) { 183 ret = -ERESTARTSYS; 184 break; 185 } 186 187 wait_woken(&wait, TASK_INTERRUPTIBLE, 188 MAX_SCHEDULE_TIMEOUT); 189 continue; 190 } 191 192 ret = rb->access->read(rb, n, buf); 193 if (ret == 0 && (filp->f_flags & O_NONBLOCK)) 194 ret = -EAGAIN; 195 } while (ret == 0); 196 remove_wait_queue(&rb->pollq, &wait); 197 198 return ret; 199 } 200 201 static size_t iio_buffer_space_available(struct iio_buffer *buf) 202 { 203 if (buf->access->space_available) 204 return buf->access->space_available(buf); 205 206 return SIZE_MAX; 207 } 208 209 static ssize_t iio_buffer_write(struct file *filp, const char __user *buf, 210 size_t n, loff_t *f_ps) 211 { 212 struct iio_dev_buffer_pair *ib = filp->private_data; 213 struct iio_buffer *rb = ib->buffer; 214 struct iio_dev *indio_dev = ib->indio_dev; 215 DEFINE_WAIT_FUNC(wait, woken_wake_function); 216 int ret = 0; 217 size_t written; 218 219 if (!indio_dev->info) 220 return -ENODEV; 221 222 if (!rb || !rb->access->write) 223 return -EINVAL; 224 225 if (rb->direction != IIO_BUFFER_DIRECTION_OUT) 226 return -EPERM; 227 228 written = 0; 229 add_wait_queue(&rb->pollq, &wait); 230 do { 231 if (!indio_dev->info) 232 return -ENODEV; 233 234 if (!iio_buffer_space_available(rb)) { 235 if (signal_pending(current)) { 236 ret = -ERESTARTSYS; 237 break; 238 } 239 240 if (filp->f_flags & O_NONBLOCK) { 241 if (!written) 242 ret = -EAGAIN; 243 break; 244 } 245 246 wait_woken(&wait, TASK_INTERRUPTIBLE, 247 MAX_SCHEDULE_TIMEOUT); 248 continue; 249 } 250 251 ret = rb->access->write(rb, n - written, buf + written); 252 if (ret < 0) 253 break; 254 255 written += ret; 256 257 } while (written != n); 258 remove_wait_queue(&rb->pollq, &wait); 259 260 return ret < 0 ? ret : written; 261 } 262 263 /** 264 * iio_buffer_poll() - poll the buffer to find out if it has data 265 * @filp: File structure pointer for device access 266 * @wait: Poll table structure pointer for which the driver adds 267 * a wait queue 268 * 269 * Return: (EPOLLIN | EPOLLRDNORM) if data is available for reading 270 * or 0 for other cases 271 */ 272 static __poll_t iio_buffer_poll(struct file *filp, 273 struct poll_table_struct *wait) 274 { 275 struct iio_dev_buffer_pair *ib = filp->private_data; 276 struct iio_buffer *rb = ib->buffer; 277 struct iio_dev *indio_dev = ib->indio_dev; 278 279 if (!indio_dev->info || !rb) 280 return 0; 281 282 poll_wait(filp, &rb->pollq, wait); 283 284 switch (rb->direction) { 285 case IIO_BUFFER_DIRECTION_IN: 286 if (iio_buffer_ready(indio_dev, rb, rb->watermark, 0)) 287 return EPOLLIN | EPOLLRDNORM; 288 break; 289 case IIO_BUFFER_DIRECTION_OUT: 290 if (iio_buffer_space_available(rb)) 291 return EPOLLOUT | EPOLLWRNORM; 292 break; 293 } 294 295 return 0; 296 } 297 298 ssize_t iio_buffer_read_wrapper(struct file *filp, char __user *buf, 299 size_t n, loff_t *f_ps) 300 { 301 struct iio_dev_buffer_pair *ib = filp->private_data; 302 struct iio_buffer *rb = ib->buffer; 303 304 /* check if buffer was opened through new API */ 305 if (test_bit(IIO_BUSY_BIT_POS, &rb->flags)) 306 return -EBUSY; 307 308 return iio_buffer_read(filp, buf, n, f_ps); 309 } 310 311 ssize_t iio_buffer_write_wrapper(struct file *filp, const char __user *buf, 312 size_t n, loff_t *f_ps) 313 { 314 struct iio_dev_buffer_pair *ib = filp->private_data; 315 struct iio_buffer *rb = ib->buffer; 316 317 /* check if buffer was opened through new API */ 318 if (test_bit(IIO_BUSY_BIT_POS, &rb->flags)) 319 return -EBUSY; 320 321 return iio_buffer_write(filp, buf, n, f_ps); 322 } 323 324 __poll_t iio_buffer_poll_wrapper(struct file *filp, 325 struct poll_table_struct *wait) 326 { 327 struct iio_dev_buffer_pair *ib = filp->private_data; 328 struct iio_buffer *rb = ib->buffer; 329 330 /* check if buffer was opened through new API */ 331 if (test_bit(IIO_BUSY_BIT_POS, &rb->flags)) 332 return 0; 333 334 return iio_buffer_poll(filp, wait); 335 } 336 337 /** 338 * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue 339 * @indio_dev: The IIO device 340 * 341 * Wakes up the event waitqueue used for poll(). Should usually 342 * be called when the device is unregistered. 343 */ 344 void iio_buffer_wakeup_poll(struct iio_dev *indio_dev) 345 { 346 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 347 struct iio_buffer *buffer; 348 unsigned int i; 349 350 for (i = 0; i < iio_dev_opaque->attached_buffers_cnt; i++) { 351 buffer = iio_dev_opaque->attached_buffers[i]; 352 wake_up(&buffer->pollq); 353 } 354 } 355 356 int iio_pop_from_buffer(struct iio_buffer *buffer, void *data) 357 { 358 if (!buffer || !buffer->access || !buffer->access->remove_from) 359 return -EINVAL; 360 361 return buffer->access->remove_from(buffer, data); 362 } 363 EXPORT_SYMBOL_GPL(iio_pop_from_buffer); 364 365 void iio_buffer_init(struct iio_buffer *buffer) 366 { 367 INIT_LIST_HEAD(&buffer->demux_list); 368 INIT_LIST_HEAD(&buffer->buffer_list); 369 INIT_LIST_HEAD(&buffer->dmabufs); 370 mutex_init(&buffer->dmabufs_mutex); 371 init_waitqueue_head(&buffer->pollq); 372 kref_init(&buffer->ref); 373 if (!buffer->watermark) 374 buffer->watermark = 1; 375 } 376 EXPORT_SYMBOL(iio_buffer_init); 377 378 void iio_device_detach_buffers(struct iio_dev *indio_dev) 379 { 380 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 381 struct iio_buffer *buffer; 382 unsigned int i; 383 384 for (i = 0; i < iio_dev_opaque->attached_buffers_cnt; i++) { 385 buffer = iio_dev_opaque->attached_buffers[i]; 386 iio_buffer_put(buffer); 387 } 388 389 kfree(iio_dev_opaque->attached_buffers); 390 } 391 392 static ssize_t iio_show_scan_index(struct device *dev, 393 struct device_attribute *attr, 394 char *buf) 395 { 396 return sysfs_emit(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index); 397 } 398 399 static ssize_t iio_show_fixed_type(struct device *dev, 400 struct device_attribute *attr, 401 char *buf) 402 { 403 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 404 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); 405 const struct iio_scan_type *scan_type; 406 u8 type; 407 408 scan_type = iio_get_current_scan_type(indio_dev, this_attr->c); 409 if (IS_ERR(scan_type)) 410 return PTR_ERR(scan_type); 411 412 type = scan_type->endianness; 413 414 if (type == IIO_CPU) { 415 #ifdef __LITTLE_ENDIAN 416 type = IIO_LE; 417 #else 418 type = IIO_BE; 419 #endif 420 } 421 if (scan_type->repeat > 1) 422 return sysfs_emit(buf, "%s:%c%d/%dX%d>>%u\n", 423 iio_endian_prefix[type], 424 scan_type->sign, 425 scan_type->realbits, 426 scan_type->storagebits, 427 scan_type->repeat, 428 scan_type->shift); 429 else 430 return sysfs_emit(buf, "%s:%c%d/%d>>%u\n", 431 iio_endian_prefix[type], 432 scan_type->sign, 433 scan_type->realbits, 434 scan_type->storagebits, 435 scan_type->shift); 436 } 437 438 static ssize_t iio_scan_el_show(struct device *dev, 439 struct device_attribute *attr, 440 char *buf) 441 { 442 int ret; 443 struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; 444 445 /* Ensure ret is 0 or 1. */ 446 ret = !!test_bit(to_iio_dev_attr(attr)->address, 447 buffer->scan_mask); 448 449 return sysfs_emit(buf, "%d\n", ret); 450 } 451 452 /* Note NULL used as error indicator as it doesn't make sense. */ 453 static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks, 454 unsigned int masklength, 455 const unsigned long *mask, 456 bool strict) 457 { 458 if (bitmap_empty(mask, masklength)) 459 return NULL; 460 /* 461 * The condition here do not handle multi-long masks correctly. 462 * It only checks the first long to be zero, and will use such mask 463 * as a terminator even if there was bits set after the first long. 464 * 465 * Correct check would require using: 466 * while (!bitmap_empty(av_masks, masklength)) 467 * instead. This is potentially hazardous because the 468 * avaliable_scan_masks is a zero terminated array of longs - and 469 * using the proper bitmap_empty() check for multi-long wide masks 470 * would require the array to be terminated with multiple zero longs - 471 * which is not such an usual pattern. 472 * 473 * As writing of this no multi-long wide masks were found in-tree, so 474 * the simple while (*av_masks) check is working. 475 */ 476 while (*av_masks) { 477 if (strict) { 478 if (bitmap_equal(mask, av_masks, masklength)) 479 return av_masks; 480 } else { 481 if (bitmap_subset(mask, av_masks, masklength)) 482 return av_masks; 483 } 484 av_masks += BITS_TO_LONGS(masklength); 485 } 486 return NULL; 487 } 488 489 static bool iio_validate_scan_mask(struct iio_dev *indio_dev, 490 const unsigned long *mask) 491 { 492 if (!indio_dev->setup_ops->validate_scan_mask) 493 return true; 494 495 return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask); 496 } 497 498 /** 499 * iio_scan_mask_set() - set particular bit in the scan mask 500 * @indio_dev: the iio device 501 * @buffer: the buffer whose scan mask we are interested in 502 * @bit: the bit to be set. 503 * 504 * Note that at this point we have no way of knowing what other 505 * buffers might request, hence this code only verifies that the 506 * individual buffers request is plausible. 507 */ 508 static int iio_scan_mask_set(struct iio_dev *indio_dev, 509 struct iio_buffer *buffer, int bit) 510 { 511 unsigned int masklength = iio_get_masklength(indio_dev); 512 const unsigned long *mask; 513 unsigned long *trialmask; 514 515 if (!masklength) { 516 WARN(1, "Trying to set scanmask prior to registering buffer\n"); 517 return -EINVAL; 518 } 519 520 trialmask = bitmap_alloc(masklength, GFP_KERNEL); 521 if (!trialmask) 522 return -ENOMEM; 523 bitmap_copy(trialmask, buffer->scan_mask, masklength); 524 set_bit(bit, trialmask); 525 526 if (!iio_validate_scan_mask(indio_dev, trialmask)) 527 goto err_invalid_mask; 528 529 if (indio_dev->available_scan_masks) { 530 mask = iio_scan_mask_match(indio_dev->available_scan_masks, 531 masklength, trialmask, false); 532 if (!mask) 533 goto err_invalid_mask; 534 } 535 bitmap_copy(buffer->scan_mask, trialmask, masklength); 536 537 bitmap_free(trialmask); 538 539 return 0; 540 541 err_invalid_mask: 542 bitmap_free(trialmask); 543 return -EINVAL; 544 } 545 546 static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit) 547 { 548 clear_bit(bit, buffer->scan_mask); 549 return 0; 550 } 551 552 static int iio_scan_mask_query(struct iio_dev *indio_dev, 553 struct iio_buffer *buffer, int bit) 554 { 555 if (bit > iio_get_masklength(indio_dev)) 556 return -EINVAL; 557 558 if (!buffer->scan_mask) 559 return 0; 560 561 /* Ensure return value is 0 or 1. */ 562 return !!test_bit(bit, buffer->scan_mask); 563 }; 564 565 static ssize_t iio_scan_el_store(struct device *dev, 566 struct device_attribute *attr, 567 const char *buf, 568 size_t len) 569 { 570 int ret; 571 bool state; 572 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 573 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 574 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); 575 struct iio_buffer *buffer = this_attr->buffer; 576 577 ret = kstrtobool(buf, &state); 578 if (ret < 0) 579 return ret; 580 581 guard(mutex)(&iio_dev_opaque->mlock); 582 if (iio_buffer_is_active(buffer)) 583 return -EBUSY; 584 585 ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address); 586 if (ret < 0) 587 return ret; 588 589 if (state && ret) 590 return len; 591 592 if (state) 593 ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address); 594 else 595 ret = iio_scan_mask_clear(buffer, this_attr->address); 596 if (ret) 597 return ret; 598 599 return len; 600 } 601 602 static ssize_t iio_scan_el_ts_show(struct device *dev, 603 struct device_attribute *attr, 604 char *buf) 605 { 606 struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; 607 608 return sysfs_emit(buf, "%d\n", buffer->scan_timestamp); 609 } 610 611 static ssize_t iio_scan_el_ts_store(struct device *dev, 612 struct device_attribute *attr, 613 const char *buf, 614 size_t len) 615 { 616 int ret; 617 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 618 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 619 struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; 620 bool state; 621 622 ret = kstrtobool(buf, &state); 623 if (ret < 0) 624 return ret; 625 626 guard(mutex)(&iio_dev_opaque->mlock); 627 if (iio_buffer_is_active(buffer)) 628 return -EBUSY; 629 630 buffer->scan_timestamp = state; 631 632 return len; 633 } 634 635 static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev, 636 struct iio_buffer *buffer, 637 const struct iio_chan_spec *chan) 638 { 639 int ret, attrcount = 0; 640 641 ret = __iio_add_chan_devattr("index", 642 chan, 643 &iio_show_scan_index, 644 NULL, 645 0, 646 IIO_SEPARATE, 647 &indio_dev->dev, 648 buffer, 649 &buffer->buffer_attr_list); 650 if (ret) 651 return ret; 652 attrcount++; 653 ret = __iio_add_chan_devattr("type", 654 chan, 655 &iio_show_fixed_type, 656 NULL, 657 0, 658 IIO_SEPARATE, 659 &indio_dev->dev, 660 buffer, 661 &buffer->buffer_attr_list); 662 if (ret) 663 return ret; 664 attrcount++; 665 if (chan->type != IIO_TIMESTAMP) 666 ret = __iio_add_chan_devattr("en", 667 chan, 668 &iio_scan_el_show, 669 &iio_scan_el_store, 670 chan->scan_index, 671 IIO_SEPARATE, 672 &indio_dev->dev, 673 buffer, 674 &buffer->buffer_attr_list); 675 else 676 ret = __iio_add_chan_devattr("en", 677 chan, 678 &iio_scan_el_ts_show, 679 &iio_scan_el_ts_store, 680 chan->scan_index, 681 IIO_SEPARATE, 682 &indio_dev->dev, 683 buffer, 684 &buffer->buffer_attr_list); 685 if (ret) 686 return ret; 687 attrcount++; 688 ret = attrcount; 689 return ret; 690 } 691 692 static ssize_t length_show(struct device *dev, struct device_attribute *attr, 693 char *buf) 694 { 695 struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; 696 697 return sysfs_emit(buf, "%d\n", buffer->length); 698 } 699 700 static ssize_t length_store(struct device *dev, struct device_attribute *attr, 701 const char *buf, size_t len) 702 { 703 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 704 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 705 struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; 706 unsigned int val; 707 int ret; 708 709 ret = kstrtouint(buf, 10, &val); 710 if (ret) 711 return ret; 712 713 if (val == buffer->length) 714 return len; 715 716 guard(mutex)(&iio_dev_opaque->mlock); 717 if (iio_buffer_is_active(buffer)) 718 return -EBUSY; 719 720 buffer->access->set_length(buffer, val); 721 722 if (buffer->length && buffer->length < buffer->watermark) 723 buffer->watermark = buffer->length; 724 725 return len; 726 } 727 728 static ssize_t enable_show(struct device *dev, struct device_attribute *attr, 729 char *buf) 730 { 731 struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; 732 733 return sysfs_emit(buf, "%d\n", iio_buffer_is_active(buffer)); 734 } 735 736 static int iio_storage_bytes_for_si(struct iio_dev *indio_dev, 737 unsigned int scan_index) 738 { 739 const struct iio_chan_spec *ch; 740 const struct iio_scan_type *scan_type; 741 unsigned int bytes; 742 743 ch = iio_find_channel_from_si(indio_dev, scan_index); 744 scan_type = iio_get_current_scan_type(indio_dev, ch); 745 if (IS_ERR(scan_type)) 746 return PTR_ERR(scan_type); 747 748 bytes = scan_type->storagebits / 8; 749 750 if (scan_type->repeat > 1) 751 bytes *= scan_type->repeat; 752 753 return bytes; 754 } 755 756 static int iio_storage_bytes_for_timestamp(struct iio_dev *indio_dev) 757 { 758 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 759 760 return iio_storage_bytes_for_si(indio_dev, 761 iio_dev_opaque->scan_index_timestamp); 762 } 763 764 static int iio_compute_scan_bytes(struct iio_dev *indio_dev, 765 const unsigned long *mask, bool timestamp) 766 { 767 unsigned int bytes = 0; 768 int length, i, largest = 0; 769 770 /* How much space will the demuxed element take? */ 771 for_each_set_bit(i, mask, iio_get_masklength(indio_dev)) { 772 length = iio_storage_bytes_for_si(indio_dev, i); 773 if (length < 0) 774 return length; 775 776 bytes = ALIGN(bytes, length); 777 bytes += length; 778 largest = max(largest, length); 779 } 780 781 if (timestamp) { 782 length = iio_storage_bytes_for_timestamp(indio_dev); 783 if (length < 0) 784 return length; 785 786 bytes = ALIGN(bytes, length); 787 bytes += length; 788 largest = max(largest, length); 789 } 790 791 bytes = ALIGN(bytes, largest); 792 return bytes; 793 } 794 795 static void iio_buffer_activate(struct iio_dev *indio_dev, 796 struct iio_buffer *buffer) 797 { 798 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 799 800 iio_buffer_get(buffer); 801 list_add(&buffer->buffer_list, &iio_dev_opaque->buffer_list); 802 } 803 804 static void iio_buffer_deactivate(struct iio_buffer *buffer) 805 { 806 list_del_init(&buffer->buffer_list); 807 wake_up_interruptible(&buffer->pollq); 808 iio_buffer_put(buffer); 809 } 810 811 static void iio_buffer_deactivate_all(struct iio_dev *indio_dev) 812 { 813 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 814 struct iio_buffer *buffer, *_buffer; 815 816 list_for_each_entry_safe(buffer, _buffer, 817 &iio_dev_opaque->buffer_list, buffer_list) 818 iio_buffer_deactivate(buffer); 819 } 820 821 static int iio_buffer_enable(struct iio_buffer *buffer, 822 struct iio_dev *indio_dev) 823 { 824 if (!buffer->access->enable) 825 return 0; 826 return buffer->access->enable(buffer, indio_dev); 827 } 828 829 static int iio_buffer_disable(struct iio_buffer *buffer, 830 struct iio_dev *indio_dev) 831 { 832 if (!buffer->access->disable) 833 return 0; 834 return buffer->access->disable(buffer, indio_dev); 835 } 836 837 static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev, 838 struct iio_buffer *buffer) 839 { 840 unsigned int bytes; 841 842 if (!buffer->access->set_bytes_per_datum) 843 return; 844 845 bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask, 846 buffer->scan_timestamp); 847 848 buffer->access->set_bytes_per_datum(buffer, bytes); 849 } 850 851 static int iio_buffer_request_update(struct iio_dev *indio_dev, 852 struct iio_buffer *buffer) 853 { 854 int ret; 855 856 iio_buffer_update_bytes_per_datum(indio_dev, buffer); 857 if (buffer->access->request_update) { 858 ret = buffer->access->request_update(buffer); 859 if (ret) { 860 dev_dbg(&indio_dev->dev, 861 "Buffer not started: buffer parameter update failed (%d)\n", 862 ret); 863 return ret; 864 } 865 } 866 867 return 0; 868 } 869 870 static void iio_free_scan_mask(struct iio_dev *indio_dev, 871 const unsigned long *mask) 872 { 873 /* If the mask is dynamically allocated free it, otherwise do nothing */ 874 if (!indio_dev->available_scan_masks) 875 bitmap_free(mask); 876 } 877 878 struct iio_device_config { 879 unsigned int mode; 880 unsigned int watermark; 881 const unsigned long *scan_mask; 882 unsigned int scan_bytes; 883 bool scan_timestamp; 884 }; 885 886 static int iio_verify_update(struct iio_dev *indio_dev, 887 struct iio_buffer *insert_buffer, 888 struct iio_buffer *remove_buffer, 889 struct iio_device_config *config) 890 { 891 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 892 unsigned int masklength = iio_get_masklength(indio_dev); 893 unsigned long *compound_mask; 894 const unsigned long *scan_mask; 895 bool strict_scanmask = false; 896 struct iio_buffer *buffer; 897 bool scan_timestamp; 898 unsigned int modes; 899 900 if (insert_buffer && 901 bitmap_empty(insert_buffer->scan_mask, masklength)) { 902 dev_dbg(&indio_dev->dev, 903 "At least one scan element must be enabled first\n"); 904 return -EINVAL; 905 } 906 907 memset(config, 0, sizeof(*config)); 908 config->watermark = ~0; 909 910 /* 911 * If there is just one buffer and we are removing it there is nothing 912 * to verify. 913 */ 914 if (remove_buffer && !insert_buffer && 915 list_is_singular(&iio_dev_opaque->buffer_list)) 916 return 0; 917 918 modes = indio_dev->modes; 919 920 list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) { 921 if (buffer == remove_buffer) 922 continue; 923 modes &= buffer->access->modes; 924 config->watermark = min(config->watermark, buffer->watermark); 925 } 926 927 if (insert_buffer) { 928 modes &= insert_buffer->access->modes; 929 config->watermark = min(config->watermark, 930 insert_buffer->watermark); 931 } 932 933 /* Definitely possible for devices to support both of these. */ 934 if ((modes & INDIO_BUFFER_TRIGGERED) && indio_dev->trig) { 935 config->mode = INDIO_BUFFER_TRIGGERED; 936 } else if (modes & INDIO_BUFFER_HARDWARE) { 937 /* 938 * Keep things simple for now and only allow a single buffer to 939 * be connected in hardware mode. 940 */ 941 if (insert_buffer && !list_empty(&iio_dev_opaque->buffer_list)) 942 return -EINVAL; 943 config->mode = INDIO_BUFFER_HARDWARE; 944 strict_scanmask = true; 945 } else if (modes & INDIO_BUFFER_SOFTWARE) { 946 config->mode = INDIO_BUFFER_SOFTWARE; 947 } else { 948 /* Can only occur on first buffer */ 949 if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) 950 dev_dbg(&indio_dev->dev, "Buffer not started: no trigger\n"); 951 return -EINVAL; 952 } 953 954 /* What scan mask do we actually have? */ 955 compound_mask = bitmap_zalloc(masklength, GFP_KERNEL); 956 if (!compound_mask) 957 return -ENOMEM; 958 959 scan_timestamp = false; 960 961 list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) { 962 if (buffer == remove_buffer) 963 continue; 964 bitmap_or(compound_mask, compound_mask, buffer->scan_mask, 965 masklength); 966 scan_timestamp |= buffer->scan_timestamp; 967 } 968 969 if (insert_buffer) { 970 bitmap_or(compound_mask, compound_mask, 971 insert_buffer->scan_mask, masklength); 972 scan_timestamp |= insert_buffer->scan_timestamp; 973 } 974 975 if (indio_dev->available_scan_masks) { 976 scan_mask = iio_scan_mask_match(indio_dev->available_scan_masks, 977 masklength, compound_mask, 978 strict_scanmask); 979 bitmap_free(compound_mask); 980 if (!scan_mask) 981 return -EINVAL; 982 } else { 983 scan_mask = compound_mask; 984 } 985 986 config->scan_bytes = iio_compute_scan_bytes(indio_dev, 987 scan_mask, scan_timestamp); 988 config->scan_mask = scan_mask; 989 config->scan_timestamp = scan_timestamp; 990 991 return 0; 992 } 993 994 /** 995 * struct iio_demux_table - table describing demux memcpy ops 996 * @from: index to copy from 997 * @to: index to copy to 998 * @length: how many bytes to copy 999 * @l: list head used for management 1000 */ 1001 struct iio_demux_table { 1002 unsigned int from; 1003 unsigned int to; 1004 unsigned int length; 1005 struct list_head l; 1006 }; 1007 1008 static void iio_buffer_demux_free(struct iio_buffer *buffer) 1009 { 1010 struct iio_demux_table *p, *q; 1011 1012 list_for_each_entry_safe(p, q, &buffer->demux_list, l) { 1013 list_del(&p->l); 1014 kfree(p); 1015 } 1016 } 1017 1018 static int iio_buffer_add_demux(struct iio_buffer *buffer, 1019 struct iio_demux_table **p, unsigned int in_loc, 1020 unsigned int out_loc, 1021 unsigned int length) 1022 { 1023 if (*p && (*p)->from + (*p)->length == in_loc && 1024 (*p)->to + (*p)->length == out_loc) { 1025 (*p)->length += length; 1026 } else { 1027 *p = kmalloc(sizeof(**p), GFP_KERNEL); 1028 if (!(*p)) 1029 return -ENOMEM; 1030 (*p)->from = in_loc; 1031 (*p)->to = out_loc; 1032 (*p)->length = length; 1033 list_add_tail(&(*p)->l, &buffer->demux_list); 1034 } 1035 1036 return 0; 1037 } 1038 1039 static int iio_buffer_update_demux(struct iio_dev *indio_dev, 1040 struct iio_buffer *buffer) 1041 { 1042 unsigned int masklength = iio_get_masklength(indio_dev); 1043 int ret, in_ind = -1, out_ind, length; 1044 unsigned int in_loc = 0, out_loc = 0; 1045 struct iio_demux_table *p = NULL; 1046 1047 /* Clear out any old demux */ 1048 iio_buffer_demux_free(buffer); 1049 kfree(buffer->demux_bounce); 1050 buffer->demux_bounce = NULL; 1051 1052 /* First work out which scan mode we will actually have */ 1053 if (bitmap_equal(indio_dev->active_scan_mask, 1054 buffer->scan_mask, masklength)) 1055 return 0; 1056 1057 /* Now we have the two masks, work from least sig and build up sizes */ 1058 for_each_set_bit(out_ind, buffer->scan_mask, masklength) { 1059 in_ind = find_next_bit(indio_dev->active_scan_mask, 1060 masklength, in_ind + 1); 1061 while (in_ind != out_ind) { 1062 ret = iio_storage_bytes_for_si(indio_dev, in_ind); 1063 if (ret < 0) 1064 goto error_clear_mux_table; 1065 1066 length = ret; 1067 /* Make sure we are aligned */ 1068 in_loc = roundup(in_loc, length) + length; 1069 in_ind = find_next_bit(indio_dev->active_scan_mask, 1070 masklength, in_ind + 1); 1071 } 1072 ret = iio_storage_bytes_for_si(indio_dev, in_ind); 1073 if (ret < 0) 1074 goto error_clear_mux_table; 1075 1076 length = ret; 1077 out_loc = roundup(out_loc, length); 1078 in_loc = roundup(in_loc, length); 1079 ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length); 1080 if (ret) 1081 goto error_clear_mux_table; 1082 out_loc += length; 1083 in_loc += length; 1084 } 1085 /* Relies on scan_timestamp being last */ 1086 if (buffer->scan_timestamp) { 1087 ret = iio_storage_bytes_for_timestamp(indio_dev); 1088 if (ret < 0) 1089 goto error_clear_mux_table; 1090 1091 length = ret; 1092 out_loc = roundup(out_loc, length); 1093 in_loc = roundup(in_loc, length); 1094 ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length); 1095 if (ret) 1096 goto error_clear_mux_table; 1097 out_loc += length; 1098 } 1099 buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL); 1100 if (!buffer->demux_bounce) { 1101 ret = -ENOMEM; 1102 goto error_clear_mux_table; 1103 } 1104 return 0; 1105 1106 error_clear_mux_table: 1107 iio_buffer_demux_free(buffer); 1108 1109 return ret; 1110 } 1111 1112 static int iio_update_demux(struct iio_dev *indio_dev) 1113 { 1114 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 1115 struct iio_buffer *buffer; 1116 int ret; 1117 1118 list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) { 1119 ret = iio_buffer_update_demux(indio_dev, buffer); 1120 if (ret < 0) 1121 goto error_clear_mux_table; 1122 } 1123 return 0; 1124 1125 error_clear_mux_table: 1126 list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) 1127 iio_buffer_demux_free(buffer); 1128 1129 return ret; 1130 } 1131 1132 static int iio_enable_buffers(struct iio_dev *indio_dev, 1133 struct iio_device_config *config) 1134 { 1135 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 1136 struct iio_buffer *buffer, *tmp = NULL; 1137 int ret; 1138 1139 indio_dev->active_scan_mask = config->scan_mask; 1140 ACCESS_PRIVATE(indio_dev, scan_timestamp) = config->scan_timestamp; 1141 indio_dev->scan_bytes = config->scan_bytes; 1142 iio_dev_opaque->currentmode = config->mode; 1143 1144 iio_update_demux(indio_dev); 1145 1146 /* Wind up again */ 1147 if (indio_dev->setup_ops->preenable) { 1148 ret = indio_dev->setup_ops->preenable(indio_dev); 1149 if (ret) { 1150 dev_dbg(&indio_dev->dev, 1151 "Buffer not started: buffer preenable failed (%d)\n", ret); 1152 goto err_undo_config; 1153 } 1154 } 1155 1156 if (indio_dev->info->update_scan_mode) { 1157 ret = indio_dev->info 1158 ->update_scan_mode(indio_dev, 1159 indio_dev->active_scan_mask); 1160 if (ret < 0) { 1161 dev_dbg(&indio_dev->dev, 1162 "Buffer not started: update scan mode failed (%d)\n", 1163 ret); 1164 goto err_run_postdisable; 1165 } 1166 } 1167 1168 if (indio_dev->info->hwfifo_set_watermark) 1169 indio_dev->info->hwfifo_set_watermark(indio_dev, 1170 config->watermark); 1171 1172 list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) { 1173 ret = iio_buffer_enable(buffer, indio_dev); 1174 if (ret) { 1175 tmp = buffer; 1176 goto err_disable_buffers; 1177 } 1178 } 1179 1180 if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) { 1181 ret = iio_trigger_attach_poll_func(indio_dev->trig, 1182 indio_dev->pollfunc); 1183 if (ret) 1184 goto err_disable_buffers; 1185 } 1186 1187 if (indio_dev->setup_ops->postenable) { 1188 ret = indio_dev->setup_ops->postenable(indio_dev); 1189 if (ret) { 1190 dev_dbg(&indio_dev->dev, 1191 "Buffer not started: postenable failed (%d)\n", ret); 1192 goto err_detach_pollfunc; 1193 } 1194 } 1195 1196 return 0; 1197 1198 err_detach_pollfunc: 1199 if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) { 1200 iio_trigger_detach_poll_func(indio_dev->trig, 1201 indio_dev->pollfunc); 1202 } 1203 err_disable_buffers: 1204 buffer = list_prepare_entry(tmp, &iio_dev_opaque->buffer_list, buffer_list); 1205 list_for_each_entry_continue_reverse(buffer, &iio_dev_opaque->buffer_list, 1206 buffer_list) 1207 iio_buffer_disable(buffer, indio_dev); 1208 err_run_postdisable: 1209 if (indio_dev->setup_ops->postdisable) 1210 indio_dev->setup_ops->postdisable(indio_dev); 1211 err_undo_config: 1212 iio_dev_opaque->currentmode = INDIO_DIRECT_MODE; 1213 indio_dev->active_scan_mask = NULL; 1214 1215 return ret; 1216 } 1217 1218 static int iio_disable_buffers(struct iio_dev *indio_dev) 1219 { 1220 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 1221 struct iio_buffer *buffer; 1222 int ret = 0; 1223 int ret2; 1224 1225 /* Wind down existing buffers - iff there are any */ 1226 if (list_empty(&iio_dev_opaque->buffer_list)) 1227 return 0; 1228 1229 /* 1230 * If things go wrong at some step in disable we still need to continue 1231 * to perform the other steps, otherwise we leave the device in a 1232 * inconsistent state. We return the error code for the first error we 1233 * encountered. 1234 */ 1235 1236 if (indio_dev->setup_ops->predisable) { 1237 ret2 = indio_dev->setup_ops->predisable(indio_dev); 1238 if (ret2 && !ret) 1239 ret = ret2; 1240 } 1241 1242 if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) { 1243 iio_trigger_detach_poll_func(indio_dev->trig, 1244 indio_dev->pollfunc); 1245 } 1246 1247 list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) { 1248 ret2 = iio_buffer_disable(buffer, indio_dev); 1249 if (ret2 && !ret) 1250 ret = ret2; 1251 } 1252 1253 if (indio_dev->setup_ops->postdisable) { 1254 ret2 = indio_dev->setup_ops->postdisable(indio_dev); 1255 if (ret2 && !ret) 1256 ret = ret2; 1257 } 1258 1259 iio_free_scan_mask(indio_dev, indio_dev->active_scan_mask); 1260 indio_dev->active_scan_mask = NULL; 1261 iio_dev_opaque->currentmode = INDIO_DIRECT_MODE; 1262 1263 return ret; 1264 } 1265 1266 static int __iio_update_buffers(struct iio_dev *indio_dev, 1267 struct iio_buffer *insert_buffer, 1268 struct iio_buffer *remove_buffer) 1269 { 1270 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 1271 struct iio_device_config new_config; 1272 int ret; 1273 1274 ret = iio_verify_update(indio_dev, insert_buffer, remove_buffer, 1275 &new_config); 1276 if (ret) 1277 return ret; 1278 1279 if (insert_buffer) { 1280 ret = iio_buffer_request_update(indio_dev, insert_buffer); 1281 if (ret) 1282 goto err_free_config; 1283 } 1284 1285 ret = iio_disable_buffers(indio_dev); 1286 if (ret) 1287 goto err_deactivate_all; 1288 1289 if (remove_buffer) 1290 iio_buffer_deactivate(remove_buffer); 1291 if (insert_buffer) 1292 iio_buffer_activate(indio_dev, insert_buffer); 1293 1294 /* If no buffers in list, we are done */ 1295 if (list_empty(&iio_dev_opaque->buffer_list)) 1296 return 0; 1297 1298 ret = iio_enable_buffers(indio_dev, &new_config); 1299 if (ret) 1300 goto err_deactivate_all; 1301 1302 return 0; 1303 1304 err_deactivate_all: 1305 /* 1306 * We've already verified that the config is valid earlier. If things go 1307 * wrong in either enable or disable the most likely reason is an IO 1308 * error from the device. In this case there is no good recovery 1309 * strategy. Just make sure to disable everything and leave the device 1310 * in a sane state. With a bit of luck the device might come back to 1311 * life again later and userspace can try again. 1312 */ 1313 iio_buffer_deactivate_all(indio_dev); 1314 1315 err_free_config: 1316 iio_free_scan_mask(indio_dev, new_config.scan_mask); 1317 return ret; 1318 } 1319 1320 int iio_update_buffers(struct iio_dev *indio_dev, 1321 struct iio_buffer *insert_buffer, 1322 struct iio_buffer *remove_buffer) 1323 { 1324 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 1325 1326 if (insert_buffer == remove_buffer) 1327 return 0; 1328 1329 if (insert_buffer && 1330 insert_buffer->direction == IIO_BUFFER_DIRECTION_OUT) 1331 return -EINVAL; 1332 1333 guard(mutex)(&iio_dev_opaque->info_exist_lock); 1334 guard(mutex)(&iio_dev_opaque->mlock); 1335 1336 if (insert_buffer && iio_buffer_is_active(insert_buffer)) 1337 insert_buffer = NULL; 1338 1339 if (remove_buffer && !iio_buffer_is_active(remove_buffer)) 1340 remove_buffer = NULL; 1341 1342 if (!insert_buffer && !remove_buffer) 1343 return 0; 1344 1345 if (!indio_dev->info) 1346 return -ENODEV; 1347 1348 return __iio_update_buffers(indio_dev, insert_buffer, remove_buffer); 1349 } 1350 EXPORT_SYMBOL_GPL(iio_update_buffers); 1351 1352 void iio_disable_all_buffers(struct iio_dev *indio_dev) 1353 { 1354 iio_disable_buffers(indio_dev); 1355 iio_buffer_deactivate_all(indio_dev); 1356 } 1357 1358 static ssize_t enable_store(struct device *dev, struct device_attribute *attr, 1359 const char *buf, size_t len) 1360 { 1361 int ret; 1362 bool requested_state; 1363 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 1364 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 1365 struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; 1366 bool inlist; 1367 1368 ret = kstrtobool(buf, &requested_state); 1369 if (ret < 0) 1370 return ret; 1371 1372 guard(mutex)(&iio_dev_opaque->mlock); 1373 1374 /* Find out if it is in the list */ 1375 inlist = iio_buffer_is_active(buffer); 1376 /* Already in desired state */ 1377 if (inlist == requested_state) 1378 return len; 1379 1380 if (requested_state) 1381 ret = __iio_update_buffers(indio_dev, buffer, NULL); 1382 else 1383 ret = __iio_update_buffers(indio_dev, NULL, buffer); 1384 if (ret) 1385 return ret; 1386 1387 return len; 1388 } 1389 1390 static ssize_t watermark_show(struct device *dev, struct device_attribute *attr, 1391 char *buf) 1392 { 1393 struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; 1394 1395 return sysfs_emit(buf, "%u\n", buffer->watermark); 1396 } 1397 1398 static ssize_t watermark_store(struct device *dev, 1399 struct device_attribute *attr, 1400 const char *buf, size_t len) 1401 { 1402 struct iio_dev *indio_dev = dev_to_iio_dev(dev); 1403 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 1404 struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; 1405 unsigned int val; 1406 int ret; 1407 1408 ret = kstrtouint(buf, 10, &val); 1409 if (ret) 1410 return ret; 1411 if (!val) 1412 return -EINVAL; 1413 1414 guard(mutex)(&iio_dev_opaque->mlock); 1415 1416 if (val > buffer->length) 1417 return -EINVAL; 1418 1419 if (iio_buffer_is_active(buffer)) 1420 return -EBUSY; 1421 1422 buffer->watermark = val; 1423 1424 return len; 1425 } 1426 1427 static ssize_t data_available_show(struct device *dev, 1428 struct device_attribute *attr, char *buf) 1429 { 1430 struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; 1431 1432 return sysfs_emit(buf, "%zu\n", iio_buffer_data_available(buffer)); 1433 } 1434 1435 static ssize_t direction_show(struct device *dev, 1436 struct device_attribute *attr, 1437 char *buf) 1438 { 1439 struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; 1440 1441 switch (buffer->direction) { 1442 case IIO_BUFFER_DIRECTION_IN: 1443 return sysfs_emit(buf, "in\n"); 1444 case IIO_BUFFER_DIRECTION_OUT: 1445 return sysfs_emit(buf, "out\n"); 1446 default: 1447 return -EINVAL; 1448 } 1449 } 1450 1451 static DEVICE_ATTR_RW(length); 1452 static struct device_attribute dev_attr_length_ro = __ATTR_RO(length); 1453 static DEVICE_ATTR_RW(enable); 1454 static DEVICE_ATTR_RW(watermark); 1455 static struct device_attribute dev_attr_watermark_ro = __ATTR_RO(watermark); 1456 static DEVICE_ATTR_RO(data_available); 1457 static DEVICE_ATTR_RO(direction); 1458 1459 /* 1460 * When adding new attributes here, put the at the end, at least until 1461 * the code that handles the length/length_ro & watermark/watermark_ro 1462 * assignments gets cleaned up. Otherwise these can create some weird 1463 * duplicate attributes errors under some setups. 1464 */ 1465 static struct attribute *iio_buffer_attrs[] = { 1466 &dev_attr_length.attr, 1467 &dev_attr_enable.attr, 1468 &dev_attr_watermark.attr, 1469 &dev_attr_data_available.attr, 1470 &dev_attr_direction.attr, 1471 }; 1472 1473 #define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr) 1474 1475 static struct attribute *iio_buffer_wrap_attr(struct iio_buffer *buffer, 1476 struct attribute *attr) 1477 { 1478 struct device_attribute *dattr = to_dev_attr(attr); 1479 struct iio_dev_attr *iio_attr; 1480 1481 iio_attr = kzalloc(sizeof(*iio_attr), GFP_KERNEL); 1482 if (!iio_attr) 1483 return NULL; 1484 1485 iio_attr->buffer = buffer; 1486 memcpy(&iio_attr->dev_attr, dattr, sizeof(iio_attr->dev_attr)); 1487 iio_attr->dev_attr.attr.name = kstrdup_const(attr->name, GFP_KERNEL); 1488 if (!iio_attr->dev_attr.attr.name) { 1489 kfree(iio_attr); 1490 return NULL; 1491 } 1492 1493 sysfs_attr_init(&iio_attr->dev_attr.attr); 1494 1495 list_add(&iio_attr->l, &buffer->buffer_attr_list); 1496 1497 return &iio_attr->dev_attr.attr; 1498 } 1499 1500 static int iio_buffer_register_legacy_sysfs_groups(struct iio_dev *indio_dev, 1501 struct attribute **buffer_attrs, 1502 int buffer_attrcount, 1503 int scan_el_attrcount) 1504 { 1505 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 1506 struct attribute_group *group; 1507 struct attribute **attrs; 1508 int ret; 1509 1510 attrs = kcalloc(buffer_attrcount + 1, sizeof(*attrs), GFP_KERNEL); 1511 if (!attrs) 1512 return -ENOMEM; 1513 1514 memcpy(attrs, buffer_attrs, buffer_attrcount * sizeof(*attrs)); 1515 1516 group = &iio_dev_opaque->legacy_buffer_group; 1517 group->attrs = attrs; 1518 group->name = "buffer"; 1519 1520 ret = iio_device_register_sysfs_group(indio_dev, group); 1521 if (ret) 1522 goto error_free_buffer_attrs; 1523 1524 attrs = kcalloc(scan_el_attrcount + 1, sizeof(*attrs), GFP_KERNEL); 1525 if (!attrs) { 1526 ret = -ENOMEM; 1527 goto error_free_buffer_attrs; 1528 } 1529 1530 memcpy(attrs, &buffer_attrs[buffer_attrcount], 1531 scan_el_attrcount * sizeof(*attrs)); 1532 1533 group = &iio_dev_opaque->legacy_scan_el_group; 1534 group->attrs = attrs; 1535 group->name = "scan_elements"; 1536 1537 ret = iio_device_register_sysfs_group(indio_dev, group); 1538 if (ret) 1539 goto error_free_scan_el_attrs; 1540 1541 return 0; 1542 1543 error_free_scan_el_attrs: 1544 kfree(iio_dev_opaque->legacy_scan_el_group.attrs); 1545 error_free_buffer_attrs: 1546 kfree(iio_dev_opaque->legacy_buffer_group.attrs); 1547 1548 return ret; 1549 } 1550 1551 static void iio_buffer_unregister_legacy_sysfs_groups(struct iio_dev *indio_dev) 1552 { 1553 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 1554 1555 kfree(iio_dev_opaque->legacy_buffer_group.attrs); 1556 kfree(iio_dev_opaque->legacy_scan_el_group.attrs); 1557 } 1558 1559 static void iio_buffer_dmabuf_release(struct kref *ref) 1560 { 1561 struct iio_dmabuf_priv *priv = container_of(ref, struct iio_dmabuf_priv, ref); 1562 struct dma_buf_attachment *attach = priv->attach; 1563 struct iio_buffer *buffer = priv->buffer; 1564 struct dma_buf *dmabuf = attach->dmabuf; 1565 1566 dma_resv_lock(dmabuf->resv, NULL); 1567 dma_buf_unmap_attachment(attach, priv->sgt, priv->dir); 1568 dma_resv_unlock(dmabuf->resv); 1569 1570 buffer->access->detach_dmabuf(buffer, priv->block); 1571 1572 dma_buf_detach(attach->dmabuf, attach); 1573 dma_buf_put(dmabuf); 1574 kfree(priv); 1575 } 1576 1577 static void iio_buffer_dmabuf_get(struct dma_buf_attachment *attach) 1578 { 1579 struct iio_dmabuf_priv *priv = attach->importer_priv; 1580 1581 kref_get(&priv->ref); 1582 } 1583 1584 static void iio_buffer_dmabuf_put(struct dma_buf_attachment *attach) 1585 { 1586 struct iio_dmabuf_priv *priv = attach->importer_priv; 1587 1588 kref_put(&priv->ref, iio_buffer_dmabuf_release); 1589 } 1590 1591 static int iio_buffer_chrdev_release(struct inode *inode, struct file *filep) 1592 { 1593 struct iio_dev_buffer_pair *ib = filep->private_data; 1594 struct iio_dev *indio_dev = ib->indio_dev; 1595 struct iio_buffer *buffer = ib->buffer; 1596 struct iio_dmabuf_priv *priv, *tmp; 1597 1598 wake_up(&buffer->pollq); 1599 1600 guard(mutex)(&buffer->dmabufs_mutex); 1601 1602 /* Close all attached DMABUFs */ 1603 list_for_each_entry_safe(priv, tmp, &buffer->dmabufs, entry) { 1604 list_del_init(&priv->entry); 1605 iio_buffer_dmabuf_put(priv->attach); 1606 } 1607 1608 kfree(ib); 1609 clear_bit(IIO_BUSY_BIT_POS, &buffer->flags); 1610 iio_device_put(indio_dev); 1611 1612 return 0; 1613 } 1614 1615 static int iio_dma_resv_lock(struct dma_buf *dmabuf, bool nonblock) 1616 { 1617 if (!nonblock) 1618 return dma_resv_lock_interruptible(dmabuf->resv, NULL); 1619 1620 if (!dma_resv_trylock(dmabuf->resv)) 1621 return -EBUSY; 1622 1623 return 0; 1624 } 1625 1626 static struct device *iio_buffer_get_dma_dev(const struct iio_dev *indio_dev, 1627 struct iio_buffer *buffer) 1628 { 1629 if (buffer->access->get_dma_dev) 1630 return buffer->access->get_dma_dev(buffer); 1631 1632 return indio_dev->dev.parent; 1633 } 1634 1635 static struct dma_buf_attachment * 1636 iio_buffer_find_attachment(struct iio_dev_buffer_pair *ib, 1637 struct dma_buf *dmabuf, bool nonblock) 1638 { 1639 struct iio_buffer *buffer = ib->buffer; 1640 struct device *dma_dev = iio_buffer_get_dma_dev(ib->indio_dev, buffer); 1641 struct dma_buf_attachment *attach = NULL; 1642 struct iio_dmabuf_priv *priv; 1643 1644 guard(mutex)(&buffer->dmabufs_mutex); 1645 1646 list_for_each_entry(priv, &buffer->dmabufs, entry) { 1647 if (priv->attach->dev == dma_dev 1648 && priv->attach->dmabuf == dmabuf) { 1649 attach = priv->attach; 1650 break; 1651 } 1652 } 1653 1654 if (attach) 1655 iio_buffer_dmabuf_get(attach); 1656 1657 return attach ?: ERR_PTR(-EPERM); 1658 } 1659 1660 static int iio_buffer_attach_dmabuf(struct iio_dev_buffer_pair *ib, 1661 int __user *user_fd, bool nonblock) 1662 { 1663 struct iio_dev *indio_dev = ib->indio_dev; 1664 struct iio_buffer *buffer = ib->buffer; 1665 struct device *dma_dev = iio_buffer_get_dma_dev(indio_dev, buffer); 1666 struct dma_buf_attachment *attach; 1667 struct iio_dmabuf_priv *priv, *each; 1668 struct dma_buf *dmabuf; 1669 int err, fd; 1670 1671 if (!buffer->access->attach_dmabuf 1672 || !buffer->access->detach_dmabuf 1673 || !buffer->access->enqueue_dmabuf) 1674 return -EPERM; 1675 1676 if (copy_from_user(&fd, user_fd, sizeof(fd))) 1677 return -EFAULT; 1678 1679 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 1680 if (!priv) 1681 return -ENOMEM; 1682 1683 spin_lock_init(&priv->lock); 1684 priv->context = dma_fence_context_alloc(1); 1685 1686 dmabuf = dma_buf_get(fd); 1687 if (IS_ERR(dmabuf)) { 1688 err = PTR_ERR(dmabuf); 1689 goto err_free_priv; 1690 } 1691 1692 attach = dma_buf_attach(dmabuf, dma_dev); 1693 if (IS_ERR(attach)) { 1694 err = PTR_ERR(attach); 1695 goto err_dmabuf_put; 1696 } 1697 1698 err = iio_dma_resv_lock(dmabuf, nonblock); 1699 if (err) 1700 goto err_dmabuf_detach; 1701 1702 priv->dir = buffer->direction == IIO_BUFFER_DIRECTION_IN 1703 ? DMA_FROM_DEVICE : DMA_TO_DEVICE; 1704 1705 priv->sgt = dma_buf_map_attachment(attach, priv->dir); 1706 if (IS_ERR(priv->sgt)) { 1707 err = PTR_ERR(priv->sgt); 1708 dev_err(&indio_dev->dev, "Unable to map attachment: %d\n", err); 1709 goto err_resv_unlock; 1710 } 1711 1712 kref_init(&priv->ref); 1713 priv->buffer = buffer; 1714 priv->attach = attach; 1715 attach->importer_priv = priv; 1716 1717 priv->block = buffer->access->attach_dmabuf(buffer, attach); 1718 if (IS_ERR(priv->block)) { 1719 err = PTR_ERR(priv->block); 1720 goto err_dmabuf_unmap_attachment; 1721 } 1722 1723 dma_resv_unlock(dmabuf->resv); 1724 1725 mutex_lock(&buffer->dmabufs_mutex); 1726 1727 /* 1728 * Check whether we already have an attachment for this driver/DMABUF 1729 * combo. If we do, refuse to attach. 1730 */ 1731 list_for_each_entry(each, &buffer->dmabufs, entry) { 1732 if (each->attach->dev == dma_dev 1733 && each->attach->dmabuf == dmabuf) { 1734 /* 1735 * We unlocked the reservation object, so going through 1736 * the cleanup code would mean re-locking it first. 1737 * At this stage it is simpler to free the attachment 1738 * using iio_buffer_dma_put(). 1739 */ 1740 mutex_unlock(&buffer->dmabufs_mutex); 1741 iio_buffer_dmabuf_put(attach); 1742 return -EBUSY; 1743 } 1744 } 1745 1746 /* Otherwise, add the new attachment to our dmabufs list. */ 1747 list_add(&priv->entry, &buffer->dmabufs); 1748 mutex_unlock(&buffer->dmabufs_mutex); 1749 1750 return 0; 1751 1752 err_dmabuf_unmap_attachment: 1753 dma_buf_unmap_attachment(attach, priv->sgt, priv->dir); 1754 err_resv_unlock: 1755 dma_resv_unlock(dmabuf->resv); 1756 err_dmabuf_detach: 1757 dma_buf_detach(dmabuf, attach); 1758 err_dmabuf_put: 1759 dma_buf_put(dmabuf); 1760 err_free_priv: 1761 kfree(priv); 1762 1763 return err; 1764 } 1765 1766 static int iio_buffer_detach_dmabuf(struct iio_dev_buffer_pair *ib, 1767 int __user *user_req, bool nonblock) 1768 { 1769 struct iio_buffer *buffer = ib->buffer; 1770 struct iio_dev *indio_dev = ib->indio_dev; 1771 struct device *dma_dev = iio_buffer_get_dma_dev(indio_dev, buffer); 1772 struct iio_dmabuf_priv *priv; 1773 struct dma_buf *dmabuf; 1774 int dmabuf_fd, ret = -EPERM; 1775 1776 if (copy_from_user(&dmabuf_fd, user_req, sizeof(dmabuf_fd))) 1777 return -EFAULT; 1778 1779 dmabuf = dma_buf_get(dmabuf_fd); 1780 if (IS_ERR(dmabuf)) 1781 return PTR_ERR(dmabuf); 1782 1783 guard(mutex)(&buffer->dmabufs_mutex); 1784 1785 list_for_each_entry(priv, &buffer->dmabufs, entry) { 1786 if (priv->attach->dev == dma_dev 1787 && priv->attach->dmabuf == dmabuf) { 1788 list_del(&priv->entry); 1789 1790 /* Unref the reference from iio_buffer_attach_dmabuf() */ 1791 iio_buffer_dmabuf_put(priv->attach); 1792 ret = 0; 1793 break; 1794 } 1795 } 1796 1797 dma_buf_put(dmabuf); 1798 1799 return ret; 1800 } 1801 1802 static const char * 1803 iio_buffer_dma_fence_get_driver_name(struct dma_fence *fence) 1804 { 1805 return "iio"; 1806 } 1807 1808 static void iio_buffer_dma_fence_release(struct dma_fence *fence) 1809 { 1810 struct iio_dma_fence *iio_fence = 1811 container_of(fence, struct iio_dma_fence, base); 1812 1813 kfree(iio_fence); 1814 } 1815 1816 static const struct dma_fence_ops iio_buffer_dma_fence_ops = { 1817 .get_driver_name = iio_buffer_dma_fence_get_driver_name, 1818 .get_timeline_name = iio_buffer_dma_fence_get_driver_name, 1819 .release = iio_buffer_dma_fence_release, 1820 }; 1821 1822 static int iio_buffer_enqueue_dmabuf(struct iio_dev_buffer_pair *ib, 1823 struct iio_dmabuf __user *iio_dmabuf_req, 1824 bool nonblock) 1825 { 1826 struct iio_buffer *buffer = ib->buffer; 1827 struct iio_dmabuf iio_dmabuf; 1828 struct dma_buf_attachment *attach; 1829 struct iio_dmabuf_priv *priv; 1830 struct iio_dma_fence *fence; 1831 struct dma_buf *dmabuf; 1832 unsigned long timeout; 1833 bool cookie, cyclic, dma_to_ram; 1834 long retl; 1835 u32 seqno; 1836 int ret; 1837 1838 if (copy_from_user(&iio_dmabuf, iio_dmabuf_req, sizeof(iio_dmabuf))) 1839 return -EFAULT; 1840 1841 if (iio_dmabuf.flags & ~IIO_BUFFER_DMABUF_SUPPORTED_FLAGS) 1842 return -EINVAL; 1843 1844 cyclic = iio_dmabuf.flags & IIO_BUFFER_DMABUF_CYCLIC; 1845 1846 /* Cyclic flag is only supported on output buffers */ 1847 if (cyclic && buffer->direction != IIO_BUFFER_DIRECTION_OUT) 1848 return -EINVAL; 1849 1850 dmabuf = dma_buf_get(iio_dmabuf.fd); 1851 if (IS_ERR(dmabuf)) 1852 return PTR_ERR(dmabuf); 1853 1854 if (!iio_dmabuf.bytes_used || iio_dmabuf.bytes_used > dmabuf->size) { 1855 ret = -EINVAL; 1856 goto err_dmabuf_put; 1857 } 1858 1859 attach = iio_buffer_find_attachment(ib, dmabuf, nonblock); 1860 if (IS_ERR(attach)) { 1861 ret = PTR_ERR(attach); 1862 goto err_dmabuf_put; 1863 } 1864 1865 priv = attach->importer_priv; 1866 1867 fence = kmalloc(sizeof(*fence), GFP_KERNEL); 1868 if (!fence) { 1869 ret = -ENOMEM; 1870 goto err_attachment_put; 1871 } 1872 1873 fence->priv = priv; 1874 1875 seqno = atomic_add_return(1, &priv->seqno); 1876 1877 /* 1878 * The transfers are guaranteed to be processed in the order they are 1879 * enqueued, so we can use a simple incrementing sequence number for 1880 * the dma_fence. 1881 */ 1882 dma_fence_init(&fence->base, &iio_buffer_dma_fence_ops, 1883 &priv->lock, priv->context, seqno); 1884 1885 ret = iio_dma_resv_lock(dmabuf, nonblock); 1886 if (ret) 1887 goto err_fence_put; 1888 1889 timeout = nonblock ? 0 : msecs_to_jiffies(DMABUF_ENQUEUE_TIMEOUT_MS); 1890 dma_to_ram = buffer->direction == IIO_BUFFER_DIRECTION_IN; 1891 1892 /* Make sure we don't have writers */ 1893 retl = dma_resv_wait_timeout(dmabuf->resv, 1894 dma_resv_usage_rw(dma_to_ram), 1895 true, timeout); 1896 if (retl == 0) 1897 retl = -EBUSY; 1898 if (retl < 0) { 1899 ret = (int)retl; 1900 goto err_resv_unlock; 1901 } 1902 1903 if (buffer->access->lock_queue) 1904 buffer->access->lock_queue(buffer); 1905 1906 ret = dma_resv_reserve_fences(dmabuf->resv, 1); 1907 if (ret) 1908 goto err_queue_unlock; 1909 1910 dma_resv_add_fence(dmabuf->resv, &fence->base, 1911 dma_to_ram ? DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ); 1912 dma_resv_unlock(dmabuf->resv); 1913 1914 cookie = dma_fence_begin_signalling(); 1915 1916 ret = buffer->access->enqueue_dmabuf(buffer, priv->block, &fence->base, 1917 priv->sgt, iio_dmabuf.bytes_used, 1918 cyclic); 1919 if (ret) { 1920 /* 1921 * DMABUF enqueue failed, but we already added the fence. 1922 * Signal the error through the fence completion mechanism. 1923 */ 1924 iio_buffer_signal_dmabuf_done(&fence->base, ret); 1925 } 1926 1927 if (buffer->access->unlock_queue) 1928 buffer->access->unlock_queue(buffer); 1929 1930 dma_fence_end_signalling(cookie); 1931 dma_buf_put(dmabuf); 1932 1933 return ret; 1934 1935 err_queue_unlock: 1936 if (buffer->access->unlock_queue) 1937 buffer->access->unlock_queue(buffer); 1938 err_resv_unlock: 1939 dma_resv_unlock(dmabuf->resv); 1940 err_fence_put: 1941 dma_fence_put(&fence->base); 1942 err_attachment_put: 1943 iio_buffer_dmabuf_put(attach); 1944 err_dmabuf_put: 1945 dma_buf_put(dmabuf); 1946 1947 return ret; 1948 } 1949 1950 static void iio_buffer_cleanup(struct work_struct *work) 1951 { 1952 struct iio_dma_fence *fence = 1953 container_of(work, struct iio_dma_fence, work); 1954 struct iio_dmabuf_priv *priv = fence->priv; 1955 struct dma_buf_attachment *attach = priv->attach; 1956 1957 dma_fence_put(&fence->base); 1958 iio_buffer_dmabuf_put(attach); 1959 } 1960 1961 void iio_buffer_signal_dmabuf_done(struct dma_fence *fence, int ret) 1962 { 1963 struct iio_dma_fence *iio_fence = 1964 container_of(fence, struct iio_dma_fence, base); 1965 bool cookie = dma_fence_begin_signalling(); 1966 1967 /* 1968 * Get a reference to the fence, so that it's not freed as soon as 1969 * it's signaled. 1970 */ 1971 dma_fence_get(fence); 1972 1973 fence->error = ret; 1974 dma_fence_signal(fence); 1975 dma_fence_end_signalling(cookie); 1976 1977 /* 1978 * The fence will be unref'd in iio_buffer_cleanup. 1979 * It can't be done here, as the unref functions might try to lock the 1980 * resv object, which can deadlock. 1981 */ 1982 INIT_WORK(&iio_fence->work, iio_buffer_cleanup); 1983 schedule_work(&iio_fence->work); 1984 } 1985 EXPORT_SYMBOL_GPL(iio_buffer_signal_dmabuf_done); 1986 1987 static long iio_buffer_chrdev_ioctl(struct file *filp, 1988 unsigned int cmd, unsigned long arg) 1989 { 1990 struct iio_dev_buffer_pair *ib = filp->private_data; 1991 void __user *_arg = (void __user *)arg; 1992 bool nonblock = filp->f_flags & O_NONBLOCK; 1993 1994 switch (cmd) { 1995 case IIO_BUFFER_DMABUF_ATTACH_IOCTL: 1996 return iio_buffer_attach_dmabuf(ib, _arg, nonblock); 1997 case IIO_BUFFER_DMABUF_DETACH_IOCTL: 1998 return iio_buffer_detach_dmabuf(ib, _arg, nonblock); 1999 case IIO_BUFFER_DMABUF_ENQUEUE_IOCTL: 2000 return iio_buffer_enqueue_dmabuf(ib, _arg, nonblock); 2001 default: 2002 return -EINVAL; 2003 } 2004 } 2005 2006 static const struct file_operations iio_buffer_chrdev_fileops = { 2007 .owner = THIS_MODULE, 2008 .llseek = noop_llseek, 2009 .read = iio_buffer_read, 2010 .write = iio_buffer_write, 2011 .unlocked_ioctl = iio_buffer_chrdev_ioctl, 2012 .compat_ioctl = compat_ptr_ioctl, 2013 .poll = iio_buffer_poll, 2014 .release = iio_buffer_chrdev_release, 2015 }; 2016 2017 static long iio_device_buffer_getfd(struct iio_dev *indio_dev, unsigned long arg) 2018 { 2019 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 2020 int __user *ival = (int __user *)arg; 2021 struct iio_dev_buffer_pair *ib; 2022 struct iio_buffer *buffer; 2023 int fd, idx, ret; 2024 2025 if (copy_from_user(&idx, ival, sizeof(idx))) 2026 return -EFAULT; 2027 2028 if (idx >= iio_dev_opaque->attached_buffers_cnt) 2029 return -ENODEV; 2030 2031 iio_device_get(indio_dev); 2032 2033 buffer = iio_dev_opaque->attached_buffers[idx]; 2034 2035 if (test_and_set_bit(IIO_BUSY_BIT_POS, &buffer->flags)) { 2036 ret = -EBUSY; 2037 goto error_iio_dev_put; 2038 } 2039 2040 ib = kzalloc(sizeof(*ib), GFP_KERNEL); 2041 if (!ib) { 2042 ret = -ENOMEM; 2043 goto error_clear_busy_bit; 2044 } 2045 2046 ib->indio_dev = indio_dev; 2047 ib->buffer = buffer; 2048 2049 fd = anon_inode_getfd("iio:buffer", &iio_buffer_chrdev_fileops, 2050 ib, O_RDWR | O_CLOEXEC); 2051 if (fd < 0) { 2052 ret = fd; 2053 goto error_free_ib; 2054 } 2055 2056 if (copy_to_user(ival, &fd, sizeof(fd))) { 2057 /* 2058 * "Leak" the fd, as there's not much we can do about this 2059 * anyway. 'fd' might have been closed already, as 2060 * anon_inode_getfd() called fd_install() on it, which made 2061 * it reachable by userland. 2062 * 2063 * Instead of allowing a malicious user to play tricks with 2064 * us, rely on the process exit path to do any necessary 2065 * cleanup, as in releasing the file, if still needed. 2066 */ 2067 return -EFAULT; 2068 } 2069 2070 return 0; 2071 2072 error_free_ib: 2073 kfree(ib); 2074 error_clear_busy_bit: 2075 clear_bit(IIO_BUSY_BIT_POS, &buffer->flags); 2076 error_iio_dev_put: 2077 iio_device_put(indio_dev); 2078 return ret; 2079 } 2080 2081 static long iio_device_buffer_ioctl(struct iio_dev *indio_dev, struct file *filp, 2082 unsigned int cmd, unsigned long arg) 2083 { 2084 switch (cmd) { 2085 case IIO_BUFFER_GET_FD_IOCTL: 2086 return iio_device_buffer_getfd(indio_dev, arg); 2087 default: 2088 return IIO_IOCTL_UNHANDLED; 2089 } 2090 } 2091 2092 static int iio_channel_validate_scan_type(struct device *dev, int ch, 2093 const struct iio_scan_type *scan_type) 2094 { 2095 /* Verify that sample bits fit into storage */ 2096 if (scan_type->storagebits < scan_type->realbits + scan_type->shift) { 2097 dev_err(dev, 2098 "Channel %d storagebits (%d) < shifted realbits (%d + %d)\n", 2099 ch, scan_type->storagebits, 2100 scan_type->realbits, 2101 scan_type->shift); 2102 return -EINVAL; 2103 } 2104 2105 return 0; 2106 } 2107 2108 static int __iio_buffer_alloc_sysfs_and_mask(struct iio_buffer *buffer, 2109 struct iio_dev *indio_dev, 2110 int index) 2111 { 2112 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 2113 unsigned int masklength = iio_get_masklength(indio_dev); 2114 struct iio_dev_attr *p; 2115 const struct iio_dev_attr *id_attr; 2116 struct attribute **attr; 2117 int ret, i, attrn, scan_el_attrcount, buffer_attrcount; 2118 const struct iio_chan_spec *channels; 2119 2120 buffer_attrcount = 0; 2121 if (buffer->attrs) { 2122 while (buffer->attrs[buffer_attrcount]) 2123 buffer_attrcount++; 2124 } 2125 buffer_attrcount += ARRAY_SIZE(iio_buffer_attrs); 2126 2127 scan_el_attrcount = 0; 2128 INIT_LIST_HEAD(&buffer->buffer_attr_list); 2129 channels = indio_dev->channels; 2130 if (channels) { 2131 /* new magic */ 2132 for (i = 0; i < indio_dev->num_channels; i++) { 2133 const struct iio_scan_type *scan_type; 2134 2135 if (channels[i].scan_index < 0) 2136 continue; 2137 2138 if (channels[i].has_ext_scan_type) { 2139 int j; 2140 2141 /* 2142 * get_current_scan_type is required when using 2143 * extended scan types. 2144 */ 2145 if (!indio_dev->info->get_current_scan_type) { 2146 ret = -EINVAL; 2147 goto error_cleanup_dynamic; 2148 } 2149 2150 for (j = 0; j < channels[i].num_ext_scan_type; j++) { 2151 scan_type = &channels[i].ext_scan_type[j]; 2152 2153 ret = iio_channel_validate_scan_type( 2154 &indio_dev->dev, i, scan_type); 2155 if (ret) 2156 goto error_cleanup_dynamic; 2157 } 2158 } else { 2159 scan_type = &channels[i].scan_type; 2160 2161 ret = iio_channel_validate_scan_type( 2162 &indio_dev->dev, i, scan_type); 2163 if (ret) 2164 goto error_cleanup_dynamic; 2165 } 2166 2167 ret = iio_buffer_add_channel_sysfs(indio_dev, buffer, 2168 &channels[i]); 2169 if (ret < 0) 2170 goto error_cleanup_dynamic; 2171 scan_el_attrcount += ret; 2172 if (channels[i].type == IIO_TIMESTAMP) 2173 iio_dev_opaque->scan_index_timestamp = 2174 channels[i].scan_index; 2175 } 2176 if (masklength && !buffer->scan_mask) { 2177 buffer->scan_mask = bitmap_zalloc(masklength, 2178 GFP_KERNEL); 2179 if (!buffer->scan_mask) { 2180 ret = -ENOMEM; 2181 goto error_cleanup_dynamic; 2182 } 2183 } 2184 } 2185 2186 attrn = buffer_attrcount + scan_el_attrcount; 2187 attr = kcalloc(attrn + 1, sizeof(*attr), GFP_KERNEL); 2188 if (!attr) { 2189 ret = -ENOMEM; 2190 goto error_free_scan_mask; 2191 } 2192 2193 memcpy(attr, iio_buffer_attrs, sizeof(iio_buffer_attrs)); 2194 if (!buffer->access->set_length) 2195 attr[0] = &dev_attr_length_ro.attr; 2196 2197 if (buffer->access->flags & INDIO_BUFFER_FLAG_FIXED_WATERMARK) 2198 attr[2] = &dev_attr_watermark_ro.attr; 2199 2200 if (buffer->attrs) 2201 for (i = 0, id_attr = buffer->attrs[i]; 2202 (id_attr = buffer->attrs[i]); i++) 2203 attr[ARRAY_SIZE(iio_buffer_attrs) + i] = 2204 (struct attribute *)&id_attr->dev_attr.attr; 2205 2206 buffer->buffer_group.attrs = attr; 2207 2208 for (i = 0; i < buffer_attrcount; i++) { 2209 struct attribute *wrapped; 2210 2211 wrapped = iio_buffer_wrap_attr(buffer, attr[i]); 2212 if (!wrapped) { 2213 ret = -ENOMEM; 2214 goto error_free_buffer_attrs; 2215 } 2216 attr[i] = wrapped; 2217 } 2218 2219 attrn = 0; 2220 list_for_each_entry(p, &buffer->buffer_attr_list, l) 2221 attr[attrn++] = &p->dev_attr.attr; 2222 2223 buffer->buffer_group.name = kasprintf(GFP_KERNEL, "buffer%d", index); 2224 if (!buffer->buffer_group.name) { 2225 ret = -ENOMEM; 2226 goto error_free_buffer_attrs; 2227 } 2228 2229 ret = iio_device_register_sysfs_group(indio_dev, &buffer->buffer_group); 2230 if (ret) 2231 goto error_free_buffer_attr_group_name; 2232 2233 /* we only need to register the legacy groups for the first buffer */ 2234 if (index > 0) 2235 return 0; 2236 2237 ret = iio_buffer_register_legacy_sysfs_groups(indio_dev, attr, 2238 buffer_attrcount, 2239 scan_el_attrcount); 2240 if (ret) 2241 goto error_free_buffer_attr_group_name; 2242 2243 return 0; 2244 2245 error_free_buffer_attr_group_name: 2246 kfree(buffer->buffer_group.name); 2247 error_free_buffer_attrs: 2248 kfree(buffer->buffer_group.attrs); 2249 error_free_scan_mask: 2250 bitmap_free(buffer->scan_mask); 2251 error_cleanup_dynamic: 2252 iio_free_chan_devattr_list(&buffer->buffer_attr_list); 2253 2254 return ret; 2255 } 2256 2257 static void __iio_buffer_free_sysfs_and_mask(struct iio_buffer *buffer, 2258 struct iio_dev *indio_dev, 2259 int index) 2260 { 2261 if (index == 0) 2262 iio_buffer_unregister_legacy_sysfs_groups(indio_dev); 2263 bitmap_free(buffer->scan_mask); 2264 kfree(buffer->buffer_group.name); 2265 kfree(buffer->buffer_group.attrs); 2266 iio_free_chan_devattr_list(&buffer->buffer_attr_list); 2267 } 2268 2269 int iio_buffers_alloc_sysfs_and_mask(struct iio_dev *indio_dev) 2270 { 2271 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 2272 const struct iio_chan_spec *channels; 2273 struct iio_buffer *buffer; 2274 int ret, i, idx; 2275 size_t sz; 2276 2277 channels = indio_dev->channels; 2278 if (channels) { 2279 int ml = 0; 2280 2281 for (i = 0; i < indio_dev->num_channels; i++) 2282 ml = max(ml, channels[i].scan_index + 1); 2283 ACCESS_PRIVATE(indio_dev, masklength) = ml; 2284 } 2285 2286 if (!iio_dev_opaque->attached_buffers_cnt) 2287 return 0; 2288 2289 for (idx = 0; idx < iio_dev_opaque->attached_buffers_cnt; idx++) { 2290 buffer = iio_dev_opaque->attached_buffers[idx]; 2291 ret = __iio_buffer_alloc_sysfs_and_mask(buffer, indio_dev, idx); 2292 if (ret) 2293 goto error_unwind_sysfs_and_mask; 2294 } 2295 2296 sz = sizeof(*iio_dev_opaque->buffer_ioctl_handler); 2297 iio_dev_opaque->buffer_ioctl_handler = kzalloc(sz, GFP_KERNEL); 2298 if (!iio_dev_opaque->buffer_ioctl_handler) { 2299 ret = -ENOMEM; 2300 goto error_unwind_sysfs_and_mask; 2301 } 2302 2303 iio_dev_opaque->buffer_ioctl_handler->ioctl = iio_device_buffer_ioctl; 2304 iio_device_ioctl_handler_register(indio_dev, 2305 iio_dev_opaque->buffer_ioctl_handler); 2306 2307 return 0; 2308 2309 error_unwind_sysfs_and_mask: 2310 while (idx--) { 2311 buffer = iio_dev_opaque->attached_buffers[idx]; 2312 __iio_buffer_free_sysfs_and_mask(buffer, indio_dev, idx); 2313 } 2314 return ret; 2315 } 2316 2317 void iio_buffers_free_sysfs_and_mask(struct iio_dev *indio_dev) 2318 { 2319 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 2320 struct iio_buffer *buffer; 2321 int i; 2322 2323 if (!iio_dev_opaque->attached_buffers_cnt) 2324 return; 2325 2326 iio_device_ioctl_handler_unregister(iio_dev_opaque->buffer_ioctl_handler); 2327 kfree(iio_dev_opaque->buffer_ioctl_handler); 2328 2329 for (i = iio_dev_opaque->attached_buffers_cnt - 1; i >= 0; i--) { 2330 buffer = iio_dev_opaque->attached_buffers[i]; 2331 __iio_buffer_free_sysfs_and_mask(buffer, indio_dev, i); 2332 } 2333 } 2334 2335 /** 2336 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected 2337 * @indio_dev: the iio device 2338 * @mask: scan mask to be checked 2339 * 2340 * Return true if exactly one bit is set in the scan mask, false otherwise. It 2341 * can be used for devices where only one channel can be active for sampling at 2342 * a time. 2343 */ 2344 bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev, 2345 const unsigned long *mask) 2346 { 2347 return bitmap_weight(mask, iio_get_masklength(indio_dev)) == 1; 2348 } 2349 EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot); 2350 2351 static const void *iio_demux(struct iio_buffer *buffer, 2352 const void *datain) 2353 { 2354 struct iio_demux_table *t; 2355 2356 if (list_empty(&buffer->demux_list)) 2357 return datain; 2358 list_for_each_entry(t, &buffer->demux_list, l) 2359 memcpy(buffer->demux_bounce + t->to, 2360 datain + t->from, t->length); 2361 2362 return buffer->demux_bounce; 2363 } 2364 2365 static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data) 2366 { 2367 const void *dataout = iio_demux(buffer, data); 2368 int ret; 2369 2370 ret = buffer->access->store_to(buffer, dataout); 2371 if (ret) 2372 return ret; 2373 2374 /* 2375 * We can't just test for watermark to decide if we wake the poll queue 2376 * because read may request less samples than the watermark. 2377 */ 2378 wake_up_interruptible_poll(&buffer->pollq, EPOLLIN | EPOLLRDNORM); 2379 return 0; 2380 } 2381 2382 /** 2383 * iio_push_to_buffers() - push to a registered buffer. 2384 * @indio_dev: iio_dev structure for device. 2385 * @data: Full scan. 2386 */ 2387 int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data) 2388 { 2389 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 2390 int ret; 2391 struct iio_buffer *buf; 2392 2393 list_for_each_entry(buf, &iio_dev_opaque->buffer_list, buffer_list) { 2394 ret = iio_push_to_buffer(buf, data); 2395 if (ret < 0) 2396 return ret; 2397 } 2398 2399 return 0; 2400 } 2401 EXPORT_SYMBOL_GPL(iio_push_to_buffers); 2402 2403 /** 2404 * iio_push_to_buffers_with_ts_unaligned() - push to registered buffer, 2405 * no alignment or space requirements. 2406 * @indio_dev: iio_dev structure for device. 2407 * @data: channel data excluding the timestamp. 2408 * @data_sz: size of data. 2409 * @timestamp: timestamp for the sample data. 2410 * 2411 * This special variant of iio_push_to_buffers_with_timestamp() does 2412 * not require space for the timestamp, or 8 byte alignment of data. 2413 * It does however require an allocation on first call and additional 2414 * copies on all calls, so should be avoided if possible. 2415 */ 2416 int iio_push_to_buffers_with_ts_unaligned(struct iio_dev *indio_dev, 2417 const void *data, 2418 size_t data_sz, 2419 int64_t timestamp) 2420 { 2421 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 2422 2423 /* 2424 * Conservative estimate - we can always safely copy the minimum 2425 * of either the data provided or the length of the destination buffer. 2426 * This relaxed limit allows the calling drivers to be lax about 2427 * tracking the size of the data they are pushing, at the cost of 2428 * unnecessary copying of padding. 2429 */ 2430 data_sz = min_t(size_t, indio_dev->scan_bytes, data_sz); 2431 if (iio_dev_opaque->bounce_buffer_size != indio_dev->scan_bytes) { 2432 void *bb; 2433 2434 bb = devm_krealloc(&indio_dev->dev, 2435 iio_dev_opaque->bounce_buffer, 2436 indio_dev->scan_bytes, GFP_KERNEL); 2437 if (!bb) 2438 return -ENOMEM; 2439 iio_dev_opaque->bounce_buffer = bb; 2440 iio_dev_opaque->bounce_buffer_size = indio_dev->scan_bytes; 2441 } 2442 memcpy(iio_dev_opaque->bounce_buffer, data, data_sz); 2443 return iio_push_to_buffers_with_timestamp(indio_dev, 2444 iio_dev_opaque->bounce_buffer, 2445 timestamp); 2446 } 2447 EXPORT_SYMBOL_GPL(iio_push_to_buffers_with_ts_unaligned); 2448 2449 /** 2450 * iio_buffer_release() - Free a buffer's resources 2451 * @ref: Pointer to the kref embedded in the iio_buffer struct 2452 * 2453 * This function is called when the last reference to the buffer has been 2454 * dropped. It will typically free all resources allocated by the buffer. Do not 2455 * call this function manually, always use iio_buffer_put() when done using a 2456 * buffer. 2457 */ 2458 static void iio_buffer_release(struct kref *ref) 2459 { 2460 struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref); 2461 2462 mutex_destroy(&buffer->dmabufs_mutex); 2463 buffer->access->release(buffer); 2464 } 2465 2466 /** 2467 * iio_buffer_get() - Grab a reference to the buffer 2468 * @buffer: The buffer to grab a reference for, may be NULL 2469 * 2470 * Returns the pointer to the buffer that was passed into the function. 2471 */ 2472 struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer) 2473 { 2474 if (buffer) 2475 kref_get(&buffer->ref); 2476 2477 return buffer; 2478 } 2479 EXPORT_SYMBOL_GPL(iio_buffer_get); 2480 2481 /** 2482 * iio_buffer_put() - Release the reference to the buffer 2483 * @buffer: The buffer to release the reference for, may be NULL 2484 */ 2485 void iio_buffer_put(struct iio_buffer *buffer) 2486 { 2487 if (buffer) 2488 kref_put(&buffer->ref, iio_buffer_release); 2489 } 2490 EXPORT_SYMBOL_GPL(iio_buffer_put); 2491 2492 /** 2493 * iio_device_attach_buffer - Attach a buffer to a IIO device 2494 * @indio_dev: The device the buffer should be attached to 2495 * @buffer: The buffer to attach to the device 2496 * 2497 * Return 0 if successful, negative if error. 2498 * 2499 * This function attaches a buffer to a IIO device. The buffer stays attached to 2500 * the device until the device is freed. For legacy reasons, the first attached 2501 * buffer will also be assigned to 'indio_dev->buffer'. 2502 * The array allocated here, will be free'd via the iio_device_detach_buffers() 2503 * call which is handled by the iio_device_free(). 2504 */ 2505 int iio_device_attach_buffer(struct iio_dev *indio_dev, 2506 struct iio_buffer *buffer) 2507 { 2508 struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev); 2509 struct iio_buffer **new, **old = iio_dev_opaque->attached_buffers; 2510 unsigned int cnt = iio_dev_opaque->attached_buffers_cnt; 2511 2512 cnt++; 2513 2514 new = krealloc(old, sizeof(*new) * cnt, GFP_KERNEL); 2515 if (!new) 2516 return -ENOMEM; 2517 iio_dev_opaque->attached_buffers = new; 2518 2519 buffer = iio_buffer_get(buffer); 2520 2521 /* first buffer is legacy; attach it to the IIO device directly */ 2522 if (!indio_dev->buffer) 2523 indio_dev->buffer = buffer; 2524 2525 iio_dev_opaque->attached_buffers[cnt - 1] = buffer; 2526 iio_dev_opaque->attached_buffers_cnt = cnt; 2527 2528 return 0; 2529 } 2530 EXPORT_SYMBOL_GPL(iio_device_attach_buffer); 2531