xref: /linux/drivers/iio/industrialio-buffer.c (revision e9d053f4222e79958fbea3c71c563a2d528d0d5e)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* The industrial I/O core
3  *
4  * Copyright (c) 2008 Jonathan Cameron
5  *
6  * Handling of buffer allocation / resizing.
7  *
8  * Things to look at here.
9  * - Better memory allocation techniques?
10  * - Alternative access techniques?
11  */
12 #include <linux/anon_inodes.h>
13 #include <linux/cleanup.h>
14 #include <linux/kernel.h>
15 #include <linux/export.h>
16 #include <linux/device.h>
17 #include <linux/file.h>
18 #include <linux/fs.h>
19 #include <linux/cdev.h>
20 #include <linux/slab.h>
21 #include <linux/poll.h>
22 #include <linux/sched/signal.h>
23 
24 #include <linux/iio/iio.h>
25 #include <linux/iio/iio-opaque.h>
26 #include "iio_core.h"
27 #include "iio_core_trigger.h"
28 #include <linux/iio/sysfs.h>
29 #include <linux/iio/buffer.h>
30 #include <linux/iio/buffer_impl.h>
31 
/* sysfs "type" attribute prefixes for scan-element endianness. */
static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};
36 
/* A buffer is "active" while it is linked on the device's buffer_list. */
static bool iio_buffer_is_active(struct iio_buffer *buf)
{
	return !list_empty(&buf->buffer_list);
}
41 
/* Number of data elements currently available for reading from @buf. */
static size_t iio_buffer_data_available(struct iio_buffer *buf)
{
	return buf->access->data_available(buf);
}
46 
/*
 * Ask the driver to move at least @required samples from its hardware
 * FIFO into the buffer. Returns the driver's result, or -ENODEV when the
 * driver provides no flush callback.
 */
static int iio_buffer_flush_hwfifo(struct iio_dev *indio_dev,
				   struct iio_buffer *buf, size_t required)
{
	if (!indio_dev->info->hwfifo_flush_to_buffer)
		return -ENODEV;

	return indio_dev->info->hwfifo_flush_to_buffer(indio_dev, required);
}
55 
56 static bool iio_buffer_ready(struct iio_dev *indio_dev, struct iio_buffer *buf,
57 			     size_t to_wait, int to_flush)
58 {
59 	size_t avail;
60 	int flushed = 0;
61 
62 	/* wakeup if the device was unregistered */
63 	if (!indio_dev->info)
64 		return true;
65 
66 	/* drain the buffer if it was disabled */
67 	if (!iio_buffer_is_active(buf)) {
68 		to_wait = min_t(size_t, to_wait, 1);
69 		to_flush = 0;
70 	}
71 
72 	avail = iio_buffer_data_available(buf);
73 
74 	if (avail >= to_wait) {
75 		/* force a flush for non-blocking reads */
76 		if (!to_wait && avail < to_flush)
77 			iio_buffer_flush_hwfifo(indio_dev, buf,
78 						to_flush - avail);
79 		return true;
80 	}
81 
82 	if (to_flush)
83 		flushed = iio_buffer_flush_hwfifo(indio_dev, buf,
84 						  to_wait - avail);
85 	if (flushed <= 0)
86 		return false;
87 
88 	if (avail + flushed >= to_wait)
89 		return true;
90 
91 	return false;
92 }
93 
/**
 * iio_buffer_read() - chrdev read for buffer access
 * @filp:	File structure pointer for the char device
 * @buf:	Destination buffer for iio buffer read
 * @n:		First n bytes to read
 * @f_ps:	Long offset provided by the user as a seek position
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 *
 * Return: negative values corresponding to error codes or ret != 0
 *	   for ending the reading activity
 **/
static ssize_t iio_buffer_read(struct file *filp, char __user *buf,
			       size_t n, loff_t *f_ps)
{
	struct iio_dev_buffer_pair *ib = filp->private_data;
	struct iio_buffer *rb = ib->buffer;
	struct iio_dev *indio_dev = ib->indio_dev;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	size_t datum_size;
	size_t to_wait;
	int ret = 0;

	if (!indio_dev->info)
		return -ENODEV;

	if (!rb || !rb->access->read)
		return -EINVAL;

	/* Only input (device-to-user) buffers can be read. */
	if (rb->direction != IIO_BUFFER_DIRECTION_IN)
		return -EPERM;

	datum_size = rb->bytes_per_datum;

	/*
	 * If datum_size is 0 there will never be anything to read from the
	 * buffer, so signal end of file now.
	 */
	if (!datum_size)
		return 0;

	/* Non-blocking readers never sleep waiting for data. */
	if (filp->f_flags & O_NONBLOCK)
		to_wait = 0;
	else
		to_wait = min_t(size_t, n / datum_size, rb->watermark);

	add_wait_queue(&rb->pollq, &wait);
	do {
		/* The device may have been unregistered while we slept. */
		if (!indio_dev->info) {
			ret = -ENODEV;
			break;
		}

		if (!iio_buffer_ready(indio_dev, rb, to_wait, n / datum_size)) {
			if (signal_pending(current)) {
				ret = -ERESTARTSYS;
				break;
			}

			wait_woken(&wait, TASK_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
			continue;
		}

		/* A zero-byte read retries unless O_NONBLOCK demands -EAGAIN. */
		ret = rb->access->read(rb, n, buf);
		if (ret == 0 && (filp->f_flags & O_NONBLOCK))
			ret = -EAGAIN;
	} while (ret == 0);
	remove_wait_queue(&rb->pollq, &wait);

	return ret;
}
167 
168 static size_t iio_buffer_space_available(struct iio_buffer *buf)
169 {
170 	if (buf->access->space_available)
171 		return buf->access->space_available(buf);
172 
173 	return SIZE_MAX;
174 }
175 
176 static ssize_t iio_buffer_write(struct file *filp, const char __user *buf,
177 				size_t n, loff_t *f_ps)
178 {
179 	struct iio_dev_buffer_pair *ib = filp->private_data;
180 	struct iio_buffer *rb = ib->buffer;
181 	struct iio_dev *indio_dev = ib->indio_dev;
182 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
183 	int ret = 0;
184 	size_t written;
185 
186 	if (!indio_dev->info)
187 		return -ENODEV;
188 
189 	if (!rb || !rb->access->write)
190 		return -EINVAL;
191 
192 	if (rb->direction != IIO_BUFFER_DIRECTION_OUT)
193 		return -EPERM;
194 
195 	written = 0;
196 	add_wait_queue(&rb->pollq, &wait);
197 	do {
198 		if (!indio_dev->info)
199 			return -ENODEV;
200 
201 		if (!iio_buffer_space_available(rb)) {
202 			if (signal_pending(current)) {
203 				ret = -ERESTARTSYS;
204 				break;
205 			}
206 
207 			if (filp->f_flags & O_NONBLOCK) {
208 				if (!written)
209 					ret = -EAGAIN;
210 				break;
211 			}
212 
213 			wait_woken(&wait, TASK_INTERRUPTIBLE,
214 				   MAX_SCHEDULE_TIMEOUT);
215 			continue;
216 		}
217 
218 		ret = rb->access->write(rb, n - written, buf + written);
219 		if (ret < 0)
220 			break;
221 
222 		written += ret;
223 
224 	} while (written != n);
225 	remove_wait_queue(&rb->pollq, &wait);
226 
227 	return ret < 0 ? ret : written;
228 }
229 
230 /**
231  * iio_buffer_poll() - poll the buffer to find out if it has data
232  * @filp:	File structure pointer for device access
233  * @wait:	Poll table structure pointer for which the driver adds
234  *		a wait queue
235  *
236  * Return: (EPOLLIN | EPOLLRDNORM) if data is available for reading
237  *	   or 0 for other cases
238  */
239 static __poll_t iio_buffer_poll(struct file *filp,
240 				struct poll_table_struct *wait)
241 {
242 	struct iio_dev_buffer_pair *ib = filp->private_data;
243 	struct iio_buffer *rb = ib->buffer;
244 	struct iio_dev *indio_dev = ib->indio_dev;
245 
246 	if (!indio_dev->info || !rb)
247 		return 0;
248 
249 	poll_wait(filp, &rb->pollq, wait);
250 
251 	switch (rb->direction) {
252 	case IIO_BUFFER_DIRECTION_IN:
253 		if (iio_buffer_ready(indio_dev, rb, rb->watermark, 0))
254 			return EPOLLIN | EPOLLRDNORM;
255 		break;
256 	case IIO_BUFFER_DIRECTION_OUT:
257 		if (iio_buffer_space_available(rb))
258 			return EPOLLOUT | EPOLLWRNORM;
259 		break;
260 	}
261 
262 	return 0;
263 }
264 
265 ssize_t iio_buffer_read_wrapper(struct file *filp, char __user *buf,
266 				size_t n, loff_t *f_ps)
267 {
268 	struct iio_dev_buffer_pair *ib = filp->private_data;
269 	struct iio_buffer *rb = ib->buffer;
270 
271 	/* check if buffer was opened through new API */
272 	if (test_bit(IIO_BUSY_BIT_POS, &rb->flags))
273 		return -EBUSY;
274 
275 	return iio_buffer_read(filp, buf, n, f_ps);
276 }
277 
278 ssize_t iio_buffer_write_wrapper(struct file *filp, const char __user *buf,
279 				 size_t n, loff_t *f_ps)
280 {
281 	struct iio_dev_buffer_pair *ib = filp->private_data;
282 	struct iio_buffer *rb = ib->buffer;
283 
284 	/* check if buffer was opened through new API */
285 	if (test_bit(IIO_BUSY_BIT_POS, &rb->flags))
286 		return -EBUSY;
287 
288 	return iio_buffer_write(filp, buf, n, f_ps);
289 }
290 
291 __poll_t iio_buffer_poll_wrapper(struct file *filp,
292 				 struct poll_table_struct *wait)
293 {
294 	struct iio_dev_buffer_pair *ib = filp->private_data;
295 	struct iio_buffer *rb = ib->buffer;
296 
297 	/* check if buffer was opened through new API */
298 	if (test_bit(IIO_BUSY_BIT_POS, &rb->flags))
299 		return 0;
300 
301 	return iio_buffer_poll(filp, wait);
302 }
303 
304 /**
305  * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
306  * @indio_dev: The IIO device
307  *
308  * Wakes up the event waitqueue used for poll(). Should usually
309  * be called when the device is unregistered.
310  */
311 void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
312 {
313 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
314 	struct iio_buffer *buffer;
315 	unsigned int i;
316 
317 	for (i = 0; i < iio_dev_opaque->attached_buffers_cnt; i++) {
318 		buffer = iio_dev_opaque->attached_buffers[i];
319 		wake_up(&buffer->pollq);
320 	}
321 }
322 
/*
 * Pop one datum from @buffer into @data via the buffer's remove_from
 * callback; -EINVAL when the buffer cannot be popped from.
 */
int iio_pop_from_buffer(struct iio_buffer *buffer, void *data)
{
	if (!buffer || !buffer->access || !buffer->access->remove_from)
		return -EINVAL;

	return buffer->access->remove_from(buffer, data);
}
EXPORT_SYMBOL_GPL(iio_pop_from_buffer);
331 
332 void iio_buffer_init(struct iio_buffer *buffer)
333 {
334 	INIT_LIST_HEAD(&buffer->demux_list);
335 	INIT_LIST_HEAD(&buffer->buffer_list);
336 	init_waitqueue_head(&buffer->pollq);
337 	kref_init(&buffer->ref);
338 	if (!buffer->watermark)
339 		buffer->watermark = 1;
340 }
341 EXPORT_SYMBOL(iio_buffer_init);
342 
343 void iio_device_detach_buffers(struct iio_dev *indio_dev)
344 {
345 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
346 	struct iio_buffer *buffer;
347 	unsigned int i;
348 
349 	for (i = 0; i < iio_dev_opaque->attached_buffers_cnt; i++) {
350 		buffer = iio_dev_opaque->attached_buffers[i];
351 		iio_buffer_put(buffer);
352 	}
353 
354 	kfree(iio_dev_opaque->attached_buffers);
355 }
356 
/* sysfs "index" attribute: the channel's position within a scan. */
static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return sysfs_emit(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}
363 
/*
 * sysfs "type" attribute: describe the scan element layout as
 * [be|le]:[s|u]realbits/storagebits[Xrepeat]>>shift.
 */
static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	const struct iio_scan_type *scan_type;
	u8 type;

	scan_type = iio_get_current_scan_type(indio_dev, this_attr->c);
	if (IS_ERR(scan_type))
		return PTR_ERR(scan_type);

	type = scan_type->endianness;

	/* IIO_CPU resolves to whichever endianness the kernel was built for. */
	if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
		type = IIO_LE;
#else
		type = IIO_BE;
#endif
	}
	/* The "Xrepeat" component only appears for repeated elements. */
	if (scan_type->repeat > 1)
		return sysfs_emit(buf, "%s:%c%d/%dX%d>>%u\n",
		       iio_endian_prefix[type],
		       scan_type->sign,
		       scan_type->realbits,
		       scan_type->storagebits,
		       scan_type->repeat,
		       scan_type->shift);
	else
		return sysfs_emit(buf, "%s:%c%d/%d>>%u\n",
		       iio_endian_prefix[type],
		       scan_type->sign,
		       scan_type->realbits,
		       scan_type->storagebits,
		       scan_type->shift);
}
402 
403 static ssize_t iio_scan_el_show(struct device *dev,
404 				struct device_attribute *attr,
405 				char *buf)
406 {
407 	int ret;
408 	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
409 
410 	/* Ensure ret is 0 or 1. */
411 	ret = !!test_bit(to_iio_dev_attr(attr)->address,
412 		       buffer->scan_mask);
413 
414 	return sysfs_emit(buf, "%d\n", ret);
415 }
416 
/*
 * Find the first entry of @av_masks that covers @mask. With @strict the
 * entry must equal @mask exactly, otherwise any superset matches.
 * Note NULL used as error indicator as it doesn't make sense.
 */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
						unsigned int masklength,
						const unsigned long *mask,
						bool strict)
{
	/* An empty request can never match anything. */
	if (bitmap_empty(mask, masklength))
		return NULL;
	/*
	 * The condition here do not handle multi-long masks correctly.
	 * It only checks the first long to be zero, and will use such mask
	 * as a terminator even if there was bits set after the first long.
	 *
	 * Correct check would require using:
	 * while (!bitmap_empty(av_masks, masklength))
	 * instead. This is potentially hazardous because the
	 * avaliable_scan_masks is a zero terminated array of longs - and
	 * using the proper bitmap_empty() check for multi-long wide masks
	 * would require the array to be terminated with multiple zero longs -
	 * which is not such an usual pattern.
	 *
	 * As writing of this no multi-long wide masks were found in-tree, so
	 * the simple while (*av_masks) check is working.
	 */
	while (*av_masks) {
		if (strict) {
			if (bitmap_equal(mask, av_masks, masklength))
				return av_masks;
		} else {
			if (bitmap_subset(mask, av_masks, masklength))
				return av_masks;
		}
		/* Entries are laid out back to back, one bitmap-width apart. */
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}
453 
454 static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
455 				   const unsigned long *mask)
456 {
457 	if (!indio_dev->setup_ops->validate_scan_mask)
458 		return true;
459 
460 	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
461 }
462 
/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * Note that at this point we have no way of knowing what other
 * buffers might request, hence this code only verifies that the
 * individual buffers request is plausible.
 *
 * Return: 0 on success, -ENOMEM on allocation failure, -EINVAL when the
 * resulting mask is rejected by the driver or matches no available mask.
 */
static int iio_scan_mask_set(struct iio_dev *indio_dev,
			     struct iio_buffer *buffer, int bit)
{
	const unsigned long *mask;
	unsigned long *trialmask;

	if (!indio_dev->masklength) {
		WARN(1, "Trying to set scanmask prior to registering buffer\n");
		return -EINVAL;
	}

	/* Build the candidate mask aside so failure leaves state untouched. */
	trialmask = bitmap_alloc(indio_dev->masklength, GFP_KERNEL);
	if (!trialmask)
		return -ENOMEM;
	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
	set_bit(bit, trialmask);

	if (!iio_validate_scan_mask(indio_dev, trialmask))
		goto err_invalid_mask;

	if (indio_dev->available_scan_masks) {
		/* Non-strict match: any available superset of the trial will do. */
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   indio_dev->masklength,
					   trialmask, false);
		if (!mask)
			goto err_invalid_mask;
	}
	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

	bitmap_free(trialmask);

	return 0;

err_invalid_mask:
	bitmap_free(trialmask);
	return -EINVAL;
}
510 
/* Clear @bit in the buffer's scan mask; always succeeds. */
static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	return 0;
}
516 
517 static int iio_scan_mask_query(struct iio_dev *indio_dev,
518 			       struct iio_buffer *buffer, int bit)
519 {
520 	if (bit > indio_dev->masklength)
521 		return -EINVAL;
522 
523 	if (!buffer->scan_mask)
524 		return 0;
525 
526 	/* Ensure return value is 0 or 1. */
527 	return !!test_bit(bit, buffer->scan_mask);
528 };
529 
/* sysfs <chan>_en store: enable or disable one scan element. */
static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	struct iio_buffer *buffer = this_attr->buffer;

	ret = kstrtobool(buf, &state);
	if (ret < 0)
		return ret;

	/* The mask must not change while the buffer is capturing. */
	guard(mutex)(&iio_dev_opaque->mlock);
	if (iio_buffer_is_active(buffer))
		return -EBUSY;

	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		return ret;

	/* Enabling an already-enabled element is a no-op. */
	if (state && ret)
		return len;

	if (state)
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
	else
		ret = iio_scan_mask_clear(buffer, this_attr->address);
	if (ret)
		return ret;

	return len;
}
566 
567 static ssize_t iio_scan_el_ts_show(struct device *dev,
568 				   struct device_attribute *attr,
569 				   char *buf)
570 {
571 	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
572 
573 	return sysfs_emit(buf, "%d\n", buffer->scan_timestamp);
574 }
575 
576 static ssize_t iio_scan_el_ts_store(struct device *dev,
577 				    struct device_attribute *attr,
578 				    const char *buf,
579 				    size_t len)
580 {
581 	int ret;
582 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
583 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
584 	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
585 	bool state;
586 
587 	ret = kstrtobool(buf, &state);
588 	if (ret < 0)
589 		return ret;
590 
591 	guard(mutex)(&iio_dev_opaque->mlock);
592 	if (iio_buffer_is_active(buffer))
593 		return -EBUSY;
594 
595 	buffer->scan_timestamp = state;
596 
597 	return len;
598 }
599 
/*
 * Create the per-channel scan-element attributes (index, type, en) for
 * @chan on @buffer. The timestamp channel gets its own en handlers.
 *
 * Return: number of attributes created, or a negative error code.
 */
static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					struct iio_buffer *buffer,
					const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;

	ret = __iio_add_chan_devattr("index",
				     chan,
				     &iio_show_scan_index,
				     NULL,
				     0,
				     IIO_SEPARATE,
				     &indio_dev->dev,
				     buffer,
				     &buffer->buffer_attr_list);
	if (ret)
		return ret;
	attrcount++;
	ret = __iio_add_chan_devattr("type",
				     chan,
				     &iio_show_fixed_type,
				     NULL,
				     0,
				     IIO_SEPARATE,
				     &indio_dev->dev,
				     buffer,
				     &buffer->buffer_attr_list);
	if (ret)
		return ret;
	attrcount++;
	/* The timestamp's en attribute flips scan_timestamp, not the mask. */
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index,
					     IIO_SEPARATE,
					     &indio_dev->dev,
					     buffer,
					     &buffer->buffer_attr_list);
	else
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index,
					     IIO_SEPARATE,
					     &indio_dev->dev,
					     buffer,
					     &buffer->buffer_attr_list);
	if (ret)
		return ret;
	attrcount++;
	ret = attrcount;
	return ret;
}
656 
/* sysfs buffer/length show: current buffer length in datums. */
static ssize_t length_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;

	return sysfs_emit(buf, "%d\n", buffer->length);
}
664 
/* sysfs buffer/length store: resize the buffer (only while inactive). */
static ssize_t length_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;

	/*
	 * NOTE(review): this shortcut reads buffer->length before mlock is
	 * taken — presumably a benign race (a stale hit just skips a no-op
	 * resize); confirm.
	 */
	if (val == buffer->length)
		return len;

	guard(mutex)(&iio_dev_opaque->mlock);
	if (iio_buffer_is_active(buffer))
		return -EBUSY;

	buffer->access->set_length(buffer, val);

	/* Keep the watermark reachable within the new, smaller length. */
	if (buffer->length && buffer->length < buffer->watermark)
		buffer->watermark = buffer->length;

	return len;
}
692 
/* sysfs buffer/enable show: 1 while the buffer is actively capturing. */
static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;

	return sysfs_emit(buf, "%d\n", iio_buffer_is_active(buffer));
}
700 
701 static int iio_storage_bytes_for_si(struct iio_dev *indio_dev,
702 				    unsigned int scan_index)
703 {
704 	const struct iio_chan_spec *ch;
705 	const struct iio_scan_type *scan_type;
706 	unsigned int bytes;
707 
708 	ch = iio_find_channel_from_si(indio_dev, scan_index);
709 	scan_type = iio_get_current_scan_type(indio_dev, ch);
710 	if (IS_ERR(scan_type))
711 		return PTR_ERR(scan_type);
712 
713 	bytes = scan_type->storagebits / 8;
714 
715 	if (scan_type->repeat > 1)
716 		bytes *= scan_type->repeat;
717 
718 	return bytes;
719 }
720 
/* Storage footprint of the timestamp channel, looked up via its scan index. */
static int iio_storage_bytes_for_timestamp(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	return iio_storage_bytes_for_si(indio_dev,
					iio_dev_opaque->scan_index_timestamp);
}
728 
/*
 * Compute the size of one demuxed scan: each enabled element is placed at
 * its natural alignment, the optional timestamp goes last, and the total
 * is padded to the alignment of the largest element.
 */
static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
				  const unsigned long *mask, bool timestamp)
{
	unsigned int bytes = 0;
	int length, i, largest = 0;

	/* How much space will the demuxed element take? */
	for_each_set_bit(i, mask,
			 indio_dev->masklength) {
		length = iio_storage_bytes_for_si(indio_dev, i);
		if (length < 0)
			return length;

		/* Align each element to its own size. */
		bytes = ALIGN(bytes, length);
		bytes += length;
		largest = max(largest, length);
	}

	if (timestamp) {
		length = iio_storage_bytes_for_timestamp(indio_dev);
		if (length < 0)
			return length;

		bytes = ALIGN(bytes, length);
		bytes += length;
		largest = max(largest, length);
	}

	/* Pad the scan so consecutive scans stay aligned. */
	bytes = ALIGN(bytes, largest);
	return bytes;
}
760 
761 static void iio_buffer_activate(struct iio_dev *indio_dev,
762 				struct iio_buffer *buffer)
763 {
764 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
765 
766 	iio_buffer_get(buffer);
767 	list_add(&buffer->buffer_list, &iio_dev_opaque->buffer_list);
768 }
769 
/* Take @buffer off the active list, wake its waiters, drop its reference. */
static void iio_buffer_deactivate(struct iio_buffer *buffer)
{
	/* Unlink first so iio_buffer_is_active() sees the buffer as idle. */
	list_del_init(&buffer->buffer_list);
	wake_up_interruptible(&buffer->pollq);
	iio_buffer_put(buffer);
}
776 
777 static void iio_buffer_deactivate_all(struct iio_dev *indio_dev)
778 {
779 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
780 	struct iio_buffer *buffer, *_buffer;
781 
782 	list_for_each_entry_safe(buffer, _buffer,
783 				 &iio_dev_opaque->buffer_list, buffer_list)
784 		iio_buffer_deactivate(buffer);
785 }
786 
/* Invoke the buffer's optional enable callback; absent means success. */
static int iio_buffer_enable(struct iio_buffer *buffer,
			     struct iio_dev *indio_dev)
{
	if (!buffer->access->enable)
		return 0;
	return buffer->access->enable(buffer, indio_dev);
}
794 
/* Invoke the buffer's optional disable callback; absent means success. */
static int iio_buffer_disable(struct iio_buffer *buffer,
			      struct iio_dev *indio_dev)
{
	if (!buffer->access->disable)
		return 0;
	return buffer->access->disable(buffer, indio_dev);
}
802 
803 static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
804 					      struct iio_buffer *buffer)
805 {
806 	unsigned int bytes;
807 
808 	if (!buffer->access->set_bytes_per_datum)
809 		return;
810 
811 	bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
812 				       buffer->scan_timestamp);
813 
814 	buffer->access->set_bytes_per_datum(buffer, bytes);
815 }
816 
817 static int iio_buffer_request_update(struct iio_dev *indio_dev,
818 				     struct iio_buffer *buffer)
819 {
820 	int ret;
821 
822 	iio_buffer_update_bytes_per_datum(indio_dev, buffer);
823 	if (buffer->access->request_update) {
824 		ret = buffer->access->request_update(buffer);
825 		if (ret) {
826 			dev_dbg(&indio_dev->dev,
827 				"Buffer not started: buffer parameter update failed (%d)\n",
828 				ret);
829 			return ret;
830 		}
831 	}
832 
833 	return 0;
834 }
835 
/*
 * If the mask is dynamically allocated free it, otherwise do nothing.
 * Masks drawn from available_scan_masks are driver-owned constants.
 */
static void iio_free_scan_mask(struct iio_dev *indio_dev,
			       const unsigned long *mask)
{
	if (!indio_dev->available_scan_masks)
		bitmap_free(mask);
}
843 
/**
 * struct iio_device_config - device configuration computed by
 *	iio_verify_update() for a pending buffer enable/disable
 * @mode:		operating mode to use (INDIO_BUFFER_* flag)
 * @watermark:		minimum watermark across the affected buffers
 * @scan_mask:		combined scan mask of the affected buffers
 * @scan_bytes:		bytes needed to store one full scan
 * @scan_timestamp:	true if any affected buffer captures the timestamp
 */
struct iio_device_config {
	unsigned int mode;
	unsigned int watermark;
	const unsigned long *scan_mask;
	unsigned int scan_bytes;
	bool scan_timestamp;
};
851 
/*
 * Work out the device configuration that would result from enabling
 * @insert_buffer and/or disabling @remove_buffer, without applying it.
 * On success @config holds the mode, combined scan mask, scan size,
 * timestamp flag and minimum watermark.
 */
static int iio_verify_update(struct iio_dev *indio_dev,
			     struct iio_buffer *insert_buffer,
			     struct iio_buffer *remove_buffer,
			     struct iio_device_config *config)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	unsigned long *compound_mask;
	const unsigned long *scan_mask;
	bool strict_scanmask = false;
	struct iio_buffer *buffer;
	bool scan_timestamp;
	unsigned int modes;

	if (insert_buffer &&
	    bitmap_empty(insert_buffer->scan_mask, indio_dev->masklength)) {
		dev_dbg(&indio_dev->dev,
			"At least one scan element must be enabled first\n");
		return -EINVAL;
	}

	memset(config, 0, sizeof(*config));
	config->watermark = ~0;

	/*
	 * If there is just one buffer and we are removing it there is nothing
	 * to verify.
	 */
	if (remove_buffer && !insert_buffer &&
	    list_is_singular(&iio_dev_opaque->buffer_list))
		return 0;

	modes = indio_dev->modes;

	/* Intersect the supported modes and minimise the watermark. */
	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
		if (buffer == remove_buffer)
			continue;
		modes &= buffer->access->modes;
		config->watermark = min(config->watermark, buffer->watermark);
	}

	if (insert_buffer) {
		modes &= insert_buffer->access->modes;
		config->watermark = min(config->watermark,
					insert_buffer->watermark);
	}

	/* Definitely possible for devices to support both of these. */
	if ((modes & INDIO_BUFFER_TRIGGERED) && indio_dev->trig) {
		config->mode = INDIO_BUFFER_TRIGGERED;
	} else if (modes & INDIO_BUFFER_HARDWARE) {
		/*
		 * Keep things simple for now and only allow a single buffer to
		 * be connected in hardware mode.
		 */
		if (insert_buffer && !list_empty(&iio_dev_opaque->buffer_list))
			return -EINVAL;
		config->mode = INDIO_BUFFER_HARDWARE;
		strict_scanmask = true;
	} else if (modes & INDIO_BUFFER_SOFTWARE) {
		config->mode = INDIO_BUFFER_SOFTWARE;
	} else {
		/* Can only occur on first buffer */
		if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
			dev_dbg(&indio_dev->dev, "Buffer not started: no trigger\n");
		return -EINVAL;
	}

	/* What scan mask do we actually have? */
	compound_mask = bitmap_zalloc(indio_dev->masklength, GFP_KERNEL);
	if (!compound_mask)
		return -ENOMEM;

	scan_timestamp = false;

	/* OR together every remaining buffer's mask. */
	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
		if (buffer == remove_buffer)
			continue;
		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
			  indio_dev->masklength);
		scan_timestamp |= buffer->scan_timestamp;
	}

	if (insert_buffer) {
		bitmap_or(compound_mask, compound_mask,
			  insert_buffer->scan_mask, indio_dev->masklength);
		scan_timestamp |= insert_buffer->scan_timestamp;
	}

	if (indio_dev->available_scan_masks) {
		/* Drivers with fixed masks must cover the compound request. */
		scan_mask = iio_scan_mask_match(indio_dev->available_scan_masks,
						indio_dev->masklength,
						compound_mask,
						strict_scanmask);
		bitmap_free(compound_mask);
		if (!scan_mask)
			return -EINVAL;
	} else {
		scan_mask = compound_mask;
	}

	config->scan_bytes = iio_compute_scan_bytes(indio_dev,
						    scan_mask, scan_timestamp);
	config->scan_mask = scan_mask;
	config->scan_timestamp = scan_timestamp;

	return 0;
}
959 
/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from:	index to copy from
 * @to:		index to copy to
 * @length:	how many bytes to copy
 * @l:		list head used for management
 *
 * Adjacent copy regions get merged into one entry by
 * iio_buffer_add_demux().
 */
struct iio_demux_table {
	unsigned int from;
	unsigned int to;
	unsigned int length;
	struct list_head l;
};
973 
974 static void iio_buffer_demux_free(struct iio_buffer *buffer)
975 {
976 	struct iio_demux_table *p, *q;
977 
978 	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
979 		list_del(&p->l);
980 		kfree(p);
981 	}
982 }
983 
/*
 * Append a copy operation to the buffer's demux table, merging it into
 * *@p when both source and destination follow on contiguously.
 */
static int iio_buffer_add_demux(struct iio_buffer *buffer,
				struct iio_demux_table **p, unsigned int in_loc,
				unsigned int out_loc,
				unsigned int length)
{
	/* Contiguous with the previous op on both sides: just extend it. */
	if (*p && (*p)->from + (*p)->length == in_loc &&
	    (*p)->to + (*p)->length == out_loc) {
		(*p)->length += length;
	} else {
		*p = kmalloc(sizeof(**p), GFP_KERNEL);
		if (!(*p))
			return -ENOMEM;
		(*p)->from = in_loc;
		(*p)->to = out_loc;
		(*p)->length = length;
		list_add_tail(&(*p)->l, &buffer->demux_list);
	}

	return 0;
}
1004 
/*
 * Rebuild the demux table that repacks a device scan (active_scan_mask
 * layout) into this buffer's layout (scan_mask). A no-op when the two
 * masks are identical.
 */
static int iio_buffer_update_demux(struct iio_dev *indio_dev,
				   struct iio_buffer *buffer)
{
	int ret, in_ind = -1, out_ind, length;
	unsigned int in_loc = 0, out_loc = 0;
	struct iio_demux_table *p = NULL;

	/* Clear out any old demux */
	iio_buffer_demux_free(buffer);
	kfree(buffer->demux_bounce);
	buffer->demux_bounce = NULL;

	/* First work out which scan mode we will actually have */
	if (bitmap_equal(indio_dev->active_scan_mask,
			 buffer->scan_mask,
			 indio_dev->masklength))
		return 0;

	/* Now we have the two masks, work from least sig and build up sizes */
	for_each_set_bit(out_ind,
			 buffer->scan_mask,
			 indio_dev->masklength) {
		in_ind = find_next_bit(indio_dev->active_scan_mask,
				       indio_dev->masklength,
				       in_ind + 1);
		/* Skip over source elements this buffer does not want. */
		while (in_ind != out_ind) {
			ret = iio_storage_bytes_for_si(indio_dev, in_ind);
			if (ret < 0)
				goto error_clear_mux_table;

			length = ret;
			/* Make sure we are aligned */
			in_loc = roundup(in_loc, length) + length;
			in_ind = find_next_bit(indio_dev->active_scan_mask,
					       indio_dev->masklength,
					       in_ind + 1);
		}
		ret = iio_storage_bytes_for_si(indio_dev, in_ind);
		if (ret < 0)
			goto error_clear_mux_table;

		length = ret;
		/* Both offsets are kept naturally aligned to the element. */
		out_loc = roundup(out_loc, length);
		in_loc = roundup(in_loc, length);
		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
		if (ret)
			goto error_clear_mux_table;
		out_loc += length;
		in_loc += length;
	}
	/* Relies on scan_timestamp being last */
	if (buffer->scan_timestamp) {
		ret = iio_storage_bytes_for_timestamp(indio_dev);
		if (ret < 0)
			goto error_clear_mux_table;

		length = ret;
		out_loc = roundup(out_loc, length);
		in_loc = roundup(in_loc, length);
		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
		if (ret)
			goto error_clear_mux_table;
		out_loc += length;
	}
	/* out_loc now equals one full repacked scan: size the bounce buffer. */
	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
	if (!buffer->demux_bounce) {
		ret = -ENOMEM;
		goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	iio_buffer_demux_free(buffer);

	return ret;
}
1081 
1082 static int iio_update_demux(struct iio_dev *indio_dev)
1083 {
1084 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1085 	struct iio_buffer *buffer;
1086 	int ret;
1087 
1088 	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
1089 		ret = iio_buffer_update_demux(indio_dev, buffer);
1090 		if (ret < 0)
1091 			goto error_clear_mux_table;
1092 	}
1093 	return 0;
1094 
1095 error_clear_mux_table:
1096 	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list)
1097 		iio_buffer_demux_free(buffer);
1098 
1099 	return ret;
1100 }
1101 
/*
 * Commit a previously verified configuration and bring every buffer on
 * iio_dev_opaque->buffer_list into the enabled state.
 *
 * Sequence: commit config -> update demux -> preenable ->
 * update_scan_mode -> set hwfifo watermark -> enable each buffer ->
 * attach trigger poll func (triggered mode only) -> postenable.
 * On failure, the completed steps are undone in reverse order via the
 * cascading error labels below.
 */
static int iio_enable_buffers(struct iio_dev *indio_dev,
			      struct iio_device_config *config)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	/* tmp records the buffer whose enable failed, for partial unwind */
	struct iio_buffer *buffer, *tmp = NULL;
	int ret;

	indio_dev->active_scan_mask = config->scan_mask;
	indio_dev->scan_timestamp = config->scan_timestamp;
	indio_dev->scan_bytes = config->scan_bytes;
	iio_dev_opaque->currentmode = config->mode;

	iio_update_demux(indio_dev);

	/* Wind up again */
	if (indio_dev->setup_ops->preenable) {
		ret = indio_dev->setup_ops->preenable(indio_dev);
		if (ret) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: buffer preenable failed (%d)\n", ret);
			goto err_undo_config;
		}
	}

	if (indio_dev->info->update_scan_mode) {
		ret = indio_dev->info
			->update_scan_mode(indio_dev,
					   indio_dev->active_scan_mask);
		if (ret < 0) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: update scan mode failed (%d)\n",
				ret);
			goto err_run_postdisable;
		}
	}

	if (indio_dev->info->hwfifo_set_watermark)
		indio_dev->info->hwfifo_set_watermark(indio_dev,
			config->watermark);

	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
		ret = iio_buffer_enable(buffer, indio_dev);
		if (ret) {
			tmp = buffer;
			goto err_disable_buffers;
		}
	}

	if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) {
		ret = iio_trigger_attach_poll_func(indio_dev->trig,
						   indio_dev->pollfunc);
		if (ret)
			goto err_disable_buffers;
	}

	if (indio_dev->setup_ops->postenable) {
		ret = indio_dev->setup_ops->postenable(indio_dev);
		if (ret) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: postenable failed (%d)\n", ret);
			goto err_detach_pollfunc;
		}
	}

	return 0;

err_detach_pollfunc:
	if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) {
		iio_trigger_detach_poll_func(indio_dev->trig,
					     indio_dev->pollfunc);
	}
err_disable_buffers:
	/*
	 * Walk backwards from the failing buffer (or from the list end if
	 * the failure happened after the enable loop), disabling only the
	 * buffers that were successfully enabled.
	 */
	buffer = list_prepare_entry(tmp, &iio_dev_opaque->buffer_list, buffer_list);
	list_for_each_entry_continue_reverse(buffer, &iio_dev_opaque->buffer_list,
					     buffer_list)
		iio_buffer_disable(buffer, indio_dev);
err_run_postdisable:
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
err_undo_config:
	iio_dev_opaque->currentmode = INDIO_DIRECT_MODE;
	indio_dev->active_scan_mask = NULL;

	return ret;
}
1187 
/*
 * Tear down all enabled buffers: predisable -> detach trigger poll func
 * (triggered mode only) -> disable each buffer -> postdisable -> release
 * the active scan mask and fall back to direct mode.
 */
static int iio_disable_buffers(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer;
	int ret = 0;
	int ret2;

	/* Wind down existing buffers - iff there are any */
	if (list_empty(&iio_dev_opaque->buffer_list))
		return 0;

	/*
	 * If things go wrong at some step in disable we still need to continue
	 * to perform the other steps, otherwise we leave the device in a
	 * inconsistent state. We return the error code for the first error we
	 * encountered.
	 */

	if (indio_dev->setup_ops->predisable) {
		ret2 = indio_dev->setup_ops->predisable(indio_dev);
		if (ret2 && !ret)
			ret = ret2;
	}

	if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) {
		iio_trigger_detach_poll_func(indio_dev->trig,
					     indio_dev->pollfunc);
	}

	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
		ret2 = iio_buffer_disable(buffer, indio_dev);
		if (ret2 && !ret)
			ret = ret2;
	}

	if (indio_dev->setup_ops->postdisable) {
		ret2 = indio_dev->setup_ops->postdisable(indio_dev);
		if (ret2 && !ret)
			ret = ret2;
	}

	/* The scan mask is owned by the active configuration; drop it now. */
	iio_free_scan_mask(indio_dev, indio_dev->active_scan_mask);
	indio_dev->active_scan_mask = NULL;
	iio_dev_opaque->currentmode = INDIO_DIRECT_MODE;

	return ret;
}
1235 
/*
 * Core of buffer list management: verify the prospective configuration,
 * disable the current one, swap @remove_buffer out and/or @insert_buffer
 * in, then re-enable with the new configuration (if any buffers remain).
 *
 * Both callers in this file hold iio_dev_opaque->mlock around this call.
 */
static int __iio_update_buffers(struct iio_dev *indio_dev,
				struct iio_buffer *insert_buffer,
				struct iio_buffer *remove_buffer)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_device_config new_config;
	int ret;

	ret = iio_verify_update(indio_dev, insert_buffer, remove_buffer,
				&new_config);
	if (ret)
		return ret;

	if (insert_buffer) {
		ret = iio_buffer_request_update(indio_dev, insert_buffer);
		if (ret)
			goto err_free_config;
	}

	ret = iio_disable_buffers(indio_dev);
	if (ret)
		goto err_deactivate_all;

	if (remove_buffer)
		iio_buffer_deactivate(remove_buffer);
	if (insert_buffer)
		iio_buffer_activate(indio_dev, insert_buffer);

	/* If no buffers in list, we are done */
	if (list_empty(&iio_dev_opaque->buffer_list))
		return 0;

	ret = iio_enable_buffers(indio_dev, &new_config);
	if (ret)
		goto err_deactivate_all;

	return 0;

err_deactivate_all:
	/*
	 * We've already verified that the config is valid earlier. If things go
	 * wrong in either enable or disable the most likely reason is an IO
	 * error from the device. In this case there is no good recovery
	 * strategy. Just make sure to disable everything and leave the device
	 * in a sane state.  With a bit of luck the device might come back to
	 * life again later and userspace can try again.
	 */
	iio_buffer_deactivate_all(indio_dev);

err_free_config:
	iio_free_scan_mask(indio_dev, new_config.scan_mask);
	return ret;
}
1289 
/**
 * iio_update_buffers() - add and/or remove a buffer from the active set
 * @indio_dev:		the IIO device
 * @insert_buffer:	buffer to enable, may be NULL
 * @remove_buffer:	buffer to disable, may be NULL
 *
 * Inserting an output-direction buffer is rejected with -EINVAL. Requests
 * that are already satisfied (buffer already active / already inactive)
 * are treated as no-ops.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	if (insert_buffer == remove_buffer)
		return 0;

	if (insert_buffer &&
	    insert_buffer->direction == IIO_BUFFER_DIRECTION_OUT)
		return -EINVAL;

	/* Lock order: info_exist_lock before mlock (scoped; auto-unlocked) */
	guard(mutex)(&iio_dev_opaque->info_exist_lock);
	guard(mutex)(&iio_dev_opaque->mlock);

	if (insert_buffer && iio_buffer_is_active(insert_buffer))
		insert_buffer = NULL;

	if (remove_buffer && !iio_buffer_is_active(remove_buffer))
		remove_buffer = NULL;

	if (!insert_buffer && !remove_buffer)
		return 0;

	/* The device may have been unregistered in the meantime */
	if (!indio_dev->info)
		return -ENODEV;

	return __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);
}
EXPORT_SYMBOL_GPL(iio_update_buffers);
1321 
/* Disable all enabled buffers and drop them from the active list. */
void iio_disable_all_buffers(struct iio_dev *indio_dev)
{
	iio_disable_buffers(indio_dev);
	iio_buffer_deactivate_all(indio_dev);
}
1327 
1328 static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
1329 			    const char *buf, size_t len)
1330 {
1331 	int ret;
1332 	bool requested_state;
1333 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
1334 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1335 	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
1336 	bool inlist;
1337 
1338 	ret = kstrtobool(buf, &requested_state);
1339 	if (ret < 0)
1340 		return ret;
1341 
1342 	guard(mutex)(&iio_dev_opaque->mlock);
1343 
1344 	/* Find out if it is in the list */
1345 	inlist = iio_buffer_is_active(buffer);
1346 	/* Already in desired state */
1347 	if (inlist == requested_state)
1348 		return len;
1349 
1350 	if (requested_state)
1351 		ret = __iio_update_buffers(indio_dev, buffer, NULL);
1352 	else
1353 		ret = __iio_update_buffers(indio_dev, NULL, buffer);
1354 	if (ret)
1355 		return ret;
1356 
1357 	return len;
1358 }
1359 
/* sysfs read handler for the per-buffer "watermark" attribute. */
static ssize_t watermark_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;

	return sysfs_emit(buf, "%u\n", buffer->watermark);
}
1367 
1368 static ssize_t watermark_store(struct device *dev,
1369 			       struct device_attribute *attr,
1370 			       const char *buf, size_t len)
1371 {
1372 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
1373 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1374 	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
1375 	unsigned int val;
1376 	int ret;
1377 
1378 	ret = kstrtouint(buf, 10, &val);
1379 	if (ret)
1380 		return ret;
1381 	if (!val)
1382 		return -EINVAL;
1383 
1384 	guard(mutex)(&iio_dev_opaque->mlock);
1385 
1386 	if (val > buffer->length)
1387 		return -EINVAL;
1388 
1389 	if (iio_buffer_is_active(buffer))
1390 		return -EBUSY;
1391 
1392 	buffer->watermark = val;
1393 
1394 	return len;
1395 }
1396 
/*
 * sysfs read handler for "data_available": reports what the buffer
 * implementation's data_available() callback returns.
 */
static ssize_t data_available_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;

	return sysfs_emit(buf, "%zu\n", iio_buffer_data_available(buffer));
}
1404 
1405 static ssize_t direction_show(struct device *dev,
1406 			      struct device_attribute *attr,
1407 			      char *buf)
1408 {
1409 	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
1410 
1411 	switch (buffer->direction) {
1412 	case IIO_BUFFER_DIRECTION_IN:
1413 		return sysfs_emit(buf, "in\n");
1414 	case IIO_BUFFER_DIRECTION_OUT:
1415 		return sysfs_emit(buf, "out\n");
1416 	default:
1417 		return -EINVAL;
1418 	}
1419 }
1420 
static DEVICE_ATTR_RW(length);
/* Read-only variant, used when the buffer has no set_length callback */
static struct device_attribute dev_attr_length_ro = __ATTR_RO(length);
static DEVICE_ATTR_RW(enable);
static DEVICE_ATTR_RW(watermark);
/* Read-only variant, used when the watermark is fixed by the buffer */
static struct device_attribute dev_attr_watermark_ro = __ATTR_RO(watermark);
static DEVICE_ATTR_RO(data_available);
static DEVICE_ATTR_RO(direction);

/*
 * When adding new attributes here, put them at the end, at least until
 * the code that handles the length/length_ro & watermark/watermark_ro
 * assignments gets cleaned up. Otherwise these can create some weird
 * duplicate attributes errors under some setups.
 *
 * Note: deliberately not NULL-terminated; it is consumed via ARRAY_SIZE()
 * and copied into larger, zero-filled attribute arrays.
 */
static struct attribute *iio_buffer_attrs[] = {
	&dev_attr_length.attr,
	&dev_attr_enable.attr,
	&dev_attr_watermark.attr,
	&dev_attr_data_available.attr,
	&dev_attr_direction.attr,
};

#define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr)
1444 
1445 static struct attribute *iio_buffer_wrap_attr(struct iio_buffer *buffer,
1446 					      struct attribute *attr)
1447 {
1448 	struct device_attribute *dattr = to_dev_attr(attr);
1449 	struct iio_dev_attr *iio_attr;
1450 
1451 	iio_attr = kzalloc(sizeof(*iio_attr), GFP_KERNEL);
1452 	if (!iio_attr)
1453 		return NULL;
1454 
1455 	iio_attr->buffer = buffer;
1456 	memcpy(&iio_attr->dev_attr, dattr, sizeof(iio_attr->dev_attr));
1457 	iio_attr->dev_attr.attr.name = kstrdup_const(attr->name, GFP_KERNEL);
1458 	if (!iio_attr->dev_attr.attr.name) {
1459 		kfree(iio_attr);
1460 		return NULL;
1461 	}
1462 
1463 	sysfs_attr_init(&iio_attr->dev_attr.attr);
1464 
1465 	list_add(&iio_attr->l, &buffer->buffer_attr_list);
1466 
1467 	return &iio_attr->dev_attr.attr;
1468 }
1469 
/*
 * Register the legacy "buffer" and "scan_elements" sysfs groups, pointing
 * at the same attributes as the (first) buffer's own group. @buffer_attrs
 * holds the buffer attributes first, followed by @scan_el_attrcount scan
 * element attributes starting at index @buffer_attrcount.
 */
static int iio_buffer_register_legacy_sysfs_groups(struct iio_dev *indio_dev,
						   struct attribute **buffer_attrs,
						   int buffer_attrcount,
						   int scan_el_attrcount)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct attribute_group *group;
	struct attribute **attrs;
	int ret;

	/* +1 for the NULL terminator (kcalloc zero-fills) */
	attrs = kcalloc(buffer_attrcount + 1, sizeof(*attrs), GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;

	memcpy(attrs, buffer_attrs, buffer_attrcount * sizeof(*attrs));

	group = &iio_dev_opaque->legacy_buffer_group;
	group->attrs = attrs;
	group->name = "buffer";

	ret = iio_device_register_sysfs_group(indio_dev, group);
	if (ret)
		goto error_free_buffer_attrs;

	attrs = kcalloc(scan_el_attrcount + 1, sizeof(*attrs), GFP_KERNEL);
	if (!attrs) {
		ret = -ENOMEM;
		goto error_free_buffer_attrs;
	}

	/* Scan element attributes follow the buffer attributes in the array */
	memcpy(attrs, &buffer_attrs[buffer_attrcount],
	       scan_el_attrcount * sizeof(*attrs));

	group = &iio_dev_opaque->legacy_scan_el_group;
	group->attrs = attrs;
	group->name = "scan_elements";

	ret = iio_device_register_sysfs_group(indio_dev, group);
	if (ret)
		goto error_free_scan_el_attrs;

	return 0;

error_free_scan_el_attrs:
	kfree(iio_dev_opaque->legacy_scan_el_group.attrs);
error_free_buffer_attrs:
	kfree(iio_dev_opaque->legacy_buffer_group.attrs);

	return ret;
}
1520 
/*
 * Free the attribute arrays allocated by
 * iio_buffer_register_legacy_sysfs_groups().
 */
static void iio_buffer_unregister_legacy_sysfs_groups(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	kfree(iio_dev_opaque->legacy_buffer_group.attrs);
	kfree(iio_dev_opaque->legacy_scan_el_group.attrs);
}
1528 
/* Release handler for the anonymous buffer fd created in getfd below. */
static int iio_buffer_chrdev_release(struct inode *inode, struct file *filep)
{
	struct iio_dev_buffer_pair *ib = filep->private_data;
	struct iio_dev *indio_dev = ib->indio_dev;
	struct iio_buffer *buffer = ib->buffer;

	/* Wake any waiters on the poll queue before tearing down */
	wake_up(&buffer->pollq);

	kfree(ib);
	/* Allow a new fd to be handed out for this buffer again */
	clear_bit(IIO_BUSY_BIT_POS, &buffer->flags);
	/* Drop the device reference taken in iio_device_buffer_getfd() */
	iio_device_put(indio_dev);

	return 0;
}
1543 
/* File operations for the per-buffer anon-inode fd ("iio:buffer"). */
static const struct file_operations iio_buffer_chrdev_fileops = {
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
	.read = iio_buffer_read,
	.write = iio_buffer_write,
	.poll = iio_buffer_poll,
	.release = iio_buffer_chrdev_release,
};
1552 
/*
 * IIO_BUFFER_GET_FD_IOCTL handler: hand out an anonymous fd for the idx-th
 * attached buffer, writing the fd number back to userspace through @arg.
 * Only one fd may be outstanding per buffer (guarded by IIO_BUSY_BIT_POS);
 * the fd holds a reference on the device until released.
 */
static long iio_device_buffer_getfd(struct iio_dev *indio_dev, unsigned long arg)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	int __user *ival = (int __user *)arg;
	struct iio_dev_buffer_pair *ib;
	struct iio_buffer *buffer;
	int fd, idx, ret;

	if (copy_from_user(&idx, ival, sizeof(idx)))
		return -EFAULT;

	/*
	 * NOTE(review): relies on attached_buffers_cnt being unsigned so
	 * that a negative idx is promoted and rejected here — confirm
	 * against the field's declaration in iio-opaque.h.
	 */
	if (idx >= iio_dev_opaque->attached_buffers_cnt)
		return -ENODEV;

	/* Reference is dropped either on error below or on fd release */
	iio_device_get(indio_dev);

	buffer = iio_dev_opaque->attached_buffers[idx];

	if (test_and_set_bit(IIO_BUSY_BIT_POS, &buffer->flags)) {
		ret = -EBUSY;
		goto error_iio_dev_put;
	}

	ib = kzalloc(sizeof(*ib), GFP_KERNEL);
	if (!ib) {
		ret = -ENOMEM;
		goto error_clear_busy_bit;
	}

	ib->indio_dev = indio_dev;
	ib->buffer = buffer;

	fd = anon_inode_getfd("iio:buffer", &iio_buffer_chrdev_fileops,
			      ib, O_RDWR | O_CLOEXEC);
	if (fd < 0) {
		ret = fd;
		goto error_free_ib;
	}

	if (copy_to_user(ival, &fd, sizeof(fd))) {
		/*
		 * "Leak" the fd, as there's not much we can do about this
		 * anyway. 'fd' might have been closed already, as
		 * anon_inode_getfd() called fd_install() on it, which made
		 * it reachable by userland.
		 *
		 * Instead of allowing a malicious user to play tricks with
		 * us, rely on the process exit path to do any necessary
		 * cleanup, as in releasing the file, if still needed.
		 */
		return -EFAULT;
	}

	return 0;

error_free_ib:
	kfree(ib);
error_clear_busy_bit:
	clear_bit(IIO_BUSY_BIT_POS, &buffer->flags);
error_iio_dev_put:
	iio_device_put(indio_dev);
	return ret;
}
1616 
1617 static long iio_device_buffer_ioctl(struct iio_dev *indio_dev, struct file *filp,
1618 				    unsigned int cmd, unsigned long arg)
1619 {
1620 	switch (cmd) {
1621 	case IIO_BUFFER_GET_FD_IOCTL:
1622 		return iio_device_buffer_getfd(indio_dev, arg);
1623 	default:
1624 		return IIO_IOCTL_UNHANDLED;
1625 	}
1626 }
1627 
1628 static int iio_channel_validate_scan_type(struct device *dev, int ch,
1629 					  const struct iio_scan_type *scan_type)
1630 {
1631 	/* Verify that sample bits fit into storage */
1632 	if (scan_type->storagebits < scan_type->realbits + scan_type->shift) {
1633 		dev_err(dev,
1634 			"Channel %d storagebits (%d) < shifted realbits (%d + %d)\n",
1635 			ch, scan_type->storagebits,
1636 			scan_type->realbits,
1637 			scan_type->shift);
1638 		return -EINVAL;
1639 	}
1640 
1641 	return 0;
1642 }
1643 
/*
 * Build the sysfs attribute group ("buffer%d") and scan mask for one
 * attached buffer: validate every channel's scan type, create the scan
 * element attributes, merge in the core buffer attributes (swapping in
 * read-only variants where the buffer implementation cannot change the
 * value), wrap each attribute per-buffer, and register the group. For the
 * first buffer (index 0) the legacy "buffer"/"scan_elements" groups are
 * registered as well.
 */
static int __iio_buffer_alloc_sysfs_and_mask(struct iio_buffer *buffer,
					     struct iio_dev *indio_dev,
					     int index)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_dev_attr *p;
	const struct iio_dev_attr *id_attr;
	struct attribute **attr;
	int ret, i, attrn, scan_el_attrcount, buffer_attrcount;
	const struct iio_chan_spec *channels;

	/* Count the driver-supplied attributes (NULL-terminated array) */
	buffer_attrcount = 0;
	if (buffer->attrs) {
		while (buffer->attrs[buffer_attrcount])
			buffer_attrcount++;
	}
	buffer_attrcount += ARRAY_SIZE(iio_buffer_attrs);

	scan_el_attrcount = 0;
	INIT_LIST_HEAD(&buffer->buffer_attr_list);
	channels = indio_dev->channels;
	if (channels) {
		/* new magic */
		for (i = 0; i < indio_dev->num_channels; i++) {
			const struct iio_scan_type *scan_type;

			if (channels[i].scan_index < 0)
				continue;

			if (channels[i].has_ext_scan_type) {
				int j;

				/*
				 * get_current_scan_type is required when using
				 * extended scan types.
				 */
				if (!indio_dev->info->get_current_scan_type) {
					ret = -EINVAL;
					goto error_cleanup_dynamic;
				}

				/* Validate every possible extended scan type */
				for (j = 0; j < channels[i].num_ext_scan_type; j++) {
					scan_type = &channels[i].ext_scan_type[j];

					ret = iio_channel_validate_scan_type(
						&indio_dev->dev, i, scan_type);
					if (ret)
						goto error_cleanup_dynamic;
				}
			} else {
				scan_type = &channels[i].scan_type;

				ret = iio_channel_validate_scan_type(
						&indio_dev->dev, i, scan_type);
				if (ret)
					goto error_cleanup_dynamic;
			}

			ret = iio_buffer_add_channel_sysfs(indio_dev, buffer,
							   &channels[i]);
			if (ret < 0)
				goto error_cleanup_dynamic;
			scan_el_attrcount += ret;
			if (channels[i].type == IIO_TIMESTAMP)
				iio_dev_opaque->scan_index_timestamp =
					channels[i].scan_index;
		}
		if (indio_dev->masklength && !buffer->scan_mask) {
			buffer->scan_mask = bitmap_zalloc(indio_dev->masklength,
							  GFP_KERNEL);
			if (!buffer->scan_mask) {
				ret = -ENOMEM;
				goto error_cleanup_dynamic;
			}
		}
	}

	/* +1 for the NULL terminator (kcalloc zero-fills) */
	attrn = buffer_attrcount + scan_el_attrcount;
	attr = kcalloc(attrn + 1, sizeof(*attr), GFP_KERNEL);
	if (!attr) {
		ret = -ENOMEM;
		goto error_free_scan_mask;
	}

	memcpy(attr, iio_buffer_attrs, sizeof(iio_buffer_attrs));
	/* attr[0] is "length", attr[2] is "watermark" (see iio_buffer_attrs) */
	if (!buffer->access->set_length)
		attr[0] = &dev_attr_length_ro.attr;

	if (buffer->access->flags & INDIO_BUFFER_FLAG_FIXED_WATERMARK)
		attr[2] = &dev_attr_watermark_ro.attr;

	/* Driver-supplied attributes follow the core ones */
	if (buffer->attrs)
		for (i = 0, id_attr = buffer->attrs[i];
		     (id_attr = buffer->attrs[i]); i++)
			attr[ARRAY_SIZE(iio_buffer_attrs) + i] =
				(struct attribute *)&id_attr->dev_attr.attr;

	buffer->buffer_group.attrs = attr;

	/* Wrap buffer attributes so callbacks can find this buffer */
	for (i = 0; i < buffer_attrcount; i++) {
		struct attribute *wrapped;

		wrapped = iio_buffer_wrap_attr(buffer, attr[i]);
		if (!wrapped) {
			ret = -ENOMEM;
			goto error_free_buffer_attrs;
		}
		attr[i] = wrapped;
	}

	attrn = 0;
	list_for_each_entry(p, &buffer->buffer_attr_list, l)
		attr[attrn++] = &p->dev_attr.attr;

	buffer->buffer_group.name = kasprintf(GFP_KERNEL, "buffer%d", index);
	if (!buffer->buffer_group.name) {
		ret = -ENOMEM;
		goto error_free_buffer_attrs;
	}

	ret = iio_device_register_sysfs_group(indio_dev, &buffer->buffer_group);
	if (ret)
		goto error_free_buffer_attr_group_name;

	/* we only need to register the legacy groups for the first buffer */
	if (index > 0)
		return 0;

	ret = iio_buffer_register_legacy_sysfs_groups(indio_dev, attr,
						      buffer_attrcount,
						      scan_el_attrcount);
	if (ret)
		goto error_free_buffer_attr_group_name;

	return 0;

error_free_buffer_attr_group_name:
	kfree(buffer->buffer_group.name);
error_free_buffer_attrs:
	kfree(buffer->buffer_group.attrs);
error_free_scan_mask:
	bitmap_free(buffer->scan_mask);
error_cleanup_dynamic:
	iio_free_chan_devattr_list(&buffer->buffer_attr_list);

	return ret;
}
1791 
/*
 * Undo __iio_buffer_alloc_sysfs_and_mask() for one buffer. The legacy
 * sysfs groups only exist for buffer 0, so they are only freed there.
 */
static void __iio_buffer_free_sysfs_and_mask(struct iio_buffer *buffer,
					     struct iio_dev *indio_dev,
					     int index)
{
	if (index == 0)
		iio_buffer_unregister_legacy_sysfs_groups(indio_dev);
	bitmap_free(buffer->scan_mask);
	kfree(buffer->buffer_group.name);
	kfree(buffer->buffer_group.attrs);
	iio_free_chan_devattr_list(&buffer->buffer_attr_list);
}
1803 
/*
 * Compute the device's scan mask length, create sysfs state for every
 * attached buffer, and register the buffer fd ioctl handler. On failure,
 * the sysfs state of the buffers processed so far is torn down again.
 */
int iio_buffers_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	const struct iio_chan_spec *channels;
	struct iio_buffer *buffer;
	int ret, i, idx;
	size_t sz;

	channels = indio_dev->channels;
	if (channels) {
		int ml = 0;

		/* masklength is one past the highest scan index in use */
		for (i = 0; i < indio_dev->num_channels; i++)
			ml = max(ml, channels[i].scan_index + 1);
		indio_dev->masklength = ml;
	}

	if (!iio_dev_opaque->attached_buffers_cnt)
		return 0;

	for (idx = 0; idx < iio_dev_opaque->attached_buffers_cnt; idx++) {
		buffer = iio_dev_opaque->attached_buffers[idx];
		ret = __iio_buffer_alloc_sysfs_and_mask(buffer, indio_dev, idx);
		if (ret)
			goto error_unwind_sysfs_and_mask;
	}

	sz = sizeof(*iio_dev_opaque->buffer_ioctl_handler);
	iio_dev_opaque->buffer_ioctl_handler = kzalloc(sz, GFP_KERNEL);
	if (!iio_dev_opaque->buffer_ioctl_handler) {
		ret = -ENOMEM;
		goto error_unwind_sysfs_and_mask;
	}

	iio_dev_opaque->buffer_ioctl_handler->ioctl = iio_device_buffer_ioctl;
	iio_device_ioctl_handler_register(indio_dev,
					  iio_dev_opaque->buffer_ioctl_handler);

	return 0;

error_unwind_sysfs_and_mask:
	/* idx is the first buffer that was NOT successfully set up */
	while (idx--) {
		buffer = iio_dev_opaque->attached_buffers[idx];
		__iio_buffer_free_sysfs_and_mask(buffer, indio_dev, idx);
	}
	return ret;
}
1851 
/*
 * Full teardown counterpart of iio_buffers_alloc_sysfs_and_mask():
 * unregister the ioctl handler and free each buffer's sysfs state in
 * reverse attachment order.
 */
void iio_buffers_free_sysfs_and_mask(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer;
	int i;

	if (!iio_dev_opaque->attached_buffers_cnt)
		return;

	iio_device_ioctl_handler_unregister(iio_dev_opaque->buffer_ioctl_handler);
	kfree(iio_dev_opaque->buffer_ioctl_handler);

	for (i = iio_dev_opaque->attached_buffers_cnt - 1; i >= 0; i--) {
		buffer = iio_dev_opaque->attached_buffers[i];
		__iio_buffer_free_sysfs_and_mask(buffer, indio_dev, i);
	}
}
1869 
1870 /**
1871  * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
1872  * @indio_dev: the iio device
1873  * @mask: scan mask to be checked
1874  *
1875  * Return true if exactly one bit is set in the scan mask, false otherwise. It
1876  * can be used for devices where only one channel can be active for sampling at
1877  * a time.
1878  */
1879 bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
1880 				   const unsigned long *mask)
1881 {
1882 	return bitmap_weight(mask, indio_dev->masklength) == 1;
1883 }
1884 EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
1885 
1886 static const void *iio_demux(struct iio_buffer *buffer,
1887 			     const void *datain)
1888 {
1889 	struct iio_demux_table *t;
1890 
1891 	if (list_empty(&buffer->demux_list))
1892 		return datain;
1893 	list_for_each_entry(t, &buffer->demux_list, l)
1894 		memcpy(buffer->demux_bounce + t->to,
1895 		       datain + t->from, t->length);
1896 
1897 	return buffer->demux_bounce;
1898 }
1899 
1900 static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
1901 {
1902 	const void *dataout = iio_demux(buffer, data);
1903 	int ret;
1904 
1905 	ret = buffer->access->store_to(buffer, dataout);
1906 	if (ret)
1907 		return ret;
1908 
1909 	/*
1910 	 * We can't just test for watermark to decide if we wake the poll queue
1911 	 * because read may request less samples than the watermark.
1912 	 */
1913 	wake_up_interruptible_poll(&buffer->pollq, EPOLLIN | EPOLLRDNORM);
1914 	return 0;
1915 }
1916 
1917 /**
1918  * iio_push_to_buffers() - push to a registered buffer.
1919  * @indio_dev:		iio_dev structure for device.
1920  * @data:		Full scan.
1921  */
1922 int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
1923 {
1924 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1925 	int ret;
1926 	struct iio_buffer *buf;
1927 
1928 	list_for_each_entry(buf, &iio_dev_opaque->buffer_list, buffer_list) {
1929 		ret = iio_push_to_buffer(buf, data);
1930 		if (ret < 0)
1931 			return ret;
1932 	}
1933 
1934 	return 0;
1935 }
1936 EXPORT_SYMBOL_GPL(iio_push_to_buffers);
1937 
1938 /**
1939  * iio_push_to_buffers_with_ts_unaligned() - push to registered buffer,
1940  *    no alignment or space requirements.
1941  * @indio_dev:		iio_dev structure for device.
1942  * @data:		channel data excluding the timestamp.
1943  * @data_sz:		size of data.
1944  * @timestamp:		timestamp for the sample data.
1945  *
1946  * This special variant of iio_push_to_buffers_with_timestamp() does
1947  * not require space for the timestamp, or 8 byte alignment of data.
1948  * It does however require an allocation on first call and additional
1949  * copies on all calls, so should be avoided if possible.
1950  */
1951 int iio_push_to_buffers_with_ts_unaligned(struct iio_dev *indio_dev,
1952 					  const void *data,
1953 					  size_t data_sz,
1954 					  int64_t timestamp)
1955 {
1956 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1957 
1958 	/*
1959 	 * Conservative estimate - we can always safely copy the minimum
1960 	 * of either the data provided or the length of the destination buffer.
1961 	 * This relaxed limit allows the calling drivers to be lax about
1962 	 * tracking the size of the data they are pushing, at the cost of
1963 	 * unnecessary copying of padding.
1964 	 */
1965 	data_sz = min_t(size_t, indio_dev->scan_bytes, data_sz);
1966 	if (iio_dev_opaque->bounce_buffer_size !=  indio_dev->scan_bytes) {
1967 		void *bb;
1968 
1969 		bb = devm_krealloc(&indio_dev->dev,
1970 				   iio_dev_opaque->bounce_buffer,
1971 				   indio_dev->scan_bytes, GFP_KERNEL);
1972 		if (!bb)
1973 			return -ENOMEM;
1974 		iio_dev_opaque->bounce_buffer = bb;
1975 		iio_dev_opaque->bounce_buffer_size = indio_dev->scan_bytes;
1976 	}
1977 	memcpy(iio_dev_opaque->bounce_buffer, data, data_sz);
1978 	return iio_push_to_buffers_with_timestamp(indio_dev,
1979 						  iio_dev_opaque->bounce_buffer,
1980 						  timestamp);
1981 }
1982 EXPORT_SYMBOL_GPL(iio_push_to_buffers_with_ts_unaligned);
1983 
1984 /**
1985  * iio_buffer_release() - Free a buffer's resources
1986  * @ref: Pointer to the kref embedded in the iio_buffer struct
1987  *
1988  * This function is called when the last reference to the buffer has been
1989  * dropped. It will typically free all resources allocated by the buffer. Do not
1990  * call this function manually, always use iio_buffer_put() when done using a
1991  * buffer.
1992  */
1993 static void iio_buffer_release(struct kref *ref)
1994 {
1995 	struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);
1996 
1997 	buffer->access->release(buffer);
1998 }
1999 
2000 /**
2001  * iio_buffer_get() - Grab a reference to the buffer
2002  * @buffer: The buffer to grab a reference for, may be NULL
2003  *
2004  * Returns the pointer to the buffer that was passed into the function.
2005  */
2006 struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
2007 {
2008 	if (buffer)
2009 		kref_get(&buffer->ref);
2010 
2011 	return buffer;
2012 }
2013 EXPORT_SYMBOL_GPL(iio_buffer_get);
2014 
2015 /**
2016  * iio_buffer_put() - Release the reference to the buffer
2017  * @buffer: The buffer to release the reference for, may be NULL
2018  */
2019 void iio_buffer_put(struct iio_buffer *buffer)
2020 {
2021 	if (buffer)
2022 		kref_put(&buffer->ref, iio_buffer_release);
2023 }
2024 EXPORT_SYMBOL_GPL(iio_buffer_put);
2025 
2026 /**
2027  * iio_device_attach_buffer - Attach a buffer to a IIO device
2028  * @indio_dev: The device the buffer should be attached to
2029  * @buffer: The buffer to attach to the device
2030  *
2031  * Return 0 if successful, negative if error.
2032  *
2033  * This function attaches a buffer to a IIO device. The buffer stays attached to
2034  * the device until the device is freed. For legacy reasons, the first attached
2035  * buffer will also be assigned to 'indio_dev->buffer'.
2036  * The array allocated here, will be free'd via the iio_device_detach_buffers()
2037  * call which is handled by the iio_device_free().
2038  */
2039 int iio_device_attach_buffer(struct iio_dev *indio_dev,
2040 			     struct iio_buffer *buffer)
2041 {
2042 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
2043 	struct iio_buffer **new, **old = iio_dev_opaque->attached_buffers;
2044 	unsigned int cnt = iio_dev_opaque->attached_buffers_cnt;
2045 
2046 	cnt++;
2047 
2048 	new = krealloc(old, sizeof(*new) * cnt, GFP_KERNEL);
2049 	if (!new)
2050 		return -ENOMEM;
2051 	iio_dev_opaque->attached_buffers = new;
2052 
2053 	buffer = iio_buffer_get(buffer);
2054 
2055 	/* first buffer is legacy; attach it to the IIO device directly */
2056 	if (!indio_dev->buffer)
2057 		indio_dev->buffer = buffer;
2058 
2059 	iio_dev_opaque->attached_buffers[cnt - 1] = buffer;
2060 	iio_dev_opaque->attached_buffers_cnt = cnt;
2061 
2062 	return 0;
2063 }
2064 EXPORT_SYMBOL_GPL(iio_device_attach_buffer);
2065