xref: /linux/drivers/iio/industrialio-buffer.c (revision 001821b0e79716c4e17c71d8e053a23599a7a508)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* The industrial I/O core
3  *
4  * Copyright (c) 2008 Jonathan Cameron
5  *
6  * Handling of buffer allocation / resizing.
7  *
8  * Things to look at here.
9  * - Better memory allocation techniques?
10  * - Alternative access techniques?
11  */
12 #include <linux/anon_inodes.h>
13 #include <linux/cleanup.h>
14 #include <linux/kernel.h>
15 #include <linux/export.h>
16 #include <linux/device.h>
17 #include <linux/file.h>
18 #include <linux/fs.h>
19 #include <linux/cdev.h>
20 #include <linux/slab.h>
21 #include <linux/poll.h>
22 #include <linux/sched/signal.h>
23 
24 #include <linux/iio/iio.h>
25 #include <linux/iio/iio-opaque.h>
26 #include "iio_core.h"
27 #include "iio_core_trigger.h"
28 #include <linux/iio/sysfs.h>
29 #include <linux/iio/buffer.h>
30 #include <linux/iio/buffer_impl.h>
31 
32 static const char * const iio_endian_prefix[] = {
33 	[IIO_BE] = "be",
34 	[IIO_LE] = "le",
35 };
36 
37 static bool iio_buffer_is_active(struct iio_buffer *buf)
38 {
39 	return !list_empty(&buf->buffer_list);
40 }
41 
42 static size_t iio_buffer_data_available(struct iio_buffer *buf)
43 {
44 	return buf->access->data_available(buf);
45 }
46 
47 static int iio_buffer_flush_hwfifo(struct iio_dev *indio_dev,
48 				   struct iio_buffer *buf, size_t required)
49 {
50 	if (!indio_dev->info->hwfifo_flush_to_buffer)
51 		return -ENODEV;
52 
53 	return indio_dev->info->hwfifo_flush_to_buffer(indio_dev, required);
54 }
55 
56 static bool iio_buffer_ready(struct iio_dev *indio_dev, struct iio_buffer *buf,
57 			     size_t to_wait, int to_flush)
58 {
59 	size_t avail;
60 	int flushed = 0;
61 
62 	/* wakeup if the device was unregistered */
63 	if (!indio_dev->info)
64 		return true;
65 
66 	/* drain the buffer if it was disabled */
67 	if (!iio_buffer_is_active(buf)) {
68 		to_wait = min_t(size_t, to_wait, 1);
69 		to_flush = 0;
70 	}
71 
72 	avail = iio_buffer_data_available(buf);
73 
74 	if (avail >= to_wait) {
75 		/* force a flush for non-blocking reads */
76 		if (!to_wait && avail < to_flush)
77 			iio_buffer_flush_hwfifo(indio_dev, buf,
78 						to_flush - avail);
79 		return true;
80 	}
81 
82 	if (to_flush)
83 		flushed = iio_buffer_flush_hwfifo(indio_dev, buf,
84 						  to_wait - avail);
85 	if (flushed <= 0)
86 		return false;
87 
88 	if (avail + flushed >= to_wait)
89 		return true;
90 
91 	return false;
92 }
93 
94 /**
95  * iio_buffer_read() - chrdev read for buffer access
96  * @filp:	File structure pointer for the char device
97  * @buf:	Destination buffer for iio buffer read
98  * @n:		Maximum number of bytes to read
99  * @f_ps:	Long offset provided by the user as a seek position
100  *
101  * This function relies on all buffer implementations having an
102  * iio_buffer as their first element.
103  *
104  * Return: negative error code on failure, 0 to signal end of file, or the
105  *	   number of bytes read on success
106  **/
107 static ssize_t iio_buffer_read(struct file *filp, char __user *buf,
108 			       size_t n, loff_t *f_ps)
109 {
110 	struct iio_dev_buffer_pair *ib = filp->private_data;
111 	struct iio_buffer *rb = ib->buffer;
112 	struct iio_dev *indio_dev = ib->indio_dev;
113 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
114 	size_t datum_size;
115 	size_t to_wait;
116 	int ret = 0;
117 
118 	if (!indio_dev->info)
119 		return -ENODEV;
120 
121 	if (!rb || !rb->access->read)
122 		return -EINVAL;
123 
124 	if (rb->direction != IIO_BUFFER_DIRECTION_IN)
125 		return -EPERM;
126 
127 	datum_size = rb->bytes_per_datum;
128 
129 	/*
130 	 * If datum_size is 0 there will never be anything to read from the
131 	 * buffer, so signal end of file now.
132 	 */
133 	if (!datum_size)
134 		return 0;
135 
136 	if (filp->f_flags & O_NONBLOCK)
137 		to_wait = 0;
138 	else
139 		to_wait = min_t(size_t, n / datum_size, rb->watermark);
140 
141 	add_wait_queue(&rb->pollq, &wait);
142 	do {
143 		if (!indio_dev->info) {
144 			ret = -ENODEV;
145 			break;
146 		}
147 
148 		if (!iio_buffer_ready(indio_dev, rb, to_wait, n / datum_size)) {
149 			if (signal_pending(current)) {
150 				ret = -ERESTARTSYS;
151 				break;
152 			}
153 
154 			wait_woken(&wait, TASK_INTERRUPTIBLE,
155 				   MAX_SCHEDULE_TIMEOUT);
156 			continue;
157 		}
158 
159 		ret = rb->access->read(rb, n, buf);
160 		if (ret == 0 && (filp->f_flags & O_NONBLOCK))
161 			ret = -EAGAIN;
162 	} while (ret == 0);
163 	remove_wait_queue(&rb->pollq, &wait);
164 
165 	return ret;
166 }
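
/*
 * Example (userspace sketch, illustrative only): a blocking reader pulling
 * whole scans through the buffer character device. The device path, scan
 * size and process_scan() are assumptions made for the example.
 *
 *	int fd = open("/dev/iio:device0", O_RDONLY);
 *	char scan[16];				// assumed bytes per scan
 *	ssize_t n;
 *
 *	while ((n = read(fd, scan, sizeof(scan))) > 0)
 *		process_scan(scan, n);		// hypothetical consumer
 */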
167 
168 static size_t iio_buffer_space_available(struct iio_buffer *buf)
169 {
170 	if (buf->access->space_available)
171 		return buf->access->space_available(buf);
172 
173 	return SIZE_MAX;
174 }
175 
176 static ssize_t iio_buffer_write(struct file *filp, const char __user *buf,
177 				size_t n, loff_t *f_ps)
178 {
179 	struct iio_dev_buffer_pair *ib = filp->private_data;
180 	struct iio_buffer *rb = ib->buffer;
181 	struct iio_dev *indio_dev = ib->indio_dev;
182 	DEFINE_WAIT_FUNC(wait, woken_wake_function);
183 	int ret = 0;
184 	size_t written;
185 
186 	if (!indio_dev->info)
187 		return -ENODEV;
188 
189 	if (!rb || !rb->access->write)
190 		return -EINVAL;
191 
192 	if (rb->direction != IIO_BUFFER_DIRECTION_OUT)
193 		return -EPERM;
194 
195 	written = 0;
196 	add_wait_queue(&rb->pollq, &wait);
197 	do {
198 		if (!indio_dev->info)
199 			return -ENODEV;
200 
201 		if (!iio_buffer_space_available(rb)) {
202 			if (signal_pending(current)) {
203 				ret = -ERESTARTSYS;
204 				break;
205 			}
206 
207 			if (filp->f_flags & O_NONBLOCK) {
208 				if (!written)
209 					ret = -EAGAIN;
210 				break;
211 			}
212 
213 			wait_woken(&wait, TASK_INTERRUPTIBLE,
214 				   MAX_SCHEDULE_TIMEOUT);
215 			continue;
216 		}
217 
218 		ret = rb->access->write(rb, n - written, buf + written);
219 		if (ret < 0)
220 			break;
221 
222 		written += ret;
223 
224 	} while (written != n);
225 	remove_wait_queue(&rb->pollq, &wait);
226 
227 	return ret < 0 ? ret : written;
228 }
229 
230 /**
231  * iio_buffer_poll() - poll the buffer to find out if it has data
232  * @filp:	File structure pointer for device access
233  * @wait:	Poll table structure pointer for which the driver adds
234  *		a wait queue
235  *
236  * Return: (EPOLLIN | EPOLLRDNORM) if data is available for reading
237  *	   or 0 for other cases
238  */
239 static __poll_t iio_buffer_poll(struct file *filp,
240 				struct poll_table_struct *wait)
241 {
242 	struct iio_dev_buffer_pair *ib = filp->private_data;
243 	struct iio_buffer *rb = ib->buffer;
244 	struct iio_dev *indio_dev = ib->indio_dev;
245 
246 	if (!indio_dev->info || !rb)
247 		return 0;
248 
249 	poll_wait(filp, &rb->pollq, wait);
250 
251 	switch (rb->direction) {
252 	case IIO_BUFFER_DIRECTION_IN:
253 		if (iio_buffer_ready(indio_dev, rb, rb->watermark, 0))
254 			return EPOLLIN | EPOLLRDNORM;
255 		break;
256 	case IIO_BUFFER_DIRECTION_OUT:
257 		if (iio_buffer_space_available(rb))
258 			return EPOLLOUT | EPOLLWRNORM;
259 		break;
260 	}
261 
262 	return 0;
263 }
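
/*
 * Example (userspace sketch, illustrative only): waiting for the watermark
 * with poll() before reading, matching the EPOLLIN | EPOLLRDNORM case above.
 * buffer_fd, scan and scan_size are assumptions.
 *
 *	struct pollfd pfd = { .fd = buffer_fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		read(buffer_fd, scan, scan_size);
 */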
264 
265 ssize_t iio_buffer_read_wrapper(struct file *filp, char __user *buf,
266 				size_t n, loff_t *f_ps)
267 {
268 	struct iio_dev_buffer_pair *ib = filp->private_data;
269 	struct iio_buffer *rb = ib->buffer;
270 
271 	/* check if buffer was opened through new API */
272 	if (test_bit(IIO_BUSY_BIT_POS, &rb->flags))
273 		return -EBUSY;
274 
275 	return iio_buffer_read(filp, buf, n, f_ps);
276 }
277 
278 ssize_t iio_buffer_write_wrapper(struct file *filp, const char __user *buf,
279 				 size_t n, loff_t *f_ps)
280 {
281 	struct iio_dev_buffer_pair *ib = filp->private_data;
282 	struct iio_buffer *rb = ib->buffer;
283 
284 	/* check if buffer was opened through new API */
285 	if (test_bit(IIO_BUSY_BIT_POS, &rb->flags))
286 		return -EBUSY;
287 
288 	return iio_buffer_write(filp, buf, n, f_ps);
289 }
290 
291 __poll_t iio_buffer_poll_wrapper(struct file *filp,
292 				 struct poll_table_struct *wait)
293 {
294 	struct iio_dev_buffer_pair *ib = filp->private_data;
295 	struct iio_buffer *rb = ib->buffer;
296 
297 	/* check if buffer was opened through new API */
298 	if (test_bit(IIO_BUSY_BIT_POS, &rb->flags))
299 		return 0;
300 
301 	return iio_buffer_poll(filp, wait);
302 }
303 
304 /**
305  * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
306  * @indio_dev: The IIO device
307  *
308  * Wakes up the buffer waitqueues used for poll(). Should usually
309  * be called when the device is unregistered.
310  */
311 void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
312 {
313 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
314 	struct iio_buffer *buffer;
315 	unsigned int i;
316 
317 	for (i = 0; i < iio_dev_opaque->attached_buffers_cnt; i++) {
318 		buffer = iio_dev_opaque->attached_buffers[i];
319 		wake_up(&buffer->pollq);
320 	}
321 }
322 
323 int iio_pop_from_buffer(struct iio_buffer *buffer, void *data)
324 {
325 	if (!buffer || !buffer->access || !buffer->access->remove_from)
326 		return -EINVAL;
327 
328 	return buffer->access->remove_from(buffer, data);
329 }
330 EXPORT_SYMBOL_GPL(iio_pop_from_buffer);
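
/*
 * Example (hypothetical driver sketch, not taken from an in-tree driver):
 * an output device's trigger handler popping the next scan from its buffer
 * and writing it to hardware. my_write_sample() and the 16-bit sample
 * format are assumptions.
 *
 *	static irqreturn_t my_trigger_handler(int irq, void *p)
 *	{
 *		struct iio_poll_func *pf = p;
 *		struct iio_dev *indio_dev = pf->indio_dev;
 *		u16 sample;
 *
 *		if (!iio_pop_from_buffer(indio_dev->buffer, &sample))
 *			my_write_sample(indio_dev, sample);
 *
 *		iio_trigger_notify_done(indio_dev->trig);
 *		return IRQ_HANDLED;
 *	}
 */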
331 
332 void iio_buffer_init(struct iio_buffer *buffer)
333 {
334 	INIT_LIST_HEAD(&buffer->demux_list);
335 	INIT_LIST_HEAD(&buffer->buffer_list);
336 	init_waitqueue_head(&buffer->pollq);
337 	kref_init(&buffer->ref);
338 	if (!buffer->watermark)
339 		buffer->watermark = 1;
340 }
341 EXPORT_SYMBOL(iio_buffer_init);
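
/*
 * Example (sketch of a hypothetical buffer backend): implementations are
 * expected to embed struct iio_buffer as their first member and call
 * iio_buffer_init() before handing the buffer out. The backend type and
 * access ops below are assumptions.
 *
 *	struct my_buffer {
 *		struct iio_buffer buffer;	// must be the first member
 *		// backend specific state ...
 *	};
 *
 *	static struct iio_buffer *my_buffer_allocate(void)
 *	{
 *		struct my_buffer *b = kzalloc(sizeof(*b), GFP_KERNEL);
 *
 *		if (!b)
 *			return NULL;
 *		b->buffer.access = &my_buffer_access_funcs;
 *		iio_buffer_init(&b->buffer);
 *		return &b->buffer;
 *	}
 */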
342 
343 void iio_device_detach_buffers(struct iio_dev *indio_dev)
344 {
345 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
346 	struct iio_buffer *buffer;
347 	unsigned int i;
348 
349 	for (i = 0; i < iio_dev_opaque->attached_buffers_cnt; i++) {
350 		buffer = iio_dev_opaque->attached_buffers[i];
351 		iio_buffer_put(buffer);
352 	}
353 
354 	kfree(iio_dev_opaque->attached_buffers);
355 }
356 
357 static ssize_t iio_show_scan_index(struct device *dev,
358 				   struct device_attribute *attr,
359 				   char *buf)
360 {
361 	return sysfs_emit(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
362 }
363 
364 static ssize_t iio_show_fixed_type(struct device *dev,
365 				   struct device_attribute *attr,
366 				   char *buf)
367 {
368 	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
369 	u8 type = this_attr->c->scan_type.endianness;
370 
371 	if (type == IIO_CPU) {
372 #ifdef __LITTLE_ENDIAN
373 		type = IIO_LE;
374 #else
375 		type = IIO_BE;
376 #endif
377 	}
378 	if (this_attr->c->scan_type.repeat > 1)
379 		return sysfs_emit(buf, "%s:%c%d/%dX%d>>%u\n",
380 		       iio_endian_prefix[type],
381 		       this_attr->c->scan_type.sign,
382 		       this_attr->c->scan_type.realbits,
383 		       this_attr->c->scan_type.storagebits,
384 		       this_attr->c->scan_type.repeat,
385 		       this_attr->c->scan_type.shift);
386 	else
387 		return sysfs_emit(buf, "%s:%c%d/%d>>%u\n",
388 		       iio_endian_prefix[type],
389 		       this_attr->c->scan_type.sign,
390 		       this_attr->c->scan_type.realbits,
391 		       this_attr->c->scan_type.storagebits,
392 		       this_attr->c->scan_type.shift);
393 }
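
/*
 * Example of the resulting "type" attribute string: a signed 12-bit sample
 * stored in 16 bits, shifted right by 4, little endian reads "le:s12/16>>4";
 * the same channel with a repeat count of 8 reads "le:s12/16X8>>4".
 */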
394 
395 static ssize_t iio_scan_el_show(struct device *dev,
396 				struct device_attribute *attr,
397 				char *buf)
398 {
399 	int ret;
400 	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
401 
402 	/* Ensure ret is 0 or 1. */
403 	ret = !!test_bit(to_iio_dev_attr(attr)->address,
404 		       buffer->scan_mask);
405 
406 	return sysfs_emit(buf, "%d\n", ret);
407 }
408 
409 /* Note NULL used as error indicator as it doesn't make sense. */
410 static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
411 						unsigned int masklength,
412 						const unsigned long *mask,
413 						bool strict)
414 {
415 	if (bitmap_empty(mask, masklength))
416 		return NULL;
417 	/*
418	 * The condition here does not handle multi-long masks correctly.
419	 * It only checks the first long to be zero, and will use such a mask
420	 * as a terminator even if there were bits set after the first long.
421	 *
422	 * A correct check would require using:
423	 * while (!bitmap_empty(av_masks, masklength))
424	 * instead. This is potentially hazardous because the
425	 * available_scan_masks is a zero-terminated array of longs - and
426	 * using the proper bitmap_empty() check for multi-long wide masks
427	 * would require the array to be terminated with multiple zero longs -
428	 * which is not such a usual pattern.
429	 *
430	 * As of writing this, no multi-long wide masks were found in-tree, so
431	 * the simple while (*av_masks) check is working.
432 	 */
433 	while (*av_masks) {
434 		if (strict) {
435 			if (bitmap_equal(mask, av_masks, masklength))
436 				return av_masks;
437 		} else {
438 			if (bitmap_subset(mask, av_masks, masklength))
439 				return av_masks;
440 		}
441 		av_masks += BITS_TO_LONGS(masklength);
442 	}
443 	return NULL;
444 }
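
/*
 * Example (hypothetical driver data, single-long masks): the list walked
 * above is a zero-terminated array with one entry per allowed combination
 * of channels.
 *
 *	static const unsigned long my_scan_masks[] = {
 *		BIT(0) | BIT(1),		// channels 0 and 1
 *		BIT(0) | BIT(1) | BIT(2),	// channels 0, 1 and 2
 *		0,				// terminator, see while (*av_masks)
 *	};
 *
 *	indio_dev->available_scan_masks = my_scan_masks;
 */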
445 
446 static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
447 				   const unsigned long *mask)
448 {
449 	if (!indio_dev->setup_ops->validate_scan_mask)
450 		return true;
451 
452 	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
453 }
454 
455 /**
456  * iio_scan_mask_set() - set particular bit in the scan mask
457  * @indio_dev: the iio device
458  * @buffer: the buffer whose scan mask we are interested in
459  * @bit: the bit to be set.
460  *
461  * Note that at this point we have no way of knowing what other
462  * buffers might request, hence this code only verifies that the
463  * individual buffer's request is plausible.
464  */
465 static int iio_scan_mask_set(struct iio_dev *indio_dev,
466 			     struct iio_buffer *buffer, int bit)
467 {
468 	const unsigned long *mask;
469 	unsigned long *trialmask;
470 
471 	if (!indio_dev->masklength) {
472 		WARN(1, "Trying to set scanmask prior to registering buffer\n");
473 		return -EINVAL;
474 	}
475 
476 	trialmask = bitmap_alloc(indio_dev->masklength, GFP_KERNEL);
477 	if (!trialmask)
478 		return -ENOMEM;
479 	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
480 	set_bit(bit, trialmask);
481 
482 	if (!iio_validate_scan_mask(indio_dev, trialmask))
483 		goto err_invalid_mask;
484 
485 	if (indio_dev->available_scan_masks) {
486 		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
487 					   indio_dev->masklength,
488 					   trialmask, false);
489 		if (!mask)
490 			goto err_invalid_mask;
491 	}
492 	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);
493 
494 	bitmap_free(trialmask);
495 
496 	return 0;
497 
498 err_invalid_mask:
499 	bitmap_free(trialmask);
500 	return -EINVAL;
501 }
502 
503 static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
504 {
505 	clear_bit(bit, buffer->scan_mask);
506 	return 0;
507 }
508 
509 static int iio_scan_mask_query(struct iio_dev *indio_dev,
510 			       struct iio_buffer *buffer, int bit)
511 {
512 	if (bit > indio_dev->masklength)
513 		return -EINVAL;
514 
515 	if (!buffer->scan_mask)
516 		return 0;
517 
518 	/* Ensure return value is 0 or 1. */
519 	return !!test_bit(bit, buffer->scan_mask);
520 };
521 
522 static ssize_t iio_scan_el_store(struct device *dev,
523 				 struct device_attribute *attr,
524 				 const char *buf,
525 				 size_t len)
526 {
527 	int ret;
528 	bool state;
529 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
530 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
531 	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
532 	struct iio_buffer *buffer = this_attr->buffer;
533 
534 	ret = kstrtobool(buf, &state);
535 	if (ret < 0)
536 		return ret;
537 
538 	guard(mutex)(&iio_dev_opaque->mlock);
539 	if (iio_buffer_is_active(buffer))
540 		return -EBUSY;
541 
542 	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
543 	if (ret < 0)
544 		return ret;
545 
546 	if (state && ret)
547 		return len;
548 
549 	if (state)
550 		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
551 	else
552 		ret = iio_scan_mask_clear(buffer, this_attr->address);
553 	if (ret)
554 		return ret;
555 
556 	return len;
557 }
558 
559 static ssize_t iio_scan_el_ts_show(struct device *dev,
560 				   struct device_attribute *attr,
561 				   char *buf)
562 {
563 	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
564 
565 	return sysfs_emit(buf, "%d\n", buffer->scan_timestamp);
566 }
567 
568 static ssize_t iio_scan_el_ts_store(struct device *dev,
569 				    struct device_attribute *attr,
570 				    const char *buf,
571 				    size_t len)
572 {
573 	int ret;
574 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
575 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
576 	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
577 	bool state;
578 
579 	ret = kstrtobool(buf, &state);
580 	if (ret < 0)
581 		return ret;
582 
583 	guard(mutex)(&iio_dev_opaque->mlock);
584 	if (iio_buffer_is_active(buffer))
585 		return -EBUSY;
586 
587 	buffer->scan_timestamp = state;
588 
589 	return len;
590 }
591 
592 static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
593 					struct iio_buffer *buffer,
594 					const struct iio_chan_spec *chan)
595 {
596 	int ret, attrcount = 0;
597 
598 	ret = __iio_add_chan_devattr("index",
599 				     chan,
600 				     &iio_show_scan_index,
601 				     NULL,
602 				     0,
603 				     IIO_SEPARATE,
604 				     &indio_dev->dev,
605 				     buffer,
606 				     &buffer->buffer_attr_list);
607 	if (ret)
608 		return ret;
609 	attrcount++;
610 	ret = __iio_add_chan_devattr("type",
611 				     chan,
612 				     &iio_show_fixed_type,
613 				     NULL,
614 				     0,
615 				     IIO_SEPARATE,
616 				     &indio_dev->dev,
617 				     buffer,
618 				     &buffer->buffer_attr_list);
619 	if (ret)
620 		return ret;
621 	attrcount++;
622 	if (chan->type != IIO_TIMESTAMP)
623 		ret = __iio_add_chan_devattr("en",
624 					     chan,
625 					     &iio_scan_el_show,
626 					     &iio_scan_el_store,
627 					     chan->scan_index,
628 					     IIO_SEPARATE,
629 					     &indio_dev->dev,
630 					     buffer,
631 					     &buffer->buffer_attr_list);
632 	else
633 		ret = __iio_add_chan_devattr("en",
634 					     chan,
635 					     &iio_scan_el_ts_show,
636 					     &iio_scan_el_ts_store,
637 					     chan->scan_index,
638 					     IIO_SEPARATE,
639 					     &indio_dev->dev,
640 					     buffer,
641 					     &buffer->buffer_attr_list);
642 	if (ret)
643 		return ret;
644 	attrcount++;
645 	ret = attrcount;
646 	return ret;
647 }
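
/*
 * Example of the attributes this creates for a channel named in_voltage0:
 * in_voltage0_index, in_voltage0_type and in_voltage0_en, exposed under the
 * per-buffer sysfs group (and mirrored in the legacy scan_elements group for
 * the first buffer). Attribute names follow the channel naming and so differ
 * per device.
 */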
648 
649 static ssize_t length_show(struct device *dev, struct device_attribute *attr,
650 			   char *buf)
651 {
652 	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
653 
654 	return sysfs_emit(buf, "%d\n", buffer->length);
655 }
656 
657 static ssize_t length_store(struct device *dev, struct device_attribute *attr,
658 			    const char *buf, size_t len)
659 {
660 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
661 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
662 	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
663 	unsigned int val;
664 	int ret;
665 
666 	ret = kstrtouint(buf, 10, &val);
667 	if (ret)
668 		return ret;
669 
670 	if (val == buffer->length)
671 		return len;
672 
673 	guard(mutex)(&iio_dev_opaque->mlock);
674 	if (iio_buffer_is_active(buffer))
675 		return -EBUSY;
676 
677 	buffer->access->set_length(buffer, val);
678 
679 	if (buffer->length && buffer->length < buffer->watermark)
680 		buffer->watermark = buffer->length;
681 
682 	return len;
683 }
684 
685 static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
686 			   char *buf)
687 {
688 	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
689 
690 	return sysfs_emit(buf, "%d\n", iio_buffer_is_active(buffer));
691 }
692 
693 static unsigned int iio_storage_bytes_for_si(struct iio_dev *indio_dev,
694 					     unsigned int scan_index)
695 {
696 	const struct iio_chan_spec *ch;
697 	unsigned int bytes;
698 
699 	ch = iio_find_channel_from_si(indio_dev, scan_index);
700 	bytes = ch->scan_type.storagebits / 8;
701 	if (ch->scan_type.repeat > 1)
702 		bytes *= ch->scan_type.repeat;
703 	return bytes;
704 }
705 
706 static unsigned int iio_storage_bytes_for_timestamp(struct iio_dev *indio_dev)
707 {
708 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
709 
710 	return iio_storage_bytes_for_si(indio_dev,
711 					iio_dev_opaque->scan_index_timestamp);
712 }
713 
714 static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
715 				  const unsigned long *mask, bool timestamp)
716 {
717 	unsigned int bytes = 0;
718 	int length, i, largest = 0;
719 
720 	/* How much space will the demuxed element take? */
721 	for_each_set_bit(i, mask,
722 			 indio_dev->masklength) {
723 		length = iio_storage_bytes_for_si(indio_dev, i);
724 		bytes = ALIGN(bytes, length);
725 		bytes += length;
726 		largest = max(largest, length);
727 	}
728 
729 	if (timestamp) {
730 		length = iio_storage_bytes_for_timestamp(indio_dev);
731 		bytes = ALIGN(bytes, length);
732 		bytes += length;
733 		largest = max(largest, length);
734 	}
735 
736 	bytes = ALIGN(bytes, largest);
737 	return bytes;
738 }
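
/*
 * Worked example of the computation above: two enabled channels with 16-bit
 * storage plus a 64-bit timestamp.
 *
 *	ch0 (2 bytes): ALIGN(0, 2) = 0,  bytes = 0 + 2 = 2
 *	ch1 (2 bytes): ALIGN(2, 2) = 2,  bytes = 2 + 2 = 4
 *	ts  (8 bytes): ALIGN(4, 8) = 8,  bytes = 8 + 8 = 16
 *	final ALIGN(16, 8) = 16 bytes per scan, i.e. 4 bytes of padding sit
 *	between ch1 and the timestamp.
 */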
739 
740 static void iio_buffer_activate(struct iio_dev *indio_dev,
741 				struct iio_buffer *buffer)
742 {
743 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
744 
745 	iio_buffer_get(buffer);
746 	list_add(&buffer->buffer_list, &iio_dev_opaque->buffer_list);
747 }
748 
749 static void iio_buffer_deactivate(struct iio_buffer *buffer)
750 {
751 	list_del_init(&buffer->buffer_list);
752 	wake_up_interruptible(&buffer->pollq);
753 	iio_buffer_put(buffer);
754 }
755 
756 static void iio_buffer_deactivate_all(struct iio_dev *indio_dev)
757 {
758 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
759 	struct iio_buffer *buffer, *_buffer;
760 
761 	list_for_each_entry_safe(buffer, _buffer,
762 				 &iio_dev_opaque->buffer_list, buffer_list)
763 		iio_buffer_deactivate(buffer);
764 }
765 
766 static int iio_buffer_enable(struct iio_buffer *buffer,
767 			     struct iio_dev *indio_dev)
768 {
769 	if (!buffer->access->enable)
770 		return 0;
771 	return buffer->access->enable(buffer, indio_dev);
772 }
773 
774 static int iio_buffer_disable(struct iio_buffer *buffer,
775 			      struct iio_dev *indio_dev)
776 {
777 	if (!buffer->access->disable)
778 		return 0;
779 	return buffer->access->disable(buffer, indio_dev);
780 }
781 
782 static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
783 					      struct iio_buffer *buffer)
784 {
785 	unsigned int bytes;
786 
787 	if (!buffer->access->set_bytes_per_datum)
788 		return;
789 
790 	bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
791 				       buffer->scan_timestamp);
792 
793 	buffer->access->set_bytes_per_datum(buffer, bytes);
794 }
795 
796 static int iio_buffer_request_update(struct iio_dev *indio_dev,
797 				     struct iio_buffer *buffer)
798 {
799 	int ret;
800 
801 	iio_buffer_update_bytes_per_datum(indio_dev, buffer);
802 	if (buffer->access->request_update) {
803 		ret = buffer->access->request_update(buffer);
804 		if (ret) {
805 			dev_dbg(&indio_dev->dev,
806 				"Buffer not started: buffer parameter update failed (%d)\n",
807 				ret);
808 			return ret;
809 		}
810 	}
811 
812 	return 0;
813 }
814 
815 static void iio_free_scan_mask(struct iio_dev *indio_dev,
816 			       const unsigned long *mask)
817 {
818 	/* If the mask is dynamically allocated free it, otherwise do nothing */
819 	if (!indio_dev->available_scan_masks)
820 		bitmap_free(mask);
821 }
822 
823 struct iio_device_config {
824 	unsigned int mode;
825 	unsigned int watermark;
826 	const unsigned long *scan_mask;
827 	unsigned int scan_bytes;
828 	bool scan_timestamp;
829 };
830 
831 static int iio_verify_update(struct iio_dev *indio_dev,
832 			     struct iio_buffer *insert_buffer,
833 			     struct iio_buffer *remove_buffer,
834 			     struct iio_device_config *config)
835 {
836 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
837 	unsigned long *compound_mask;
838 	const unsigned long *scan_mask;
839 	bool strict_scanmask = false;
840 	struct iio_buffer *buffer;
841 	bool scan_timestamp;
842 	unsigned int modes;
843 
844 	if (insert_buffer &&
845 	    bitmap_empty(insert_buffer->scan_mask, indio_dev->masklength)) {
846 		dev_dbg(&indio_dev->dev,
847 			"At least one scan element must be enabled first\n");
848 		return -EINVAL;
849 	}
850 
851 	memset(config, 0, sizeof(*config));
852 	config->watermark = ~0;
853 
854 	/*
855 	 * If there is just one buffer and we are removing it there is nothing
856 	 * to verify.
857 	 */
858 	if (remove_buffer && !insert_buffer &&
859 	    list_is_singular(&iio_dev_opaque->buffer_list))
860 		return 0;
861 
862 	modes = indio_dev->modes;
863 
864 	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
865 		if (buffer == remove_buffer)
866 			continue;
867 		modes &= buffer->access->modes;
868 		config->watermark = min(config->watermark, buffer->watermark);
869 	}
870 
871 	if (insert_buffer) {
872 		modes &= insert_buffer->access->modes;
873 		config->watermark = min(config->watermark,
874 					insert_buffer->watermark);
875 	}
876 
877	/* Devices can support both; prefer triggered if a trigger is set. */
878 	if ((modes & INDIO_BUFFER_TRIGGERED) && indio_dev->trig) {
879 		config->mode = INDIO_BUFFER_TRIGGERED;
880 	} else if (modes & INDIO_BUFFER_HARDWARE) {
881 		/*
882 		 * Keep things simple for now and only allow a single buffer to
883 		 * be connected in hardware mode.
884 		 */
885 		if (insert_buffer && !list_empty(&iio_dev_opaque->buffer_list))
886 			return -EINVAL;
887 		config->mode = INDIO_BUFFER_HARDWARE;
888 		strict_scanmask = true;
889 	} else if (modes & INDIO_BUFFER_SOFTWARE) {
890 		config->mode = INDIO_BUFFER_SOFTWARE;
891 	} else {
892 		/* Can only occur on first buffer */
893 		if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
894 			dev_dbg(&indio_dev->dev, "Buffer not started: no trigger\n");
895 		return -EINVAL;
896 	}
897 
898 	/* What scan mask do we actually have? */
899 	compound_mask = bitmap_zalloc(indio_dev->masklength, GFP_KERNEL);
900 	if (!compound_mask)
901 		return -ENOMEM;
902 
903 	scan_timestamp = false;
904 
905 	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
906 		if (buffer == remove_buffer)
907 			continue;
908 		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
909 			  indio_dev->masklength);
910 		scan_timestamp |= buffer->scan_timestamp;
911 	}
912 
913 	if (insert_buffer) {
914 		bitmap_or(compound_mask, compound_mask,
915 			  insert_buffer->scan_mask, indio_dev->masklength);
916 		scan_timestamp |= insert_buffer->scan_timestamp;
917 	}
918 
919 	if (indio_dev->available_scan_masks) {
920 		scan_mask = iio_scan_mask_match(indio_dev->available_scan_masks,
921 						indio_dev->masklength,
922 						compound_mask,
923 						strict_scanmask);
924 		bitmap_free(compound_mask);
925 		if (!scan_mask)
926 			return -EINVAL;
927 	} else {
928 		scan_mask = compound_mask;
929 	}
930 
931 	config->scan_bytes = iio_compute_scan_bytes(indio_dev,
932 						    scan_mask, scan_timestamp);
933 	config->scan_mask = scan_mask;
934 	config->scan_timestamp = scan_timestamp;
935 
936 	return 0;
937 }
938 
939 /**
940  * struct iio_demux_table - table describing demux memcpy ops
941  * @from:	index to copy from
942  * @to:		index to copy to
943  * @length:	how many bytes to copy
944  * @l:		list head used for management
945  */
946 struct iio_demux_table {
947 	unsigned int from;
948 	unsigned int to;
949 	unsigned int length;
950 	struct list_head l;
951 };
952 
953 static void iio_buffer_demux_free(struct iio_buffer *buffer)
954 {
955 	struct iio_demux_table *p, *q;
956 
957 	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
958 		list_del(&p->l);
959 		kfree(p);
960 	}
961 }
962 
963 static int iio_buffer_add_demux(struct iio_buffer *buffer,
964 				struct iio_demux_table **p, unsigned int in_loc,
965 				unsigned int out_loc,
966 				unsigned int length)
967 {
968 	if (*p && (*p)->from + (*p)->length == in_loc &&
969 	    (*p)->to + (*p)->length == out_loc) {
970 		(*p)->length += length;
971 	} else {
972 		*p = kmalloc(sizeof(**p), GFP_KERNEL);
973 		if (!(*p))
974 			return -ENOMEM;
975 		(*p)->from = in_loc;
976 		(*p)->to = out_loc;
977 		(*p)->length = length;
978 		list_add_tail(&(*p)->l, &buffer->demux_list);
979 	}
980 
981 	return 0;
982 }
983 
984 static int iio_buffer_update_demux(struct iio_dev *indio_dev,
985 				   struct iio_buffer *buffer)
986 {
987 	int ret, in_ind = -1, out_ind, length;
988 	unsigned int in_loc = 0, out_loc = 0;
989 	struct iio_demux_table *p = NULL;
990 
991 	/* Clear out any old demux */
992 	iio_buffer_demux_free(buffer);
993 	kfree(buffer->demux_bounce);
994 	buffer->demux_bounce = NULL;
995 
996 	/* First work out which scan mode we will actually have */
997 	if (bitmap_equal(indio_dev->active_scan_mask,
998 			 buffer->scan_mask,
999 			 indio_dev->masklength))
1000 		return 0;
1001 
1002 	/* Now we have the two masks, work from least sig and build up sizes */
1003 	for_each_set_bit(out_ind,
1004 			 buffer->scan_mask,
1005 			 indio_dev->masklength) {
1006 		in_ind = find_next_bit(indio_dev->active_scan_mask,
1007 				       indio_dev->masklength,
1008 				       in_ind + 1);
1009 		while (in_ind != out_ind) {
1010 			length = iio_storage_bytes_for_si(indio_dev, in_ind);
1011 			/* Make sure we are aligned */
1012 			in_loc = roundup(in_loc, length) + length;
1013 			in_ind = find_next_bit(indio_dev->active_scan_mask,
1014 					       indio_dev->masklength,
1015 					       in_ind + 1);
1016 		}
1017 		length = iio_storage_bytes_for_si(indio_dev, in_ind);
1018 		out_loc = roundup(out_loc, length);
1019 		in_loc = roundup(in_loc, length);
1020 		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
1021 		if (ret)
1022 			goto error_clear_mux_table;
1023 		out_loc += length;
1024 		in_loc += length;
1025 	}
1026 	/* Relies on scan_timestamp being last */
1027 	if (buffer->scan_timestamp) {
1028 		length = iio_storage_bytes_for_timestamp(indio_dev);
1029 		out_loc = roundup(out_loc, length);
1030 		in_loc = roundup(in_loc, length);
1031 		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
1032 		if (ret)
1033 			goto error_clear_mux_table;
1034 		out_loc += length;
1035 	}
1036 	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
1037 	if (!buffer->demux_bounce) {
1038 		ret = -ENOMEM;
1039 		goto error_clear_mux_table;
1040 	}
1041 	return 0;
1042 
1043 error_clear_mux_table:
1044 	iio_buffer_demux_free(buffer);
1045 
1046 	return ret;
1047 }
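
/*
 * Worked example of the table built above: the device captures channels
 * 0, 1 and 2 (2 bytes each) plus a timestamp, but this buffer only enabled
 * channels 0 and 2. The resulting demux copies source bytes 0-1 (ch0) to
 * output offset 0, source bytes 4-5 (ch2) to output offset 2 (skipping ch1),
 * and the 8-byte timestamp from source offset 8 to output offset 8, so the
 * bounce buffer ends up 16 bytes long.
 */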
1048 
1049 static int iio_update_demux(struct iio_dev *indio_dev)
1050 {
1051 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1052 	struct iio_buffer *buffer;
1053 	int ret;
1054 
1055 	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
1056 		ret = iio_buffer_update_demux(indio_dev, buffer);
1057 		if (ret < 0)
1058 			goto error_clear_mux_table;
1059 	}
1060 	return 0;
1061 
1062 error_clear_mux_table:
1063 	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list)
1064 		iio_buffer_demux_free(buffer);
1065 
1066 	return ret;
1067 }
1068 
1069 static int iio_enable_buffers(struct iio_dev *indio_dev,
1070 			      struct iio_device_config *config)
1071 {
1072 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1073 	struct iio_buffer *buffer, *tmp = NULL;
1074 	int ret;
1075 
1076 	indio_dev->active_scan_mask = config->scan_mask;
1077 	indio_dev->scan_timestamp = config->scan_timestamp;
1078 	indio_dev->scan_bytes = config->scan_bytes;
1079 	iio_dev_opaque->currentmode = config->mode;
1080 
1081 	iio_update_demux(indio_dev);
1082 
1083 	/* Wind up again */
1084 	if (indio_dev->setup_ops->preenable) {
1085 		ret = indio_dev->setup_ops->preenable(indio_dev);
1086 		if (ret) {
1087 			dev_dbg(&indio_dev->dev,
1088 				"Buffer not started: buffer preenable failed (%d)\n", ret);
1089 			goto err_undo_config;
1090 		}
1091 	}
1092 
1093 	if (indio_dev->info->update_scan_mode) {
1094 		ret = indio_dev->info
1095 			->update_scan_mode(indio_dev,
1096 					   indio_dev->active_scan_mask);
1097 		if (ret < 0) {
1098 			dev_dbg(&indio_dev->dev,
1099 				"Buffer not started: update scan mode failed (%d)\n",
1100 				ret);
1101 			goto err_run_postdisable;
1102 		}
1103 	}
1104 
1105 	if (indio_dev->info->hwfifo_set_watermark)
1106 		indio_dev->info->hwfifo_set_watermark(indio_dev,
1107 			config->watermark);
1108 
1109 	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
1110 		ret = iio_buffer_enable(buffer, indio_dev);
1111 		if (ret) {
1112 			tmp = buffer;
1113 			goto err_disable_buffers;
1114 		}
1115 	}
1116 
1117 	if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) {
1118 		ret = iio_trigger_attach_poll_func(indio_dev->trig,
1119 						   indio_dev->pollfunc);
1120 		if (ret)
1121 			goto err_disable_buffers;
1122 	}
1123 
1124 	if (indio_dev->setup_ops->postenable) {
1125 		ret = indio_dev->setup_ops->postenable(indio_dev);
1126 		if (ret) {
1127 			dev_dbg(&indio_dev->dev,
1128 				"Buffer not started: postenable failed (%d)\n", ret);
1129 			goto err_detach_pollfunc;
1130 		}
1131 	}
1132 
1133 	return 0;
1134 
1135 err_detach_pollfunc:
1136 	if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) {
1137 		iio_trigger_detach_poll_func(indio_dev->trig,
1138 					     indio_dev->pollfunc);
1139 	}
1140 err_disable_buffers:
1141 	buffer = list_prepare_entry(tmp, &iio_dev_opaque->buffer_list, buffer_list);
1142 	list_for_each_entry_continue_reverse(buffer, &iio_dev_opaque->buffer_list,
1143 					     buffer_list)
1144 		iio_buffer_disable(buffer, indio_dev);
1145 err_run_postdisable:
1146 	if (indio_dev->setup_ops->postdisable)
1147 		indio_dev->setup_ops->postdisable(indio_dev);
1148 err_undo_config:
1149 	iio_dev_opaque->currentmode = INDIO_DIRECT_MODE;
1150 	indio_dev->active_scan_mask = NULL;
1151 
1152 	return ret;
1153 }
1154 
1155 static int iio_disable_buffers(struct iio_dev *indio_dev)
1156 {
1157 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1158 	struct iio_buffer *buffer;
1159 	int ret = 0;
1160 	int ret2;
1161 
1162	/* Wind down existing buffers - if there are any */
1163 	if (list_empty(&iio_dev_opaque->buffer_list))
1164 		return 0;
1165 
1166 	/*
1167 	 * If things go wrong at some step in disable we still need to continue
1168	 * to perform the other steps, otherwise we leave the device in an
1169	 * inconsistent state. We return the error code for the first error we
1170 	 * encountered.
1171 	 */
1172 
1173 	if (indio_dev->setup_ops->predisable) {
1174 		ret2 = indio_dev->setup_ops->predisable(indio_dev);
1175 		if (ret2 && !ret)
1176 			ret = ret2;
1177 	}
1178 
1179 	if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) {
1180 		iio_trigger_detach_poll_func(indio_dev->trig,
1181 					     indio_dev->pollfunc);
1182 	}
1183 
1184 	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
1185 		ret2 = iio_buffer_disable(buffer, indio_dev);
1186 		if (ret2 && !ret)
1187 			ret = ret2;
1188 	}
1189 
1190 	if (indio_dev->setup_ops->postdisable) {
1191 		ret2 = indio_dev->setup_ops->postdisable(indio_dev);
1192 		if (ret2 && !ret)
1193 			ret = ret2;
1194 	}
1195 
1196 	iio_free_scan_mask(indio_dev, indio_dev->active_scan_mask);
1197 	indio_dev->active_scan_mask = NULL;
1198 	iio_dev_opaque->currentmode = INDIO_DIRECT_MODE;
1199 
1200 	return ret;
1201 }
1202 
1203 static int __iio_update_buffers(struct iio_dev *indio_dev,
1204 				struct iio_buffer *insert_buffer,
1205 				struct iio_buffer *remove_buffer)
1206 {
1207 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1208 	struct iio_device_config new_config;
1209 	int ret;
1210 
1211 	ret = iio_verify_update(indio_dev, insert_buffer, remove_buffer,
1212 				&new_config);
1213 	if (ret)
1214 		return ret;
1215 
1216 	if (insert_buffer) {
1217 		ret = iio_buffer_request_update(indio_dev, insert_buffer);
1218 		if (ret)
1219 			goto err_free_config;
1220 	}
1221 
1222 	ret = iio_disable_buffers(indio_dev);
1223 	if (ret)
1224 		goto err_deactivate_all;
1225 
1226 	if (remove_buffer)
1227 		iio_buffer_deactivate(remove_buffer);
1228 	if (insert_buffer)
1229 		iio_buffer_activate(indio_dev, insert_buffer);
1230 
1231 	/* If no buffers in list, we are done */
1232 	if (list_empty(&iio_dev_opaque->buffer_list))
1233 		return 0;
1234 
1235 	ret = iio_enable_buffers(indio_dev, &new_config);
1236 	if (ret)
1237 		goto err_deactivate_all;
1238 
1239 	return 0;
1240 
1241 err_deactivate_all:
1242 	/*
1243 	 * We've already verified that the config is valid earlier. If things go
1244 	 * wrong in either enable or disable the most likely reason is an IO
1245 	 * error from the device. In this case there is no good recovery
1246 	 * strategy. Just make sure to disable everything and leave the device
1247 	 * in a sane state.  With a bit of luck the device might come back to
1248 	 * life again later and userspace can try again.
1249 	 */
1250 	iio_buffer_deactivate_all(indio_dev);
1251 
1252 err_free_config:
1253 	iio_free_scan_mask(indio_dev, new_config.scan_mask);
1254 	return ret;
1255 }
1256 
1257 int iio_update_buffers(struct iio_dev *indio_dev,
1258 		       struct iio_buffer *insert_buffer,
1259 		       struct iio_buffer *remove_buffer)
1260 {
1261 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1262 
1263 	if (insert_buffer == remove_buffer)
1264 		return 0;
1265 
1266 	if (insert_buffer &&
1267 	    insert_buffer->direction == IIO_BUFFER_DIRECTION_OUT)
1268 		return -EINVAL;
1269 
1270 	guard(mutex)(&iio_dev_opaque->info_exist_lock);
1271 	guard(mutex)(&iio_dev_opaque->mlock);
1272 
1273 	if (insert_buffer && iio_buffer_is_active(insert_buffer))
1274 		insert_buffer = NULL;
1275 
1276 	if (remove_buffer && !iio_buffer_is_active(remove_buffer))
1277 		remove_buffer = NULL;
1278 
1279 	if (!insert_buffer && !remove_buffer)
1280 		return 0;
1281 
1282 	if (!indio_dev->info)
1283 		return -ENODEV;
1284 
1285 	return __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);
1286 }
1287 EXPORT_SYMBOL_GPL(iio_update_buffers);
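
/*
 * Example (in-kernel consumer sketch, illustrative only): a consumer that
 * owns its own struct iio_buffer enables and later disables it like this;
 * error handling trimmed and my_buffer is an assumption.
 *
 *	ret = iio_update_buffers(indio_dev, my_buffer, NULL);	// insert/enable
 *	...
 *	iio_update_buffers(indio_dev, NULL, my_buffer);		// remove/disable
 */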
1288 
1289 void iio_disable_all_buffers(struct iio_dev *indio_dev)
1290 {
1291 	iio_disable_buffers(indio_dev);
1292 	iio_buffer_deactivate_all(indio_dev);
1293 }
1294 
1295 static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
1296 			    const char *buf, size_t len)
1297 {
1298 	int ret;
1299 	bool requested_state;
1300 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
1301 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1302 	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
1303 	bool inlist;
1304 
1305 	ret = kstrtobool(buf, &requested_state);
1306 	if (ret < 0)
1307 		return ret;
1308 
1309 	guard(mutex)(&iio_dev_opaque->mlock);
1310 
1311 	/* Find out if it is in the list */
1312 	inlist = iio_buffer_is_active(buffer);
1313 	/* Already in desired state */
1314 	if (inlist == requested_state)
1315 		return len;
1316 
1317 	if (requested_state)
1318 		ret = __iio_update_buffers(indio_dev, buffer, NULL);
1319 	else
1320 		ret = __iio_update_buffers(indio_dev, NULL, buffer);
1321 	if (ret)
1322 		return ret;
1323 
1324 	return len;
1325 }
1326 
1327 static ssize_t watermark_show(struct device *dev, struct device_attribute *attr,
1328 			      char *buf)
1329 {
1330 	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
1331 
1332 	return sysfs_emit(buf, "%u\n", buffer->watermark);
1333 }
1334 
1335 static ssize_t watermark_store(struct device *dev,
1336 			       struct device_attribute *attr,
1337 			       const char *buf, size_t len)
1338 {
1339 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
1340 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1341 	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
1342 	unsigned int val;
1343 	int ret;
1344 
1345 	ret = kstrtouint(buf, 10, &val);
1346 	if (ret)
1347 		return ret;
1348 	if (!val)
1349 		return -EINVAL;
1350 
1351 	guard(mutex)(&iio_dev_opaque->mlock);
1352 
1353 	if (val > buffer->length)
1354 		return -EINVAL;
1355 
1356 	if (iio_buffer_is_active(buffer))
1357 		return -EBUSY;
1358 
1359 	buffer->watermark = val;
1360 
1361 	return len;
1362 }
1363 
1364 static ssize_t data_available_show(struct device *dev,
1365 				   struct device_attribute *attr, char *buf)
1366 {
1367 	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
1368 
1369 	return sysfs_emit(buf, "%zu\n", iio_buffer_data_available(buffer));
1370 }
1371 
1372 static ssize_t direction_show(struct device *dev,
1373 			      struct device_attribute *attr,
1374 			      char *buf)
1375 {
1376 	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
1377 
1378 	switch (buffer->direction) {
1379 	case IIO_BUFFER_DIRECTION_IN:
1380 		return sysfs_emit(buf, "in\n");
1381 	case IIO_BUFFER_DIRECTION_OUT:
1382 		return sysfs_emit(buf, "out\n");
1383 	default:
1384 		return -EINVAL;
1385 	}
1386 }
1387 
1388 static DEVICE_ATTR_RW(length);
1389 static struct device_attribute dev_attr_length_ro = __ATTR_RO(length);
1390 static DEVICE_ATTR_RW(enable);
1391 static DEVICE_ATTR_RW(watermark);
1392 static struct device_attribute dev_attr_watermark_ro = __ATTR_RO(watermark);
1393 static DEVICE_ATTR_RO(data_available);
1394 static DEVICE_ATTR_RO(direction);
1395 
1396 /*
1397  * When adding new attributes here, put them at the end, at least until
1398  * the code that handles the length/length_ro & watermark/watermark_ro
1399  * assignments gets cleaned up. Otherwise these can create some weird
1400  * duplicate attribute errors under some setups.
1401  */
1402 static struct attribute *iio_buffer_attrs[] = {
1403 	&dev_attr_length.attr,
1404 	&dev_attr_enable.attr,
1405 	&dev_attr_watermark.attr,
1406 	&dev_attr_data_available.attr,
1407 	&dev_attr_direction.attr,
1408 };
1409 
1410 #define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr)
1411 
1412 static struct attribute *iio_buffer_wrap_attr(struct iio_buffer *buffer,
1413 					      struct attribute *attr)
1414 {
1415 	struct device_attribute *dattr = to_dev_attr(attr);
1416 	struct iio_dev_attr *iio_attr;
1417 
1418 	iio_attr = kzalloc(sizeof(*iio_attr), GFP_KERNEL);
1419 	if (!iio_attr)
1420 		return NULL;
1421 
1422 	iio_attr->buffer = buffer;
1423 	memcpy(&iio_attr->dev_attr, dattr, sizeof(iio_attr->dev_attr));
1424 	iio_attr->dev_attr.attr.name = kstrdup_const(attr->name, GFP_KERNEL);
1425 	if (!iio_attr->dev_attr.attr.name) {
1426 		kfree(iio_attr);
1427 		return NULL;
1428 	}
1429 
1430 	sysfs_attr_init(&iio_attr->dev_attr.attr);
1431 
1432 	list_add(&iio_attr->l, &buffer->buffer_attr_list);
1433 
1434 	return &iio_attr->dev_attr.attr;
1435 }
1436 
1437 static int iio_buffer_register_legacy_sysfs_groups(struct iio_dev *indio_dev,
1438 						   struct attribute **buffer_attrs,
1439 						   int buffer_attrcount,
1440 						   int scan_el_attrcount)
1441 {
1442 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1443 	struct attribute_group *group;
1444 	struct attribute **attrs;
1445 	int ret;
1446 
1447 	attrs = kcalloc(buffer_attrcount + 1, sizeof(*attrs), GFP_KERNEL);
1448 	if (!attrs)
1449 		return -ENOMEM;
1450 
1451 	memcpy(attrs, buffer_attrs, buffer_attrcount * sizeof(*attrs));
1452 
1453 	group = &iio_dev_opaque->legacy_buffer_group;
1454 	group->attrs = attrs;
1455 	group->name = "buffer";
1456 
1457 	ret = iio_device_register_sysfs_group(indio_dev, group);
1458 	if (ret)
1459 		goto error_free_buffer_attrs;
1460 
1461 	attrs = kcalloc(scan_el_attrcount + 1, sizeof(*attrs), GFP_KERNEL);
1462 	if (!attrs) {
1463 		ret = -ENOMEM;
1464 		goto error_free_buffer_attrs;
1465 	}
1466 
1467 	memcpy(attrs, &buffer_attrs[buffer_attrcount],
1468 	       scan_el_attrcount * sizeof(*attrs));
1469 
1470 	group = &iio_dev_opaque->legacy_scan_el_group;
1471 	group->attrs = attrs;
1472 	group->name = "scan_elements";
1473 
1474 	ret = iio_device_register_sysfs_group(indio_dev, group);
1475 	if (ret)
1476 		goto error_free_scan_el_attrs;
1477 
1478 	return 0;
1479 
1480 error_free_scan_el_attrs:
1481 	kfree(iio_dev_opaque->legacy_scan_el_group.attrs);
1482 error_free_buffer_attrs:
1483 	kfree(iio_dev_opaque->legacy_buffer_group.attrs);
1484 
1485 	return ret;
1486 }
1487 
1488 static void iio_buffer_unregister_legacy_sysfs_groups(struct iio_dev *indio_dev)
1489 {
1490 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1491 
1492 	kfree(iio_dev_opaque->legacy_buffer_group.attrs);
1493 	kfree(iio_dev_opaque->legacy_scan_el_group.attrs);
1494 }
1495 
1496 static int iio_buffer_chrdev_release(struct inode *inode, struct file *filep)
1497 {
1498 	struct iio_dev_buffer_pair *ib = filep->private_data;
1499 	struct iio_dev *indio_dev = ib->indio_dev;
1500 	struct iio_buffer *buffer = ib->buffer;
1501 
1502 	wake_up(&buffer->pollq);
1503 
1504 	kfree(ib);
1505 	clear_bit(IIO_BUSY_BIT_POS, &buffer->flags);
1506 	iio_device_put(indio_dev);
1507 
1508 	return 0;
1509 }
1510 
1511 static const struct file_operations iio_buffer_chrdev_fileops = {
1512 	.owner = THIS_MODULE,
1513 	.llseek = noop_llseek,
1514 	.read = iio_buffer_read,
1515 	.write = iio_buffer_write,
1516 	.poll = iio_buffer_poll,
1517 	.release = iio_buffer_chrdev_release,
1518 };
1519 
1520 static long iio_device_buffer_getfd(struct iio_dev *indio_dev, unsigned long arg)
1521 {
1522 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1523 	int __user *ival = (int __user *)arg;
1524 	struct iio_dev_buffer_pair *ib;
1525 	struct iio_buffer *buffer;
1526 	int fd, idx, ret;
1527 
1528 	if (copy_from_user(&idx, ival, sizeof(idx)))
1529 		return -EFAULT;
1530 
1531 	if (idx >= iio_dev_opaque->attached_buffers_cnt)
1532 		return -ENODEV;
1533 
1534 	iio_device_get(indio_dev);
1535 
1536 	buffer = iio_dev_opaque->attached_buffers[idx];
1537 
1538 	if (test_and_set_bit(IIO_BUSY_BIT_POS, &buffer->flags)) {
1539 		ret = -EBUSY;
1540 		goto error_iio_dev_put;
1541 	}
1542 
1543 	ib = kzalloc(sizeof(*ib), GFP_KERNEL);
1544 	if (!ib) {
1545 		ret = -ENOMEM;
1546 		goto error_clear_busy_bit;
1547 	}
1548 
1549 	ib->indio_dev = indio_dev;
1550 	ib->buffer = buffer;
1551 
1552 	fd = anon_inode_getfd("iio:buffer", &iio_buffer_chrdev_fileops,
1553 			      ib, O_RDWR | O_CLOEXEC);
1554 	if (fd < 0) {
1555 		ret = fd;
1556 		goto error_free_ib;
1557 	}
1558 
1559 	if (copy_to_user(ival, &fd, sizeof(fd))) {
1560 		/*
1561 		 * "Leak" the fd, as there's not much we can do about this
1562 		 * anyway. 'fd' might have been closed already, as
1563 		 * anon_inode_getfd() called fd_install() on it, which made
1564 		 * it reachable by userland.
1565 		 *
1566 		 * Instead of allowing a malicious user to play tricks with
1567 		 * us, rely on the process exit path to do any necessary
1568 		 * cleanup, as in releasing the file, if still needed.
1569 		 */
1570 		return -EFAULT;
1571 	}
1572 
1573 	return 0;
1574 
1575 error_free_ib:
1576 	kfree(ib);
1577 error_clear_busy_bit:
1578 	clear_bit(IIO_BUSY_BIT_POS, &buffer->flags);
1579 error_iio_dev_put:
1580 	iio_device_put(indio_dev);
1581 	return ret;
1582 }
1583 
1584 static long iio_device_buffer_ioctl(struct iio_dev *indio_dev, struct file *filp,
1585 				    unsigned int cmd, unsigned long arg)
1586 {
1587 	switch (cmd) {
1588 	case IIO_BUFFER_GET_FD_IOCTL:
1589 		return iio_device_buffer_getfd(indio_dev, arg);
1590 	default:
1591 		return IIO_IOCTL_UNHANDLED;
1592 	}
1593 }
1594 
1595 static int __iio_buffer_alloc_sysfs_and_mask(struct iio_buffer *buffer,
1596 					     struct iio_dev *indio_dev,
1597 					     int index)
1598 {
1599 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1600 	struct iio_dev_attr *p;
1601 	const struct iio_dev_attr *id_attr;
1602 	struct attribute **attr;
1603 	int ret, i, attrn, scan_el_attrcount, buffer_attrcount;
1604 	const struct iio_chan_spec *channels;
1605 
1606 	buffer_attrcount = 0;
1607 	if (buffer->attrs) {
1608 		while (buffer->attrs[buffer_attrcount])
1609 			buffer_attrcount++;
1610 	}
1611 	buffer_attrcount += ARRAY_SIZE(iio_buffer_attrs);
1612 
1613 	scan_el_attrcount = 0;
1614 	INIT_LIST_HEAD(&buffer->buffer_attr_list);
1615 	channels = indio_dev->channels;
1616 	if (channels) {
1617		/* create the per-channel scan element attributes */
1618 		for (i = 0; i < indio_dev->num_channels; i++) {
1619 			if (channels[i].scan_index < 0)
1620 				continue;
1621 
1622 			/* Verify that sample bits fit into storage */
1623 			if (channels[i].scan_type.storagebits <
1624 			    channels[i].scan_type.realbits +
1625 			    channels[i].scan_type.shift) {
1626 				dev_err(&indio_dev->dev,
1627 					"Channel %d storagebits (%d) < shifted realbits (%d + %d)\n",
1628 					i, channels[i].scan_type.storagebits,
1629 					channels[i].scan_type.realbits,
1630 					channels[i].scan_type.shift);
1631 				ret = -EINVAL;
1632 				goto error_cleanup_dynamic;
1633 			}
1634 
1635 			ret = iio_buffer_add_channel_sysfs(indio_dev, buffer,
1636 							   &channels[i]);
1637 			if (ret < 0)
1638 				goto error_cleanup_dynamic;
1639 			scan_el_attrcount += ret;
1640 			if (channels[i].type == IIO_TIMESTAMP)
1641 				iio_dev_opaque->scan_index_timestamp =
1642 					channels[i].scan_index;
1643 		}
1644 		if (indio_dev->masklength && !buffer->scan_mask) {
1645 			buffer->scan_mask = bitmap_zalloc(indio_dev->masklength,
1646 							  GFP_KERNEL);
1647 			if (!buffer->scan_mask) {
1648 				ret = -ENOMEM;
1649 				goto error_cleanup_dynamic;
1650 			}
1651 		}
1652 	}
1653 
1654 	attrn = buffer_attrcount + scan_el_attrcount;
1655 	attr = kcalloc(attrn + 1, sizeof(*attr), GFP_KERNEL);
1656 	if (!attr) {
1657 		ret = -ENOMEM;
1658 		goto error_free_scan_mask;
1659 	}
1660 
1661 	memcpy(attr, iio_buffer_attrs, sizeof(iio_buffer_attrs));
1662 	if (!buffer->access->set_length)
1663 		attr[0] = &dev_attr_length_ro.attr;
1664 
1665 	if (buffer->access->flags & INDIO_BUFFER_FLAG_FIXED_WATERMARK)
1666 		attr[2] = &dev_attr_watermark_ro.attr;
1667 
1668 	if (buffer->attrs)
1669 		for (i = 0, id_attr = buffer->attrs[i];
1670 		     (id_attr = buffer->attrs[i]); i++)
1671 			attr[ARRAY_SIZE(iio_buffer_attrs) + i] =
1672 				(struct attribute *)&id_attr->dev_attr.attr;
1673 
1674 	buffer->buffer_group.attrs = attr;
1675 
1676 	for (i = 0; i < buffer_attrcount; i++) {
1677 		struct attribute *wrapped;
1678 
1679 		wrapped = iio_buffer_wrap_attr(buffer, attr[i]);
1680 		if (!wrapped) {
1681 			ret = -ENOMEM;
1682 			goto error_free_buffer_attrs;
1683 		}
1684 		attr[i] = wrapped;
1685 	}
1686 
1687 	attrn = 0;
1688 	list_for_each_entry(p, &buffer->buffer_attr_list, l)
1689 		attr[attrn++] = &p->dev_attr.attr;
1690 
1691 	buffer->buffer_group.name = kasprintf(GFP_KERNEL, "buffer%d", index);
1692 	if (!buffer->buffer_group.name) {
1693 		ret = -ENOMEM;
1694 		goto error_free_buffer_attrs;
1695 	}
1696 
1697 	ret = iio_device_register_sysfs_group(indio_dev, &buffer->buffer_group);
1698 	if (ret)
1699 		goto error_free_buffer_attr_group_name;
1700 
1701 	/* we only need to register the legacy groups for the first buffer */
1702 	if (index > 0)
1703 		return 0;
1704 
1705 	ret = iio_buffer_register_legacy_sysfs_groups(indio_dev, attr,
1706 						      buffer_attrcount,
1707 						      scan_el_attrcount);
1708 	if (ret)
1709 		goto error_free_buffer_attr_group_name;
1710 
1711 	return 0;
1712 
1713 error_free_buffer_attr_group_name:
1714 	kfree(buffer->buffer_group.name);
1715 error_free_buffer_attrs:
1716 	kfree(buffer->buffer_group.attrs);
1717 error_free_scan_mask:
1718 	bitmap_free(buffer->scan_mask);
1719 error_cleanup_dynamic:
1720 	iio_free_chan_devattr_list(&buffer->buffer_attr_list);
1721 
1722 	return ret;
1723 }
1724 
1725 static void __iio_buffer_free_sysfs_and_mask(struct iio_buffer *buffer,
1726 					     struct iio_dev *indio_dev,
1727 					     int index)
1728 {
1729 	if (index == 0)
1730 		iio_buffer_unregister_legacy_sysfs_groups(indio_dev);
1731 	bitmap_free(buffer->scan_mask);
1732 	kfree(buffer->buffer_group.name);
1733 	kfree(buffer->buffer_group.attrs);
1734 	iio_free_chan_devattr_list(&buffer->buffer_attr_list);
1735 }
1736 
1737 int iio_buffers_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
1738 {
1739 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1740 	const struct iio_chan_spec *channels;
1741 	struct iio_buffer *buffer;
1742 	int ret, i, idx;
1743 	size_t sz;
1744 
1745 	channels = indio_dev->channels;
1746 	if (channels) {
1747 		int ml = 0;
1748 
1749 		for (i = 0; i < indio_dev->num_channels; i++)
1750 			ml = max(ml, channels[i].scan_index + 1);
1751 		indio_dev->masklength = ml;
1752 	}
1753 
1754 	if (!iio_dev_opaque->attached_buffers_cnt)
1755 		return 0;
1756 
1757 	for (idx = 0; idx < iio_dev_opaque->attached_buffers_cnt; idx++) {
1758 		buffer = iio_dev_opaque->attached_buffers[idx];
1759 		ret = __iio_buffer_alloc_sysfs_and_mask(buffer, indio_dev, idx);
1760 		if (ret)
1761 			goto error_unwind_sysfs_and_mask;
1762 	}
1763 
1764 	sz = sizeof(*iio_dev_opaque->buffer_ioctl_handler);
1765 	iio_dev_opaque->buffer_ioctl_handler = kzalloc(sz, GFP_KERNEL);
1766 	if (!iio_dev_opaque->buffer_ioctl_handler) {
1767 		ret = -ENOMEM;
1768 		goto error_unwind_sysfs_and_mask;
1769 	}
1770 
1771 	iio_dev_opaque->buffer_ioctl_handler->ioctl = iio_device_buffer_ioctl;
1772 	iio_device_ioctl_handler_register(indio_dev,
1773 					  iio_dev_opaque->buffer_ioctl_handler);
1774 
1775 	return 0;
1776 
1777 error_unwind_sysfs_and_mask:
1778 	while (idx--) {
1779 		buffer = iio_dev_opaque->attached_buffers[idx];
1780 		__iio_buffer_free_sysfs_and_mask(buffer, indio_dev, idx);
1781 	}
1782 	return ret;
1783 }
1784 
1785 void iio_buffers_free_sysfs_and_mask(struct iio_dev *indio_dev)
1786 {
1787 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1788 	struct iio_buffer *buffer;
1789 	int i;
1790 
1791 	if (!iio_dev_opaque->attached_buffers_cnt)
1792 		return;
1793 
1794 	iio_device_ioctl_handler_unregister(iio_dev_opaque->buffer_ioctl_handler);
1795 	kfree(iio_dev_opaque->buffer_ioctl_handler);
1796 
1797 	for (i = iio_dev_opaque->attached_buffers_cnt - 1; i >= 0; i--) {
1798 		buffer = iio_dev_opaque->attached_buffers[i];
1799 		__iio_buffer_free_sysfs_and_mask(buffer, indio_dev, i);
1800 	}
1801 }
1802 
1803 /**
1804  * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
1805  * @indio_dev: the iio device
1806  * @mask: scan mask to be checked
1807  *
1808  * Return true if exactly one bit is set in the scan mask, false otherwise. It
1809  * can be used for devices where only one channel can be active for sampling at
1810  * a time.
1811  */
1812 bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
1813 				   const unsigned long *mask)
1814 {
1815 	return bitmap_weight(mask, indio_dev->masklength) == 1;
1816 }
1817 EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
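
/*
 * Example (hypothetical driver sketch): a device whose sequencer can only
 * sample one channel at a time plugs this helper into its buffer setup ops.
 * The callback names other than validate_scan_mask are assumptions.
 *
 *	static const struct iio_buffer_setup_ops my_setup_ops = {
 *		.validate_scan_mask = iio_validate_scan_mask_onehot,
 *		.postenable = my_postenable,
 *		.predisable = my_predisable,
 *	};
 *
 *	indio_dev->setup_ops = &my_setup_ops;
 */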
1818 
1819 static const void *iio_demux(struct iio_buffer *buffer,
1820 			     const void *datain)
1821 {
1822 	struct iio_demux_table *t;
1823 
1824 	if (list_empty(&buffer->demux_list))
1825 		return datain;
1826 	list_for_each_entry(t, &buffer->demux_list, l)
1827 		memcpy(buffer->demux_bounce + t->to,
1828 		       datain + t->from, t->length);
1829 
1830 	return buffer->demux_bounce;
1831 }
1832 
1833 static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
1834 {
1835 	const void *dataout = iio_demux(buffer, data);
1836 	int ret;
1837 
1838 	ret = buffer->access->store_to(buffer, dataout);
1839 	if (ret)
1840 		return ret;
1841 
1842 	/*
1843 	 * We can't just test for watermark to decide if we wake the poll queue
1844 	 * because read may request less samples than the watermark.
1845 	 */
1846 	wake_up_interruptible_poll(&buffer->pollq, EPOLLIN | EPOLLRDNORM);
1847 	return 0;
1848 }
1849 
1850 /**
1851  * iio_push_to_buffers() - push to a registered buffer.
1852  * @indio_dev:		iio_dev structure for device.
1853  * @data:		Full scan.
1854  */
1855 int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
1856 {
1857 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1858 	int ret;
1859 	struct iio_buffer *buf;
1860 
1861 	list_for_each_entry(buf, &iio_dev_opaque->buffer_list, buffer_list) {
1862 		ret = iio_push_to_buffer(buf, data);
1863 		if (ret < 0)
1864 			return ret;
1865 	}
1866 
1867 	return 0;
1868 }
1869 EXPORT_SYMBOL_GPL(iio_push_to_buffers);
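
/*
 * Example (hypothetical driver sketch): a triggered-buffer handler filling
 * one scan and pushing it to every attached buffer. The scan layout and
 * my_read_channels() are assumptions; drivers that also push a timestamp
 * typically use iio_push_to_buffers_with_timestamp() instead.
 *
 *	static irqreturn_t my_trigger_handler(int irq, void *p)
 *	{
 *		struct iio_poll_func *pf = p;
 *		struct iio_dev *indio_dev = pf->indio_dev;
 *		u16 scan[8];
 *
 *		my_read_channels(indio_dev, scan);
 *		iio_push_to_buffers(indio_dev, scan);
 *
 *		iio_trigger_notify_done(indio_dev->trig);
 *		return IRQ_HANDLED;
 *	}
 */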
1870 
1871 /**
1872  * iio_push_to_buffers_with_ts_unaligned() - push to registered buffer,
1873  *    no alignment or space requirements.
1874  * @indio_dev:		iio_dev structure for device.
1875  * @data:		channel data excluding the timestamp.
1876  * @data_sz:		size of data.
1877  * @timestamp:		timestamp for the sample data.
1878  *
1879  * This special variant of iio_push_to_buffers_with_timestamp() does
1880  * not require space for the timestamp, or 8 byte alignment of data.
1881  * It does however require an allocation on first call and additional
1882  * copies on all calls, so should be avoided if possible.
1883  */
1884 int iio_push_to_buffers_with_ts_unaligned(struct iio_dev *indio_dev,
1885 					  const void *data,
1886 					  size_t data_sz,
1887 					  int64_t timestamp)
1888 {
1889 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1890 
1891 	/*
1892 	 * Conservative estimate - we can always safely copy the minimum
1893 	 * of either the data provided or the length of the destination buffer.
1894 	 * This relaxed limit allows the calling drivers to be lax about
1895 	 * tracking the size of the data they are pushing, at the cost of
1896 	 * unnecessary copying of padding.
1897 	 */
1898 	data_sz = min_t(size_t, indio_dev->scan_bytes, data_sz);
1899	if (iio_dev_opaque->bounce_buffer_size != indio_dev->scan_bytes) {
1900 		void *bb;
1901 
1902 		bb = devm_krealloc(&indio_dev->dev,
1903 				   iio_dev_opaque->bounce_buffer,
1904 				   indio_dev->scan_bytes, GFP_KERNEL);
1905 		if (!bb)
1906 			return -ENOMEM;
1907 		iio_dev_opaque->bounce_buffer = bb;
1908 		iio_dev_opaque->bounce_buffer_size = indio_dev->scan_bytes;
1909 	}
1910 	memcpy(iio_dev_opaque->bounce_buffer, data, data_sz);
1911 	return iio_push_to_buffers_with_timestamp(indio_dev,
1912 						  iio_dev_opaque->bounce_buffer,
1913 						  timestamp);
1914 }
1915 EXPORT_SYMBOL_GPL(iio_push_to_buffers_with_ts_unaligned);
1916 
1917 /**
1918  * iio_buffer_release() - Free a buffer's resources
1919  * @ref: Pointer to the kref embedded in the iio_buffer struct
1920  *
1921  * This function is called when the last reference to the buffer has been
1922  * dropped. It will typically free all resources allocated by the buffer. Do not
1923  * call this function manually; always use iio_buffer_put() when done using a
1924  * buffer.
1925  */
1926 static void iio_buffer_release(struct kref *ref)
1927 {
1928 	struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);
1929 
1930 	buffer->access->release(buffer);
1931 }
1932 
1933 /**
1934  * iio_buffer_get() - Grab a reference to the buffer
1935  * @buffer: The buffer to grab a reference for, may be NULL
1936  *
1937  * Returns the pointer to the buffer that was passed into the function.
1938  */
1939 struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
1940 {
1941 	if (buffer)
1942 		kref_get(&buffer->ref);
1943 
1944 	return buffer;
1945 }
1946 EXPORT_SYMBOL_GPL(iio_buffer_get);
1947 
1948 /**
1949  * iio_buffer_put() - Release the reference to the buffer
1950  * @buffer: The buffer to release the reference for, may be NULL
1951  */
1952 void iio_buffer_put(struct iio_buffer *buffer)
1953 {
1954 	if (buffer)
1955 		kref_put(&buffer->ref, iio_buffer_release);
1956 }
1957 EXPORT_SYMBOL_GPL(iio_buffer_put);
1958 
1959 /**
1960  * iio_device_attach_buffer - Attach a buffer to an IIO device
1961  * @indio_dev: The device the buffer should be attached to
1962  * @buffer: The buffer to attach to the device
1963  *
1964  * Return: 0 on success, a negative error code on failure.
1965  *
1966  * This function attaches a buffer to an IIO device. The buffer stays attached to
1967  * the device until the device is freed. For legacy reasons, the first attached
1968  * buffer will also be assigned to 'indio_dev->buffer'.
1969  * The array allocated here will be freed via the iio_device_detach_buffers()
1970  * call, which is handled by iio_device_free().
1971  */
1972 int iio_device_attach_buffer(struct iio_dev *indio_dev,
1973 			     struct iio_buffer *buffer)
1974 {
1975 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1976 	struct iio_buffer **new, **old = iio_dev_opaque->attached_buffers;
1977 	unsigned int cnt = iio_dev_opaque->attached_buffers_cnt;
1978 
1979 	cnt++;
1980 
1981 	new = krealloc(old, sizeof(*new) * cnt, GFP_KERNEL);
1982 	if (!new)
1983 		return -ENOMEM;
1984 	iio_dev_opaque->attached_buffers = new;
1985 
1986 	buffer = iio_buffer_get(buffer);
1987 
1988 	/* first buffer is legacy; attach it to the IIO device directly */
1989 	if (!indio_dev->buffer)
1990 		indio_dev->buffer = buffer;
1991 
1992 	iio_dev_opaque->attached_buffers[cnt - 1] = buffer;
1993 	iio_dev_opaque->attached_buffers_cnt = cnt;
1994 
1995 	return 0;
1996 }
1997 EXPORT_SYMBOL_GPL(iio_device_attach_buffer);
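
/*
 * Example (hypothetical driver sketch, lifetime handling trimmed): attaching
 * a buffer during probe. In practice most drivers go through backend helpers
 * (e.g. the kfifo or DMA setup helpers) which end up calling this function.
 * my_allocate_buffer() and my_buffer_put() are assumptions.
 *
 *	struct iio_buffer *buffer = my_allocate_buffer();
 *
 *	if (!buffer)
 *		return -ENOMEM;
 *	ret = iio_device_attach_buffer(indio_dev, buffer);
 *	my_buffer_put(buffer);		// attach took its own reference
 *	if (ret)
 *		return ret;
 */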
1998