// SPDX-License-Identifier: GPL-2.0-only
/* The industrial I/O core
 *
 * Copyright (c) 2008 Jonathan Cameron
 *
 * Handling of buffer allocation / resizing.
 *
 * Things to look at here.
 * - Better memory allocation techniques?
 * - Alternative access techniques?
 */
#include <linux/anon_inodes.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/device.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/sched/signal.h>

#include <linux/iio/iio.h>
#include <linux/iio/iio-opaque.h>
#include "iio_core.h"
#include "iio_core_trigger.h"
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
#include <linux/iio/buffer_impl.h>

static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};

static bool iio_buffer_is_active(struct iio_buffer *buf)
{
	return !list_empty(&buf->buffer_list);
}

static size_t iio_buffer_data_available(struct iio_buffer *buf)
{
	return buf->access->data_available(buf);
}

static int iio_buffer_flush_hwfifo(struct iio_dev *indio_dev,
				   struct iio_buffer *buf, size_t required)
{
	if (!indio_dev->info->hwfifo_flush_to_buffer)
		return -ENODEV;

	return indio_dev->info->hwfifo_flush_to_buffer(indio_dev, required);
}

static bool iio_buffer_ready(struct iio_dev *indio_dev, struct iio_buffer *buf,
			     size_t to_wait, int to_flush)
{
	size_t avail;
	int flushed = 0;

	/* wakeup if the device was unregistered */
	if (!indio_dev->info)
		return true;

	/* drain the buffer if it was disabled */
	if (!iio_buffer_is_active(buf)) {
		to_wait = min_t(size_t, to_wait, 1);
		to_flush = 0;
	}

	avail = iio_buffer_data_available(buf);

	if (avail >= to_wait) {
		/* force a flush for non-blocking reads */
		if (!to_wait && avail < to_flush)
			iio_buffer_flush_hwfifo(indio_dev, buf,
						to_flush - avail);
		return true;
	}

	if (to_flush)
		flushed = iio_buffer_flush_hwfifo(indio_dev, buf,
						  to_wait - avail);
	if (flushed <= 0)
		return false;

	if (avail + flushed >= to_wait)
		return true;

	return false;
}

/**
 * iio_buffer_read() - chrdev read for buffer access
 * @filp:	File structure pointer for the char device
 * @buf:	Destination buffer for iio buffer read
 * @n:		First n bytes to read
 * @f_ps:	Long offset provided by the user as a seek position
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 *
 * Return: negative error code on failure, 0 to signal end of file,
 *	   or the number of bytes read
 **/
static ssize_t iio_buffer_read(struct file *filp, char __user *buf,
			       size_t n, loff_t *f_ps)
{
	struct iio_dev_buffer_pair *ib = filp->private_data;
	struct iio_buffer *rb = ib->buffer;
	struct iio_dev *indio_dev = ib->indio_dev;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	size_t datum_size;
	size_t to_wait;
	int ret = 0;

	if (!indio_dev->info)
		return -ENODEV;

	if (!rb || !rb->access->read)
		return -EINVAL;

	if (rb->direction != IIO_BUFFER_DIRECTION_IN)
		return -EPERM;

	datum_size = rb->bytes_per_datum;

	/*
	 * If datum_size is 0 there will never be anything to read from the
	 * buffer, so signal end of file now.
	 */
	if (!datum_size)
		return 0;

	if (filp->f_flags & O_NONBLOCK)
		to_wait = 0;
	else
		to_wait = min_t(size_t, n / datum_size, rb->watermark);

	add_wait_queue(&rb->pollq, &wait);
	do {
		if (!indio_dev->info) {
			ret = -ENODEV;
			break;
		}

		if (!iio_buffer_ready(indio_dev, rb, to_wait, n / datum_size)) {
			if (signal_pending(current)) {
				ret = -ERESTARTSYS;
				break;
			}

			wait_woken(&wait, TASK_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
			continue;
		}

		ret = rb->access->read(rb, n, buf);
		if (ret == 0 && (filp->f_flags & O_NONBLOCK))
			ret = -EAGAIN;
	} while (ret == 0);
	remove_wait_queue(&rb->pollq, &wait);

	return ret;
}
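
/*
 * Illustrative only: a userspace consumer of the read() path above might look
 * like the following sketch (the device node name and the process_scans()
 * helper are assumptions, not part of this file). Without O_NONBLOCK it
 * sleeps until the watermark's worth of bytes_per_datum-sized scans arrive.
 *
 *	int fd = open("/dev/iio:device0", O_RDONLY);
 *	char scan[64];	// must be >= bytes_per_datum of the enabled scan
 *	ssize_t ret = read(fd, scan, sizeof(scan));
 *	if (ret > 0)
 *		process_scans(scan, ret);	// hypothetical helper
 *	close(fd);
 */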

static size_t iio_buffer_space_available(struct iio_buffer *buf)
{
	if (buf->access->space_available)
		return buf->access->space_available(buf);

	return SIZE_MAX;
}

static ssize_t iio_buffer_write(struct file *filp, const char __user *buf,
				size_t n, loff_t *f_ps)
{
	struct iio_dev_buffer_pair *ib = filp->private_data;
	struct iio_buffer *rb = ib->buffer;
	struct iio_dev *indio_dev = ib->indio_dev;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	int ret = 0;
	size_t written;

	if (!indio_dev->info)
		return -ENODEV;

	if (!rb || !rb->access->write)
		return -EINVAL;

	if (rb->direction != IIO_BUFFER_DIRECTION_OUT)
		return -EPERM;

	written = 0;
	add_wait_queue(&rb->pollq, &wait);
	do {
		if (!indio_dev->info) {
			/*
			 * Break rather than return so the wait queue entry
			 * added above is always removed again.
			 */
			ret = -ENODEV;
			break;
		}

		if (!iio_buffer_space_available(rb)) {
			if (signal_pending(current)) {
				ret = -ERESTARTSYS;
				break;
			}

			if (filp->f_flags & O_NONBLOCK) {
				if (!written)
					ret = -EAGAIN;
				break;
			}

			wait_woken(&wait, TASK_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
			continue;
		}

		ret = rb->access->write(rb, n - written, buf + written);
		if (ret < 0)
			break;

		written += ret;
	} while (written != n);
	remove_wait_queue(&rb->pollq, &wait);

	return ret < 0 ? ret : written;
}

/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 * @filp:	File structure pointer for device access
 * @wait:	Poll table structure pointer for which the driver adds
 *		a wait queue
 *
 * Return: (EPOLLIN | EPOLLRDNORM) if data is available for reading,
 *	   (EPOLLOUT | EPOLLWRNORM) if space is available for writing,
 *	   or 0 for other cases
 */
static __poll_t iio_buffer_poll(struct file *filp,
				struct poll_table_struct *wait)
{
	struct iio_dev_buffer_pair *ib = filp->private_data;
	struct iio_buffer *rb = ib->buffer;
	struct iio_dev *indio_dev = ib->indio_dev;

	if (!indio_dev->info || !rb)
		return 0;

	poll_wait(filp, &rb->pollq, wait);

	switch (rb->direction) {
	case IIO_BUFFER_DIRECTION_IN:
		if (iio_buffer_ready(indio_dev, rb, rb->watermark, 0))
			return EPOLLIN | EPOLLRDNORM;
		break;
	case IIO_BUFFER_DIRECTION_OUT:
		if (iio_buffer_space_available(rb))
			return EPOLLOUT | EPOLLWRNORM;
		break;
	}

	return 0;
}
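
/*
 * Illustrative only: poll() lets userspace wait for the watermark without
 * issuing a blocking read. A minimal sketch, reusing the fd and scan buffer
 * from the read example above:
 *
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *
 *	if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
 *		read(fd, scan, sizeof(scan));	// now returns promptly
 */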

ssize_t iio_buffer_read_wrapper(struct file *filp, char __user *buf,
				size_t n, loff_t *f_ps)
{
	struct iio_dev_buffer_pair *ib = filp->private_data;
	struct iio_buffer *rb = ib->buffer;

	/* check if buffer was opened through new API */
	if (test_bit(IIO_BUSY_BIT_POS, &rb->flags))
		return -EBUSY;

	return iio_buffer_read(filp, buf, n, f_ps);
}

ssize_t iio_buffer_write_wrapper(struct file *filp, const char __user *buf,
				 size_t n, loff_t *f_ps)
{
	struct iio_dev_buffer_pair *ib = filp->private_data;
	struct iio_buffer *rb = ib->buffer;

	/* check if buffer was opened through new API */
	if (test_bit(IIO_BUSY_BIT_POS, &rb->flags))
		return -EBUSY;

	return iio_buffer_write(filp, buf, n, f_ps);
}

__poll_t iio_buffer_poll_wrapper(struct file *filp,
				 struct poll_table_struct *wait)
{
	struct iio_dev_buffer_pair *ib = filp->private_data;
	struct iio_buffer *rb = ib->buffer;

	/* check if buffer was opened through new API */
	if (test_bit(IIO_BUSY_BIT_POS, &rb->flags))
		return 0;

	return iio_buffer_poll(filp, wait);
}

/**
 * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
 * @indio_dev: The IIO device
 *
 * Wakes up the waitqueue of each attached buffer, as used for poll().
 * Should usually be called when the device is unregistered.
 */
void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer;
	unsigned int i;

	for (i = 0; i < iio_dev_opaque->attached_buffers_cnt; i++) {
		buffer = iio_dev_opaque->attached_buffers[i];
		wake_up(&buffer->pollq);
	}
}

int iio_pop_from_buffer(struct iio_buffer *buffer, void *data)
{
	if (!buffer || !buffer->access || !buffer->access->remove_from)
		return -EINVAL;

	return buffer->access->remove_from(buffer, data);
}
EXPORT_SYMBOL_GPL(iio_pop_from_buffer);

void iio_buffer_init(struct iio_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->demux_list);
	INIT_LIST_HEAD(&buffer->buffer_list);
	init_waitqueue_head(&buffer->pollq);
	kref_init(&buffer->ref);
	if (!buffer->watermark)
		buffer->watermark = 1;
}
EXPORT_SYMBOL(iio_buffer_init);

void iio_device_detach_buffers(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer;
	unsigned int i;

	for (i = 0; i < iio_dev_opaque->attached_buffers_cnt; i++) {
		buffer = iio_dev_opaque->attached_buffers[i];
		iio_buffer_put(buffer);
	}

	kfree(iio_dev_opaque->attached_buffers);
}

352 
353 static ssize_t iio_show_scan_index(struct device *dev,
354 				   struct device_attribute *attr,
355 				   char *buf)
356 {
357 	return sysfs_emit(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
358 }
359 
360 static ssize_t iio_show_fixed_type(struct device *dev,
361 				   struct device_attribute *attr,
362 				   char *buf)
363 {
364 	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
365 	u8 type = this_attr->c->scan_type.endianness;
366 
367 	if (type == IIO_CPU) {
368 #ifdef __LITTLE_ENDIAN
369 		type = IIO_LE;
370 #else
371 		type = IIO_BE;
372 #endif
373 	}
374 	if (this_attr->c->scan_type.repeat > 1)
375 		return sysfs_emit(buf, "%s:%c%d/%dX%d>>%u\n",
376 		       iio_endian_prefix[type],
377 		       this_attr->c->scan_type.sign,
378 		       this_attr->c->scan_type.realbits,
379 		       this_attr->c->scan_type.storagebits,
380 		       this_attr->c->scan_type.repeat,
381 		       this_attr->c->scan_type.shift);
382 	else
383 		return sysfs_emit(buf, "%s:%c%d/%d>>%u\n",
384 		       iio_endian_prefix[type],
385 		       this_attr->c->scan_type.sign,
386 		       this_attr->c->scan_type.realbits,
387 		       this_attr->c->scan_type.storagebits,
388 		       this_attr->c->scan_type.shift);
389 }
390 
391 static ssize_t iio_scan_el_show(struct device *dev,
392 				struct device_attribute *attr,
393 				char *buf)
394 {
395 	int ret;
396 	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
397 
398 	/* Ensure ret is 0 or 1. */
399 	ret = !!test_bit(to_iio_dev_attr(attr)->address,
400 		       buffer->scan_mask);
401 
402 	return sysfs_emit(buf, "%d\n", ret);
403 }

/*
 * Note: NULL is used as the error indicator, since a NULL mask can never be
 * a valid match.
 */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
						unsigned int masklength,
						const unsigned long *mask,
						bool strict)
{
	if (bitmap_empty(mask, masklength))
		return NULL;
	while (*av_masks) {
		if (strict) {
			if (bitmap_equal(mask, av_masks, masklength))
				return av_masks;
		} else {
			if (bitmap_subset(mask, av_masks, masklength))
				return av_masks;
		}
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}
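
/*
 * Illustrative only: drivers whose hardware can only capture certain channel
 * combinations publish them through indio_dev->available_scan_masks as a
 * zero-terminated array of bitmaps, which iio_scan_mask_match() walks above.
 * A sketch for a hypothetical device that can sample either channel 0 alone
 * or channels 0-2 together (assuming masklength <= BITS_PER_LONG):
 *
 *	static const unsigned long foo_scan_masks[] = {
 *		BIT(0),				// channel 0 only
 *		BIT(0) | BIT(1) | BIT(2),	// channels 0, 1 and 2
 *		0,				// terminator
 *	};
 *	indio_dev->available_scan_masks = foo_scan_masks;
 */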

static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
	const unsigned long *mask)
{
	if (!indio_dev->setup_ops->validate_scan_mask)
		return true;

	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
}

/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * Note that at this point we have no way of knowing what other
 * buffers might request, hence this code only verifies that the
 * individual buffer's request is plausible.
 */
static int iio_scan_mask_set(struct iio_dev *indio_dev,
		      struct iio_buffer *buffer, int bit)
{
	const unsigned long *mask;
	unsigned long *trialmask;

	if (!indio_dev->masklength) {
		WARN(1, "Trying to set scanmask prior to registering buffer\n");
		return -EINVAL;
	}

	trialmask = bitmap_alloc(indio_dev->masklength, GFP_KERNEL);
	if (!trialmask)
		return -ENOMEM;
	bitmap_copy(trialmask, buffer->scan_mask, indio_dev->masklength);
	set_bit(bit, trialmask);

	if (!iio_validate_scan_mask(indio_dev, trialmask))
		goto err_invalid_mask;

	if (indio_dev->available_scan_masks) {
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   indio_dev->masklength,
					   trialmask, false);
		if (!mask)
			goto err_invalid_mask;
	}
	bitmap_copy(buffer->scan_mask, trialmask, indio_dev->masklength);

	bitmap_free(trialmask);

	return 0;

err_invalid_mask:
	bitmap_free(trialmask);
	return -EINVAL;
}

static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
{
	clear_bit(bit, buffer->scan_mask);
	return 0;
}

static int iio_scan_mask_query(struct iio_dev *indio_dev,
			       struct iio_buffer *buffer, int bit)
{
	if (bit > indio_dev->masklength)
		return -EINVAL;

	if (!buffer->scan_mask)
		return 0;

	/* Ensure return value is 0 or 1. */
	return !!test_bit(bit, buffer->scan_mask);
}

static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	struct iio_buffer *buffer = this_attr->buffer;

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;
	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		goto error_ret;
	if (!state && ret) {
		ret = iio_scan_mask_clear(buffer, this_attr->address);
		if (ret)
			goto error_ret;
	} else if (state && !ret) {
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
		if (ret)
			goto error_ret;
	}

error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret < 0 ? ret : len;
}

static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;

	return sysfs_emit(buf, "%d\n", buffer->scan_timestamp);
}

static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
	bool state;

	ret = strtobool(buf, &state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(buffer)) {
		ret = -EBUSY;
		goto error_ret;
	}
	buffer->scan_timestamp = state;
error_ret:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					struct iio_buffer *buffer,
					const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;

	ret = __iio_add_chan_devattr("index",
				     chan,
				     &iio_show_scan_index,
				     NULL,
				     0,
				     IIO_SEPARATE,
				     &indio_dev->dev,
				     buffer,
				     &buffer->buffer_attr_list);
	if (ret)
		return ret;
	attrcount++;
	ret = __iio_add_chan_devattr("type",
				     chan,
				     &iio_show_fixed_type,
				     NULL,
				     0,
				     0,
				     &indio_dev->dev,
				     buffer,
				     &buffer->buffer_attr_list);
	if (ret)
		return ret;
	attrcount++;
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     buffer,
					     &buffer->buffer_attr_list);
	else
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index,
					     0,
					     &indio_dev->dev,
					     buffer,
					     &buffer->buffer_attr_list);
	if (ret)
		return ret;
	attrcount++;

	return attrcount;
}

static ssize_t iio_buffer_read_length(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;

	return sysfs_emit(buf, "%d\n", buffer->length);
}

static ssize_t iio_buffer_write_length(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;

	if (val == buffer->length)
		return len;

	mutex_lock(&indio_dev->mlock);
	if (iio_buffer_is_active(buffer)) {
		ret = -EBUSY;
	} else {
		buffer->access->set_length(buffer, val);
		ret = 0;
	}
	if (ret)
		goto out;
	if (buffer->length && buffer->length < buffer->watermark)
		buffer->watermark = buffer->length;
out:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static ssize_t iio_buffer_show_enable(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;

	return sysfs_emit(buf, "%d\n", iio_buffer_is_active(buffer));
}

static unsigned int iio_storage_bytes_for_si(struct iio_dev *indio_dev,
					     unsigned int scan_index)
{
	const struct iio_chan_spec *ch;
	unsigned int bytes;

	ch = iio_find_channel_from_si(indio_dev, scan_index);
	bytes = ch->scan_type.storagebits / 8;
	if (ch->scan_type.repeat > 1)
		bytes *= ch->scan_type.repeat;
	return bytes;
}

static unsigned int iio_storage_bytes_for_timestamp(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	return iio_storage_bytes_for_si(indio_dev,
					iio_dev_opaque->scan_index_timestamp);
}

static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
				const unsigned long *mask, bool timestamp)
{
	unsigned int bytes = 0;
	int length, i, largest = 0;

	/* How much space will the demuxed element take? */
	for_each_set_bit(i, mask, indio_dev->masklength) {
		length = iio_storage_bytes_for_si(indio_dev, i);
		bytes = ALIGN(bytes, length);
		bytes += length;
		largest = max(largest, length);
	}

	if (timestamp) {
		length = iio_storage_bytes_for_timestamp(indio_dev);
		bytes = ALIGN(bytes, length);
		bytes += length;
		largest = max(largest, length);
	}

	bytes = ALIGN(bytes, largest);
	return bytes;
}
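
/*
 * Worked example for iio_compute_scan_bytes(), assuming a hypothetical scan
 * of two 16-bit channels (storagebits = 16) plus the 64-bit timestamp:
 *
 *	channel 0: ALIGN(0, 2) = 0,  bytes = 0 + 2 = 2
 *	channel 1: ALIGN(2, 2) = 2,  bytes = 2 + 2 = 4
 *	timestamp: ALIGN(4, 8) = 8,  bytes = 8 + 8 = 16
 *	final:     ALIGN(16, 8) = 16 bytes per scan
 *
 * Note the 4 bytes of padding inserted before the timestamp so that every
 * element stays naturally aligned within the scan.
 */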

static void iio_buffer_activate(struct iio_dev *indio_dev,
	struct iio_buffer *buffer)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	iio_buffer_get(buffer);
	list_add(&buffer->buffer_list, &iio_dev_opaque->buffer_list);
}

static void iio_buffer_deactivate(struct iio_buffer *buffer)
{
	list_del_init(&buffer->buffer_list);
	wake_up_interruptible(&buffer->pollq);
	iio_buffer_put(buffer);
}

static void iio_buffer_deactivate_all(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer, *_buffer;

	list_for_each_entry_safe(buffer, _buffer,
			&iio_dev_opaque->buffer_list, buffer_list)
		iio_buffer_deactivate(buffer);
}

static int iio_buffer_enable(struct iio_buffer *buffer,
	struct iio_dev *indio_dev)
{
	if (!buffer->access->enable)
		return 0;
	return buffer->access->enable(buffer, indio_dev);
}

static int iio_buffer_disable(struct iio_buffer *buffer,
	struct iio_dev *indio_dev)
{
	if (!buffer->access->disable)
		return 0;
	return buffer->access->disable(buffer, indio_dev);
}

static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
	struct iio_buffer *buffer)
{
	unsigned int bytes;

	if (!buffer->access->set_bytes_per_datum)
		return;

	bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
		buffer->scan_timestamp);

	buffer->access->set_bytes_per_datum(buffer, bytes);
}

static int iio_buffer_request_update(struct iio_dev *indio_dev,
	struct iio_buffer *buffer)
{
	int ret;

	iio_buffer_update_bytes_per_datum(indio_dev, buffer);
	if (buffer->access->request_update) {
		ret = buffer->access->request_update(buffer);
		if (ret) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: buffer parameter update failed (%d)\n",
				ret);
			return ret;
		}
	}

	return 0;
}

static void iio_free_scan_mask(struct iio_dev *indio_dev,
	const unsigned long *mask)
{
	/* If the mask is dynamically allocated free it, otherwise do nothing */
	if (!indio_dev->available_scan_masks)
		bitmap_free(mask);
}

struct iio_device_config {
	unsigned int mode;
	unsigned int watermark;
	const unsigned long *scan_mask;
	unsigned int scan_bytes;
	bool scan_timestamp;
};

static int iio_verify_update(struct iio_dev *indio_dev,
	struct iio_buffer *insert_buffer, struct iio_buffer *remove_buffer,
	struct iio_device_config *config)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	unsigned long *compound_mask;
	const unsigned long *scan_mask;
	bool strict_scanmask = false;
	struct iio_buffer *buffer;
	bool scan_timestamp;
	unsigned int modes;

	if (insert_buffer &&
	    bitmap_empty(insert_buffer->scan_mask, indio_dev->masklength)) {
		dev_dbg(&indio_dev->dev,
			"At least one scan element must be enabled first\n");
		return -EINVAL;
	}

	memset(config, 0, sizeof(*config));
	config->watermark = ~0;

	/*
	 * If there is just one buffer and we are removing it there is nothing
	 * to verify.
	 */
	if (remove_buffer && !insert_buffer &&
	    list_is_singular(&iio_dev_opaque->buffer_list))
		return 0;

	modes = indio_dev->modes;

	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
		if (buffer == remove_buffer)
			continue;
		modes &= buffer->access->modes;
		config->watermark = min(config->watermark, buffer->watermark);
	}

	if (insert_buffer) {
		modes &= insert_buffer->access->modes;
		config->watermark = min(config->watermark,
			insert_buffer->watermark);
	}

	/* Definitely possible for devices to support both of these. */
	if ((modes & INDIO_BUFFER_TRIGGERED) && indio_dev->trig) {
		config->mode = INDIO_BUFFER_TRIGGERED;
	} else if (modes & INDIO_BUFFER_HARDWARE) {
		/*
		 * Keep things simple for now and only allow a single buffer to
		 * be connected in hardware mode.
		 */
		if (insert_buffer && !list_empty(&iio_dev_opaque->buffer_list))
			return -EINVAL;
		config->mode = INDIO_BUFFER_HARDWARE;
		strict_scanmask = true;
	} else if (modes & INDIO_BUFFER_SOFTWARE) {
		config->mode = INDIO_BUFFER_SOFTWARE;
	} else {
		/* Can only occur on first buffer */
		if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
			dev_dbg(&indio_dev->dev, "Buffer not started: no trigger\n");
		return -EINVAL;
	}

	/* What scan mask do we actually have? */
	compound_mask = bitmap_zalloc(indio_dev->masklength, GFP_KERNEL);
	if (!compound_mask)
		return -ENOMEM;

	scan_timestamp = false;

	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
		if (buffer == remove_buffer)
			continue;
		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
			  indio_dev->masklength);
		scan_timestamp |= buffer->scan_timestamp;
	}

	if (insert_buffer) {
		bitmap_or(compound_mask, compound_mask,
			  insert_buffer->scan_mask, indio_dev->masklength);
		scan_timestamp |= insert_buffer->scan_timestamp;
	}

	if (indio_dev->available_scan_masks) {
		scan_mask = iio_scan_mask_match(indio_dev->available_scan_masks,
						indio_dev->masklength,
						compound_mask,
						strict_scanmask);
		bitmap_free(compound_mask);
		if (!scan_mask)
			return -EINVAL;
	} else {
		scan_mask = compound_mask;
	}

	config->scan_bytes = iio_compute_scan_bytes(indio_dev,
						    scan_mask, scan_timestamp);
	config->scan_mask = scan_mask;
	config->scan_timestamp = scan_timestamp;

	return 0;
}

/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from:	index to copy from
 * @to:		index to copy to
 * @length:	how many bytes to copy
 * @l:		list head used for management
 */
struct iio_demux_table {
	unsigned int from;
	unsigned int to;
	unsigned int length;
	struct list_head l;
};

static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
	struct iio_demux_table *p, *q;

	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
		list_del(&p->l);
		kfree(p);
	}
}

static int iio_buffer_add_demux(struct iio_buffer *buffer,
	struct iio_demux_table **p, unsigned int in_loc, unsigned int out_loc,
	unsigned int length)
{
	if (*p && (*p)->from + (*p)->length == in_loc &&
	    (*p)->to + (*p)->length == out_loc) {
		(*p)->length += length;
	} else {
		*p = kmalloc(sizeof(**p), GFP_KERNEL);
		if (!*p)
			return -ENOMEM;
		(*p)->from = in_loc;
		(*p)->to = out_loc;
		(*p)->length = length;
		list_add_tail(&(*p)->l, &buffer->demux_list);
	}

	return 0;
}

static int iio_buffer_update_demux(struct iio_dev *indio_dev,
				   struct iio_buffer *buffer)
{
	int ret, in_ind = -1, out_ind, length;
	unsigned int in_loc = 0, out_loc = 0;
	struct iio_demux_table *p = NULL;

	/* Clear out any old demux */
	iio_buffer_demux_free(buffer);
	kfree(buffer->demux_bounce);
	buffer->demux_bounce = NULL;

	/* First work out which scan mode we will actually have */
	if (bitmap_equal(indio_dev->active_scan_mask,
			 buffer->scan_mask,
			 indio_dev->masklength))
		return 0;

	/* Now we have the two masks, work from least sig and build up sizes */
	for_each_set_bit(out_ind,
			 buffer->scan_mask,
			 indio_dev->masklength) {
		in_ind = find_next_bit(indio_dev->active_scan_mask,
				       indio_dev->masklength,
				       in_ind + 1);
		while (in_ind != out_ind) {
			length = iio_storage_bytes_for_si(indio_dev, in_ind);
			/* Make sure we are aligned */
			in_loc = roundup(in_loc, length) + length;
			in_ind = find_next_bit(indio_dev->active_scan_mask,
					       indio_dev->masklength,
					       in_ind + 1);
		}
		length = iio_storage_bytes_for_si(indio_dev, in_ind);
		out_loc = roundup(out_loc, length);
		in_loc = roundup(in_loc, length);
		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
		if (ret)
			goto error_clear_mux_table;
		out_loc += length;
		in_loc += length;
	}
	/* Relies on scan_timestamp being last */
	if (buffer->scan_timestamp) {
		length = iio_storage_bytes_for_timestamp(indio_dev);
		out_loc = roundup(out_loc, length);
		in_loc = roundup(in_loc, length);
		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
		if (ret)
			goto error_clear_mux_table;
		out_loc += length;
	}
	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
	if (!buffer->demux_bounce) {
		ret = -ENOMEM;
		goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	iio_buffer_demux_free(buffer);

	return ret;
}
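
/*
 * Illustrative demux example: with an active_scan_mask covering 16-bit
 * channels {0, 1, 2}, a buffer scan_mask selecting only {0, 2}, and the
 * timestamp disabled, the loop above produces two table entries (offsets in
 * bytes within a scan):
 *
 *	{ .from = 0, .to = 0, .length = 2 }	// channel 0
 *	{ .from = 4, .to = 2, .length = 2 }	// channel 2, skipping channel 1
 *
 * so each 6-byte captured scan is repacked into a 4-byte demux_bounce before
 * being stored into this buffer.
 */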

static int iio_update_demux(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer;
	int ret;

	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
		ret = iio_buffer_update_demux(indio_dev, buffer);
		if (ret < 0)
			goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list)
		iio_buffer_demux_free(buffer);

	return ret;
}

static int iio_enable_buffers(struct iio_dev *indio_dev,
	struct iio_device_config *config)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer;
	int ret;

	indio_dev->active_scan_mask = config->scan_mask;
	indio_dev->scan_timestamp = config->scan_timestamp;
	indio_dev->scan_bytes = config->scan_bytes;
	indio_dev->currentmode = config->mode;

	iio_update_demux(indio_dev);

	/* Wind up again */
	if (indio_dev->setup_ops->preenable) {
		ret = indio_dev->setup_ops->preenable(indio_dev);
		if (ret) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: buffer preenable failed (%d)\n", ret);
			goto err_undo_config;
		}
	}

	if (indio_dev->info->update_scan_mode) {
		ret = indio_dev->info->update_scan_mode(indio_dev,
						indio_dev->active_scan_mask);
		if (ret < 0) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: update scan mode failed (%d)\n",
				ret);
			goto err_run_postdisable;
		}
	}

	if (indio_dev->info->hwfifo_set_watermark)
		indio_dev->info->hwfifo_set_watermark(indio_dev,
			config->watermark);

	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
		ret = iio_buffer_enable(buffer, indio_dev);
		if (ret)
			goto err_disable_buffers;
	}

	if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
		ret = iio_trigger_attach_poll_func(indio_dev->trig,
						   indio_dev->pollfunc);
		if (ret)
			goto err_disable_buffers;
	}

	if (indio_dev->setup_ops->postenable) {
		ret = indio_dev->setup_ops->postenable(indio_dev);
		if (ret) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: postenable failed (%d)\n", ret);
			goto err_detach_pollfunc;
		}
	}

	return 0;

err_detach_pollfunc:
	if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
		iio_trigger_detach_poll_func(indio_dev->trig,
					     indio_dev->pollfunc);
	}
err_disable_buffers:
	list_for_each_entry_continue_reverse(buffer, &iio_dev_opaque->buffer_list,
					     buffer_list)
		iio_buffer_disable(buffer, indio_dev);
err_run_postdisable:
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
err_undo_config:
	indio_dev->currentmode = INDIO_DIRECT_MODE;
	indio_dev->active_scan_mask = NULL;

	return ret;
}

static int iio_disable_buffers(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer;
	int ret = 0;
	int ret2;

	/* Wind down existing buffers - iff there are any */
	if (list_empty(&iio_dev_opaque->buffer_list))
		return 0;

	/*
	 * If things go wrong at some step in disable we still need to continue
	 * to perform the other steps, otherwise we leave the device in an
	 * inconsistent state. We return the error code for the first error we
	 * encountered.
	 */

	if (indio_dev->setup_ops->predisable) {
		ret2 = indio_dev->setup_ops->predisable(indio_dev);
		if (ret2 && !ret)
			ret = ret2;
	}

	if (indio_dev->currentmode == INDIO_BUFFER_TRIGGERED) {
		iio_trigger_detach_poll_func(indio_dev->trig,
					     indio_dev->pollfunc);
	}

	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
		ret2 = iio_buffer_disable(buffer, indio_dev);
		if (ret2 && !ret)
			ret = ret2;
	}

	if (indio_dev->setup_ops->postdisable) {
		ret2 = indio_dev->setup_ops->postdisable(indio_dev);
		if (ret2 && !ret)
			ret = ret2;
	}

	iio_free_scan_mask(indio_dev, indio_dev->active_scan_mask);
	indio_dev->active_scan_mask = NULL;
	indio_dev->currentmode = INDIO_DIRECT_MODE;

	return ret;
}

static int __iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_device_config new_config;
	int ret;

	ret = iio_verify_update(indio_dev, insert_buffer, remove_buffer,
		&new_config);
	if (ret)
		return ret;

	if (insert_buffer) {
		ret = iio_buffer_request_update(indio_dev, insert_buffer);
		if (ret)
			goto err_free_config;
	}

	ret = iio_disable_buffers(indio_dev);
	if (ret)
		goto err_deactivate_all;

	if (remove_buffer)
		iio_buffer_deactivate(remove_buffer);
	if (insert_buffer)
		iio_buffer_activate(indio_dev, insert_buffer);

	/* If no buffers in list, we are done */
	if (list_empty(&iio_dev_opaque->buffer_list))
		return 0;

	ret = iio_enable_buffers(indio_dev, &new_config);
	if (ret)
		goto err_deactivate_all;

	return 0;

err_deactivate_all:
	/*
	 * We've already verified that the config is valid earlier. If things go
	 * wrong in either enable or disable the most likely reason is an IO
	 * error from the device. In this case there is no good recovery
	 * strategy. Just make sure to disable everything and leave the device
	 * in a sane state.  With a bit of luck the device might come back to
	 * life again later and userspace can try again.
	 */
	iio_buffer_deactivate_all(indio_dev);

err_free_config:
	iio_free_scan_mask(indio_dev, new_config.scan_mask);
	return ret;
}

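/**
 * iio_update_buffers() - add or remove a buffer from the active set
 * @indio_dev:		the device whose buffer set is being updated
 * @insert_buffer:	buffer to insert, may be NULL
 * @remove_buffer:	buffer to remove, may be NULL
 *
 * Note: this tears down all existing buffering and builds it back up again,
 * taking the necessary locks around __iio_update_buffers().
 */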
int iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	int ret;

	if (insert_buffer == remove_buffer)
		return 0;

	if (insert_buffer &&
	    (insert_buffer->direction == IIO_BUFFER_DIRECTION_OUT))
		return -EINVAL;

	mutex_lock(&iio_dev_opaque->info_exist_lock);
	mutex_lock(&indio_dev->mlock);

	if (insert_buffer && iio_buffer_is_active(insert_buffer))
		insert_buffer = NULL;

	if (remove_buffer && !iio_buffer_is_active(remove_buffer))
		remove_buffer = NULL;

	if (!insert_buffer && !remove_buffer) {
		ret = 0;
		goto out_unlock;
	}

	if (!indio_dev->info) {
		ret = -ENODEV;
		goto out_unlock;
	}

	ret = __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);

out_unlock:
	mutex_unlock(&indio_dev->mlock);
	mutex_unlock(&iio_dev_opaque->info_exist_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(iio_update_buffers);

void iio_disable_all_buffers(struct iio_dev *indio_dev)
{
	iio_disable_buffers(indio_dev);
	iio_buffer_deactivate_all(indio_dev);
}

static ssize_t iio_buffer_store_enable(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf,
				       size_t len)
{
	int ret;
	bool requested_state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
	bool inlist;

	ret = strtobool(buf, &requested_state);
	if (ret < 0)
		return ret;

	mutex_lock(&indio_dev->mlock);

	/* Find out if it is in the list */
	inlist = iio_buffer_is_active(buffer);
	/* Already in desired state */
	if (inlist == requested_state)
		goto done;

	if (requested_state)
		ret = __iio_update_buffers(indio_dev, buffer, NULL);
	else
		ret = __iio_update_buffers(indio_dev, NULL, buffer);

done:
	mutex_unlock(&indio_dev->mlock);
	return (ret < 0) ? ret : len;
}

static ssize_t iio_buffer_show_watermark(struct device *dev,
					 struct device_attribute *attr,
					 char *buf)
{
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;

	return sysfs_emit(buf, "%u\n", buffer->watermark);
}

static ssize_t iio_buffer_store_watermark(struct device *dev,
					  struct device_attribute *attr,
					  const char *buf,
					  size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;
	if (!val)
		return -EINVAL;

	mutex_lock(&indio_dev->mlock);

	if (val > buffer->length) {
		ret = -EINVAL;
		goto out;
	}

	if (iio_buffer_is_active(buffer)) {
		ret = -EBUSY;
		goto out;
	}

	buffer->watermark = val;
out:
	mutex_unlock(&indio_dev->mlock);

	return ret ? ret : len;
}

static ssize_t iio_dma_show_data_available(struct device *dev,
					   struct device_attribute *attr,
					   char *buf)
{
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;

	return sysfs_emit(buf, "%zu\n", iio_buffer_data_available(buffer));
}

static ssize_t direction_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;

	switch (buffer->direction) {
	case IIO_BUFFER_DIRECTION_IN:
		return sysfs_emit(buf, "in\n");
	case IIO_BUFFER_DIRECTION_OUT:
		return sysfs_emit(buf, "out\n");
	default:
		return -EINVAL;
	}
}

static DEVICE_ATTR(length, S_IRUGO | S_IWUSR, iio_buffer_read_length,
		   iio_buffer_write_length);
static struct device_attribute dev_attr_length_ro = __ATTR(length,
	S_IRUGO, iio_buffer_read_length, NULL);
static DEVICE_ATTR(enable, S_IRUGO | S_IWUSR,
		   iio_buffer_show_enable, iio_buffer_store_enable);
static DEVICE_ATTR(watermark, S_IRUGO | S_IWUSR,
		   iio_buffer_show_watermark, iio_buffer_store_watermark);
static struct device_attribute dev_attr_watermark_ro = __ATTR(watermark,
	S_IRUGO, iio_buffer_show_watermark, NULL);
static DEVICE_ATTR(data_available, S_IRUGO,
		iio_dma_show_data_available, NULL);
static DEVICE_ATTR_RO(direction);

/*
 * When adding new attributes here, put them at the end, at least until
 * the code that handles the length/length_ro & watermark/watermark_ro
 * assignments gets cleaned up. Otherwise these can create some weird
 * duplicate attribute errors under some setups.
 */
static struct attribute *iio_buffer_attrs[] = {
	&dev_attr_length.attr,
	&dev_attr_enable.attr,
	&dev_attr_watermark.attr,
	&dev_attr_data_available.attr,
	&dev_attr_direction.attr,
};

#define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr)

static struct attribute *iio_buffer_wrap_attr(struct iio_buffer *buffer,
					      struct attribute *attr)
{
	struct device_attribute *dattr = to_dev_attr(attr);
	struct iio_dev_attr *iio_attr;

	iio_attr = kzalloc(sizeof(*iio_attr), GFP_KERNEL);
	if (!iio_attr)
		return NULL;

	iio_attr->buffer = buffer;
	memcpy(&iio_attr->dev_attr, dattr, sizeof(iio_attr->dev_attr));
	iio_attr->dev_attr.attr.name = kstrdup_const(attr->name, GFP_KERNEL);
	if (!iio_attr->dev_attr.attr.name) {
		kfree(iio_attr);
		return NULL;
	}

	sysfs_attr_init(&iio_attr->dev_attr.attr);

	list_add(&iio_attr->l, &buffer->buffer_attr_list);

	return &iio_attr->dev_attr.attr;
}

static int iio_buffer_register_legacy_sysfs_groups(struct iio_dev *indio_dev,
						   struct attribute **buffer_attrs,
						   int buffer_attrcount,
						   int scan_el_attrcount)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct attribute_group *group;
	struct attribute **attrs;
	int ret;

	attrs = kcalloc(buffer_attrcount + 1, sizeof(*attrs), GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;

	memcpy(attrs, buffer_attrs, buffer_attrcount * sizeof(*attrs));

	group = &iio_dev_opaque->legacy_buffer_group;
	group->attrs = attrs;
	group->name = "buffer";

	ret = iio_device_register_sysfs_group(indio_dev, group);
	if (ret)
		goto error_free_buffer_attrs;

	attrs = kcalloc(scan_el_attrcount + 1, sizeof(*attrs), GFP_KERNEL);
	if (!attrs) {
		ret = -ENOMEM;
		goto error_free_buffer_attrs;
	}

	memcpy(attrs, &buffer_attrs[buffer_attrcount],
	       scan_el_attrcount * sizeof(*attrs));

	group = &iio_dev_opaque->legacy_scan_el_group;
	group->attrs = attrs;
	group->name = "scan_elements";

	ret = iio_device_register_sysfs_group(indio_dev, group);
	if (ret)
		goto error_free_scan_el_attrs;

	return 0;

error_free_scan_el_attrs:
	kfree(iio_dev_opaque->legacy_scan_el_group.attrs);
error_free_buffer_attrs:
	kfree(iio_dev_opaque->legacy_buffer_group.attrs);

	return ret;
}

static void iio_buffer_unregister_legacy_sysfs_groups(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	kfree(iio_dev_opaque->legacy_buffer_group.attrs);
	kfree(iio_dev_opaque->legacy_scan_el_group.attrs);
}

static int iio_buffer_chrdev_release(struct inode *inode, struct file *filep)
{
	struct iio_dev_buffer_pair *ib = filep->private_data;
	struct iio_dev *indio_dev = ib->indio_dev;
	struct iio_buffer *buffer = ib->buffer;

	wake_up(&buffer->pollq);

	kfree(ib);
	clear_bit(IIO_BUSY_BIT_POS, &buffer->flags);
	iio_device_put(indio_dev);

	return 0;
}

static const struct file_operations iio_buffer_chrdev_fileops = {
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
	.read = iio_buffer_read,
	.write = iio_buffer_write,
	.poll = iio_buffer_poll,
	.release = iio_buffer_chrdev_release,
};

static long iio_device_buffer_getfd(struct iio_dev *indio_dev, unsigned long arg)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	int __user *ival = (int __user *)arg;
	struct iio_dev_buffer_pair *ib;
	struct iio_buffer *buffer;
	int fd, idx, ret;

	if (copy_from_user(&idx, ival, sizeof(idx)))
		return -EFAULT;

	if (idx >= iio_dev_opaque->attached_buffers_cnt)
		return -ENODEV;

	iio_device_get(indio_dev);

	buffer = iio_dev_opaque->attached_buffers[idx];

	if (test_and_set_bit(IIO_BUSY_BIT_POS, &buffer->flags)) {
		ret = -EBUSY;
		goto error_iio_dev_put;
	}

	ib = kzalloc(sizeof(*ib), GFP_KERNEL);
	if (!ib) {
		ret = -ENOMEM;
		goto error_clear_busy_bit;
	}

	ib->indio_dev = indio_dev;
	ib->buffer = buffer;

	fd = anon_inode_getfd("iio:buffer", &iio_buffer_chrdev_fileops,
			      ib, O_RDWR | O_CLOEXEC);
	if (fd < 0) {
		ret = fd;
		goto error_free_ib;
	}

	if (copy_to_user(ival, &fd, sizeof(fd))) {
		/*
		 * "Leak" the fd, as there's not much we can do about this
		 * anyway. 'fd' might have been closed already, as
		 * anon_inode_getfd() called fd_install() on it, which made
		 * it reachable by userland.
		 *
		 * Instead of allowing a malicious user to play tricks with
		 * us, rely on the process exit path to do any necessary
		 * cleanup, as in releasing the file, if still needed.
		 */
		return -EFAULT;
	}

	return 0;

error_free_ib:
	kfree(ib);
error_clear_busy_bit:
	clear_bit(IIO_BUSY_BIT_POS, &buffer->flags);
error_iio_dev_put:
	iio_device_put(indio_dev);
	return ret;
}

static long iio_device_buffer_ioctl(struct iio_dev *indio_dev, struct file *filp,
				    unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case IIO_BUFFER_GET_FD_IOCTL:
		return iio_device_buffer_getfd(indio_dev, arg);
	default:
		return IIO_IOCTL_UNHANDLED;
	}
}
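
/*
 * Illustrative only: userspace selects one of the attached buffers by index
 * and receives a dedicated anonymous-inode fd for it. A minimal sketch using
 * IIO_BUFFER_GET_FD_IOCTL from the uapi header <linux/iio/buffer.h> (the
 * device node name and buffer index are assumptions):
 *
 *	int idx = 1;	// second attached buffer
 *	int dev_fd = open("/dev/iio:device0", O_RDWR);
 *
 *	if (ioctl(dev_fd, IIO_BUFFER_GET_FD_IOCTL, &idx) == 0) {
 *		// idx now holds the buffer fd; read()/write()/poll() it
 *		close(idx);
 *	}
 *	close(dev_fd);
 */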

static int __iio_buffer_alloc_sysfs_and_mask(struct iio_buffer *buffer,
					     struct iio_dev *indio_dev,
					     int index)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_dev_attr *p;
	struct attribute **attr;
	int ret, i, attrn, scan_el_attrcount, buffer_attrcount;
	const struct iio_chan_spec *channels;

	buffer_attrcount = 0;
	if (buffer->attrs) {
		while (buffer->attrs[buffer_attrcount] != NULL)
			buffer_attrcount++;
	}

	scan_el_attrcount = 0;
	INIT_LIST_HEAD(&buffer->buffer_attr_list);
	channels = indio_dev->channels;
	if (channels) {
		/* Add scan element attributes for each channel in the scan */
		for (i = 0; i < indio_dev->num_channels; i++) {
			if (channels[i].scan_index < 0)
				continue;

			ret = iio_buffer_add_channel_sysfs(indio_dev, buffer,
							   &channels[i]);
			if (ret < 0)
				goto error_cleanup_dynamic;
			scan_el_attrcount += ret;
			if (channels[i].type == IIO_TIMESTAMP)
				iio_dev_opaque->scan_index_timestamp =
					channels[i].scan_index;
		}
		if (indio_dev->masklength && buffer->scan_mask == NULL) {
			buffer->scan_mask = bitmap_zalloc(indio_dev->masklength,
							  GFP_KERNEL);
			if (buffer->scan_mask == NULL) {
				ret = -ENOMEM;
				goto error_cleanup_dynamic;
			}
		}
	}

	attrn = buffer_attrcount + scan_el_attrcount + ARRAY_SIZE(iio_buffer_attrs);
	attr = kcalloc(attrn + 1, sizeof(*attr), GFP_KERNEL);
	if (!attr) {
		ret = -ENOMEM;
		goto error_free_scan_mask;
	}

	memcpy(attr, iio_buffer_attrs, sizeof(iio_buffer_attrs));
	if (!buffer->access->set_length)
		attr[0] = &dev_attr_length_ro.attr;

	if (buffer->access->flags & INDIO_BUFFER_FLAG_FIXED_WATERMARK)
		attr[2] = &dev_attr_watermark_ro.attr;

	if (buffer->attrs)
		memcpy(&attr[ARRAY_SIZE(iio_buffer_attrs)], buffer->attrs,
		       sizeof(struct attribute *) * buffer_attrcount);

	buffer_attrcount += ARRAY_SIZE(iio_buffer_attrs);
	buffer->buffer_group.attrs = attr;

	for (i = 0; i < buffer_attrcount; i++) {
		struct attribute *wrapped;

		wrapped = iio_buffer_wrap_attr(buffer, attr[i]);
		if (!wrapped) {
			ret = -ENOMEM;
			goto error_free_buffer_attrs;
		}
		attr[i] = wrapped;
	}

	attrn = 0;
	list_for_each_entry(p, &buffer->buffer_attr_list, l)
		attr[attrn++] = &p->dev_attr.attr;

	buffer->buffer_group.name = kasprintf(GFP_KERNEL, "buffer%d", index);
	if (!buffer->buffer_group.name) {
		ret = -ENOMEM;
		goto error_free_buffer_attrs;
	}

	ret = iio_device_register_sysfs_group(indio_dev, &buffer->buffer_group);
	if (ret)
		goto error_free_buffer_attr_group_name;

	/* we only need to register the legacy groups for the first buffer */
	if (index > 0)
		return 0;

	ret = iio_buffer_register_legacy_sysfs_groups(indio_dev, attr,
						      buffer_attrcount,
						      scan_el_attrcount);
	if (ret)
		goto error_free_buffer_attr_group_name;

	return 0;

error_free_buffer_attr_group_name:
	kfree(buffer->buffer_group.name);
error_free_buffer_attrs:
	kfree(buffer->buffer_group.attrs);
error_free_scan_mask:
	bitmap_free(buffer->scan_mask);
error_cleanup_dynamic:
	iio_free_chan_devattr_list(&buffer->buffer_attr_list);

	return ret;
}

static void __iio_buffer_free_sysfs_and_mask(struct iio_buffer *buffer,
					     struct iio_dev *indio_dev,
					     int index)
{
	if (index == 0)
		iio_buffer_unregister_legacy_sysfs_groups(indio_dev);
	bitmap_free(buffer->scan_mask);
	kfree(buffer->buffer_group.name);
	kfree(buffer->buffer_group.attrs);
	iio_free_chan_devattr_list(&buffer->buffer_attr_list);
}

int iio_buffers_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	const struct iio_chan_spec *channels;
	struct iio_buffer *buffer;
	int ret, i, idx;
	size_t sz;

	channels = indio_dev->channels;
	if (channels) {
		int ml = indio_dev->masklength;

		for (i = 0; i < indio_dev->num_channels; i++)
			ml = max(ml, channels[i].scan_index + 1);
		indio_dev->masklength = ml;
	}

	if (!iio_dev_opaque->attached_buffers_cnt)
		return 0;

	for (idx = 0; idx < iio_dev_opaque->attached_buffers_cnt; idx++) {
		buffer = iio_dev_opaque->attached_buffers[idx];
		ret = __iio_buffer_alloc_sysfs_and_mask(buffer, indio_dev, idx);
		if (ret)
			goto error_unwind_sysfs_and_mask;
	}

	sz = sizeof(*(iio_dev_opaque->buffer_ioctl_handler));
	iio_dev_opaque->buffer_ioctl_handler = kzalloc(sz, GFP_KERNEL);
	if (!iio_dev_opaque->buffer_ioctl_handler) {
		ret = -ENOMEM;
		goto error_unwind_sysfs_and_mask;
	}

	iio_dev_opaque->buffer_ioctl_handler->ioctl = iio_device_buffer_ioctl;
	iio_device_ioctl_handler_register(indio_dev,
					  iio_dev_opaque->buffer_ioctl_handler);

	return 0;

error_unwind_sysfs_and_mask:
	while (idx--) {
		buffer = iio_dev_opaque->attached_buffers[idx];
		__iio_buffer_free_sysfs_and_mask(buffer, indio_dev, idx);
	}
	return ret;
}

void iio_buffers_free_sysfs_and_mask(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer;
	int i;

	if (!iio_dev_opaque->attached_buffers_cnt)
		return;

	iio_device_ioctl_handler_unregister(iio_dev_opaque->buffer_ioctl_handler);
	kfree(iio_dev_opaque->buffer_ioctl_handler);

	for (i = iio_dev_opaque->attached_buffers_cnt - 1; i >= 0; i--) {
		buffer = iio_dev_opaque->attached_buffers[i];
		__iio_buffer_free_sysfs_and_mask(buffer, indio_dev, i);
	}
}

/**
 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
 * @indio_dev: the iio device
 * @mask: scan mask to be checked
 *
 * Return true if exactly one bit is set in the scan mask, false otherwise. It
 * can be used for devices where only one channel can be active for sampling at
 * a time.
 */
bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
	const unsigned long *mask)
{
	return bitmap_weight(mask, indio_dev->masklength) == 1;
}
EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);

static const void *iio_demux(struct iio_buffer *buffer,
			     const void *datain)
{
	struct iio_demux_table *t;

	if (list_empty(&buffer->demux_list))
		return datain;
	list_for_each_entry(t, &buffer->demux_list, l)
		memcpy(buffer->demux_bounce + t->to,
		       datain + t->from, t->length);

	return buffer->demux_bounce;
}

static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
{
	const void *dataout = iio_demux(buffer, data);
	int ret;

	ret = buffer->access->store_to(buffer, dataout);
	if (ret)
		return ret;

	/*
	 * We can't just test for watermark to decide if we wake the poll queue
	 * because read may request fewer samples than the watermark.
	 */
	wake_up_interruptible_poll(&buffer->pollq, EPOLLIN | EPOLLRDNORM);
	return 0;
}

/**
 * iio_push_to_buffers() - push to a registered buffer.
 * @indio_dev:		iio_dev structure for device.
 * @data:		Full scan.
 */
int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	int ret;
	struct iio_buffer *buf;

	list_for_each_entry(buf, &iio_dev_opaque->buffer_list, buffer_list) {
		ret = iio_push_to_buffer(buf, data);
		if (ret < 0)
			return ret;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers);
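
/*
 * Illustrative only: a driver's trigger handler typically fills one full scan
 * and pushes it from here. A minimal sketch, with foo_read_scan() and the
 * scan layout as assumptions (note the 8-byte aligned timestamp slot expected
 * by iio_push_to_buffers_with_timestamp()):
 *
 *	struct {
 *		__le16 chans[3];
 *		s64 ts __aligned(8);
 *	} scan;
 *
 *	foo_read_scan(dev, scan.chans);		// hypothetical helper
 *	iio_push_to_buffers_with_timestamp(indio_dev, &scan,
 *					   iio_get_time_ns(indio_dev));
 */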

/**
 * iio_push_to_buffers_with_ts_unaligned() - push to registered buffer,
 *    no alignment or space requirements.
 * @indio_dev:		iio_dev structure for device.
 * @data:		channel data excluding the timestamp.
 * @data_sz:		size of data.
 * @timestamp:		timestamp for the sample data.
 *
 * This special variant of iio_push_to_buffers_with_timestamp() does
 * not require space for the timestamp, or 8 byte alignment of data.
 * It does however require an allocation on first call and additional
 * copies on all calls, so should be avoided if possible.
 */
int iio_push_to_buffers_with_ts_unaligned(struct iio_dev *indio_dev,
					  const void *data,
					  size_t data_sz,
					  int64_t timestamp)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	/*
	 * Conservative estimate - we can always safely copy the minimum
	 * of either the data provided or the length of the destination buffer.
	 * This relaxed limit allows the calling drivers to be lax about
	 * tracking the size of the data they are pushing, at the cost of
	 * unnecessary copying of padding.
	 */
	data_sz = min_t(size_t, indio_dev->scan_bytes, data_sz);
	if (iio_dev_opaque->bounce_buffer_size != indio_dev->scan_bytes) {
		void *bb;

		bb = devm_krealloc(&indio_dev->dev,
				   iio_dev_opaque->bounce_buffer,
				   indio_dev->scan_bytes, GFP_KERNEL);
		if (!bb)
			return -ENOMEM;
		iio_dev_opaque->bounce_buffer = bb;
		iio_dev_opaque->bounce_buffer_size = indio_dev->scan_bytes;
	}
	memcpy(iio_dev_opaque->bounce_buffer, data, data_sz);
	return iio_push_to_buffers_with_timestamp(indio_dev,
						  iio_dev_opaque->bounce_buffer,
						  timestamp);
}
EXPORT_SYMBOL_GPL(iio_push_to_buffers_with_ts_unaligned);

/**
 * iio_buffer_release() - Free a buffer's resources
 * @ref: Pointer to the kref embedded in the iio_buffer struct
 *
 * This function is called when the last reference to the buffer has been
 * dropped. It will typically free all resources allocated by the buffer. Do not
 * call this function manually, always use iio_buffer_put() when done using a
 * buffer.
 */
static void iio_buffer_release(struct kref *ref)
{
	struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);

	buffer->access->release(buffer);
}

/**
 * iio_buffer_get() - Grab a reference to the buffer
 * @buffer: The buffer to grab a reference for, may be NULL
 *
 * Returns the pointer to the buffer that was passed into the function.
 */
struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
{
	if (buffer)
		kref_get(&buffer->ref);

	return buffer;
}
EXPORT_SYMBOL_GPL(iio_buffer_get);

/**
 * iio_buffer_put() - Release the reference to the buffer
 * @buffer: The buffer to release the reference for, may be NULL
 */
void iio_buffer_put(struct iio_buffer *buffer)
{
	if (buffer)
		kref_put(&buffer->ref, iio_buffer_release);
}
EXPORT_SYMBOL_GPL(iio_buffer_put);

/**
 * iio_device_attach_buffer - Attach a buffer to an IIO device
 * @indio_dev: The device the buffer should be attached to
 * @buffer: The buffer to attach to the device
 *
 * Return 0 if successful, negative if error.
 *
 * This function attaches a buffer to an IIO device. The buffer stays attached
 * to the device until the device is freed. For legacy reasons, the first
 * attached buffer will also be assigned to 'indio_dev->buffer'.
 * The array allocated here is freed via the iio_device_detach_buffers() call,
 * which is handled by iio_device_free().
 */
int iio_device_attach_buffer(struct iio_dev *indio_dev,
			     struct iio_buffer *buffer)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer **new, **old = iio_dev_opaque->attached_buffers;
	unsigned int cnt = iio_dev_opaque->attached_buffers_cnt;

	cnt++;

	new = krealloc(old, sizeof(*new) * cnt, GFP_KERNEL);
	if (!new)
		return -ENOMEM;
	iio_dev_opaque->attached_buffers = new;

	buffer = iio_buffer_get(buffer);

	/* first buffer is legacy; attach it to the IIO device directly */
	if (!indio_dev->buffer)
		indio_dev->buffer = buffer;

	iio_dev_opaque->attached_buffers[cnt - 1] = buffer;
	iio_dev_opaque->attached_buffers_cnt = cnt;

	return 0;
}
EXPORT_SYMBOL_GPL(iio_device_attach_buffer);
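
/*
 * Illustrative only: a driver attaches its buffers during probe, before
 * iio_device_register(). A minimal sketch, with the buffer allocation helper
 * (e.g. a kfifo or DMA buffer backend) as an assumption:
 *
 *	struct iio_buffer *buffer = foo_allocate_buffer(dev); // hypothetical
 *	int ret;
 *
 *	if (!buffer)
 *		return -ENOMEM;
 *	ret = iio_device_attach_buffer(indio_dev, buffer);
 *	if (ret)
 *		return ret;
 */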
1994