xref: /linux/drivers/iio/industrialio-buffer.c (revision 163cc462dea7d5b75be4db49ca78a2b99c55375e)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /* The industrial I/O core
3  *
4  * Copyright (c) 2008 Jonathan Cameron
5  *
6  * Handling of buffer allocation / resizing.
7  *
8  * Things to look at here.
9  * - Better memory allocation techniques?
10  * - Alternative access techniques?
11  */
12 #include <linux/atomic.h>
13 #include <linux/anon_inodes.h>
14 #include <linux/cleanup.h>
15 #include <linux/kernel.h>
16 #include <linux/export.h>
17 #include <linux/device.h>
18 #include <linux/dma-buf.h>
19 #include <linux/dma-fence.h>
20 #include <linux/dma-resv.h>
21 #include <linux/file.h>
22 #include <linux/fs.h>
23 #include <linux/cdev.h>
24 #include <linux/slab.h>
25 #include <linux/mm.h>
26 #include <linux/poll.h>
27 #include <linux/sched/signal.h>
28 
29 #include <linux/iio/iio.h>
30 #include <linux/iio/iio-opaque.h>
31 #include "iio_core.h"
32 #include "iio_core_trigger.h"
33 #include <linux/iio/sysfs.h>
34 #include <linux/iio/buffer.h>
35 #include <linux/iio/buffer_impl.h>
36 
37 #define DMABUF_ENQUEUE_TIMEOUT_MS 5000
38 
39 MODULE_IMPORT_NS("DMA_BUF");
40 
/*
 * struct iio_dmabuf_priv - per-attachment state for a DMABUF attached
 * to an IIO buffer
 * @entry:	node in the owning buffer's ->dmabufs list
 * @ref:	refcount; the attachment is torn down on last put
 * @buffer:	IIO buffer this DMABUF is attached to
 * @block:	DMA buffer block backing the transfers
 * @context:	dma_fence context id allocated for this attachment
 * @lock:	spinlock protecting the dma_fence (see dma_fence_init)
 * @attach:	the dma-buf attachment handle
 * @sgt:	mapped scatter-gather table for the attachment
 * @dir:	DMA direction of the mapping
 * @seqno:	monotonically increasing fence sequence number
 */
struct iio_dmabuf_priv {
	struct list_head entry;
	struct kref ref;

	struct iio_buffer *buffer;
	struct iio_dma_buffer_block *block;

	u64 context;

	/* Spinlock used for locking the dma_fence */
	spinlock_t lock;

	struct dma_buf_attachment *attach;
	struct sg_table *sgt;
	enum dma_data_direction dir;
	atomic_t seqno;
};
58 
/*
 * struct iio_dma_fence - dma_fence wrapper for IIO DMABUF transfers
 * @base:	embedded dma_fence (must stay first so container_of works
 *		on the generic fence pointer)
 * @priv:	back-pointer to the owning attachment state
 * @work:	deferred work used to release resources outside fence context
 */
struct iio_dma_fence {
	struct dma_fence base;
	struct iio_dmabuf_priv *priv;
	struct work_struct work;
};
64 
/* Endianness prefixes used when formatting the sysfs scan-element "type"
 * attribute (e.g. "be:s16/32>>0"). Indexed by enum iio_endian.
 */
static const char * const iio_endian_prefix[] = {
	[IIO_BE] = "be",
	[IIO_LE] = "le",
};
69 
/* A buffer is active iff it is linked into the device's buffer_list. */
static bool iio_buffer_is_active(struct iio_buffer *buf)
{
	return !list_empty(&buf->buffer_list);
}
74 
/* Number of complete scans currently readable from the buffer backend. */
static size_t iio_buffer_data_available(struct iio_buffer *buf)
{
	return buf->access->data_available(buf);
}
79 
80 static int iio_buffer_flush_hwfifo(struct iio_dev *indio_dev,
81 				   struct iio_buffer *buf, size_t required)
82 {
83 	if (!indio_dev->info->hwfifo_flush_to_buffer)
84 		return -ENODEV;
85 
86 	return indio_dev->info->hwfifo_flush_to_buffer(indio_dev, required);
87 }
88 
/*
 * iio_buffer_ready() - decide whether a reader should be woken/served
 * @indio_dev:	the IIO device
 * @buf:	buffer being read
 * @to_wait:	minimum number of scans the reader is waiting for
 *		(0 for non-blocking reads)
 * @to_flush:	number of scans worth flushing from the hardware FIFO
 *
 * Returns true when enough data is available (possibly after flushing
 * the hardware FIFO), or when the device has been unregistered so the
 * reader must be released.
 */
static bool iio_buffer_ready(struct iio_dev *indio_dev, struct iio_buffer *buf,
			     size_t to_wait, int to_flush)
{
	size_t avail;
	int flushed = 0;

	/* wakeup if the device was unregistered */
	if (!indio_dev->info)
		return true;

	/* drain the buffer if it was disabled */
	if (!iio_buffer_is_active(buf)) {
		/* at most one scan required; nothing new will arrive */
		to_wait = min_t(size_t, to_wait, 1);
		to_flush = 0;
	}

	avail = iio_buffer_data_available(buf);

	if (avail >= to_wait) {
		/* force a flush for non-blocking reads */
		if (!to_wait && avail < to_flush)
			iio_buffer_flush_hwfifo(indio_dev, buf,
						to_flush - avail);
		return true;
	}

	/* Not enough buffered data: try to top up from the hardware FIFO. */
	if (to_flush)
		flushed = iio_buffer_flush_hwfifo(indio_dev, buf,
						  to_wait - avail);
	if (flushed <= 0)
		return false;

	if (avail + flushed >= to_wait)
		return true;

	return false;
}
126 
/**
 * iio_buffer_read() - chrdev read for buffer access
 * @filp:	File structure pointer for the char device
 * @buf:	Destination buffer for iio buffer read
 * @n:		First n bytes to read
 * @f_ps:	Long offset provided by the user as a seek position
 *
 * This function relies on all buffer implementations having an
 * iio_buffer as their first element.
 *
 * Return: negative values corresponding to error codes or ret != 0
 *	   for ending the reading activity
 **/
static ssize_t iio_buffer_read(struct file *filp, char __user *buf,
			       size_t n, loff_t *f_ps)
{
	struct iio_dev_buffer_pair *ib = filp->private_data;
	struct iio_buffer *rb = ib->buffer;
	struct iio_dev *indio_dev = ib->indio_dev;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	size_t datum_size;
	size_t to_wait;
	int ret = 0;

	if (!indio_dev->info)
		return -ENODEV;

	/* Reading requires an input-capable buffer with a read op. */
	if (!rb || !rb->access->read)
		return -EINVAL;

	if (rb->direction != IIO_BUFFER_DIRECTION_IN)
		return -EPERM;

	datum_size = rb->bytes_per_datum;

	/*
	 * If datum_size is 0 there will never be anything to read from the
	 * buffer, so signal end of file now.
	 */
	if (!datum_size)
		return 0;

	/*
	 * Non-blocking readers never wait; blocking readers wait for the
	 * smaller of what they asked for and the buffer watermark.
	 */
	if (filp->f_flags & O_NONBLOCK)
		to_wait = 0;
	else
		to_wait = min_t(size_t, n / datum_size, rb->watermark);

	add_wait_queue(&rb->pollq, &wait);
	do {
		/* Device may be unregistered while we sleep. */
		if (!indio_dev->info) {
			ret = -ENODEV;
			break;
		}

		if (!iio_buffer_ready(indio_dev, rb, to_wait, n / datum_size)) {
			if (signal_pending(current)) {
				ret = -ERESTARTSYS;
				break;
			}

			/* Sleep until woken by producer or signal. */
			wait_woken(&wait, TASK_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
			continue;
		}

		ret = rb->access->read(rb, n, buf);
		/* Raced with another reader: don't spin, report EAGAIN. */
		if (ret == 0 && (filp->f_flags & O_NONBLOCK))
			ret = -EAGAIN;
	} while (ret == 0);
	remove_wait_queue(&rb->pollq, &wait);

	return ret;
}
200 
201 static size_t iio_buffer_space_available(struct iio_buffer *buf)
202 {
203 	if (buf->access->space_available)
204 		return buf->access->space_available(buf);
205 
206 	return SIZE_MAX;
207 }
208 
/*
 * iio_buffer_write() - chrdev write for output buffer access
 * @filp:	File structure pointer for the char device
 * @buf:	Userspace source buffer
 * @n:		Number of bytes to write
 * @f_ps:	Seek position (unused)
 *
 * Blocks (unless O_NONBLOCK) until all @n bytes have been pushed into
 * the buffer. Returns the number of bytes written, or a negative error
 * code when nothing was written.
 */
static ssize_t iio_buffer_write(struct file *filp, const char __user *buf,
				size_t n, loff_t *f_ps)
{
	struct iio_dev_buffer_pair *ib = filp->private_data;
	struct iio_buffer *rb = ib->buffer;
	struct iio_dev *indio_dev = ib->indio_dev;
	DEFINE_WAIT_FUNC(wait, woken_wake_function);
	int ret = 0;
	size_t written;

	if (!indio_dev->info)
		return -ENODEV;

	/* Writing requires an output-capable buffer with a write op. */
	if (!rb || !rb->access->write)
		return -EINVAL;

	if (rb->direction != IIO_BUFFER_DIRECTION_OUT)
		return -EPERM;

	written = 0;
	add_wait_queue(&rb->pollq, &wait);
	do {
		/* Device may be unregistered while we sleep. */
		if (!indio_dev->info) {
			ret = -ENODEV;
			break;
		}

		if (!iio_buffer_space_available(rb)) {
			if (signal_pending(current)) {
				ret = -ERESTARTSYS;
				break;
			}

			/*
			 * Non-blocking: report EAGAIN only if nothing was
			 * written yet, otherwise return the partial count.
			 */
			if (filp->f_flags & O_NONBLOCK) {
				if (!written)
					ret = -EAGAIN;
				break;
			}

			wait_woken(&wait, TASK_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
			continue;
		}

		ret = rb->access->write(rb, n - written, buf + written);
		if (ret < 0)
			break;

		written += ret;

	} while (written != n);
	remove_wait_queue(&rb->pollq, &wait);

	/* Partial writes succeed with the byte count actually consumed. */
	return ret < 0 ? ret : written;
}
264 
/**
 * iio_buffer_poll() - poll the buffer to find out if it has data
 * @filp:	File structure pointer for device access
 * @wait:	Poll table structure pointer for which the driver adds
 *		a wait queue
 *
 * Return: (EPOLLIN | EPOLLRDNORM) if data is available for reading
 *	   or 0 for other cases
 */
static __poll_t iio_buffer_poll(struct file *filp,
				struct poll_table_struct *wait)
{
	struct iio_dev_buffer_pair *ib = filp->private_data;
	struct iio_buffer *rb = ib->buffer;
	struct iio_dev *indio_dev = ib->indio_dev;

	/* Unregistered device or missing buffer: nothing to report. */
	if (!indio_dev->info || !rb)
		return 0;

	poll_wait(filp, &rb->pollq, wait);

	/* Readability vs. writability depends on the buffer direction. */
	switch (rb->direction) {
	case IIO_BUFFER_DIRECTION_IN:
		if (iio_buffer_ready(indio_dev, rb, rb->watermark, 0))
			return EPOLLIN | EPOLLRDNORM;
		break;
	case IIO_BUFFER_DIRECTION_OUT:
		if (iio_buffer_space_available(rb))
			return EPOLLOUT | EPOLLWRNORM;
		break;
	}

	return 0;
}
299 
/*
 * Legacy-chrdev read entry point. Refuses access when the buffer was
 * already claimed through the new per-buffer ioctl API.
 */
ssize_t iio_buffer_read_wrapper(struct file *filp, char __user *buf,
				size_t n, loff_t *f_ps)
{
	struct iio_dev_buffer_pair *ib = filp->private_data;
	struct iio_buffer *rb = ib->buffer;

	/* check if buffer was opened through new API */
	if (test_bit(IIO_BUSY_BIT_POS, &rb->flags))
		return -EBUSY;

	return iio_buffer_read(filp, buf, n, f_ps);
}
312 
/*
 * Legacy-chrdev write entry point. Refuses access when the buffer was
 * already claimed through the new per-buffer ioctl API.
 */
ssize_t iio_buffer_write_wrapper(struct file *filp, const char __user *buf,
				 size_t n, loff_t *f_ps)
{
	struct iio_dev_buffer_pair *ib = filp->private_data;
	struct iio_buffer *rb = ib->buffer;

	/* check if buffer was opened through new API */
	if (test_bit(IIO_BUSY_BIT_POS, &rb->flags))
		return -EBUSY;

	return iio_buffer_write(filp, buf, n, f_ps);
}
325 
/*
 * Legacy-chrdev poll entry point. Reports no events when the buffer was
 * already claimed through the new per-buffer ioctl API.
 */
__poll_t iio_buffer_poll_wrapper(struct file *filp,
				 struct poll_table_struct *wait)
{
	struct iio_dev_buffer_pair *ib = filp->private_data;
	struct iio_buffer *rb = ib->buffer;

	/* check if buffer was opened through new API */
	if (test_bit(IIO_BUSY_BIT_POS, &rb->flags))
		return 0;

	return iio_buffer_poll(filp, wait);
}
338 
/**
 * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
 * @indio_dev: The IIO device
 *
 * Wakes up the event waitqueue used for poll(). Should usually
 * be called when the device is unregistered.
 */
void iio_buffer_wakeup_poll(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer;
	unsigned int i;

	/* Wake every sleeper on every buffer attached to this device. */
	for (i = 0; i < iio_dev_opaque->attached_buffers_cnt; i++) {
		buffer = iio_dev_opaque->attached_buffers[i];
		wake_up(&buffer->pollq);
	}
}
357 
/*
 * iio_pop_from_buffer() - remove one scan from the front of a buffer
 * @buffer:	the buffer to pop from (may be NULL)
 * @data:	destination for the removed scan
 *
 * Returns 0 on success, -EINVAL when the buffer or its remove_from op
 * is missing, otherwise the backend's error code.
 */
int iio_pop_from_buffer(struct iio_buffer *buffer, void *data)
{
	if (!buffer || !buffer->access || !buffer->access->remove_from)
		return -EINVAL;

	return buffer->access->remove_from(buffer, data);
}
EXPORT_SYMBOL_GPL(iio_pop_from_buffer);
366 
/*
 * iio_buffer_init() - initialise the generic fields of an iio_buffer
 * @buffer: buffer embedded in a backend-specific structure
 *
 * Must be called by buffer implementations before the buffer is used.
 * A watermark of 0 is normalised to 1 so readers always have a sane
 * wake-up threshold.
 */
void iio_buffer_init(struct iio_buffer *buffer)
{
	INIT_LIST_HEAD(&buffer->demux_list);
	INIT_LIST_HEAD(&buffer->buffer_list);
	INIT_LIST_HEAD(&buffer->dmabufs);
	mutex_init(&buffer->dmabufs_mutex);
	init_waitqueue_head(&buffer->pollq);
	kref_init(&buffer->ref);
	if (!buffer->watermark)
		buffer->watermark = 1;
}
EXPORT_SYMBOL(iio_buffer_init);
379 
/*
 * iio_device_detach_buffers() - drop the device's references to its buffers
 * @indio_dev: the IIO device being torn down
 *
 * Puts each attached buffer's refcount and frees the attachment array.
 * Individual buffers are freed when their last reference goes away.
 */
void iio_device_detach_buffers(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer;
	unsigned int i;

	for (i = 0; i < iio_dev_opaque->attached_buffers_cnt; i++) {
		buffer = iio_dev_opaque->attached_buffers[i];
		iio_buffer_put(buffer);
	}

	kfree(iio_dev_opaque->attached_buffers);
}
393 
/* sysfs show for the per-channel "index" scan-element attribute. */
static ssize_t iio_show_scan_index(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	return sysfs_emit(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index);
}
400 
/*
 * sysfs show for the per-channel "type" attribute, formatted as
 * [be|le]:[s|u]bits/storagebits[Xrepeat]>>shift. IIO_CPU endianness
 * is resolved to the compile-time endianness of the host.
 */
static ssize_t iio_show_fixed_type(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	const struct iio_scan_type *scan_type;
	u8 type;

	scan_type = iio_get_current_scan_type(indio_dev, this_attr->c);
	if (IS_ERR(scan_type))
		return PTR_ERR(scan_type);

	type = scan_type->endianness;

	if (type == IIO_CPU) {
#ifdef __LITTLE_ENDIAN
		type = IIO_LE;
#else
		type = IIO_BE;
#endif
	}
	/* The "Xrepeat" component is only emitted when repeat > 1. */
	if (scan_type->repeat > 1)
		return sysfs_emit(buf, "%s:%c%d/%dX%d>>%u\n",
		       iio_endian_prefix[type],
		       scan_type->sign,
		       scan_type->realbits,
		       scan_type->storagebits,
		       scan_type->repeat,
		       scan_type->shift);
	else
		return sysfs_emit(buf, "%s:%c%d/%d>>%u\n",
		       iio_endian_prefix[type],
		       scan_type->sign,
		       scan_type->realbits,
		       scan_type->storagebits,
		       scan_type->shift);
}
439 
440 static ssize_t iio_scan_el_show(struct device *dev,
441 				struct device_attribute *attr,
442 				char *buf)
443 {
444 	int ret;
445 	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
446 
447 	/* Ensure ret is 0 or 1. */
448 	ret = !!test_bit(to_iio_dev_attr(attr)->address,
449 		       buffer->scan_mask);
450 
451 	return sysfs_emit(buf, "%d\n", ret);
452 }
453 
/*
 * iio_scan_mask_match() - find a provided mask covering the requested one
 * @av_masks:	zero-terminated array of available masks
 * @masklength:	number of valid bits per mask
 * @mask:	the requested scan mask
 * @strict:	require exact equality instead of subset
 *
 * Note NULL used as error indicator as it doesn't make sense.
 */
static const unsigned long *iio_scan_mask_match(const unsigned long *av_masks,
						unsigned int masklength,
						const unsigned long *mask,
						bool strict)
{
	/* An empty request can never match anything useful. */
	if (bitmap_empty(mask, masklength))
		return NULL;
	/*
	 * The condition here do not handle multi-long masks correctly.
	 * It only checks the first long to be zero, and will use such mask
	 * as a terminator even if there was bits set after the first long.
	 *
	 * Correct check would require using:
	 * while (!bitmap_empty(av_masks, masklength))
	 * instead. This is potentially hazardous because the
	 * avaliable_scan_masks is a zero terminated array of longs - and
	 * using the proper bitmap_empty() check for multi-long wide masks
	 * would require the array to be terminated with multiple zero longs -
	 * which is not such an usual pattern.
	 *
	 * As writing of this no multi-long wide masks were found in-tree, so
	 * the simple while (*av_masks) check is working.
	 */
	while (*av_masks) {
		if (strict) {
			if (bitmap_equal(mask, av_masks, masklength))
				return av_masks;
		} else {
			if (bitmap_subset(mask, av_masks, masklength))
				return av_masks;
		}
		/* Advance to the next mask in the array. */
		av_masks += BITS_TO_LONGS(masklength);
	}
	return NULL;
}
490 
491 static bool iio_validate_scan_mask(struct iio_dev *indio_dev,
492 				   const unsigned long *mask)
493 {
494 	if (!indio_dev->setup_ops->validate_scan_mask)
495 		return true;
496 
497 	return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask);
498 }
499 
/**
 * iio_scan_mask_set() - set particular bit in the scan mask
 * @indio_dev: the iio device
 * @buffer: the buffer whose scan mask we are interested in
 * @bit: the bit to be set.
 *
 * Note that at this point we have no way of knowing what other
 * buffers might request, hence this code only verifies that the
 * individual buffers request is plausible.
 *
 * Return: 0 on success, -EINVAL for an invalid mask, -ENOMEM on
 * allocation failure.
 */
static int iio_scan_mask_set(struct iio_dev *indio_dev,
			     struct iio_buffer *buffer, int bit)
{
	unsigned int masklength = iio_get_masklength(indio_dev);
	const unsigned long *mask;
	unsigned long *trialmask;

	if (!masklength) {
		WARN(1, "Trying to set scanmask prior to registering buffer\n");
		return -EINVAL;
	}

	/* Validate on a copy so the live mask is untouched on failure. */
	trialmask = bitmap_alloc(masklength, GFP_KERNEL);
	if (!trialmask)
		return -ENOMEM;
	bitmap_copy(trialmask, buffer->scan_mask, masklength);
	set_bit(bit, trialmask);

	if (!iio_validate_scan_mask(indio_dev, trialmask))
		goto err_invalid_mask;

	/* Devices with fixed masks must cover the trial mask. */
	if (indio_dev->available_scan_masks) {
		mask = iio_scan_mask_match(indio_dev->available_scan_masks,
					   masklength, trialmask, false);
		if (!mask)
			goto err_invalid_mask;
	}
	bitmap_copy(buffer->scan_mask, trialmask, masklength);

	bitmap_free(trialmask);

	return 0;

err_invalid_mask:
	bitmap_free(trialmask);
	return -EINVAL;
}
547 
548 static int iio_scan_mask_clear(struct iio_buffer *buffer, int bit)
549 {
550 	clear_bit(bit, buffer->scan_mask);
551 	return 0;
552 }
553 
554 static int iio_scan_mask_query(struct iio_dev *indio_dev,
555 			       struct iio_buffer *buffer, int bit)
556 {
557 	if (bit > iio_get_masklength(indio_dev))
558 		return -EINVAL;
559 
560 	if (!buffer->scan_mask)
561 		return 0;
562 
563 	/* Ensure return value is 0 or 1. */
564 	return !!test_bit(bit, buffer->scan_mask);
565 };
566 
/*
 * sysfs store for a scan element's "en" attribute: enables or disables
 * one channel in the buffer's scan mask. Rejected with -EBUSY while the
 * buffer is active.
 */
static ssize_t iio_scan_el_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t len)
{
	int ret;
	bool state;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
	struct iio_buffer *buffer = this_attr->buffer;

	ret = kstrtobool(buf, &state);
	if (ret < 0)
		return ret;

	/* mlock serialises against buffer enable/disable. */
	guard(mutex)(&iio_dev_opaque->mlock);
	if (iio_buffer_is_active(buffer))
		return -EBUSY;

	ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address);
	if (ret < 0)
		return ret;

	/* Enabling an already-enabled channel is a no-op. */
	if (state && ret)
		return len;

	if (state)
		ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address);
	else
		ret = iio_scan_mask_clear(buffer, this_attr->address);
	if (ret)
		return ret;

	return len;
}
603 
/* sysfs show for the timestamp scan element's "en" attribute. */
static ssize_t iio_scan_el_ts_show(struct device *dev,
				   struct device_attribute *attr,
				   char *buf)
{
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;

	return sysfs_emit(buf, "%d\n", buffer->scan_timestamp);
}
612 
/*
 * sysfs store for the timestamp scan element's "en" attribute.
 * Rejected with -EBUSY while the buffer is active.
 */
static ssize_t iio_scan_el_ts_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf,
				    size_t len)
{
	int ret;
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
	bool state;

	ret = kstrtobool(buf, &state);
	if (ret < 0)
		return ret;

	/* mlock serialises against buffer enable/disable. */
	guard(mutex)(&iio_dev_opaque->mlock);
	if (iio_buffer_is_active(buffer))
		return -EBUSY;

	buffer->scan_timestamp = state;

	return len;
}
636 
/*
 * iio_buffer_add_channel_sysfs() - create the scan-element attributes
 * ("index", "type", "en") for one channel of a buffer
 * @indio_dev:	the IIO device
 * @buffer:	buffer whose attribute list receives the entries
 * @chan:	channel being exposed
 *
 * Timestamp channels get the dedicated ts show/store callbacks for "en".
 * Return: number of attributes created, or a negative error code.
 */
static int iio_buffer_add_channel_sysfs(struct iio_dev *indio_dev,
					struct iio_buffer *buffer,
					const struct iio_chan_spec *chan)
{
	int ret, attrcount = 0;

	ret = __iio_add_chan_devattr("index",
				     chan,
				     &iio_show_scan_index,
				     NULL,
				     0,
				     IIO_SEPARATE,
				     &indio_dev->dev,
				     buffer,
				     &buffer->buffer_attr_list);
	if (ret)
		return ret;
	attrcount++;
	ret = __iio_add_chan_devattr("type",
				     chan,
				     &iio_show_fixed_type,
				     NULL,
				     0,
				     IIO_SEPARATE,
				     &indio_dev->dev,
				     buffer,
				     &buffer->buffer_attr_list);
	if (ret)
		return ret;
	attrcount++;
	/* The timestamp channel uses its own enable state, not the mask. */
	if (chan->type != IIO_TIMESTAMP)
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_show,
					     &iio_scan_el_store,
					     chan->scan_index,
					     IIO_SEPARATE,
					     &indio_dev->dev,
					     buffer,
					     &buffer->buffer_attr_list);
	else
		ret = __iio_add_chan_devattr("en",
					     chan,
					     &iio_scan_el_ts_show,
					     &iio_scan_el_ts_store,
					     chan->scan_index,
					     IIO_SEPARATE,
					     &indio_dev->dev,
					     buffer,
					     &buffer->buffer_attr_list);
	if (ret)
		return ret;
	attrcount++;
	ret = attrcount;
	return ret;
}
693 
/* sysfs show for the buffer "length" attribute (capacity in scans). */
static ssize_t length_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;

	return sysfs_emit(buf, "%d\n", buffer->length);
}
701 
/*
 * sysfs store for the buffer "length" attribute. Rejected with -EBUSY
 * while the buffer is active; the watermark is clamped down so it never
 * exceeds the new length.
 */
static ssize_t length_store(struct device *dev, struct device_attribute *attr,
			    const char *buf, size_t len)
{
	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
	unsigned int val;
	int ret;

	ret = kstrtouint(buf, 10, &val);
	if (ret)
		return ret;

	/* No change requested - succeed without taking the lock. */
	if (val == buffer->length)
		return len;

	guard(mutex)(&iio_dev_opaque->mlock);
	if (iio_buffer_is_active(buffer))
		return -EBUSY;

	buffer->access->set_length(buffer, val);

	/* Keep the watermark within the (possibly shrunk) buffer. */
	if (buffer->length && buffer->length < buffer->watermark)
		buffer->watermark = buffer->length;

	return len;
}
729 
/* sysfs show for the buffer "enable" attribute: 1 while active. */
static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;

	return sysfs_emit(buf, "%d\n", iio_buffer_is_active(buffer));
}
737 
/*
 * iio_storage_bytes_for_si() - storage footprint of one scan element
 * @indio_dev:	the IIO device
 * @scan_index:	scan index identifying the channel
 *
 * Return: bytes the element occupies in a scan (storagebits/8 times
 * repeat), or a negative error code from iio_get_current_scan_type().
 *
 * NOTE(review): iio_find_channel_from_si() is not checked for NULL
 * before being passed on - presumably callers only use registered scan
 * indices; confirm against iio_get_current_scan_type()'s contract.
 */
static int iio_storage_bytes_for_si(struct iio_dev *indio_dev,
				    unsigned int scan_index)
{
	const struct iio_chan_spec *ch;
	const struct iio_scan_type *scan_type;
	unsigned int bytes;

	ch = iio_find_channel_from_si(indio_dev, scan_index);
	scan_type = iio_get_current_scan_type(indio_dev, ch);
	if (IS_ERR(scan_type))
		return PTR_ERR(scan_type);

	bytes = scan_type->storagebits / 8;

	if (scan_type->repeat > 1)
		bytes *= scan_type->repeat;

	return bytes;
}
757 
/* Storage footprint of the timestamp channel, looked up by its scan index. */
static int iio_storage_bytes_for_timestamp(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	return iio_storage_bytes_for_si(indio_dev,
					iio_dev_opaque->scan_index_timestamp);
}
765 
/*
 * iio_compute_scan_bytes() - size of one demuxed scan in bytes
 * @indio_dev:	the IIO device
 * @mask:	scan mask selecting the enabled channels
 * @timestamp:	whether the timestamp element is appended
 *
 * Each element is naturally aligned to its own size; the total is then
 * padded to the largest element so consecutive scans stay aligned.
 * Return: scan size in bytes, or a negative error code.
 */
static int iio_compute_scan_bytes(struct iio_dev *indio_dev,
				  const unsigned long *mask, bool timestamp)
{
	unsigned int bytes = 0;
	int length, i, largest = 0;

	/* How much space will the demuxed element take? */
	for_each_set_bit(i, mask, iio_get_masklength(indio_dev)) {
		length = iio_storage_bytes_for_si(indio_dev, i);
		if (length < 0)
			return length;

		/* Align each element to its own size before appending. */
		bytes = ALIGN(bytes, length);
		bytes += length;
		largest = max(largest, length);
	}

	/* Timestamp, when enabled, is always the final element. */
	if (timestamp) {
		length = iio_storage_bytes_for_timestamp(indio_dev);
		if (length < 0)
			return length;

		bytes = ALIGN(bytes, length);
		bytes += length;
		largest = max(largest, length);
	}

	/* Pad the scan so back-to-back scans keep every element aligned. */
	bytes = ALIGN(bytes, largest);
	return bytes;
}
796 
/*
 * Link a buffer into the device's active list, taking a reference that
 * iio_buffer_deactivate() later drops.
 */
static void iio_buffer_activate(struct iio_dev *indio_dev,
				struct iio_buffer *buffer)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	iio_buffer_get(buffer);
	list_add(&buffer->buffer_list, &iio_dev_opaque->buffer_list);
}
805 
/*
 * Unlink a buffer from the active list, wake any blocked readers so
 * they can observe the inactive state, and drop the activation ref.
 * list_del_init keeps iio_buffer_is_active() meaningful afterwards.
 */
static void iio_buffer_deactivate(struct iio_buffer *buffer)
{
	list_del_init(&buffer->buffer_list);
	wake_up_interruptible(&buffer->pollq);
	iio_buffer_put(buffer);
}
812 
/* Deactivate every buffer on the device (safe iteration: entries are
 * removed from the list as we go).
 */
static void iio_buffer_deactivate_all(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer, *_buffer;

	list_for_each_entry_safe(buffer, _buffer,
				 &iio_dev_opaque->buffer_list, buffer_list)
		iio_buffer_deactivate(buffer);
}
822 
823 static int iio_buffer_enable(struct iio_buffer *buffer,
824 			     struct iio_dev *indio_dev)
825 {
826 	if (!buffer->access->enable)
827 		return 0;
828 	return buffer->access->enable(buffer, indio_dev);
829 }
830 
831 static int iio_buffer_disable(struct iio_buffer *buffer,
832 			      struct iio_dev *indio_dev)
833 {
834 	if (!buffer->access->disable)
835 		return 0;
836 	return buffer->access->disable(buffer, indio_dev);
837 }
838 
/*
 * Recompute the scan size from the buffer's current mask/timestamp
 * settings and push it to the backend, if it cares about datum size.
 */
static void iio_buffer_update_bytes_per_datum(struct iio_dev *indio_dev,
					      struct iio_buffer *buffer)
{
	unsigned int bytes;

	if (!buffer->access->set_bytes_per_datum)
		return;

	bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask,
				       buffer->scan_timestamp);

	buffer->access->set_bytes_per_datum(buffer, bytes);
}
852 
/*
 * iio_buffer_request_update() - let the backend rebuild its storage
 * @indio_dev:	the IIO device
 * @buffer:	buffer about to be enabled
 *
 * Updates bytes-per-datum first, then gives the backend a chance to
 * reallocate for the new parameters. Return: 0 or the backend's error.
 */
static int iio_buffer_request_update(struct iio_dev *indio_dev,
				     struct iio_buffer *buffer)
{
	int ret;

	iio_buffer_update_bytes_per_datum(indio_dev, buffer);
	if (buffer->access->request_update) {
		ret = buffer->access->request_update(buffer);
		if (ret) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: buffer parameter update failed (%d)\n",
				ret);
			return ret;
		}
	}

	return 0;
}
871 
/*
 * Free a scan mask only when it was dynamically allocated; masks taken
 * from the driver's available_scan_masks table are owned by the driver.
 */
static void iio_free_scan_mask(struct iio_dev *indio_dev,
			       const unsigned long *mask)
{
	/* If the mask is dynamically allocated free it, otherwise do nothing */
	if (!indio_dev->available_scan_masks)
		bitmap_free(mask);
}
879 
/*
 * struct iio_device_config - negotiated device configuration
 * @mode:		operating mode (INDIO_BUFFER_* flag)
 * @watermark:		minimum watermark across all active buffers
 * @scan_mask:		combined scan mask for all active buffers
 * @scan_bytes:		size in bytes of one scan
 * @scan_timestamp:	whether any buffer wants the timestamp element
 */
struct iio_device_config {
	unsigned int mode;
	unsigned int watermark;
	const unsigned long *scan_mask;
	unsigned int scan_bytes;
	bool scan_timestamp;
};
887 
/*
 * iio_verify_update() - validate a buffer insert/remove and compute the
 * resulting device configuration
 * @indio_dev:		the IIO device
 * @insert_buffer:	buffer being enabled, or NULL
 * @remove_buffer:	buffer being disabled, or NULL
 * @config:		filled with the negotiated mode, watermark, scan
 *			mask, scan size and timestamp flag on success
 *
 * On success the caller owns config->scan_mask when it was dynamically
 * allocated (i.e. when the device has no available_scan_masks) and must
 * release it via iio_free_scan_mask().
 * Return: 0 on success, -EINVAL or -ENOMEM on failure.
 */
static int iio_verify_update(struct iio_dev *indio_dev,
			     struct iio_buffer *insert_buffer,
			     struct iio_buffer *remove_buffer,
			     struct iio_device_config *config)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	unsigned int masklength = iio_get_masklength(indio_dev);
	unsigned long *compound_mask;
	const unsigned long *scan_mask;
	bool strict_scanmask = false;
	struct iio_buffer *buffer;
	bool scan_timestamp;
	unsigned int modes;

	if (insert_buffer &&
	    bitmap_empty(insert_buffer->scan_mask, masklength)) {
		dev_dbg(&indio_dev->dev,
			"At least one scan element must be enabled first\n");
		return -EINVAL;
	}

	memset(config, 0, sizeof(*config));
	/* Start from the maximum so min() below converges correctly. */
	config->watermark = ~0;

	/*
	 * If there is just one buffer and we are removing it there is nothing
	 * to verify.
	 */
	if (remove_buffer && !insert_buffer &&
	    list_is_singular(&iio_dev_opaque->buffer_list))
		return 0;

	modes = indio_dev->modes;

	/* Intersect supported modes and take the smallest watermark. */
	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
		if (buffer == remove_buffer)
			continue;
		modes &= buffer->access->modes;
		config->watermark = min(config->watermark, buffer->watermark);
	}

	if (insert_buffer) {
		modes &= insert_buffer->access->modes;
		config->watermark = min(config->watermark,
					insert_buffer->watermark);
	}

	/* Definitely possible for devices to support both of these. */
	if ((modes & INDIO_BUFFER_TRIGGERED) && indio_dev->trig) {
		config->mode = INDIO_BUFFER_TRIGGERED;
	} else if (modes & INDIO_BUFFER_HARDWARE) {
		/*
		 * Keep things simple for now and only allow a single buffer to
		 * be connected in hardware mode.
		 */
		if (insert_buffer && !list_empty(&iio_dev_opaque->buffer_list))
			return -EINVAL;
		config->mode = INDIO_BUFFER_HARDWARE;
		/* Hardware mode needs an exact available-mask match. */
		strict_scanmask = true;
	} else if (modes & INDIO_BUFFER_SOFTWARE) {
		config->mode = INDIO_BUFFER_SOFTWARE;
	} else {
		/* Can only occur on first buffer */
		if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
			dev_dbg(&indio_dev->dev, "Buffer not started: no trigger\n");
		return -EINVAL;
	}

	/* What scan mask do we actually have? */
	compound_mask = bitmap_zalloc(masklength, GFP_KERNEL);
	if (!compound_mask)
		return -ENOMEM;

	scan_timestamp = false;

	/* OR together the masks of every buffer that will remain active. */
	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
		if (buffer == remove_buffer)
			continue;
		bitmap_or(compound_mask, compound_mask, buffer->scan_mask,
			  masklength);
		scan_timestamp |= buffer->scan_timestamp;
	}

	if (insert_buffer) {
		bitmap_or(compound_mask, compound_mask,
			  insert_buffer->scan_mask, masklength);
		scan_timestamp |= insert_buffer->scan_timestamp;
	}

	if (indio_dev->available_scan_masks) {
		scan_mask = iio_scan_mask_match(indio_dev->available_scan_masks,
						masklength, compound_mask,
						strict_scanmask);
		/* The matched mask belongs to the driver; drop our copy. */
		bitmap_free(compound_mask);
		if (!scan_mask)
			return -EINVAL;
	} else {
		/* Ownership of compound_mask transfers to the caller. */
		scan_mask = compound_mask;
	}

	config->scan_bytes = iio_compute_scan_bytes(indio_dev,
						    scan_mask, scan_timestamp);
	config->scan_mask = scan_mask;
	config->scan_timestamp = scan_timestamp;

	return 0;
}
995 
/**
 * struct iio_demux_table - table describing demux memcpy ops
 * @from:	index to copy from
 * @to:		index to copy to
 * @length:	how many bytes to copy
 * @l:		list head used for management
 *
 * Each entry describes one contiguous memcpy performed when demuxing a
 * full device scan into a buffer's (possibly smaller) scan layout.
 */
struct iio_demux_table {
	unsigned int from;
	unsigned int to;
	unsigned int length;
	struct list_head l;
};
1009 
/* Free every entry of the buffer's demux table (safe iteration since
 * entries are deleted while walking).
 */
static void iio_buffer_demux_free(struct iio_buffer *buffer)
{
	struct iio_demux_table *p, *q;

	list_for_each_entry_safe(p, q, &buffer->demux_list, l) {
		list_del(&p->l);
		kfree(p);
	}
}
1019 
/*
 * iio_buffer_add_demux() - append a copy region to the demux table
 * @buffer:	buffer whose demux table is being built
 * @p:		in/out: the most recently added entry (NULL to start)
 * @in_loc:	source offset within the device scan
 * @out_loc:	destination offset within the buffer scan
 * @length:	bytes to copy
 *
 * Adjacent regions (where both source and destination continue exactly
 * where the previous entry ended) are merged into a single memcpy.
 * Return: 0 on success, -ENOMEM on allocation failure.
 */
static int iio_buffer_add_demux(struct iio_buffer *buffer,
				struct iio_demux_table **p, unsigned int in_loc,
				unsigned int out_loc,
				unsigned int length)
{
	/* Extend the previous entry when this region is contiguous with it. */
	if (*p && (*p)->from + (*p)->length == in_loc &&
	    (*p)->to + (*p)->length == out_loc) {
		(*p)->length += length;
	} else {
		*p = kmalloc_obj(**p);
		if (!(*p))
			return -ENOMEM;
		(*p)->from = in_loc;
		(*p)->to = out_loc;
		(*p)->length = length;
		list_add_tail(&(*p)->l, &buffer->demux_list);
	}

	return 0;
}
1040 
/*
 * iio_buffer_update_demux() - (re)build one buffer's demux table
 * @indio_dev:	the IIO device (provides the active scan mask)
 * @buffer:	buffer whose demux table and bounce buffer are rebuilt
 *
 * When the buffer's scan mask is a strict subset of the device's active
 * mask, build a table of memcpy regions that extracts just the wanted
 * elements, plus a bounce buffer sized for the demuxed scan. When the
 * masks are equal no demux is needed and the table stays empty.
 * Return: 0 on success, negative error code on failure.
 */
static int iio_buffer_update_demux(struct iio_dev *indio_dev,
				   struct iio_buffer *buffer)
{
	unsigned int masklength = iio_get_masklength(indio_dev);
	int ret, in_ind = -1, out_ind, length;
	unsigned int in_loc = 0, out_loc = 0;
	struct iio_demux_table *p = NULL;

	/* Clear out any old demux */
	iio_buffer_demux_free(buffer);
	kfree(buffer->demux_bounce);
	buffer->demux_bounce = NULL;

	/* First work out which scan mode we will actually have */
	if (bitmap_equal(indio_dev->active_scan_mask,
			 buffer->scan_mask, masklength))
		return 0;

	/* Now we have the two masks, work from least sig and build up sizes */
	for_each_set_bit(out_ind, buffer->scan_mask, masklength) {
		in_ind = find_next_bit(indio_dev->active_scan_mask,
				       masklength, in_ind + 1);
		/* Skip over active elements this buffer doesn't want. */
		while (in_ind != out_ind) {
			ret = iio_storage_bytes_for_si(indio_dev, in_ind);
			if (ret < 0)
				goto error_clear_mux_table;

			length = ret;
			/* Make sure we are aligned */
			in_loc = roundup(in_loc, length) + length;
			in_ind = find_next_bit(indio_dev->active_scan_mask,
					       masklength, in_ind + 1);
		}
		ret = iio_storage_bytes_for_si(indio_dev, in_ind);
		if (ret < 0)
			goto error_clear_mux_table;

		length = ret;
		/* Natural alignment on both source and destination sides. */
		out_loc = roundup(out_loc, length);
		in_loc = roundup(in_loc, length);
		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
		if (ret)
			goto error_clear_mux_table;
		out_loc += length;
		in_loc += length;
	}
	/* Relies on scan_timestamp being last */
	if (buffer->scan_timestamp) {
		ret = iio_storage_bytes_for_timestamp(indio_dev);
		if (ret < 0)
			goto error_clear_mux_table;

		length = ret;
		out_loc = roundup(out_loc, length);
		in_loc = roundup(in_loc, length);
		ret = iio_buffer_add_demux(buffer, &p, in_loc, out_loc, length);
		if (ret)
			goto error_clear_mux_table;
		out_loc += length;
	}
	/* out_loc now equals the demuxed scan size. */
	buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL);
	if (!buffer->demux_bounce) {
		ret = -ENOMEM;
		goto error_clear_mux_table;
	}
	return 0;

error_clear_mux_table:
	iio_buffer_demux_free(buffer);

	return ret;
}
1113 
1114 static int iio_update_demux(struct iio_dev *indio_dev)
1115 {
1116 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1117 	struct iio_buffer *buffer;
1118 	int ret;
1119 
1120 	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
1121 		ret = iio_buffer_update_demux(indio_dev, buffer);
1122 		if (ret < 0)
1123 			goto error_clear_mux_table;
1124 	}
1125 	return 0;
1126 
1127 error_clear_mux_table:
1128 	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list)
1129 		iio_buffer_demux_free(buffer);
1130 
1131 	return ret;
1132 }
1133 
/*
 * Commit a verified buffer configuration and bring capture up.
 *
 * Applies @config to the device (scan mask, timestamp flag, scan size,
 * mode), rebuilds the demux tables, then runs the enable sequence:
 * preenable callback, update_scan_mode, hardware FIFO watermark,
 * per-buffer enable, trigger attach (triggered mode only), postenable.
 * On failure the steps already performed are unwound in reverse order and
 * the device is returned to INDIO_DIRECT_MODE.
 *
 * Returns 0 on success or a negative error code.
 */
static int iio_enable_buffers(struct iio_dev *indio_dev,
			      struct iio_device_config *config)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer, *tmp = NULL;
	int ret;

	indio_dev->active_scan_mask = config->scan_mask;
	ACCESS_PRIVATE(indio_dev, scan_timestamp) = config->scan_timestamp;
	indio_dev->scan_bytes = config->scan_bytes;
	iio_dev_opaque->currentmode = config->mode;

	/*
	 * NOTE(review): the return value is ignored here, so a failed demux
	 * rebuild (e.g. -ENOMEM) proceeds without demux tables — confirm
	 * this degradation is intentional.
	 */
	iio_update_demux(indio_dev);

	/* Wind up again */
	if (indio_dev->setup_ops->preenable) {
		ret = indio_dev->setup_ops->preenable(indio_dev);
		if (ret) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: buffer preenable failed (%d)\n", ret);
			goto err_undo_config;
		}
	}

	if (indio_dev->info->update_scan_mode) {
		ret = indio_dev->info
			->update_scan_mode(indio_dev,
					   indio_dev->active_scan_mask);
		if (ret < 0) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: update scan mode failed (%d)\n",
				ret);
			goto err_run_postdisable;
		}
	}

	if (indio_dev->info->hwfifo_set_watermark)
		indio_dev->info->hwfifo_set_watermark(indio_dev,
			config->watermark);

	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
		ret = iio_buffer_enable(buffer, indio_dev);
		if (ret) {
			/* Remember which buffer failed so the unwind below
			 * only disables the ones enabled before it. */
			tmp = buffer;
			goto err_disable_buffers;
		}
	}

	if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) {
		ret = iio_trigger_attach_poll_func(indio_dev->trig,
						   indio_dev->pollfunc);
		if (ret)
			goto err_disable_buffers;
	}

	if (indio_dev->setup_ops->postenable) {
		ret = indio_dev->setup_ops->postenable(indio_dev);
		if (ret) {
			dev_dbg(&indio_dev->dev,
				"Buffer not started: postenable failed (%d)\n", ret);
			goto err_detach_pollfunc;
		}
	}

	return 0;

err_detach_pollfunc:
	if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) {
		iio_trigger_detach_poll_func(indio_dev->trig,
					     indio_dev->pollfunc);
	}
err_disable_buffers:
	/* Walk backwards from the failing buffer (or the list head when the
	 * whole loop succeeded), disabling everything enabled above. */
	buffer = list_prepare_entry(tmp, &iio_dev_opaque->buffer_list, buffer_list);
	list_for_each_entry_continue_reverse(buffer, &iio_dev_opaque->buffer_list,
					     buffer_list)
		iio_buffer_disable(buffer, indio_dev);
err_run_postdisable:
	if (indio_dev->setup_ops->postdisable)
		indio_dev->setup_ops->postdisable(indio_dev);
err_undo_config:
	iio_dev_opaque->currentmode = INDIO_DIRECT_MODE;
	indio_dev->active_scan_mask = NULL;

	return ret;
}
1219 
/*
 * Tear capture down: predisable callback, trigger detach (triggered mode
 * only), per-buffer disable, postdisable callback, then release the active
 * scan mask and return the device to INDIO_DIRECT_MODE.  A no-op when no
 * buffer is currently active.
 *
 * Returns 0, or the first error encountered (later steps still run).
 */
static int iio_disable_buffers(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_buffer *buffer;
	int ret = 0;
	int ret2;

	/* Wind down existing buffers - iff there are any */
	if (list_empty(&iio_dev_opaque->buffer_list))
		return 0;

	/*
	 * If things go wrong at some step in disable we still need to continue
	 * to perform the other steps, otherwise we leave the device in a
	 * inconsistent state. We return the error code for the first error we
	 * encountered.
	 */

	if (indio_dev->setup_ops->predisable) {
		ret2 = indio_dev->setup_ops->predisable(indio_dev);
		if (ret2 && !ret)
			ret = ret2;
	}

	if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) {
		iio_trigger_detach_poll_func(indio_dev->trig,
					     indio_dev->pollfunc);
	}

	list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) {
		ret2 = iio_buffer_disable(buffer, indio_dev);
		if (ret2 && !ret)
			ret = ret2;
	}

	if (indio_dev->setup_ops->postdisable) {
		ret2 = indio_dev->setup_ops->postdisable(indio_dev);
		if (ret2 && !ret)
			ret = ret2;
	}

	iio_free_scan_mask(indio_dev, indio_dev->active_scan_mask);
	indio_dev->active_scan_mask = NULL;
	iio_dev_opaque->currentmode = INDIO_DIRECT_MODE;

	return ret;
}
1267 
/*
 * Core of buffer (de)activation: verify the combined configuration that
 * would result from inserting/removing the given buffers, stop capture,
 * update the active buffer list, and restart capture with the new config.
 * Callers (iio_update_buffers(), enable_store()) hold mlock.
 *
 * Either of @insert_buffer / @remove_buffer may be NULL.
 * Returns 0 on success or a negative error code.
 */
static int __iio_update_buffers(struct iio_dev *indio_dev,
				struct iio_buffer *insert_buffer,
				struct iio_buffer *remove_buffer)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct iio_device_config new_config;
	int ret;

	ret = iio_verify_update(indio_dev, insert_buffer, remove_buffer,
				&new_config);
	if (ret)
		return ret;

	if (insert_buffer) {
		ret = iio_buffer_request_update(indio_dev, insert_buffer);
		if (ret)
			goto err_free_config;
	}

	ret = iio_disable_buffers(indio_dev);
	if (ret)
		goto err_deactivate_all;

	if (remove_buffer)
		iio_buffer_deactivate(remove_buffer);
	if (insert_buffer)
		iio_buffer_activate(indio_dev, insert_buffer);

	/* If no buffers in list, we are done */
	if (list_empty(&iio_dev_opaque->buffer_list))
		return 0;

	ret = iio_enable_buffers(indio_dev, &new_config);
	if (ret)
		goto err_deactivate_all;

	return 0;

err_deactivate_all:
	/*
	 * We've already verified that the config is valid earlier. If things go
	 * wrong in either enable or disable the most likely reason is an IO
	 * error from the device. In this case there is no good recovery
	 * strategy. Just make sure to disable everything and leave the device
	 * in a sane state.  With a bit of luck the device might come back to
	 * life again later and userspace can try again.
	 */
	iio_buffer_deactivate_all(indio_dev);

err_free_config:
	iio_free_scan_mask(indio_dev, new_config.scan_mask);
	return ret;
}
1321 
/*
 * Public entry point for adding/removing a buffer from the active set.
 * Normalizes the arguments (no-ops when the buffer is already in the
 * requested state), takes info_exist_lock and mlock via scope guards, and
 * delegates to __iio_update_buffers().
 *
 * Returns 0 on success or a negative error code (-EINVAL for an attempt
 * to insert an output buffer, -ENODEV once the driver info is gone).
 */
int iio_update_buffers(struct iio_dev *indio_dev,
		       struct iio_buffer *insert_buffer,
		       struct iio_buffer *remove_buffer)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	if (insert_buffer == remove_buffer)
		return 0;

	/* Output buffers cannot be activated through this interface. */
	if (insert_buffer &&
	    insert_buffer->direction == IIO_BUFFER_DIRECTION_OUT)
		return -EINVAL;

	guard(mutex)(&iio_dev_opaque->info_exist_lock);
	guard(mutex)(&iio_dev_opaque->mlock);

	/* Drop requests that would not change the current state. */
	if (insert_buffer && iio_buffer_is_active(insert_buffer))
		insert_buffer = NULL;

	if (remove_buffer && !iio_buffer_is_active(remove_buffer))
		remove_buffer = NULL;

	if (!insert_buffer && !remove_buffer)
		return 0;

	if (!indio_dev->info)
		return -ENODEV;

	return __iio_update_buffers(indio_dev, insert_buffer, remove_buffer);
}
EXPORT_SYMBOL_GPL(iio_update_buffers);
1353 
/*
 * Stop capture and drop every buffer from the active list.  Errors from
 * the disable sequence are intentionally ignored; deactivation proceeds
 * regardless.
 */
void iio_disable_all_buffers(struct iio_dev *indio_dev)
{
	iio_disable_buffers(indio_dev);
	iio_buffer_deactivate_all(indio_dev);
}
1359 
1360 static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
1361 			    const char *buf, size_t len)
1362 {
1363 	int ret;
1364 	bool requested_state;
1365 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
1366 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1367 	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
1368 	bool inlist;
1369 
1370 	ret = kstrtobool(buf, &requested_state);
1371 	if (ret < 0)
1372 		return ret;
1373 
1374 	guard(mutex)(&iio_dev_opaque->mlock);
1375 
1376 	/* Find out if it is in the list */
1377 	inlist = iio_buffer_is_active(buffer);
1378 	/* Already in desired state */
1379 	if (inlist == requested_state)
1380 		return len;
1381 
1382 	if (requested_state)
1383 		ret = __iio_update_buffers(indio_dev, buffer, NULL);
1384 	else
1385 		ret = __iio_update_buffers(indio_dev, NULL, buffer);
1386 	if (ret)
1387 		return ret;
1388 
1389 	return len;
1390 }
1391 
1392 static ssize_t watermark_show(struct device *dev, struct device_attribute *attr,
1393 			      char *buf)
1394 {
1395 	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
1396 
1397 	return sysfs_emit(buf, "%u\n", buffer->watermark);
1398 }
1399 
1400 static ssize_t watermark_store(struct device *dev,
1401 			       struct device_attribute *attr,
1402 			       const char *buf, size_t len)
1403 {
1404 	struct iio_dev *indio_dev = dev_to_iio_dev(dev);
1405 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
1406 	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
1407 	unsigned int val;
1408 	int ret;
1409 
1410 	ret = kstrtouint(buf, 10, &val);
1411 	if (ret)
1412 		return ret;
1413 	if (!val)
1414 		return -EINVAL;
1415 
1416 	guard(mutex)(&iio_dev_opaque->mlock);
1417 
1418 	if (val > buffer->length)
1419 		return -EINVAL;
1420 
1421 	if (iio_buffer_is_active(buffer))
1422 		return -EBUSY;
1423 
1424 	buffer->watermark = val;
1425 
1426 	return len;
1427 }
1428 
1429 static ssize_t data_available_show(struct device *dev,
1430 				   struct device_attribute *attr, char *buf)
1431 {
1432 	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
1433 
1434 	return sysfs_emit(buf, "%zu\n", iio_buffer_data_available(buffer));
1435 }
1436 
1437 static ssize_t direction_show(struct device *dev,
1438 			      struct device_attribute *attr,
1439 			      char *buf)
1440 {
1441 	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
1442 
1443 	switch (buffer->direction) {
1444 	case IIO_BUFFER_DIRECTION_IN:
1445 		return sysfs_emit(buf, "in\n");
1446 	case IIO_BUFFER_DIRECTION_OUT:
1447 		return sysfs_emit(buf, "out\n");
1448 	default:
1449 		return -EINVAL;
1450 	}
1451 }
1452 
static DEVICE_ATTR_RW(length);
/* Read-only variant, substituted when the buffer has no set_length op. */
static struct device_attribute dev_attr_length_ro = __ATTR_RO(length);
static DEVICE_ATTR_RW(enable);
static DEVICE_ATTR_RW(watermark);
/* Read-only variant, substituted for INDIO_BUFFER_FLAG_FIXED_WATERMARK. */
static struct device_attribute dev_attr_watermark_ro = __ATTR_RO(watermark);
static DEVICE_ATTR_RO(data_available);
static DEVICE_ATTR_RO(direction);

/*
 * When adding new attributes here, put them at the end, at least until
 * the code that handles the length/length_ro & watermark/watermark_ro
 * assignments gets cleaned up. Otherwise these can create some weird
 * duplicate attributes errors under some setups.
 */
static struct attribute *iio_buffer_attrs[] = {
	&dev_attr_length.attr,
	&dev_attr_enable.attr,
	&dev_attr_watermark.attr,
	&dev_attr_data_available.attr,
	&dev_attr_direction.attr,
};
1474 
#define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr)

/*
 * Clone @attr into a freshly allocated iio_dev_attr bound to @buffer, so
 * shared show/store callbacks can recover the buffer they operate on.
 * The attribute name is duplicated and the wrapper is linked onto
 * buffer->buffer_attr_list for later cleanup.
 *
 * Returns the wrapped attribute, or NULL on allocation failure.
 */
static struct attribute *iio_buffer_wrap_attr(struct iio_buffer *buffer,
					      struct attribute *attr)
{
	struct device_attribute *dattr = to_dev_attr(attr);
	struct iio_dev_attr *iio_attr;

	iio_attr = kzalloc_obj(*iio_attr);
	if (!iio_attr)
		return NULL;

	iio_attr->buffer = buffer;
	memcpy(&iio_attr->dev_attr, dattr, sizeof(iio_attr->dev_attr));
	iio_attr->dev_attr.attr.name = kstrdup_const(attr->name, GFP_KERNEL);
	if (!iio_attr->dev_attr.attr.name) {
		kfree(iio_attr);
		return NULL;
	}

	sysfs_attr_init(&iio_attr->dev_attr.attr);

	list_add(&iio_attr->l, &buffer->buffer_attr_list);

	return &iio_attr->dev_attr.attr;
}
1501 
/*
 * Register the legacy "buffer" and "scan_elements" sysfs groups for the
 * first buffer: @buffer_attrs holds @buffer_attrcount buffer attributes
 * followed by @scan_el_attrcount scan-element attributes, which are copied
 * into the two groups respectively.  The attribute arrays are freed by
 * iio_buffer_unregister_legacy_sysfs_groups().
 *
 * Returns 0 on success or a negative error code.
 */
static int iio_buffer_register_legacy_sysfs_groups(struct iio_dev *indio_dev,
						   struct attribute **buffer_attrs,
						   int buffer_attrcount,
						   int scan_el_attrcount)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	struct attribute_group *group;
	struct attribute **attrs;
	int ret;

	/* +1 for the NULL terminator (array is zero-initialized). */
	attrs = kzalloc_objs(*attrs, buffer_attrcount + 1);
	if (!attrs)
		return -ENOMEM;

	memcpy(attrs, buffer_attrs, buffer_attrcount * sizeof(*attrs));

	group = &iio_dev_opaque->legacy_buffer_group;
	group->attrs = attrs;
	group->name = "buffer";

	ret = iio_device_register_sysfs_group(indio_dev, group);
	if (ret)
		goto error_free_buffer_attrs;

	attrs = kzalloc_objs(*attrs, scan_el_attrcount + 1);
	if (!attrs) {
		ret = -ENOMEM;
		goto error_free_buffer_attrs;
	}

	/* Scan-element attributes follow the buffer attributes in the input. */
	memcpy(attrs, &buffer_attrs[buffer_attrcount],
	       scan_el_attrcount * sizeof(*attrs));

	group = &iio_dev_opaque->legacy_scan_el_group;
	group->attrs = attrs;
	group->name = "scan_elements";

	ret = iio_device_register_sysfs_group(indio_dev, group);
	if (ret)
		goto error_free_scan_el_attrs;

	return 0;

error_free_scan_el_attrs:
	kfree(iio_dev_opaque->legacy_scan_el_group.attrs);
error_free_buffer_attrs:
	kfree(iio_dev_opaque->legacy_buffer_group.attrs);

	return ret;
}
1552 
/* Free the attribute arrays allocated for the two legacy sysfs groups. */
static void iio_buffer_unregister_legacy_sysfs_groups(struct iio_dev *indio_dev)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);

	kfree(iio_dev_opaque->legacy_buffer_group.attrs);
	kfree(iio_dev_opaque->legacy_scan_el_group.attrs);
}
1560 
/*
 * kref release callback for a DMABUF attachment: unmap the attachment,
 * detach the block from the buffer, detach and drop the DMABUF, then free
 * the private state.  Runs when the last reference is put.
 */
static void iio_buffer_dmabuf_release(struct kref *ref)
{
	struct iio_dmabuf_priv *priv = container_of(ref, struct iio_dmabuf_priv, ref);
	struct dma_buf_attachment *attach = priv->attach;
	struct iio_buffer *buffer = priv->buffer;
	struct dma_buf *dmabuf = attach->dmabuf;

	dma_buf_unmap_attachment_unlocked(attach, priv->sgt, priv->dir);

	buffer->access->detach_dmabuf(buffer, priv->block);

	dma_buf_detach(attach->dmabuf, attach);
	dma_buf_put(dmabuf);
	kfree(priv);
}
1576 
/* Take a reference on the attachment's private state. */
static void iio_buffer_dmabuf_get(struct dma_buf_attachment *attach)
{
	struct iio_dmabuf_priv *priv = attach->importer_priv;

	kref_get(&priv->ref);
}
1583 
/* Drop a reference; iio_buffer_dmabuf_release() runs on the last put. */
static void iio_buffer_dmabuf_put(struct dma_buf_attachment *attach)
{
	struct iio_dmabuf_priv *priv = attach->importer_priv;

	kref_put(&priv->ref, iio_buffer_dmabuf_release);
}
1590 
/*
 * Release handler for the per-buffer character device: wake any pollers,
 * drop every DMABUF still attached through this fd, free the pair object,
 * clear the busy bit so the buffer can be reopened, and drop the device
 * reference taken in iio_device_buffer_getfd().
 */
static int iio_buffer_chrdev_release(struct inode *inode, struct file *filep)
{
	struct iio_dev_buffer_pair *ib = filep->private_data;
	struct iio_dev *indio_dev = ib->indio_dev;
	struct iio_buffer *buffer = ib->buffer;
	struct iio_dmabuf_priv *priv, *tmp;

	wake_up(&buffer->pollq);

	guard(mutex)(&buffer->dmabufs_mutex);

	/* Close all attached DMABUFs */
	list_for_each_entry_safe(priv, tmp, &buffer->dmabufs, entry) {
		list_del_init(&priv->entry);
		iio_buffer_dmabuf_put(priv->attach);
	}

	kfree(ib);
	clear_bit(IIO_BUSY_BIT_POS, &buffer->flags);
	iio_device_put(indio_dev);

	return 0;
}
1614 
1615 static int iio_dma_resv_lock(struct dma_buf *dmabuf, bool nonblock)
1616 {
1617 	if (!nonblock)
1618 		return dma_resv_lock_interruptible(dmabuf->resv, NULL);
1619 
1620 	if (!dma_resv_trylock(dmabuf->resv))
1621 		return -EBUSY;
1622 
1623 	return 0;
1624 }
1625 
1626 static struct device *iio_buffer_get_dma_dev(const struct iio_dev *indio_dev,
1627 					     struct iio_buffer *buffer)
1628 {
1629 	if (buffer->access->get_dma_dev)
1630 		return buffer->access->get_dma_dev(buffer);
1631 
1632 	return indio_dev->dev.parent;
1633 }
1634 
1635 static struct dma_buf_attachment *
1636 iio_buffer_find_attachment(struct iio_dev_buffer_pair *ib,
1637 			   struct dma_buf *dmabuf, bool nonblock)
1638 {
1639 	struct iio_buffer *buffer = ib->buffer;
1640 	struct device *dma_dev = iio_buffer_get_dma_dev(ib->indio_dev, buffer);
1641 	struct dma_buf_attachment *attach = NULL;
1642 	struct iio_dmabuf_priv *priv;
1643 
1644 	guard(mutex)(&buffer->dmabufs_mutex);
1645 
1646 	list_for_each_entry(priv, &buffer->dmabufs, entry) {
1647 		if (priv->attach->dev == dma_dev
1648 		    && priv->attach->dmabuf == dmabuf) {
1649 			attach = priv->attach;
1650 			break;
1651 		}
1652 	}
1653 
1654 	if (attach)
1655 		iio_buffer_dmabuf_get(attach);
1656 
1657 	return attach ?: ERR_PTR(-EPERM);
1658 }
1659 
/*
 * IIO_BUFFER_DMABUF_ATTACH_IOCTL handler: import the DMABUF identified by
 * the fd read from @user_fd, attach and map it for the buffer's DMA
 * device, and hand it to the buffer's attach_dmabuf op.  Each DMA
 * device / DMABUF combination may only be attached once per buffer.
 *
 * Returns 0 on success, -EPERM when the buffer lacks DMABUF support,
 * -EBUSY for a duplicate attachment, or another negative error code.
 */
static int iio_buffer_attach_dmabuf(struct iio_dev_buffer_pair *ib,
				    int __user *user_fd, bool nonblock)
{
	struct iio_dev *indio_dev = ib->indio_dev;
	struct iio_buffer *buffer = ib->buffer;
	struct device *dma_dev = iio_buffer_get_dma_dev(indio_dev, buffer);
	struct dma_buf_attachment *attach;
	struct iio_dmabuf_priv *priv, *each;
	struct dma_buf *dmabuf;
	int err, fd;

	/* All three ops are needed for the DMABUF interface to work. */
	if (!buffer->access->attach_dmabuf
	    || !buffer->access->detach_dmabuf
	    || !buffer->access->enqueue_dmabuf)
		return -EPERM;

	if (copy_from_user(&fd, user_fd, sizeof(fd)))
		return -EFAULT;

	priv = kzalloc_obj(*priv);
	if (!priv)
		return -ENOMEM;

	spin_lock_init(&priv->lock);
	/* Private fence context for this attachment's transfer fences. */
	priv->context = dma_fence_context_alloc(1);

	dmabuf = dma_buf_get(fd);
	if (IS_ERR(dmabuf)) {
		err = PTR_ERR(dmabuf);
		goto err_free_priv;
	}

	attach = dma_buf_attach(dmabuf, dma_dev);
	if (IS_ERR(attach)) {
		err = PTR_ERR(attach);
		goto err_dmabuf_put;
	}

	err = iio_dma_resv_lock(dmabuf, nonblock);
	if (err)
		goto err_dmabuf_detach;

	/* Map direction follows the buffer direction. */
	priv->dir = buffer->direction == IIO_BUFFER_DIRECTION_IN
		? DMA_FROM_DEVICE : DMA_TO_DEVICE;

	priv->sgt = dma_buf_map_attachment(attach, priv->dir);
	if (IS_ERR(priv->sgt)) {
		err = PTR_ERR(priv->sgt);
		dev_err(&indio_dev->dev, "Unable to map attachment: %d\n", err);
		goto err_resv_unlock;
	}

	kref_init(&priv->ref);
	priv->buffer = buffer;
	priv->attach = attach;
	attach->importer_priv = priv;

	priv->block = buffer->access->attach_dmabuf(buffer, attach);
	if (IS_ERR(priv->block)) {
		err = PTR_ERR(priv->block);
		goto err_dmabuf_unmap_attachment;
	}

	dma_resv_unlock(dmabuf->resv);

	mutex_lock(&buffer->dmabufs_mutex);

	/*
	 * Check whether we already have an attachment for this driver/DMABUF
	 * combo. If we do, refuse to attach.
	 */
	list_for_each_entry(each, &buffer->dmabufs, entry) {
		if (each->attach->dev == dma_dev
		    && each->attach->dmabuf == dmabuf) {
			/*
			 * We unlocked the reservation object, so going through
			 * the cleanup code would mean re-locking it first.
			 * At this stage it is simpler to free the attachment
			 * using iio_buffer_dmabuf_put().
			 */
			mutex_unlock(&buffer->dmabufs_mutex);
			iio_buffer_dmabuf_put(attach);
			return -EBUSY;
		}
	}

	/* Otherwise, add the new attachment to our dmabufs list. */
	list_add(&priv->entry, &buffer->dmabufs);
	mutex_unlock(&buffer->dmabufs_mutex);

	return 0;

err_dmabuf_unmap_attachment:
	dma_buf_unmap_attachment(attach, priv->sgt, priv->dir);
err_resv_unlock:
	dma_resv_unlock(dmabuf->resv);
err_dmabuf_detach:
	dma_buf_detach(dmabuf, attach);
err_dmabuf_put:
	dma_buf_put(dmabuf);
err_free_priv:
	kfree(priv);

	return err;
}
1765 
/*
 * IIO_BUFFER_DMABUF_DETACH_IOCTL handler: find the attachment matching
 * the fd read from @user_req for this buffer's DMA device, remove it from
 * the list and drop the attach-time reference (which normally triggers
 * the release path).
 *
 * Returns 0 on success, -EPERM when no matching attachment exists, or
 * another negative error code.
 */
static int iio_buffer_detach_dmabuf(struct iio_dev_buffer_pair *ib,
				    int __user *user_req, bool nonblock)
{
	struct iio_buffer *buffer = ib->buffer;
	struct iio_dev *indio_dev = ib->indio_dev;
	struct device *dma_dev = iio_buffer_get_dma_dev(indio_dev, buffer);
	struct iio_dmabuf_priv *priv;
	struct dma_buf *dmabuf;
	int dmabuf_fd, ret = -EPERM;

	if (copy_from_user(&dmabuf_fd, user_req, sizeof(dmabuf_fd)))
		return -EFAULT;

	dmabuf = dma_buf_get(dmabuf_fd);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	guard(mutex)(&buffer->dmabufs_mutex);

	list_for_each_entry(priv, &buffer->dmabufs, entry) {
		if (priv->attach->dev == dma_dev
		    && priv->attach->dmabuf == dmabuf) {
			list_del(&priv->entry);

			/* Unref the reference from iio_buffer_attach_dmabuf() */
			iio_buffer_dmabuf_put(priv->attach);
			ret = 0;
			break;
		}
	}

	/* Balance the dma_buf_get() above. */
	dma_buf_put(dmabuf);

	return ret;
}
1801 
/* Serves as both driver name and timeline name in the fence ops below. */
static const char *
iio_buffer_dma_fence_get_driver_name(struct dma_fence *fence)
{
	return "iio";
}
1807 
/* Free the containing iio_dma_fence when the fence's refcount hits zero. */
static void iio_buffer_dma_fence_release(struct dma_fence *fence)
{
	struct iio_dma_fence *iio_fence =
		container_of(fence, struct iio_dma_fence, base);

	kfree(iio_fence);
}
1815 
/* Fence ops for DMABUF transfer-completion fences created on enqueue. */
static const struct dma_fence_ops iio_buffer_dma_fence_ops = {
	.get_driver_name	= iio_buffer_dma_fence_get_driver_name,
	.get_timeline_name	= iio_buffer_dma_fence_get_driver_name,
	.release		= iio_buffer_dma_fence_release,
};
1821 
/*
 * IIO_BUFFER_DMABUF_ENQUEUE_IOCTL handler: queue a previously attached
 * DMABUF for transfer.  Creates a completion fence on the attachment's
 * fence context, waits for conflicting users of the reservation object,
 * installs the fence, and calls the buffer's enqueue_dmabuf op.  The
 * fence is completed via iio_buffer_signal_dmabuf_done() — immediately on
 * enqueue failure, or later by the driver when the transfer finishes.
 *
 * Returns 0 on success or a negative error code.
 */
static int iio_buffer_enqueue_dmabuf(struct iio_dev_buffer_pair *ib,
				     struct iio_dmabuf __user *iio_dmabuf_req,
				     bool nonblock)
{
	struct iio_buffer *buffer = ib->buffer;
	struct iio_dmabuf iio_dmabuf;
	struct dma_buf_attachment *attach;
	struct iio_dmabuf_priv *priv;
	struct iio_dma_fence *fence;
	struct dma_buf *dmabuf;
	unsigned long timeout;
	bool cookie, cyclic, dma_to_ram;
	long retl;
	u32 seqno;
	int ret;

	if (copy_from_user(&iio_dmabuf, iio_dmabuf_req, sizeof(iio_dmabuf)))
		return -EFAULT;

	if (iio_dmabuf.flags & ~IIO_BUFFER_DMABUF_SUPPORTED_FLAGS)
		return -EINVAL;

	cyclic = iio_dmabuf.flags & IIO_BUFFER_DMABUF_CYCLIC;

	/* Cyclic flag is only supported on output buffers */
	if (cyclic && buffer->direction != IIO_BUFFER_DIRECTION_OUT)
		return -EINVAL;

	dmabuf = dma_buf_get(iio_dmabuf.fd);
	if (IS_ERR(dmabuf))
		return PTR_ERR(dmabuf);

	if (!iio_dmabuf.bytes_used || iio_dmabuf.bytes_used > dmabuf->size) {
		ret = -EINVAL;
		goto err_dmabuf_put;
	}

	attach = iio_buffer_find_attachment(ib, dmabuf, nonblock);
	if (IS_ERR(attach)) {
		ret = PTR_ERR(attach);
		goto err_dmabuf_put;
	}

	priv = attach->importer_priv;

	fence = kmalloc_obj(*fence);
	if (!fence) {
		ret = -ENOMEM;
		goto err_attachment_put;
	}

	fence->priv = priv;

	seqno = atomic_add_return(1, &priv->seqno);

	/*
	 * The transfers are guaranteed to be processed in the order they are
	 * enqueued, so we can use a simple incrementing sequence number for
	 * the dma_fence.
	 */
	dma_fence_init(&fence->base, &iio_buffer_dma_fence_ops,
		       &priv->lock, priv->context, seqno);

	ret = iio_dma_resv_lock(dmabuf, nonblock);
	if (ret)
		goto err_fence_put;

	timeout = nonblock ? 0 : msecs_to_jiffies(DMABUF_ENQUEUE_TIMEOUT_MS);
	dma_to_ram = buffer->direction == IIO_BUFFER_DIRECTION_IN;

	/* Make sure we don't have writers */
	retl = dma_resv_wait_timeout(dmabuf->resv,
				     dma_resv_usage_rw(dma_to_ram),
				     true, timeout);
	if (retl == 0)
		retl = -EBUSY;
	if (retl < 0) {
		ret = (int)retl;
		goto err_resv_unlock;
	}

	if (buffer->access->lock_queue)
		buffer->access->lock_queue(buffer);

	ret = dma_resv_reserve_fences(dmabuf->resv, 1);
	if (ret)
		goto err_queue_unlock;

	/* Device writes the buffer when capturing, reads it when outputting. */
	dma_resv_add_fence(dmabuf->resv, &fence->base,
			   dma_to_ram ? DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_READ);
	dma_resv_unlock(dmabuf->resv);

	cookie = dma_fence_begin_signalling();

	ret = buffer->access->enqueue_dmabuf(buffer, priv->block, &fence->base,
					     priv->sgt, iio_dmabuf.bytes_used,
					     cyclic);
	if (ret) {
		/*
		 * DMABUF enqueue failed, but we already added the fence.
		 * Signal the error through the fence completion mechanism.
		 */
		iio_buffer_signal_dmabuf_done(&fence->base, ret);
	}

	if (buffer->access->unlock_queue)
		buffer->access->unlock_queue(buffer);

	dma_fence_end_signalling(cookie);
	dma_buf_put(dmabuf);

	return ret;

err_queue_unlock:
	if (buffer->access->unlock_queue)
		buffer->access->unlock_queue(buffer);
err_resv_unlock:
	dma_resv_unlock(dmabuf->resv);
err_fence_put:
	dma_fence_put(&fence->base);
err_attachment_put:
	iio_buffer_dmabuf_put(attach);
err_dmabuf_put:
	dma_buf_put(dmabuf);

	return ret;
}
1949 
/*
 * Deferred cleanup after a fence is signalled: drop the signalling-path
 * fence reference and the attachment reference.  Runs from the workqueue
 * because the unref paths may take locks that cannot be taken in the
 * signalling context (see iio_buffer_signal_dmabuf_done()).
 */
static void iio_buffer_cleanup(struct work_struct *work)
{
	struct iio_dma_fence *fence =
		container_of(work, struct iio_dma_fence, work);
	struct iio_dmabuf_priv *priv = fence->priv;
	struct dma_buf_attachment *attach = priv->attach;

	dma_fence_put(&fence->base);
	iio_buffer_dmabuf_put(attach);
}
1960 
/*
 * Complete a DMABUF transfer fence, propagating @ret as the fence error.
 * Called by buffer implementations when a transfer finishes (or by the
 * enqueue path itself on failure).  Final unref is deferred to a work item.
 */
void iio_buffer_signal_dmabuf_done(struct dma_fence *fence, int ret)
{
	struct iio_dma_fence *iio_fence =
		container_of(fence, struct iio_dma_fence, base);
	bool cookie = dma_fence_begin_signalling();

	/*
	 * Get a reference to the fence, so that it's not freed as soon as
	 * it's signaled.
	 */
	dma_fence_get(fence);

	fence->error = ret;
	dma_fence_signal(fence);
	dma_fence_end_signalling(cookie);

	/*
	 * The fence will be unref'd in iio_buffer_cleanup.
	 * It can't be done here, as the unref functions might try to lock the
	 * resv object, which can deadlock.
	 */
	INIT_WORK(&iio_fence->work, iio_buffer_cleanup);
	schedule_work(&iio_fence->work);
}
EXPORT_SYMBOL_GPL(iio_buffer_signal_dmabuf_done);
1986 
/*
 * ioctl dispatcher for the per-buffer character device: routes the three
 * DMABUF ioctls (attach/detach/enqueue) to their handlers.
 */
static long iio_buffer_chrdev_ioctl(struct file *filp,
				    unsigned int cmd, unsigned long arg)
{
	struct iio_dev_buffer_pair *ib = filp->private_data;
	void __user *_arg = (void __user *)arg;
	bool nonblock = filp->f_flags & O_NONBLOCK;

	switch (cmd) {
	case IIO_BUFFER_DMABUF_ATTACH_IOCTL:
		return iio_buffer_attach_dmabuf(ib, _arg, nonblock);
	case IIO_BUFFER_DMABUF_DETACH_IOCTL:
		return iio_buffer_detach_dmabuf(ib, _arg, nonblock);
	case IIO_BUFFER_DMABUF_ENQUEUE_IOCTL:
		return iio_buffer_enqueue_dmabuf(ib, _arg, nonblock);
	default:
		return -EINVAL;
	}
}
2005 
/* File operations for the anon-inode fd handed out by IIO_BUFFER_GET_FD_IOCTL. */
static const struct file_operations iio_buffer_chrdev_fileops = {
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
	.read = iio_buffer_read,
	.write = iio_buffer_write,
	.unlocked_ioctl = iio_buffer_chrdev_ioctl,
	.compat_ioctl = compat_ptr_ioctl,
	.poll = iio_buffer_poll,
	.release = iio_buffer_chrdev_release,
};
2016 
/*
 * IIO_BUFFER_GET_FD_IOCTL handler: create an anonymous-inode fd for the
 * attached buffer at the index read from @arg and write the fd back to
 * userspace.  Takes a device reference and marks the buffer busy; both
 * are released in iio_buffer_chrdev_release().
 *
 * Returns 0 on success or a negative error code (-EBUSY when the buffer
 * already has an open fd).
 */
static long iio_device_buffer_getfd(struct iio_dev *indio_dev, unsigned long arg)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	int __user *ival = (int __user *)arg;
	struct iio_dev_buffer_pair *ib;
	struct iio_buffer *buffer;
	int fd, idx, ret;

	if (copy_from_user(&idx, ival, sizeof(idx)))
		return -EFAULT;

	if (idx >= iio_dev_opaque->attached_buffers_cnt)
		return -ENODEV;

	iio_device_get(indio_dev);

	buffer = iio_dev_opaque->attached_buffers[idx];

	/* Only one open fd per buffer at a time. */
	if (test_and_set_bit(IIO_BUSY_BIT_POS, &buffer->flags)) {
		ret = -EBUSY;
		goto error_iio_dev_put;
	}

	ib = kzalloc_obj(*ib);
	if (!ib) {
		ret = -ENOMEM;
		goto error_clear_busy_bit;
	}

	ib->indio_dev = indio_dev;
	ib->buffer = buffer;

	fd = anon_inode_getfd("iio:buffer", &iio_buffer_chrdev_fileops,
			      ib, O_RDWR | O_CLOEXEC);
	if (fd < 0) {
		ret = fd;
		goto error_free_ib;
	}

	if (copy_to_user(ival, &fd, sizeof(fd))) {
		/*
		 * "Leak" the fd, as there's not much we can do about this
		 * anyway. 'fd' might have been closed already, as
		 * anon_inode_getfd() called fd_install() on it, which made
		 * it reachable by userland.
		 *
		 * Instead of allowing a malicious user to play tricks with
		 * us, rely on the process exit path to do any necessary
		 * cleanup, as in releasing the file, if still needed.
		 */
		return -EFAULT;
	}

	return 0;

error_free_ib:
	kfree(ib);
error_clear_busy_bit:
	clear_bit(IIO_BUSY_BIT_POS, &buffer->flags);
error_iio_dev_put:
	iio_device_put(indio_dev);
	return ret;
}
2080 
2081 static long iio_device_buffer_ioctl(struct iio_dev *indio_dev, struct file *filp,
2082 				    unsigned int cmd, unsigned long arg)
2083 {
2084 	switch (cmd) {
2085 	case IIO_BUFFER_GET_FD_IOCTL:
2086 		return iio_device_buffer_getfd(indio_dev, arg);
2087 	default:
2088 		return IIO_IOCTL_UNHANDLED;
2089 	}
2090 }
2091 
2092 static int iio_channel_validate_scan_type(struct device *dev, int ch,
2093 					  const struct iio_scan_type *scan_type)
2094 {
2095 	/* Verify that sample bits fit into storage */
2096 	if (scan_type->storagebits < scan_type->realbits + scan_type->shift) {
2097 		dev_err(dev,
2098 			"Channel %d storagebits (%d) < shifted realbits (%d + %d)\n",
2099 			ch, scan_type->storagebits,
2100 			scan_type->realbits,
2101 			scan_type->shift);
2102 		return -EINVAL;
2103 	}
2104 
2105 	return 0;
2106 }
2107 
/*
 * Set up the sysfs attribute group ("buffer%d", kasprintf'd below) and the
 * scan mask for one attached buffer.
 *
 * The attribute array is assembled from three sources, in order:
 *  - the fixed iio_buffer_attrs (length/enable/watermark, some swapped for
 *    read-only variants depending on the buffer's capabilities),
 *  - the driver-supplied buffer->attrs,
 *  - the per-channel scan-element attributes collected on
 *    buffer->buffer_attr_list by iio_buffer_add_channel_sysfs().
 *
 * On failure, the error labels unwind in reverse order of allocation.
 * Returns 0 on success or a negative errno.
 */
static int __iio_buffer_alloc_sysfs_and_mask(struct iio_buffer *buffer,
					     struct iio_dev *indio_dev,
					     int index)
{
	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
	unsigned int masklength = iio_get_masklength(indio_dev);
	struct iio_dev_attr *p;
	const struct iio_dev_attr *id_attr;
	struct attribute **attr;
	int ret, i, attrn, scan_el_attrcount, buffer_attrcount;
	const struct iio_chan_spec *channels;

	/* Count driver-supplied attributes (NULL-terminated array). */
	buffer_attrcount = 0;
	if (buffer->attrs) {
		while (buffer->attrs[buffer_attrcount])
			buffer_attrcount++;
	}
	buffer_attrcount += ARRAY_SIZE(iio_buffer_attrs);

	scan_el_attrcount = 0;
	INIT_LIST_HEAD(&buffer->buffer_attr_list);
	channels = indio_dev->channels;
	if (channels) {
		/* new magic */
		for (i = 0; i < indio_dev->num_channels; i++) {
			const struct iio_scan_type *scan_type;

			/* Channels without a scan index are not buffered. */
			if (channels[i].scan_index < 0)
				continue;

			if (channels[i].has_ext_scan_type) {
				int j;

				/*
				 * get_current_scan_type is required when using
				 * extended scan types.
				 */
				if (!indio_dev->info->get_current_scan_type) {
					ret = -EINVAL;
					goto error_cleanup_dynamic;
				}

				/* Validate every possible extended scan type. */
				for (j = 0; j < channels[i].num_ext_scan_type; j++) {
					scan_type = &channels[i].ext_scan_type[j];

					ret = iio_channel_validate_scan_type(
						&indio_dev->dev, i, scan_type);
					if (ret)
						goto error_cleanup_dynamic;
				}
			} else {
				scan_type = &channels[i].scan_type;

				ret = iio_channel_validate_scan_type(
						&indio_dev->dev, i, scan_type);
				if (ret)
					goto error_cleanup_dynamic;
			}

			/*
			 * Creates the scan-element attributes for this channel
			 * on buffer->buffer_attr_list; returns how many were
			 * added.
			 */
			ret = iio_buffer_add_channel_sysfs(indio_dev, buffer,
							   &channels[i]);
			if (ret < 0)
				goto error_cleanup_dynamic;
			scan_el_attrcount += ret;
			if (channels[i].type == IIO_TIMESTAMP)
				iio_dev_opaque->scan_index_timestamp =
					channels[i].scan_index;
		}
		/* Allocate the scan mask unless the driver provided one. */
		if (masklength && !buffer->scan_mask) {
			buffer->scan_mask = bitmap_zalloc(masklength,
							  GFP_KERNEL);
			if (!buffer->scan_mask) {
				ret = -ENOMEM;
				goto error_cleanup_dynamic;
			}
		}
	}

	/* +1 for the NULL terminator expected by the attribute group. */
	attrn = buffer_attrcount + scan_el_attrcount;
	attr = kzalloc_objs(*attr, attrn + 1);
	if (!attr) {
		ret = -ENOMEM;
		goto error_free_scan_mask;
	}

	memcpy(attr, iio_buffer_attrs, sizeof(iio_buffer_attrs));
	/* Swap in read-only variants where the buffer can't be configured. */
	if (!buffer->access->set_length)
		attr[0] = &dev_attr_length_ro.attr;

	if (buffer->access->flags & INDIO_BUFFER_FLAG_FIXED_WATERMARK)
		attr[2] = &dev_attr_watermark_ro.attr;

	/* Append the driver-supplied attributes after the fixed ones. */
	if (buffer->attrs)
		for (i = 0, id_attr = buffer->attrs[i];
		     (id_attr = buffer->attrs[i]); i++)
			attr[ARRAY_SIZE(iio_buffer_attrs) + i] =
				(struct attribute *)&id_attr->dev_attr.attr;

	buffer->buffer_group.attrs = attr;

	/*
	 * Wrap each buffer attribute so its show/store callbacks can recover
	 * the buffer the attribute belongs to.
	 */
	for (i = 0; i < buffer_attrcount; i++) {
		struct attribute *wrapped;

		wrapped = iio_buffer_wrap_attr(buffer, attr[i]);
		if (!wrapped) {
			ret = -ENOMEM;
			goto error_free_buffer_attrs;
		}
		attr[i] = wrapped;
	}

	/* Fill the scan-element attributes collected earlier into the array. */
	attrn = 0;
	list_for_each_entry(p, &buffer->buffer_attr_list, l)
		attr[attrn++] = &p->dev_attr.attr;

	buffer->buffer_group.name = kasprintf(GFP_KERNEL, "buffer%d", index);
	if (!buffer->buffer_group.name) {
		ret = -ENOMEM;
		goto error_free_buffer_attrs;
	}

	ret = iio_device_register_sysfs_group(indio_dev, &buffer->buffer_group);
	if (ret)
		goto error_free_buffer_attr_group_name;

	/* we only need to register the legacy groups for the first buffer */
	if (index > 0)
		return 0;

	ret = iio_buffer_register_legacy_sysfs_groups(indio_dev, attr,
						      buffer_attrcount,
						      scan_el_attrcount);
	if (ret)
		goto error_free_buffer_attr_group_name;

	return 0;

error_free_buffer_attr_group_name:
	kfree(buffer->buffer_group.name);
error_free_buffer_attrs:
	kfree(buffer->buffer_group.attrs);
error_free_scan_mask:
	bitmap_free(buffer->scan_mask);
error_cleanup_dynamic:
	iio_free_chan_devattr_list(&buffer->buffer_attr_list);

	return ret;
}
2256 
2257 static void __iio_buffer_free_sysfs_and_mask(struct iio_buffer *buffer,
2258 					     struct iio_dev *indio_dev,
2259 					     int index)
2260 {
2261 	if (index == 0)
2262 		iio_buffer_unregister_legacy_sysfs_groups(indio_dev);
2263 	bitmap_free(buffer->scan_mask);
2264 	kfree(buffer->buffer_group.name);
2265 	kfree(buffer->buffer_group.attrs);
2266 	iio_free_chan_devattr_list(&buffer->buffer_attr_list);
2267 }
2268 
2269 int iio_buffers_alloc_sysfs_and_mask(struct iio_dev *indio_dev)
2270 {
2271 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
2272 	const struct iio_chan_spec *channels;
2273 	struct iio_buffer *buffer;
2274 	int ret, i, idx;
2275 	size_t sz;
2276 
2277 	channels = indio_dev->channels;
2278 	if (channels) {
2279 		int ml = 0;
2280 
2281 		for (i = 0; i < indio_dev->num_channels; i++)
2282 			ml = max(ml, channels[i].scan_index + 1);
2283 		ACCESS_PRIVATE(indio_dev, masklength) = ml;
2284 	}
2285 
2286 	if (!iio_dev_opaque->attached_buffers_cnt)
2287 		return 0;
2288 
2289 	for (idx = 0; idx < iio_dev_opaque->attached_buffers_cnt; idx++) {
2290 		buffer = iio_dev_opaque->attached_buffers[idx];
2291 		ret = __iio_buffer_alloc_sysfs_and_mask(buffer, indio_dev, idx);
2292 		if (ret)
2293 			goto error_unwind_sysfs_and_mask;
2294 	}
2295 
2296 	sz = sizeof(*iio_dev_opaque->buffer_ioctl_handler);
2297 	iio_dev_opaque->buffer_ioctl_handler = kzalloc(sz, GFP_KERNEL);
2298 	if (!iio_dev_opaque->buffer_ioctl_handler) {
2299 		ret = -ENOMEM;
2300 		goto error_unwind_sysfs_and_mask;
2301 	}
2302 
2303 	iio_dev_opaque->buffer_ioctl_handler->ioctl = iio_device_buffer_ioctl;
2304 	iio_device_ioctl_handler_register(indio_dev,
2305 					  iio_dev_opaque->buffer_ioctl_handler);
2306 
2307 	return 0;
2308 
2309 error_unwind_sysfs_and_mask:
2310 	while (idx--) {
2311 		buffer = iio_dev_opaque->attached_buffers[idx];
2312 		__iio_buffer_free_sysfs_and_mask(buffer, indio_dev, idx);
2313 	}
2314 	return ret;
2315 }
2316 
2317 void iio_buffers_free_sysfs_and_mask(struct iio_dev *indio_dev)
2318 {
2319 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
2320 	struct iio_buffer *buffer;
2321 	int i;
2322 
2323 	if (!iio_dev_opaque->attached_buffers_cnt)
2324 		return;
2325 
2326 	iio_device_ioctl_handler_unregister(iio_dev_opaque->buffer_ioctl_handler);
2327 	kfree(iio_dev_opaque->buffer_ioctl_handler);
2328 
2329 	for (i = iio_dev_opaque->attached_buffers_cnt - 1; i >= 0; i--) {
2330 		buffer = iio_dev_opaque->attached_buffers[i];
2331 		__iio_buffer_free_sysfs_and_mask(buffer, indio_dev, i);
2332 	}
2333 }
2334 
2335 /**
2336  * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
2337  * @indio_dev: the iio device
2338  * @mask: scan mask to be checked
2339  *
2340  * Return true if exactly one bit is set in the scan mask, false otherwise. It
2341  * can be used for devices where only one channel can be active for sampling at
2342  * a time.
2343  */
2344 bool iio_validate_scan_mask_onehot(struct iio_dev *indio_dev,
2345 				   const unsigned long *mask)
2346 {
2347 	return bitmap_weight(mask, iio_get_masklength(indio_dev)) == 1;
2348 }
2349 EXPORT_SYMBOL_GPL(iio_validate_scan_mask_onehot);
2350 
2351 static const void *iio_demux(struct iio_buffer *buffer,
2352 			     const void *datain)
2353 {
2354 	struct iio_demux_table *t;
2355 
2356 	if (list_empty(&buffer->demux_list))
2357 		return datain;
2358 	list_for_each_entry(t, &buffer->demux_list, l)
2359 		memcpy(buffer->demux_bounce + t->to,
2360 		       datain + t->from, t->length);
2361 
2362 	return buffer->demux_bounce;
2363 }
2364 
2365 static int iio_push_to_buffer(struct iio_buffer *buffer, const void *data)
2366 {
2367 	const void *dataout = iio_demux(buffer, data);
2368 	int ret;
2369 
2370 	ret = buffer->access->store_to(buffer, dataout);
2371 	if (ret)
2372 		return ret;
2373 
2374 	/*
2375 	 * We can't just test for watermark to decide if we wake the poll queue
2376 	 * because read may request less samples than the watermark.
2377 	 */
2378 	wake_up_interruptible_poll(&buffer->pollq, EPOLLIN | EPOLLRDNORM);
2379 	return 0;
2380 }
2381 
2382 /**
2383  * iio_push_to_buffers() - push to a registered buffer.
2384  * @indio_dev:		iio_dev structure for device.
2385  * @data:		Full scan.
2386  *
2387  * Context: Any context.
2388  * Return: 0 on success, negative error code on failure.
2389  */
2390 int iio_push_to_buffers(struct iio_dev *indio_dev, const void *data)
2391 {
2392 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
2393 	int ret;
2394 	struct iio_buffer *buf;
2395 
2396 	list_for_each_entry(buf, &iio_dev_opaque->buffer_list, buffer_list) {
2397 		ret = iio_push_to_buffer(buf, data);
2398 		if (ret < 0)
2399 			return ret;
2400 	}
2401 
2402 	return 0;
2403 }
2404 EXPORT_SYMBOL_GPL(iio_push_to_buffers);
2405 
2406 /**
2407  * iio_push_to_buffers_with_ts_unaligned() - push to registered buffer,
2408  *    no alignment or space requirements.
2409  * @indio_dev:		iio_dev structure for device.
2410  * @data:		channel data excluding the timestamp.
2411  * @data_sz:		size of data.
2412  * @timestamp:		timestamp for the sample data.
2413  *
2414  * This special variant of iio_push_to_buffers_with_timestamp() does
2415  * not require space for the timestamp, or 8 byte alignment of data.
2416  * It does however require an allocation on first call and additional
2417  * copies on all calls, so should be avoided if possible.
2418  *
2419  * Context: May sleep.
2420  * Return: 0 on success, negative error code on failure.
2421  */
2422 int iio_push_to_buffers_with_ts_unaligned(struct iio_dev *indio_dev,
2423 					  const void *data,
2424 					  size_t data_sz,
2425 					  int64_t timestamp)
2426 {
2427 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
2428 
2429 	might_sleep();
2430 
2431 	/*
2432 	 * Conservative estimate - we can always safely copy the minimum
2433 	 * of either the data provided or the length of the destination buffer.
2434 	 * This relaxed limit allows the calling drivers to be lax about
2435 	 * tracking the size of the data they are pushing, at the cost of
2436 	 * unnecessary copying of padding.
2437 	 */
2438 	data_sz = min_t(size_t, indio_dev->scan_bytes, data_sz);
2439 	if (iio_dev_opaque->bounce_buffer_size !=  indio_dev->scan_bytes) {
2440 		void *bb;
2441 
2442 		bb = devm_krealloc(&indio_dev->dev,
2443 				   iio_dev_opaque->bounce_buffer,
2444 				   indio_dev->scan_bytes, GFP_KERNEL);
2445 		if (!bb)
2446 			return -ENOMEM;
2447 		iio_dev_opaque->bounce_buffer = bb;
2448 		iio_dev_opaque->bounce_buffer_size = indio_dev->scan_bytes;
2449 	}
2450 	memcpy(iio_dev_opaque->bounce_buffer, data, data_sz);
2451 	return iio_push_to_buffers_with_timestamp(indio_dev,
2452 						  iio_dev_opaque->bounce_buffer,
2453 						  timestamp);
2454 }
2455 EXPORT_SYMBOL_GPL(iio_push_to_buffers_with_ts_unaligned);
2456 
2457 /**
2458  * iio_buffer_release() - Free a buffer's resources
2459  * @ref: Pointer to the kref embedded in the iio_buffer struct
2460  *
2461  * This function is called when the last reference to the buffer has been
2462  * dropped. It will typically free all resources allocated by the buffer. Do not
2463  * call this function manually, always use iio_buffer_put() when done using a
2464  * buffer.
2465  */
2466 static void iio_buffer_release(struct kref *ref)
2467 {
2468 	struct iio_buffer *buffer = container_of(ref, struct iio_buffer, ref);
2469 
2470 	mutex_destroy(&buffer->dmabufs_mutex);
2471 	buffer->access->release(buffer);
2472 }
2473 
2474 /**
2475  * iio_buffer_get() - Grab a reference to the buffer
2476  * @buffer: The buffer to grab a reference for, may be NULL
2477  *
2478  * Returns the pointer to the buffer that was passed into the function.
2479  */
2480 struct iio_buffer *iio_buffer_get(struct iio_buffer *buffer)
2481 {
2482 	if (buffer)
2483 		kref_get(&buffer->ref);
2484 
2485 	return buffer;
2486 }
2487 EXPORT_SYMBOL_GPL(iio_buffer_get);
2488 
2489 /**
2490  * iio_buffer_put() - Release the reference to the buffer
2491  * @buffer: The buffer to release the reference for, may be NULL
2492  */
2493 void iio_buffer_put(struct iio_buffer *buffer)
2494 {
2495 	if (buffer)
2496 		kref_put(&buffer->ref, iio_buffer_release);
2497 }
2498 EXPORT_SYMBOL_GPL(iio_buffer_put);
2499 
2500 /**
2501  * iio_device_attach_buffer - Attach a buffer to a IIO device
2502  * @indio_dev: The device the buffer should be attached to
2503  * @buffer: The buffer to attach to the device
2504  *
2505  * Return 0 if successful, negative if error.
2506  *
2507  * This function attaches a buffer to a IIO device. The buffer stays attached to
2508  * the device until the device is freed. For legacy reasons, the first attached
2509  * buffer will also be assigned to 'indio_dev->buffer'.
2510  * The array allocated here, will be free'd via the iio_device_detach_buffers()
2511  * call which is handled by the iio_device_free().
2512  */
2513 int iio_device_attach_buffer(struct iio_dev *indio_dev,
2514 			     struct iio_buffer *buffer)
2515 {
2516 	struct iio_dev_opaque *iio_dev_opaque = to_iio_dev_opaque(indio_dev);
2517 	struct iio_buffer **new, **old = iio_dev_opaque->attached_buffers;
2518 	unsigned int cnt = iio_dev_opaque->attached_buffers_cnt;
2519 
2520 	cnt++;
2521 
2522 	new = krealloc(old, sizeof(*new) * cnt, GFP_KERNEL);
2523 	if (!new)
2524 		return -ENOMEM;
2525 	iio_dev_opaque->attached_buffers = new;
2526 
2527 	buffer = iio_buffer_get(buffer);
2528 
2529 	/* first buffer is legacy; attach it to the IIO device directly */
2530 	if (!indio_dev->buffer)
2531 		indio_dev->buffer = buffer;
2532 
2533 	iio_dev_opaque->attached_buffers[cnt - 1] = buffer;
2534 	iio_dev_opaque->attached_buffers_cnt = cnt;
2535 
2536 	return 0;
2537 }
2538 EXPORT_SYMBOL_GPL(iio_device_attach_buffer);
2539