Lines Matching +full:multi +full:- +full:attr

1 // SPDX-License-Identifier: GPL-2.0-only
9 * - Better memory allocation techniques?
10 * - Alternative access techniques?
18 #include <linux/dma-buf.h>
19 #include <linux/dma-fence.h>
20 #include <linux/dma-resv.h>
30 #include <linux/iio/iio-opaque.h>
72 return !list_empty(&buf->buffer_list); in iio_buffer_is_active()
77 return buf->access->data_available(buf); in iio_buffer_data_available()
83 if (!indio_dev->info->hwfifo_flush_to_buffer) in iio_buffer_flush_hwfifo()
84 return -ENODEV; in iio_buffer_flush_hwfifo()
86 return indio_dev->info->hwfifo_flush_to_buffer(indio_dev, required); in iio_buffer_flush_hwfifo()
96 if (!indio_dev->info) in iio_buffer_ready()
108 /* force a flush for non-blocking reads */ in iio_buffer_ready()
111 to_flush - avail); in iio_buffer_ready()
117 to_wait - avail); in iio_buffer_ready()
128 * iio_buffer_read() - chrdev read for buffer access
143 struct iio_dev_buffer_pair *ib = filp->private_data; in iio_buffer_read()
144 struct iio_buffer *rb = ib->buffer; in iio_buffer_read()
145 struct iio_dev *indio_dev = ib->indio_dev; in iio_buffer_read()
151 if (!indio_dev->info) in iio_buffer_read()
152 return -ENODEV; in iio_buffer_read()
154 if (!rb || !rb->access->read) in iio_buffer_read()
155 return -EINVAL; in iio_buffer_read()
157 if (rb->direction != IIO_BUFFER_DIRECTION_IN) in iio_buffer_read()
158 return -EPERM; in iio_buffer_read()
160 datum_size = rb->bytes_per_datum; in iio_buffer_read()
169 if (filp->f_flags & O_NONBLOCK) in iio_buffer_read()
172 to_wait = min_t(size_t, n / datum_size, rb->watermark); in iio_buffer_read()
174 add_wait_queue(&rb->pollq, &wait); in iio_buffer_read()
176 if (!indio_dev->info) { in iio_buffer_read()
177 ret = -ENODEV; in iio_buffer_read()
183 ret = -ERESTARTSYS; in iio_buffer_read()
192 ret = rb->access->read(rb, n, buf); in iio_buffer_read()
193 if (ret == 0 && (filp->f_flags & O_NONBLOCK)) in iio_buffer_read()
194 ret = -EAGAIN; in iio_buffer_read()
196 remove_wait_queue(&rb->pollq, &wait); in iio_buffer_read()
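
The read path above waits on rb->pollq until the watermark is met, unless O_NONBLOCK is set, in which case the hwfifo is flushed and -EAGAIN is returned when nothing is available. A minimal userspace sketch of driving that path follows; the device name iio:device0, the channel name in_voltage0, and the legacy single-buffer sysfs layout are assumptions for illustration, not taken from this file.

	#include <fcntl.h>
	#include <poll.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>

	static void sysfs_write(const char *path, const char *val)
	{
		int fd = open(path, O_WRONLY);

		if (fd >= 0) {
			write(fd, val, strlen(val));
			close(fd);
		}
	}

	int main(void)
	{
		struct pollfd pfd;
		char buf[4096];
		ssize_t n;

		/* Enable one channel, size the buffer, start it (paths assumed). */
		sysfs_write("/sys/bus/iio/devices/iio:device0/scan_elements/in_voltage0_en", "1");
		sysfs_write("/sys/bus/iio/devices/iio:device0/buffer/length", "128");
		sysfs_write("/sys/bus/iio/devices/iio:device0/buffer/enable", "1");

		pfd.fd = open("/dev/iio:device0", O_RDONLY | O_NONBLOCK);
		pfd.events = POLLIN;

		/* poll() sleeps until iio_buffer_poll() signals the watermark,
		 * then the non-blocking read() drains what is available. */
		while (poll(&pfd, 1, -1) > 0) {
			n = read(pfd.fd, buf, sizeof(buf));
			if (n > 0)
				printf("read %zd bytes of scan data\n", n);
		}

		return 0;
	}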
203 if (buf->access->space_available) in iio_buffer_space_available()
204 return buf->access->space_available(buf); in iio_buffer_space_available()
212 struct iio_dev_buffer_pair *ib = filp->private_data; in iio_buffer_write()
213 struct iio_buffer *rb = ib->buffer; in iio_buffer_write()
214 struct iio_dev *indio_dev = ib->indio_dev; in iio_buffer_write()
219 if (!indio_dev->info) in iio_buffer_write()
220 return -ENODEV; in iio_buffer_write()
222 if (!rb || !rb->access->write) in iio_buffer_write()
223 return -EINVAL; in iio_buffer_write()
225 if (rb->direction != IIO_BUFFER_DIRECTION_OUT) in iio_buffer_write()
226 return -EPERM; in iio_buffer_write()
229 add_wait_queue(&rb->pollq, &wait); in iio_buffer_write()
231 if (!indio_dev->info) in iio_buffer_write()
232 return -ENODEV; in iio_buffer_write()
236 ret = -ERESTARTSYS; in iio_buffer_write()
240 if (filp->f_flags & O_NONBLOCK) { in iio_buffer_write()
242 ret = -EAGAIN; in iio_buffer_write()
251 ret = rb->access->write(rb, n - written, buf + written); in iio_buffer_write()
258 remove_wait_queue(&rb->pollq, &wait); in iio_buffer_write()
264 * iio_buffer_poll() - poll the buffer to find out if it has data
275 struct iio_dev_buffer_pair *ib = filp->private_data; in iio_buffer_poll()
276 struct iio_buffer *rb = ib->buffer; in iio_buffer_poll()
277 struct iio_dev *indio_dev = ib->indio_dev; in iio_buffer_poll()
279 if (!indio_dev->info || !rb) in iio_buffer_poll()
282 poll_wait(filp, &rb->pollq, wait); in iio_buffer_poll()
284 switch (rb->direction) { in iio_buffer_poll()
286 if (iio_buffer_ready(indio_dev, rb, rb->watermark, 0)) in iio_buffer_poll()
301 struct iio_dev_buffer_pair *ib = filp->private_data; in iio_buffer_read_wrapper()
302 struct iio_buffer *rb = ib->buffer; in iio_buffer_read_wrapper()
305 if (test_bit(IIO_BUSY_BIT_POS, &rb->flags)) in iio_buffer_read_wrapper()
306 return -EBUSY; in iio_buffer_read_wrapper()
314 struct iio_dev_buffer_pair *ib = filp->private_data; in iio_buffer_write_wrapper()
315 struct iio_buffer *rb = ib->buffer; in iio_buffer_write_wrapper()
318 if (test_bit(IIO_BUSY_BIT_POS, &rb->flags)) in iio_buffer_write_wrapper()
319 return -EBUSY; in iio_buffer_write_wrapper()
327 struct iio_dev_buffer_pair *ib = filp->private_data; in iio_buffer_poll_wrapper()
328 struct iio_buffer *rb = ib->buffer; in iio_buffer_poll_wrapper()
331 if (test_bit(IIO_BUSY_BIT_POS, &rb->flags)) in iio_buffer_poll_wrapper()
338 * iio_buffer_wakeup_poll - Wakes up the buffer waitqueue
350 for (i = 0; i < iio_dev_opaque->attached_buffers_cnt; i++) { in iio_buffer_wakeup_poll()
351 buffer = iio_dev_opaque->attached_buffers[i]; in iio_buffer_wakeup_poll()
352 wake_up(&buffer->pollq); in iio_buffer_wakeup_poll()
358 if (!buffer || !buffer->access || !buffer->access->remove_from) in iio_pop_from_buffer()
359 return -EINVAL; in iio_pop_from_buffer()
361 return buffer->access->remove_from(buffer, data); in iio_pop_from_buffer()
367 INIT_LIST_HEAD(&buffer->demux_list); in iio_buffer_init()
368 INIT_LIST_HEAD(&buffer->buffer_list); in iio_buffer_init()
369 INIT_LIST_HEAD(&buffer->dmabufs); in iio_buffer_init()
370 mutex_init(&buffer->dmabufs_mutex); in iio_buffer_init()
371 init_waitqueue_head(&buffer->pollq); in iio_buffer_init()
372 kref_init(&buffer->ref); in iio_buffer_init()
373 if (!buffer->watermark) in iio_buffer_init()
374 buffer->watermark = 1; in iio_buffer_init()
384 for (i = 0; i < iio_dev_opaque->attached_buffers_cnt; i++) { in iio_device_detach_buffers()
385 buffer = iio_dev_opaque->attached_buffers[i]; in iio_device_detach_buffers()
389 kfree(iio_dev_opaque->attached_buffers); in iio_device_detach_buffers()
393 struct device_attribute *attr, in iio_show_scan_index() argument
396 return sysfs_emit(buf, "%u\n", to_iio_dev_attr(attr)->c->scan_index); in iio_show_scan_index()
400 struct device_attribute *attr, in iio_show_fixed_type() argument
404 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); in iio_show_fixed_type()
408 scan_type = iio_get_current_scan_type(indio_dev, this_attr->c); in iio_show_fixed_type()
412 type = scan_type->endianness; in iio_show_fixed_type()
421 if (scan_type->repeat > 1) in iio_show_fixed_type()
424 scan_type->sign, in iio_show_fixed_type()
425 scan_type->realbits, in iio_show_fixed_type()
426 scan_type->storagebits, in iio_show_fixed_type()
427 scan_type->repeat, in iio_show_fixed_type()
428 scan_type->shift); in iio_show_fixed_type()
432 scan_type->sign, in iio_show_fixed_type()
433 scan_type->realbits, in iio_show_fixed_type()
434 scan_type->storagebits, in iio_show_fixed_type()
435 scan_type->shift); in iio_show_fixed_type()
439 struct device_attribute *attr, in iio_scan_el_show() argument
443 struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; in iio_scan_el_show()
446 ret = !!test_bit(to_iio_dev_attr(attr)->address, in iio_scan_el_show()
447 buffer->scan_mask); in iio_scan_el_show()
461 * The condition here does not handle multi-long masks correctly. in iio_scan_mask_match()
468 * available_scan_masks is a zero-terminated array of longs - and in iio_scan_mask_match()
469 * using the proper bitmap_empty() check for multi-long wide masks in iio_scan_mask_match()
470 * would require the array to be terminated with multiple zero longs - in iio_scan_mask_match()
473 * As of this writing, no multi-long wide masks were found in-tree, so in iio_scan_mask_match()
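
The comment explains why the matcher treats a single zero long as the terminator of available_scan_masks. A condensed sketch of that scan pattern, using an invented name and only the non-strict (subset) branch, could look like:

	#include <linux/bitmap.h>

	/* Illustrative condensation of the zero-long-terminated scan described
	 * above; match_mask is a made-up name, not the in-tree function body. */
	static const unsigned long *match_mask(const unsigned long *av_masks,
					       unsigned int masklength,
					       const unsigned long *mask)
	{
		/* Stops at the first zero long - sufficient while no in-tree
		 * driver declares a mask wider than one long. */
		while (*av_masks) {
			if (bitmap_subset(mask, av_masks, masklength))
				return av_masks;
			av_masks += BITS_TO_LONGS(masklength);
		}

		return NULL;
	}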
492 if (!indio_dev->setup_ops->validate_scan_mask) in iio_validate_scan_mask()
495 return indio_dev->setup_ops->validate_scan_mask(indio_dev, mask); in iio_validate_scan_mask()
499 * iio_scan_mask_set() - set particular bit in the scan mask
517 return -EINVAL; in iio_scan_mask_set()
522 return -ENOMEM; in iio_scan_mask_set()
523 bitmap_copy(trialmask, buffer->scan_mask, masklength); in iio_scan_mask_set()
529 if (indio_dev->available_scan_masks) { in iio_scan_mask_set()
530 mask = iio_scan_mask_match(indio_dev->available_scan_masks, in iio_scan_mask_set()
535 bitmap_copy(buffer->scan_mask, trialmask, masklength); in iio_scan_mask_set()
543 return -EINVAL; in iio_scan_mask_set()
548 clear_bit(bit, buffer->scan_mask); in iio_scan_mask_clear()
556 return -EINVAL; in iio_scan_mask_query()
558 if (!buffer->scan_mask) in iio_scan_mask_query()
562 return !!test_bit(bit, buffer->scan_mask); in iio_scan_mask_query()
566 struct device_attribute *attr, in iio_scan_el_store() argument
574 struct iio_dev_attr *this_attr = to_iio_dev_attr(attr); in iio_scan_el_store()
575 struct iio_buffer *buffer = this_attr->buffer; in iio_scan_el_store()
581 guard(mutex)(&iio_dev_opaque->mlock); in iio_scan_el_store()
583 return -EBUSY; in iio_scan_el_store()
585 ret = iio_scan_mask_query(indio_dev, buffer, this_attr->address); in iio_scan_el_store()
593 ret = iio_scan_mask_set(indio_dev, buffer, this_attr->address); in iio_scan_el_store()
595 ret = iio_scan_mask_clear(buffer, this_attr->address); in iio_scan_el_store()
603 struct device_attribute *attr, in iio_scan_el_ts_show() argument
606 struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; in iio_scan_el_ts_show()
608 return sysfs_emit(buf, "%d\n", buffer->scan_timestamp); in iio_scan_el_ts_show()
612 struct device_attribute *attr, in iio_scan_el_ts_store() argument
619 struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; in iio_scan_el_ts_store()
626 guard(mutex)(&iio_dev_opaque->mlock); in iio_scan_el_ts_store()
628 return -EBUSY; in iio_scan_el_ts_store()
630 buffer->scan_timestamp = state; in iio_scan_el_ts_store()
647 &indio_dev->dev, in iio_buffer_add_channel_sysfs()
649 &buffer->buffer_attr_list); in iio_buffer_add_channel_sysfs()
659 &indio_dev->dev, in iio_buffer_add_channel_sysfs()
661 &buffer->buffer_attr_list); in iio_buffer_add_channel_sysfs()
665 if (chan->type != IIO_TIMESTAMP) in iio_buffer_add_channel_sysfs()
670 chan->scan_index, in iio_buffer_add_channel_sysfs()
672 &indio_dev->dev, in iio_buffer_add_channel_sysfs()
674 &buffer->buffer_attr_list); in iio_buffer_add_channel_sysfs()
680 chan->scan_index, in iio_buffer_add_channel_sysfs()
682 &indio_dev->dev, in iio_buffer_add_channel_sysfs()
684 &buffer->buffer_attr_list); in iio_buffer_add_channel_sysfs()
692 static ssize_t length_show(struct device *dev, struct device_attribute *attr, in length_show() argument
695 struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; in length_show()
697 return sysfs_emit(buf, "%d\n", buffer->length); in length_show()
700 static ssize_t length_store(struct device *dev, struct device_attribute *attr, in length_store() argument
705 struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; in length_store()
713 if (val == buffer->length) in length_store()
716 guard(mutex)(&iio_dev_opaque->mlock); in length_store()
718 return -EBUSY; in length_store()
720 buffer->access->set_length(buffer, val); in length_store()
722 if (buffer->length && buffer->length < buffer->watermark) in length_store()
723 buffer->watermark = buffer->length; in length_store()
728 static ssize_t enable_show(struct device *dev, struct device_attribute *attr, in enable_show() argument
731 struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; in enable_show()
748 bytes = scan_type->storagebits / 8; in iio_storage_bytes_for_si()
750 if (scan_type->repeat > 1) in iio_storage_bytes_for_si()
751 bytes *= scan_type->repeat; in iio_storage_bytes_for_si()
761 iio_dev_opaque->scan_index_timestamp); in iio_storage_bytes_for_timestamp()
801 list_add(&buffer->buffer_list, &iio_dev_opaque->buffer_list); in iio_buffer_activate()
806 list_del_init(&buffer->buffer_list); in iio_buffer_deactivate()
807 wake_up_interruptible(&buffer->pollq); in iio_buffer_deactivate()
817 &iio_dev_opaque->buffer_list, buffer_list) in iio_buffer_deactivate_all()
824 if (!buffer->access->enable) in iio_buffer_enable()
826 return buffer->access->enable(buffer, indio_dev); in iio_buffer_enable()
832 if (!buffer->access->disable) in iio_buffer_disable()
834 return buffer->access->disable(buffer, indio_dev); in iio_buffer_disable()
842 if (!buffer->access->set_bytes_per_datum) in iio_buffer_update_bytes_per_datum()
845 bytes = iio_compute_scan_bytes(indio_dev, buffer->scan_mask, in iio_buffer_update_bytes_per_datum()
846 buffer->scan_timestamp); in iio_buffer_update_bytes_per_datum()
848 buffer->access->set_bytes_per_datum(buffer, bytes); in iio_buffer_update_bytes_per_datum()
857 if (buffer->access->request_update) { in iio_buffer_request_update()
858 ret = buffer->access->request_update(buffer); in iio_buffer_request_update()
860 dev_dbg(&indio_dev->dev, in iio_buffer_request_update()
874 if (!indio_dev->available_scan_masks) in iio_free_scan_mask()
901 bitmap_empty(insert_buffer->scan_mask, masklength)) { in iio_verify_update()
902 dev_dbg(&indio_dev->dev, in iio_verify_update()
904 return -EINVAL; in iio_verify_update()
908 config->watermark = ~0; in iio_verify_update()
915 list_is_singular(&iio_dev_opaque->buffer_list)) in iio_verify_update()
918 modes = indio_dev->modes; in iio_verify_update()
920 list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) { in iio_verify_update()
923 modes &= buffer->access->modes; in iio_verify_update()
924 config->watermark = min(config->watermark, buffer->watermark); in iio_verify_update()
928 modes &= insert_buffer->access->modes; in iio_verify_update()
929 config->watermark = min(config->watermark, in iio_verify_update()
930 insert_buffer->watermark); in iio_verify_update()
934 if ((modes & INDIO_BUFFER_TRIGGERED) && indio_dev->trig) { in iio_verify_update()
935 config->mode = INDIO_BUFFER_TRIGGERED; in iio_verify_update()
941 if (insert_buffer && !list_empty(&iio_dev_opaque->buffer_list)) in iio_verify_update()
942 return -EINVAL; in iio_verify_update()
943 config->mode = INDIO_BUFFER_HARDWARE; in iio_verify_update()
946 config->mode = INDIO_BUFFER_SOFTWARE; in iio_verify_update()
949 if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) in iio_verify_update()
950 dev_dbg(&indio_dev->dev, "Buffer not started: no trigger\n"); in iio_verify_update()
951 return -EINVAL; in iio_verify_update()
957 return -ENOMEM; in iio_verify_update()
961 list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) { in iio_verify_update()
964 bitmap_or(compound_mask, compound_mask, buffer->scan_mask, in iio_verify_update()
966 scan_timestamp |= buffer->scan_timestamp; in iio_verify_update()
971 insert_buffer->scan_mask, masklength); in iio_verify_update()
972 scan_timestamp |= insert_buffer->scan_timestamp; in iio_verify_update()
975 if (indio_dev->available_scan_masks) { in iio_verify_update()
976 scan_mask = iio_scan_mask_match(indio_dev->available_scan_masks, in iio_verify_update()
981 return -EINVAL; in iio_verify_update()
986 config->scan_bytes = iio_compute_scan_bytes(indio_dev, in iio_verify_update()
988 config->scan_mask = scan_mask; in iio_verify_update()
989 config->scan_timestamp = scan_timestamp; in iio_verify_update()
995 * struct iio_demux_table - table describing demux memcpy ops
1012 list_for_each_entry_safe(p, q, &buffer->demux_list, l) { in iio_buffer_demux_free()
1013 list_del(&p->l); in iio_buffer_demux_free()
1023 if (*p && (*p)->from + (*p)->length == in_loc && in iio_buffer_add_demux()
1024 (*p)->to + (*p)->length == out_loc) { in iio_buffer_add_demux()
1025 (*p)->length += length; in iio_buffer_add_demux()
1029 return -ENOMEM; in iio_buffer_add_demux()
1030 (*p)->from = in_loc; in iio_buffer_add_demux()
1031 (*p)->to = out_loc; in iio_buffer_add_demux()
1032 (*p)->length = length; in iio_buffer_add_demux()
1033 list_add_tail(&(*p)->l, &buffer->demux_list); in iio_buffer_add_demux()
1043 int ret, in_ind = -1, out_ind, length; in iio_buffer_update_demux()
1049 kfree(buffer->demux_bounce); in iio_buffer_update_demux()
1050 buffer->demux_bounce = NULL; in iio_buffer_update_demux()
1053 if (bitmap_equal(indio_dev->active_scan_mask, in iio_buffer_update_demux()
1054 buffer->scan_mask, masklength)) in iio_buffer_update_demux()
1058 for_each_set_bit(out_ind, buffer->scan_mask, masklength) { in iio_buffer_update_demux()
1059 in_ind = find_next_bit(indio_dev->active_scan_mask, in iio_buffer_update_demux()
1069 in_ind = find_next_bit(indio_dev->active_scan_mask, in iio_buffer_update_demux()
1086 if (buffer->scan_timestamp) { in iio_buffer_update_demux()
1099 buffer->demux_bounce = kzalloc(out_loc, GFP_KERNEL); in iio_buffer_update_demux()
1100 if (!buffer->demux_bounce) { in iio_buffer_update_demux()
1101 ret = -ENOMEM; in iio_buffer_update_demux()
1118 list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) { in iio_update_demux()
1126 list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) in iio_update_demux()
1139 indio_dev->active_scan_mask = config->scan_mask; in iio_enable_buffers()
1140 ACCESS_PRIVATE(indio_dev, scan_timestamp) = config->scan_timestamp; in iio_enable_buffers()
1141 indio_dev->scan_bytes = config->scan_bytes; in iio_enable_buffers()
1142 iio_dev_opaque->currentmode = config->mode; in iio_enable_buffers()
1147 if (indio_dev->setup_ops->preenable) { in iio_enable_buffers()
1148 ret = indio_dev->setup_ops->preenable(indio_dev); in iio_enable_buffers()
1150 dev_dbg(&indio_dev->dev, in iio_enable_buffers()
1156 if (indio_dev->info->update_scan_mode) { in iio_enable_buffers()
1157 ret = indio_dev->info in iio_enable_buffers()
1158 ->update_scan_mode(indio_dev, in iio_enable_buffers()
1159 indio_dev->active_scan_mask); in iio_enable_buffers()
1161 dev_dbg(&indio_dev->dev, in iio_enable_buffers()
1168 if (indio_dev->info->hwfifo_set_watermark) in iio_enable_buffers()
1169 indio_dev->info->hwfifo_set_watermark(indio_dev, in iio_enable_buffers()
1170 config->watermark); in iio_enable_buffers()
1172 list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) { in iio_enable_buffers()
1180 if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) { in iio_enable_buffers()
1181 ret = iio_trigger_attach_poll_func(indio_dev->trig, in iio_enable_buffers()
1182 indio_dev->pollfunc); in iio_enable_buffers()
1187 if (indio_dev->setup_ops->postenable) { in iio_enable_buffers()
1188 ret = indio_dev->setup_ops->postenable(indio_dev); in iio_enable_buffers()
1190 dev_dbg(&indio_dev->dev, in iio_enable_buffers()
1199 if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) { in iio_enable_buffers()
1200 iio_trigger_detach_poll_func(indio_dev->trig, in iio_enable_buffers()
1201 indio_dev->pollfunc); in iio_enable_buffers()
1204 buffer = list_prepare_entry(tmp, &iio_dev_opaque->buffer_list, buffer_list); in iio_enable_buffers()
1205 list_for_each_entry_continue_reverse(buffer, &iio_dev_opaque->buffer_list, in iio_enable_buffers()
1209 if (indio_dev->setup_ops->postdisable) in iio_enable_buffers()
1210 indio_dev->setup_ops->postdisable(indio_dev); in iio_enable_buffers()
1212 iio_dev_opaque->currentmode = INDIO_DIRECT_MODE; in iio_enable_buffers()
1213 indio_dev->active_scan_mask = NULL; in iio_enable_buffers()
1225 /* Wind down existing buffers - iff there are any */ in iio_disable_buffers()
1226 if (list_empty(&iio_dev_opaque->buffer_list)) in iio_disable_buffers()
1236 if (indio_dev->setup_ops->predisable) { in iio_disable_buffers()
1237 ret2 = indio_dev->setup_ops->predisable(indio_dev); in iio_disable_buffers()
1242 if (iio_dev_opaque->currentmode == INDIO_BUFFER_TRIGGERED) { in iio_disable_buffers()
1243 iio_trigger_detach_poll_func(indio_dev->trig, in iio_disable_buffers()
1244 indio_dev->pollfunc); in iio_disable_buffers()
1247 list_for_each_entry(buffer, &iio_dev_opaque->buffer_list, buffer_list) { in iio_disable_buffers()
1253 if (indio_dev->setup_ops->postdisable) { in iio_disable_buffers()
1254 ret2 = indio_dev->setup_ops->postdisable(indio_dev); in iio_disable_buffers()
1259 iio_free_scan_mask(indio_dev, indio_dev->active_scan_mask); in iio_disable_buffers()
1260 indio_dev->active_scan_mask = NULL; in iio_disable_buffers()
1261 iio_dev_opaque->currentmode = INDIO_DIRECT_MODE; in iio_disable_buffers()
1295 if (list_empty(&iio_dev_opaque->buffer_list)) in __iio_update_buffers()
1330 insert_buffer->direction == IIO_BUFFER_DIRECTION_OUT) in iio_update_buffers()
1331 return -EINVAL; in iio_update_buffers()
1333 guard(mutex)(&iio_dev_opaque->info_exist_lock); in iio_update_buffers()
1334 guard(mutex)(&iio_dev_opaque->mlock); in iio_update_buffers()
1345 if (!indio_dev->info) in iio_update_buffers()
1346 return -ENODEV; in iio_update_buffers()
1358 static ssize_t enable_store(struct device *dev, struct device_attribute *attr, in enable_store() argument
1365 struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; in enable_store()
1372 guard(mutex)(&iio_dev_opaque->mlock); in enable_store()
1390 static ssize_t watermark_show(struct device *dev, struct device_attribute *attr, in watermark_show() argument
1393 struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; in watermark_show()
1395 return sysfs_emit(buf, "%u\n", buffer->watermark); in watermark_show()
1399 struct device_attribute *attr, in watermark_store() argument
1404 struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; in watermark_store()
1412 return -EINVAL; in watermark_store()
1414 guard(mutex)(&iio_dev_opaque->mlock); in watermark_store()
1416 if (val > buffer->length) in watermark_store()
1417 return -EINVAL; in watermark_store()
1420 return -EBUSY; in watermark_store()
1422 buffer->watermark = val; in watermark_store()
1428 struct device_attribute *attr, char *buf) in data_available_show() argument
1430 struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; in data_available_show()
1436 struct device_attribute *attr, in direction_show() argument
1439 struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer; in direction_show()
1441 switch (buffer->direction) { in direction_show()
1447 return -EINVAL; in direction_show()
1466 &dev_attr_length.attr,
1467 &dev_attr_enable.attr,
1468 &dev_attr_watermark.attr,
1469 &dev_attr_data_available.attr,
1470 &dev_attr_direction.attr,
1473 #define to_dev_attr(_attr) container_of(_attr, struct device_attribute, attr)
1476 struct attribute *attr) in iio_buffer_wrap_attr() argument
1478 struct device_attribute *dattr = to_dev_attr(attr); in iio_buffer_wrap_attr()
1485 iio_attr->buffer = buffer; in iio_buffer_wrap_attr()
1486 memcpy(&iio_attr->dev_attr, dattr, sizeof(iio_attr->dev_attr)); in iio_buffer_wrap_attr()
1487 iio_attr->dev_attr.attr.name = kstrdup_const(attr->name, GFP_KERNEL); in iio_buffer_wrap_attr()
1488 if (!iio_attr->dev_attr.attr.name) { in iio_buffer_wrap_attr()
1493 sysfs_attr_init(&iio_attr->dev_attr.attr); in iio_buffer_wrap_attr()
1495 list_add(&iio_attr->l, &buffer->buffer_attr_list); in iio_buffer_wrap_attr()
1497 return &iio_attr->dev_attr.attr; in iio_buffer_wrap_attr()
1512 return -ENOMEM; in iio_buffer_register_legacy_sysfs_groups()
1516 group = &iio_dev_opaque->legacy_buffer_group; in iio_buffer_register_legacy_sysfs_groups()
1517 group->attrs = attrs; in iio_buffer_register_legacy_sysfs_groups()
1518 group->name = "buffer"; in iio_buffer_register_legacy_sysfs_groups()
1526 ret = -ENOMEM; in iio_buffer_register_legacy_sysfs_groups()
1533 group = &iio_dev_opaque->legacy_scan_el_group; in iio_buffer_register_legacy_sysfs_groups()
1534 group->attrs = attrs; in iio_buffer_register_legacy_sysfs_groups()
1535 group->name = "scan_elements"; in iio_buffer_register_legacy_sysfs_groups()
1544 kfree(iio_dev_opaque->legacy_scan_el_group.attrs); in iio_buffer_register_legacy_sysfs_groups()
1546 kfree(iio_dev_opaque->legacy_buffer_group.attrs); in iio_buffer_register_legacy_sysfs_groups()
1555 kfree(iio_dev_opaque->legacy_buffer_group.attrs); in iio_buffer_unregister_legacy_sysfs_groups()
1556 kfree(iio_dev_opaque->legacy_scan_el_group.attrs); in iio_buffer_unregister_legacy_sysfs_groups()
1562 struct dma_buf_attachment *attach = priv->attach; in iio_buffer_dmabuf_release()
1563 struct iio_buffer *buffer = priv->buffer; in iio_buffer_dmabuf_release()
1564 struct dma_buf *dmabuf = attach->dmabuf; in iio_buffer_dmabuf_release()
1566 dma_resv_lock(dmabuf->resv, NULL); in iio_buffer_dmabuf_release()
1567 dma_buf_unmap_attachment(attach, priv->sgt, priv->dir); in iio_buffer_dmabuf_release()
1568 dma_resv_unlock(dmabuf->resv); in iio_buffer_dmabuf_release()
1570 buffer->access->detach_dmabuf(buffer, priv->block); in iio_buffer_dmabuf_release()
1572 dma_buf_detach(attach->dmabuf, attach); in iio_buffer_dmabuf_release()
1579 struct iio_dmabuf_priv *priv = attach->importer_priv; in iio_buffer_dmabuf_get()
1581 kref_get(&priv->ref); in iio_buffer_dmabuf_get()
1586 struct iio_dmabuf_priv *priv = attach->importer_priv; in iio_buffer_dmabuf_put()
1588 kref_put(&priv->ref, iio_buffer_dmabuf_release); in iio_buffer_dmabuf_put()
1593 struct iio_dev_buffer_pair *ib = filep->private_data; in iio_buffer_chrdev_release()
1594 struct iio_dev *indio_dev = ib->indio_dev; in iio_buffer_chrdev_release()
1595 struct iio_buffer *buffer = ib->buffer; in iio_buffer_chrdev_release()
1598 wake_up(&buffer->pollq); in iio_buffer_chrdev_release()
1600 guard(mutex)(&buffer->dmabufs_mutex); in iio_buffer_chrdev_release()
1603 list_for_each_entry_safe(priv, tmp, &buffer->dmabufs, entry) { in iio_buffer_chrdev_release()
1604 list_del_init(&priv->entry); in iio_buffer_chrdev_release()
1605 iio_buffer_dmabuf_put(priv->attach); in iio_buffer_chrdev_release()
1609 clear_bit(IIO_BUSY_BIT_POS, &buffer->flags); in iio_buffer_chrdev_release()
1618 return dma_resv_lock_interruptible(dmabuf->resv, NULL); in iio_dma_resv_lock()
1620 if (!dma_resv_trylock(dmabuf->resv)) in iio_dma_resv_lock()
1621 return -EBUSY; in iio_dma_resv_lock()
1630 struct device *dev = ib->indio_dev->dev.parent; in iio_buffer_find_attachment()
1631 struct iio_buffer *buffer = ib->buffer; in iio_buffer_find_attachment()
1635 guard(mutex)(&buffer->dmabufs_mutex); in iio_buffer_find_attachment()
1637 list_for_each_entry(priv, &buffer->dmabufs, entry) { in iio_buffer_find_attachment()
1638 if (priv->attach->dev == dev in iio_buffer_find_attachment()
1639 && priv->attach->dmabuf == dmabuf) { in iio_buffer_find_attachment()
1640 attach = priv->attach; in iio_buffer_find_attachment()
1648 return attach ?: ERR_PTR(-EPERM); in iio_buffer_find_attachment()
1654 struct iio_dev *indio_dev = ib->indio_dev; in iio_buffer_attach_dmabuf()
1655 struct iio_buffer *buffer = ib->buffer; in iio_buffer_attach_dmabuf()
1661 if (!buffer->access->attach_dmabuf in iio_buffer_attach_dmabuf()
1662 || !buffer->access->detach_dmabuf in iio_buffer_attach_dmabuf()
1663 || !buffer->access->enqueue_dmabuf) in iio_buffer_attach_dmabuf()
1664 return -EPERM; in iio_buffer_attach_dmabuf()
1667 return -EFAULT; in iio_buffer_attach_dmabuf()
1671 return -ENOMEM; in iio_buffer_attach_dmabuf()
1673 spin_lock_init(&priv->lock); in iio_buffer_attach_dmabuf()
1674 priv->context = dma_fence_context_alloc(1); in iio_buffer_attach_dmabuf()
1682 attach = dma_buf_attach(dmabuf, indio_dev->dev.parent); in iio_buffer_attach_dmabuf()
1692 priv->dir = buffer->direction == IIO_BUFFER_DIRECTION_IN in iio_buffer_attach_dmabuf()
1695 priv->sgt = dma_buf_map_attachment(attach, priv->dir); in iio_buffer_attach_dmabuf()
1696 if (IS_ERR(priv->sgt)) { in iio_buffer_attach_dmabuf()
1697 err = PTR_ERR(priv->sgt); in iio_buffer_attach_dmabuf()
1698 dev_err(&indio_dev->dev, "Unable to map attachment: %d\n", err); in iio_buffer_attach_dmabuf()
1702 kref_init(&priv->ref); in iio_buffer_attach_dmabuf()
1703 priv->buffer = buffer; in iio_buffer_attach_dmabuf()
1704 priv->attach = attach; in iio_buffer_attach_dmabuf()
1705 attach->importer_priv = priv; in iio_buffer_attach_dmabuf()
1707 priv->block = buffer->access->attach_dmabuf(buffer, attach); in iio_buffer_attach_dmabuf()
1708 if (IS_ERR(priv->block)) { in iio_buffer_attach_dmabuf()
1709 err = PTR_ERR(priv->block); in iio_buffer_attach_dmabuf()
1713 dma_resv_unlock(dmabuf->resv); in iio_buffer_attach_dmabuf()
1715 mutex_lock(&buffer->dmabufs_mutex); in iio_buffer_attach_dmabuf()
1721 list_for_each_entry(each, &buffer->dmabufs, entry) { in iio_buffer_attach_dmabuf()
1722 if (each->attach->dev == indio_dev->dev.parent in iio_buffer_attach_dmabuf()
1723 && each->attach->dmabuf == dmabuf) { in iio_buffer_attach_dmabuf()
1726 * the cleanup code would mean re-locking it first. in iio_buffer_attach_dmabuf()
1730 mutex_unlock(&buffer->dmabufs_mutex); in iio_buffer_attach_dmabuf()
1732 return -EBUSY; in iio_buffer_attach_dmabuf()
1737 list_add(&priv->entry, &buffer->dmabufs); in iio_buffer_attach_dmabuf()
1738 mutex_unlock(&buffer->dmabufs_mutex); in iio_buffer_attach_dmabuf()
1743 dma_buf_unmap_attachment(attach, priv->sgt, priv->dir); in iio_buffer_attach_dmabuf()
1745 dma_resv_unlock(dmabuf->resv); in iio_buffer_attach_dmabuf()
1759 struct iio_buffer *buffer = ib->buffer; in iio_buffer_detach_dmabuf()
1760 struct iio_dev *indio_dev = ib->indio_dev; in iio_buffer_detach_dmabuf()
1763 int dmabuf_fd, ret = -EPERM; in iio_buffer_detach_dmabuf()
1766 return -EFAULT; in iio_buffer_detach_dmabuf()
1772 guard(mutex)(&buffer->dmabufs_mutex); in iio_buffer_detach_dmabuf()
1774 list_for_each_entry(priv, &buffer->dmabufs, entry) { in iio_buffer_detach_dmabuf()
1775 if (priv->attach->dev == indio_dev->dev.parent in iio_buffer_detach_dmabuf()
1776 && priv->attach->dmabuf == dmabuf) { in iio_buffer_detach_dmabuf()
1777 list_del(&priv->entry); in iio_buffer_detach_dmabuf()
1780 iio_buffer_dmabuf_put(priv->attach); in iio_buffer_detach_dmabuf()
1815 struct iio_buffer *buffer = ib->buffer; in iio_buffer_enqueue_dmabuf()
1828 return -EFAULT; in iio_buffer_enqueue_dmabuf()
1831 return -EINVAL; in iio_buffer_enqueue_dmabuf()
1836 if (cyclic && buffer->direction != IIO_BUFFER_DIRECTION_OUT) in iio_buffer_enqueue_dmabuf()
1837 return -EINVAL; in iio_buffer_enqueue_dmabuf()
1843 if (!iio_dmabuf.bytes_used || iio_dmabuf.bytes_used > dmabuf->size) { in iio_buffer_enqueue_dmabuf()
1844 ret = -EINVAL; in iio_buffer_enqueue_dmabuf()
1854 priv = attach->importer_priv; in iio_buffer_enqueue_dmabuf()
1858 ret = -ENOMEM; in iio_buffer_enqueue_dmabuf()
1862 fence->priv = priv; in iio_buffer_enqueue_dmabuf()
1864 seqno = atomic_add_return(1, &priv->seqno); in iio_buffer_enqueue_dmabuf()
1871 dma_fence_init(&fence->base, &iio_buffer_dma_fence_ops, in iio_buffer_enqueue_dmabuf()
1872 &priv->lock, priv->context, seqno); in iio_buffer_enqueue_dmabuf()
1879 dma_to_ram = buffer->direction == IIO_BUFFER_DIRECTION_IN; in iio_buffer_enqueue_dmabuf()
1882 retl = dma_resv_wait_timeout(dmabuf->resv, in iio_buffer_enqueue_dmabuf()
1886 retl = -EBUSY; in iio_buffer_enqueue_dmabuf()
1892 if (buffer->access->lock_queue) in iio_buffer_enqueue_dmabuf()
1893 buffer->access->lock_queue(buffer); in iio_buffer_enqueue_dmabuf()
1895 ret = dma_resv_reserve_fences(dmabuf->resv, 1); in iio_buffer_enqueue_dmabuf()
1899 dma_resv_add_fence(dmabuf->resv, &fence->base, in iio_buffer_enqueue_dmabuf()
1901 dma_resv_unlock(dmabuf->resv); in iio_buffer_enqueue_dmabuf()
1905 ret = buffer->access->enqueue_dmabuf(buffer, priv->block, &fence->base, in iio_buffer_enqueue_dmabuf()
1906 priv->sgt, iio_dmabuf.bytes_used, in iio_buffer_enqueue_dmabuf()
1913 iio_buffer_signal_dmabuf_done(&fence->base, ret); in iio_buffer_enqueue_dmabuf()
1916 if (buffer->access->unlock_queue) in iio_buffer_enqueue_dmabuf()
1917 buffer->access->unlock_queue(buffer); in iio_buffer_enqueue_dmabuf()
1925 if (buffer->access->unlock_queue) in iio_buffer_enqueue_dmabuf()
1926 buffer->access->unlock_queue(buffer); in iio_buffer_enqueue_dmabuf()
1928 dma_resv_unlock(dmabuf->resv); in iio_buffer_enqueue_dmabuf()
1930 dma_fence_put(&fence->base); in iio_buffer_enqueue_dmabuf()
1943 struct iio_dmabuf_priv *priv = fence->priv; in iio_buffer_cleanup()
1944 struct dma_buf_attachment *attach = priv->attach; in iio_buffer_cleanup()
1946 dma_fence_put(&fence->base); in iio_buffer_cleanup()
1962 fence->error = ret; in iio_buffer_signal_dmabuf_done()
1971 INIT_WORK(&iio_fence->work, iio_buffer_cleanup); in iio_buffer_signal_dmabuf_done()
1972 schedule_work(&iio_fence->work); in iio_buffer_signal_dmabuf_done()
1979 struct iio_dev_buffer_pair *ib = filp->private_data; in iio_buffer_chrdev_ioctl()
1981 bool nonblock = filp->f_flags & O_NONBLOCK; in iio_buffer_chrdev_ioctl()
1991 return -EINVAL; in iio_buffer_chrdev_ioctl()
2015 return -EFAULT; in iio_device_buffer_getfd()
2017 if (idx >= iio_dev_opaque->attached_buffers_cnt) in iio_device_buffer_getfd()
2018 return -ENODEV; in iio_device_buffer_getfd()
2022 buffer = iio_dev_opaque->attached_buffers[idx]; in iio_device_buffer_getfd()
2024 if (test_and_set_bit(IIO_BUSY_BIT_POS, &buffer->flags)) { in iio_device_buffer_getfd()
2025 ret = -EBUSY; in iio_device_buffer_getfd()
2031 ret = -ENOMEM; in iio_device_buffer_getfd()
2035 ib->indio_dev = indio_dev; in iio_device_buffer_getfd()
2036 ib->buffer = buffer; in iio_device_buffer_getfd()
2056 return -EFAULT; in iio_device_buffer_getfd()
2064 clear_bit(IIO_BUSY_BIT_POS, &buffer->flags); in iio_device_buffer_getfd()
2085 if (scan_type->storagebits < scan_type->realbits + scan_type->shift) { in iio_channel_validate_scan_type()
2088 ch, scan_type->storagebits, in iio_channel_validate_scan_type()
2089 scan_type->realbits, in iio_channel_validate_scan_type()
2090 scan_type->shift); in iio_channel_validate_scan_type()
2091 return -EINVAL; in iio_channel_validate_scan_type()
2105 struct attribute **attr; in __iio_buffer_alloc_sysfs_and_mask() local
2110 if (buffer->attrs) { in __iio_buffer_alloc_sysfs_and_mask()
2111 while (buffer->attrs[buffer_attrcount]) in __iio_buffer_alloc_sysfs_and_mask()
2117 INIT_LIST_HEAD(&buffer->buffer_attr_list); in __iio_buffer_alloc_sysfs_and_mask()
2118 channels = indio_dev->channels; in __iio_buffer_alloc_sysfs_and_mask()
2121 for (i = 0; i < indio_dev->num_channels; i++) { in __iio_buffer_alloc_sysfs_and_mask()
2134 if (!indio_dev->info->get_current_scan_type) { in __iio_buffer_alloc_sysfs_and_mask()
2135 ret = -EINVAL; in __iio_buffer_alloc_sysfs_and_mask()
2143 &indio_dev->dev, i, scan_type); in __iio_buffer_alloc_sysfs_and_mask()
2151 &indio_dev->dev, i, scan_type); in __iio_buffer_alloc_sysfs_and_mask()
2162 iio_dev_opaque->scan_index_timestamp = in __iio_buffer_alloc_sysfs_and_mask()
2165 if (masklength && !buffer->scan_mask) { in __iio_buffer_alloc_sysfs_and_mask()
2166 buffer->scan_mask = bitmap_zalloc(masklength, in __iio_buffer_alloc_sysfs_and_mask()
2168 if (!buffer->scan_mask) { in __iio_buffer_alloc_sysfs_and_mask()
2169 ret = -ENOMEM; in __iio_buffer_alloc_sysfs_and_mask()
2176 attr = kcalloc(attrn + 1, sizeof(*attr), GFP_KERNEL); in __iio_buffer_alloc_sysfs_and_mask()
2177 if (!attr) { in __iio_buffer_alloc_sysfs_and_mask()
2178 ret = -ENOMEM; in __iio_buffer_alloc_sysfs_and_mask()
2182 memcpy(attr, iio_buffer_attrs, sizeof(iio_buffer_attrs)); in __iio_buffer_alloc_sysfs_and_mask()
2183 if (!buffer->access->set_length) in __iio_buffer_alloc_sysfs_and_mask()
2184 attr[0] = &dev_attr_length_ro.attr; in __iio_buffer_alloc_sysfs_and_mask()
2186 if (buffer->access->flags & INDIO_BUFFER_FLAG_FIXED_WATERMARK) in __iio_buffer_alloc_sysfs_and_mask()
2187 attr[2] = &dev_attr_watermark_ro.attr; in __iio_buffer_alloc_sysfs_and_mask()
2189 if (buffer->attrs) in __iio_buffer_alloc_sysfs_and_mask()
2190 for (i = 0, id_attr = buffer->attrs[i]; in __iio_buffer_alloc_sysfs_and_mask()
2191 (id_attr = buffer->attrs[i]); i++) in __iio_buffer_alloc_sysfs_and_mask()
2192 attr[ARRAY_SIZE(iio_buffer_attrs) + i] = in __iio_buffer_alloc_sysfs_and_mask()
2193 (struct attribute *)&id_attr->dev_attr.attr; in __iio_buffer_alloc_sysfs_and_mask()
2195 buffer->buffer_group.attrs = attr; in __iio_buffer_alloc_sysfs_and_mask()
2200 wrapped = iio_buffer_wrap_attr(buffer, attr[i]); in __iio_buffer_alloc_sysfs_and_mask()
2202 ret = -ENOMEM; in __iio_buffer_alloc_sysfs_and_mask()
2205 attr[i] = wrapped; in __iio_buffer_alloc_sysfs_and_mask()
2209 list_for_each_entry(p, &buffer->buffer_attr_list, l) in __iio_buffer_alloc_sysfs_and_mask()
2210 attr[attrn++] = &p->dev_attr.attr; in __iio_buffer_alloc_sysfs_and_mask()
2212 buffer->buffer_group.name = kasprintf(GFP_KERNEL, "buffer%d", index); in __iio_buffer_alloc_sysfs_and_mask()
2213 if (!buffer->buffer_group.name) { in __iio_buffer_alloc_sysfs_and_mask()
2214 ret = -ENOMEM; in __iio_buffer_alloc_sysfs_and_mask()
2218 ret = iio_device_register_sysfs_group(indio_dev, &buffer->buffer_group); in __iio_buffer_alloc_sysfs_and_mask()
2226 ret = iio_buffer_register_legacy_sysfs_groups(indio_dev, attr, in __iio_buffer_alloc_sysfs_and_mask()
2235 kfree(buffer->buffer_group.name); in __iio_buffer_alloc_sysfs_and_mask()
2237 kfree(buffer->buffer_group.attrs); in __iio_buffer_alloc_sysfs_and_mask()
2239 bitmap_free(buffer->scan_mask); in __iio_buffer_alloc_sysfs_and_mask()
2241 iio_free_chan_devattr_list(&buffer->buffer_attr_list); in __iio_buffer_alloc_sysfs_and_mask()
2252 bitmap_free(buffer->scan_mask); in __iio_buffer_free_sysfs_and_mask()
2253 kfree(buffer->buffer_group.name); in __iio_buffer_free_sysfs_and_mask()
2254 kfree(buffer->buffer_group.attrs); in __iio_buffer_free_sysfs_and_mask()
2255 iio_free_chan_devattr_list(&buffer->buffer_attr_list); in __iio_buffer_free_sysfs_and_mask()
2266 channels = indio_dev->channels; in iio_buffers_alloc_sysfs_and_mask()
2270 for (i = 0; i < indio_dev->num_channels; i++) in iio_buffers_alloc_sysfs_and_mask()
2275 if (!iio_dev_opaque->attached_buffers_cnt) in iio_buffers_alloc_sysfs_and_mask()
2278 for (idx = 0; idx < iio_dev_opaque->attached_buffers_cnt; idx++) { in iio_buffers_alloc_sysfs_and_mask()
2279 buffer = iio_dev_opaque->attached_buffers[idx]; in iio_buffers_alloc_sysfs_and_mask()
2285 sz = sizeof(*iio_dev_opaque->buffer_ioctl_handler); in iio_buffers_alloc_sysfs_and_mask()
2286 iio_dev_opaque->buffer_ioctl_handler = kzalloc(sz, GFP_KERNEL); in iio_buffers_alloc_sysfs_and_mask()
2287 if (!iio_dev_opaque->buffer_ioctl_handler) { in iio_buffers_alloc_sysfs_and_mask()
2288 ret = -ENOMEM; in iio_buffers_alloc_sysfs_and_mask()
2292 iio_dev_opaque->buffer_ioctl_handler->ioctl = iio_device_buffer_ioctl; in iio_buffers_alloc_sysfs_and_mask()
2294 iio_dev_opaque->buffer_ioctl_handler); in iio_buffers_alloc_sysfs_and_mask()
2299 while (idx--) { in iio_buffers_alloc_sysfs_and_mask()
2300 buffer = iio_dev_opaque->attached_buffers[idx]; in iio_buffers_alloc_sysfs_and_mask()
2312 if (!iio_dev_opaque->attached_buffers_cnt) in iio_buffers_free_sysfs_and_mask()
2315 iio_device_ioctl_handler_unregister(iio_dev_opaque->buffer_ioctl_handler); in iio_buffers_free_sysfs_and_mask()
2316 kfree(iio_dev_opaque->buffer_ioctl_handler); in iio_buffers_free_sysfs_and_mask()
2318 for (i = iio_dev_opaque->attached_buffers_cnt - 1; i >= 0; i--) { in iio_buffers_free_sysfs_and_mask()
2319 buffer = iio_dev_opaque->attached_buffers[i]; in iio_buffers_free_sysfs_and_mask()
2325 * iio_validate_scan_mask_onehot() - Validates that exactly one channel is selected
2345 if (list_empty(&buffer->demux_list)) in iio_demux()
2347 list_for_each_entry(t, &buffer->demux_list, l) in iio_demux()
2348 memcpy(buffer->demux_bounce + t->to, in iio_demux()
2349 datain + t->from, t->length); in iio_demux()
2351 return buffer->demux_bounce; in iio_demux()
2359 ret = buffer->access->store_to(buffer, dataout); in iio_push_to_buffer()
2367 wake_up_interruptible_poll(&buffer->pollq, EPOLLIN | EPOLLRDNORM); in iio_push_to_buffer()
2372 * iio_push_to_buffers() - push to a registered buffer.
2382 list_for_each_entry(buf, &iio_dev_opaque->buffer_list, buffer_list) { in iio_push_to_buffers()
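
iio_push_to_buffers() walks buffer_list and hands the same scan to every active buffer; drivers typically reach it through iio_push_to_buffers_with_timestamp() from a trigger handler. A hedged sketch of such a caller, where struct my_state, read_sample() and the scan layout are invented for the example:

	#include <linux/interrupt.h>
	#include <linux/iio/iio.h>
	#include <linux/iio/buffer.h>
	#include <linux/iio/trigger.h>
	#include <linux/iio/trigger_consumer.h>

	/* Illustrative pollfunc bottom half; my_state and read_sample()
	 * are assumptions, not part of this file. */
	static irqreturn_t my_trigger_handler(int irq, void *p)
	{
		struct iio_poll_func *pf = p;
		struct iio_dev *indio_dev = pf->indio_dev;
		struct my_state *st = iio_priv(indio_dev);
		struct {
			s16 chan[2];
			s64 timestamp __aligned(8);	/* slot the core fills in */
		} scan = { };

		scan.chan[0] = read_sample(st, 0);
		scan.chan[1] = read_sample(st, 1);

		iio_push_to_buffers_with_timestamp(indio_dev, &scan,
						   iio_get_time_ns(indio_dev));

		iio_trigger_notify_done(indio_dev->trig);

		return IRQ_HANDLED;
	}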
2393 * iio_push_to_buffers_with_ts_unaligned() - push to registered buffer,
2413 * Conservative estimate - we can always safely copy the minimum in iio_push_to_buffers_with_ts_unaligned()
2419 data_sz = min_t(size_t, indio_dev->scan_bytes, data_sz); in iio_push_to_buffers_with_ts_unaligned()
2420 if (iio_dev_opaque->bounce_buffer_size != indio_dev->scan_bytes) { in iio_push_to_buffers_with_ts_unaligned()
2423 bb = devm_krealloc(&indio_dev->dev, in iio_push_to_buffers_with_ts_unaligned()
2424 iio_dev_opaque->bounce_buffer, in iio_push_to_buffers_with_ts_unaligned()
2425 indio_dev->scan_bytes, GFP_KERNEL); in iio_push_to_buffers_with_ts_unaligned()
2427 return -ENOMEM; in iio_push_to_buffers_with_ts_unaligned()
2428 iio_dev_opaque->bounce_buffer = bb; in iio_push_to_buffers_with_ts_unaligned()
2429 iio_dev_opaque->bounce_buffer_size = indio_dev->scan_bytes; in iio_push_to_buffers_with_ts_unaligned()
2431 memcpy(iio_dev_opaque->bounce_buffer, data, data_sz); in iio_push_to_buffers_with_ts_unaligned()
2433 iio_dev_opaque->bounce_buffer, in iio_push_to_buffers_with_ts_unaligned()
2439 * iio_buffer_release() - Free a buffer's resources
2451 mutex_destroy(&buffer->dmabufs_mutex); in iio_buffer_release()
2452 buffer->access->release(buffer); in iio_buffer_release()
2456 * iio_buffer_get() - Grab a reference to the buffer
2464 kref_get(&buffer->ref); in iio_buffer_get()
2471 * iio_buffer_put() - Release the reference to the buffer
2477 kref_put(&buffer->ref, iio_buffer_release); in iio_buffer_put()
2482 * iio_device_attach_buffer - Attach a buffer to an IIO device
2490 * buffer will also be assigned to 'indio_dev->buffer'.
2498 struct iio_buffer **new, **old = iio_dev_opaque->attached_buffers; in iio_device_attach_buffer()
2499 unsigned int cnt = iio_dev_opaque->attached_buffers_cnt; in iio_device_attach_buffer()
2505 return -ENOMEM; in iio_device_attach_buffer()
2506 iio_dev_opaque->attached_buffers = new; in iio_device_attach_buffer()
2511 if (!indio_dev->buffer) in iio_device_attach_buffer()
2512 indio_dev->buffer = buffer; in iio_device_attach_buffer()
2514 iio_dev_opaque->attached_buffers[cnt - 1] = buffer; in iio_device_attach_buffer()
2515 iio_dev_opaque->attached_buffers_cnt = cnt; in iio_device_attach_buffer()