// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2013-2015 Analog Devices Inc.
 *  Author: Lars-Peter Clausen <lars@metafoo.de>
 */

#include <linux/atomic.h>
#include <linux/cleanup.h>
#include <linux/lockdep.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/poll.h>
#include <linux/iio/buffer_impl.h>
#include <linux/iio/buffer-dma.h>
#include <linux/dma-buf.h>
#include <linux/dma-fence.h>
#include <linux/dma-mapping.h>
#include <linux/sizes.h>

/*
 * For DMA buffers the storage is sub-divided into so called blocks. Each block
 * has its own memory buffer. The size of the block is the granularity at which
 * memory is exchanged between the hardware and the application. Increasing the
 * basic unit of data exchange from one sample to one block decreases the
 * management overhead that is associated with each sample. E.g. if we say the
 * management overhead for one exchange is x and the unit of exchange is one
 * sample, the overhead will be x for each sample. Whereas when using a block
 * which contains n samples the overhead per sample is reduced to x/n. This
 * makes it possible to achieve much higher sample rates than what can be
 * sustained with the one sample approach.
 *
 * Blocks are exchanged between the DMA controller and the application via the
 * means of two queues. The incoming queue and the outgoing queue. Blocks on the
 * incoming queue are waiting for the DMA controller to pick them up and fill
 * them with data. Blocks on the outgoing queue have been filled with data and
 * are waiting for the application to dequeue them and read the data.
 *
 * A block can be in one of the following states:
 *  * Owned by the application. In this state the application can read data from
 *    the block.
 *  * On the incoming list: Blocks on the incoming list are queued up to be
 *    processed by the DMA controller.
 *  * Owned by the DMA controller: The DMA controller is processing the block
 *    and filling it with data.
 *  * On the outgoing list: Blocks on the outgoing list have been successfully
 *    processed by the DMA controller and contain data. They can be dequeued by
 *    the application.
 *  * Dead: A block that is dead has been marked as to be freed. It might still
 *    be owned by either the application or the DMA controller at the moment.
 *    But once they are done processing it, instead of going to either the
 *    incoming or outgoing queue, the block will be freed.
 *
 * In addition to this blocks are reference counted and the memory associated
 * with both the block structure as well as the storage memory for the block
 * will be freed when the last reference to the block is dropped. This means a
 * block must not be accessed without holding a reference.
 *
 * The iio_dma_buffer implementation provides a generic infrastructure for
 * managing the blocks.
 *
 * A driver for a specific piece of hardware that has DMA capabilities needs to
 * implement the submit() callback from the iio_dma_buffer_ops structure. This
 * callback is supposed to initiate the DMA transfer copying data from the
 * converter to the memory region of the block. Once the DMA transfer has been
 * completed the driver must call iio_dma_buffer_block_done() for the completed
 * block.
 *
 * Prior to this it must set the bytes_used field of the block so that it
 * contains the actual number of bytes in the buffer. Typically this will be
 * equal to the size of the block, but if the DMA hardware has certain
 * alignment requirements for the transfer length it might choose to use less
 * than the full size. In either case it is expected that bytes_used is a
 * multiple of the bytes per datum, i.e. the block must not contain partial
 * samples.
 *
 * The driver must call iio_dma_buffer_block_done() for each block it has
 * received through its submit() callback, even if it does not actually
 * perform a DMA transfer for the block, e.g. because the buffer was disabled
 * before the block transfer was started. In this case it should set bytes_used
 * to 0.
 *
 * In addition it is recommended that a driver implements the abort() callback.
 * It will be called when the buffer is disabled and can be used to cancel
 * pending and stop active transfers.
 *
 * The specific driver implementation should use the default callback
 * implementations provided by this module for the iio_buffer_access_funcs
 * struct. It may overload some callbacks with custom variants if the hardware
 * has special requirements that are not handled by the generic functions. If a
 * driver chooses to overload a callback it has to ensure that the generic
 * callback is called from within the custom callback.
 */
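
/*
 * Example: a rough sketch of the driver side of this contract. The foo_*
 * names, the foo_dma_buffer structure and the foo_hw_start_dma() /
 * foo_hw_stop_dma() helpers are hypothetical; they only mark where a real
 * driver hooks up its DMA engine. The submit() callback starts a transfer
 * for one block and the completion handler reports it back via
 * iio_dma_buffer_block_done():
 *
 *	struct foo_dma_buffer {
 *		struct iio_dma_buffer_queue queue;
 *		struct list_head active;
 *		// ... DMA engine handles, etc.
 *	};
 *
 *	static void foo_dma_complete(void *data)
 *	{
 *		struct iio_dma_buffer_block *block = data;
 *
 *		// Must be a multiple of bytes per datum; here the whole block.
 *		block->bytes_used = block->size;
 *		iio_dma_buffer_block_done(block);
 *	}
 *
 *	static int foo_submit_block(struct iio_dma_buffer_queue *queue,
 *				    struct iio_dma_buffer_block *block)
 *	{
 *		struct foo_dma_buffer *foo = container_of(queue,
 *			struct foo_dma_buffer, queue);
 *
 *		// Hypothetical helper that programs the DMA controller and
 *		// calls foo_dma_complete(block) once the transfer finishes.
 *		// block->phys_addr is valid for fileio blocks; DMABUF blocks
 *		// carry an sg_table instead.
 *		return foo_hw_start_dma(foo, block->phys_addr, block->size,
 *					foo_dma_complete, block);
 *	}
 *
 *	static const struct iio_dma_buffer_ops foo_dma_buffer_ops = {
 *		.submit = foo_submit_block,
 *		.abort = foo_abort,	// see the abort() sketch further below
 *	};
 */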

static void iio_buffer_block_release(struct kref *kref)
{
	struct iio_dma_buffer_block *block = container_of(kref,
		struct iio_dma_buffer_block, kref);
	struct iio_dma_buffer_queue *queue = block->queue;

	WARN_ON(block->fileio && block->state != IIO_BLOCK_STATE_DEAD);

	if (block->fileio) {
		dma_free_coherent(queue->dev, PAGE_ALIGN(block->size),
				  block->vaddr, block->phys_addr);
	} else {
		atomic_dec(&queue->num_dmabufs);
	}

	iio_buffer_put(&queue->buffer);
	kfree(block);
}

static void iio_buffer_block_get(struct iio_dma_buffer_block *block)
{
	kref_get(&block->kref);
}

static void iio_buffer_block_put(struct iio_dma_buffer_block *block)
{
	kref_put(&block->kref, iio_buffer_block_release);
}

/*
 * dma_free_coherent can sleep, hence we need to take some special care to be
 * able to drop a reference from an atomic context.
 */
static LIST_HEAD(iio_dma_buffer_dead_blocks);
static DEFINE_SPINLOCK(iio_dma_buffer_dead_blocks_lock);

static void iio_dma_buffer_cleanup_worker(struct work_struct *work)
{
	struct iio_dma_buffer_block *block, *_block;
	LIST_HEAD(block_list);

	scoped_guard(spinlock_irq, &iio_dma_buffer_dead_blocks_lock)
		list_splice_tail_init(&iio_dma_buffer_dead_blocks, &block_list);

	list_for_each_entry_safe(block, _block, &block_list, head)
		iio_buffer_block_release(&block->kref);
}
static DECLARE_WORK(iio_dma_buffer_cleanup_work, iio_dma_buffer_cleanup_worker);

static void iio_buffer_block_release_atomic(struct kref *kref)
{
	struct iio_dma_buffer_block *block;

	block = container_of(kref, struct iio_dma_buffer_block, kref);

	scoped_guard(spinlock_irqsave, &iio_dma_buffer_dead_blocks_lock)
		list_add_tail(&block->head, &iio_dma_buffer_dead_blocks);

	schedule_work(&iio_dma_buffer_cleanup_work);
}

/*
 * Version of iio_buffer_block_put() that can be called from atomic context
 */
static void iio_buffer_block_put_atomic(struct iio_dma_buffer_block *block)
{
	kref_put(&block->kref, iio_buffer_block_release_atomic);
}

static struct iio_dma_buffer_queue *iio_buffer_to_queue(struct iio_buffer *buf)
{
	return container_of(buf, struct iio_dma_buffer_queue, buffer);
}

static struct iio_dma_buffer_block *
iio_dma_buffer_alloc_block(struct iio_dma_buffer_queue *queue, size_t size,
			   bool fileio)
{
	struct iio_dma_buffer_block *block __free(kfree) =
		kzalloc_obj(*block);
	if (!block)
		return NULL;

	if (fileio) {
		block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),
						  &block->phys_addr, GFP_KERNEL);
		if (!block->vaddr)
			return NULL;
	}

	block->fileio = fileio;
	block->size = size;
	block->state = IIO_BLOCK_STATE_DONE;
	block->queue = queue;
	INIT_LIST_HEAD(&block->head);
	kref_init(&block->kref);

	iio_buffer_get(&queue->buffer);

	if (!fileio)
		atomic_inc(&queue->num_dmabufs);

	return_ptr(block);
}

static void _iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
{
	if (block->state != IIO_BLOCK_STATE_DEAD)
		block->state = IIO_BLOCK_STATE_DONE;
}

static void iio_dma_buffer_queue_wake(struct iio_dma_buffer_queue *queue)
{
	__poll_t flags;

	if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN)
		flags = EPOLLIN | EPOLLRDNORM;
	else
		flags = EPOLLOUT | EPOLLWRNORM;

	wake_up_interruptible_poll(&queue->buffer.pollq, flags);
}

/**
 * iio_dma_buffer_block_done() - Indicate that a block has been completed
 * @block: The completed block
 *
 * Should be called when the DMA controller has finished handling the block to
 * pass back ownership of the block to the queue.
 */
void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
{
	struct iio_dma_buffer_queue *queue = block->queue;
	bool cookie;

	cookie = dma_fence_begin_signalling();

	scoped_guard(spinlock_irqsave, &queue->list_lock)
		_iio_dma_buffer_block_done(block);

	if (!block->fileio)
		iio_buffer_signal_dmabuf_done(block->fence, 0);

	iio_buffer_block_put_atomic(block);
	iio_dma_buffer_queue_wake(queue);
	dma_fence_end_signalling(cookie);
}
EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_block_done, "IIO_DMA_BUFFER");

/**
 * iio_dma_buffer_block_list_abort() - Indicate that a list of blocks has been
 *   aborted
 * @queue: Queue for which to complete blocks.
 * @list: List of aborted blocks. All blocks in this list must be from @queue.
 *
 * Typically called from the abort() callback after the DMA controller has been
 * stopped. This will set bytes_used to 0 for each block in the list and then
 * hand the blocks back to the queue.
 */
void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue,
				     struct list_head *list)
{
	struct iio_dma_buffer_block *block, *_block;
	bool cookie;

	cookie = dma_fence_begin_signalling();

	scoped_guard(spinlock_irqsave, &queue->list_lock) {
		list_for_each_entry_safe(block, _block, list, head) {
			list_del(&block->head);
			block->bytes_used = 0;
			_iio_dma_buffer_block_done(block);

			if (!block->fileio)
				iio_buffer_signal_dmabuf_done(block->fence,
							      -EINTR);
			iio_buffer_block_put_atomic(block);
		}
	}

	if (queue->fileio.enabled)
		queue->fileio.enabled = false;

	iio_dma_buffer_queue_wake(queue);
	dma_fence_end_signalling(cookie);
}
EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_block_list_abort, "IIO_DMA_BUFFER");
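
/*
 * Example: a possible abort() callback pairing with the submit() sketch
 * above. The foo_* names are hypothetical; the idea is that the driver stops
 * its DMA engine and then hands every block it still owns back via
 * iio_dma_buffer_block_list_abort():
 *
 *	static void foo_abort(struct iio_dma_buffer_queue *queue)
 *	{
 *		struct foo_dma_buffer *foo = container_of(queue,
 *			struct foo_dma_buffer, queue);
 *
 *		// Hypothetical helper that cancels all in-flight transfers.
 *		foo_hw_stop_dma(foo);
 *
 *		// foo->active is a driver-maintained list of blocks that were
 *		// handed to submit() but have not been completed yet.
 *		iio_dma_buffer_block_list_abort(queue, &foo->active);
 *	}
 */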

static bool iio_dma_block_reusable(struct iio_dma_buffer_block *block)
{
	/*
	 * If the core owns the block it can be re-used. This should be the
	 * default case when enabling the buffer, unless the DMA controller does
	 * not support abort and has not given back the block yet.
	 */
	switch (block->state) {
	case IIO_BLOCK_STATE_QUEUED:
	case IIO_BLOCK_STATE_DONE:
		return true;
	default:
		return false;
	}
}

static bool iio_dma_buffer_can_use_fileio(struct iio_dma_buffer_queue *queue)
{
	/*
	 * Note that queue->num_dmabufs cannot increase while the queue is
	 * locked, it can only decrease, so it does not race against
	 * iio_dma_buffer_alloc_block().
	 */
	return queue->fileio.enabled || !atomic_read(&queue->num_dmabufs);
}

/**
 * iio_dma_buffer_request_update() - DMA buffer request_update callback
 * @buffer: The buffer for which to request an update
 *
 * Should be used as the request_update() callback for the
 * iio_buffer_access_funcs struct for DMA buffers.
 */
int iio_dma_buffer_request_update(struct iio_buffer *buffer)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
	struct iio_dma_buffer_block *block;
	bool try_reuse = false;
	size_t size;
	int i;

	/*
	 * Split the buffer into two even parts. This is used as a double
	 * buffering scheme with usually one block at a time being used by the
	 * DMA and the other one by the application.
	 */
	size = DIV_ROUND_UP(queue->buffer.bytes_per_datum *
			    queue->buffer.length, 2);

	guard(mutex)(&queue->lock);

	queue->fileio.enabled = iio_dma_buffer_can_use_fileio(queue);

	/* If DMABUFs were created, disable fileio interface */
	if (!queue->fileio.enabled)
		return 0;

	/* Allocations are page aligned */
	if (PAGE_ALIGN(queue->fileio.block_size) == PAGE_ALIGN(size))
		try_reuse = true;

	queue->fileio.block_size = size;
	queue->fileio.active_block = NULL;

	scoped_guard(spinlock_irq, &queue->list_lock) {
		for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
			block = queue->fileio.blocks[i];

			/* If we can't reuse it, free it */
			if (block && (!iio_dma_block_reusable(block) || !try_reuse))
				block->state = IIO_BLOCK_STATE_DEAD;
		}

		/*
		 * At this point all blocks are either owned by the core or
		 * marked as dead. This means we can reset the lists without
		 * having to fear corruption.
		 */
	}

	INIT_LIST_HEAD(&queue->incoming);

	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
		if (queue->fileio.blocks[i]) {
			block = queue->fileio.blocks[i];
			if (block->state == IIO_BLOCK_STATE_DEAD) {
				/* Could not reuse it */
				iio_buffer_block_put(block);
				block = NULL;
			} else {
				block->size = size;
			}
		} else {
			block = NULL;
		}

		if (!block) {
			block = iio_dma_buffer_alloc_block(queue, size, true);
			if (!block)
				return -ENOMEM;

			queue->fileio.blocks[i] = block;
		}

		/*
		 * block->bytes_used may have been modified previously, e.g. by
		 * iio_dma_buffer_block_list_abort(). Reset it here to the
		 * block's size so that iio_dma_buffer_io() will work.
		 */
		block->bytes_used = block->size;

		/*
		 * If it's an input buffer, mark the block as queued, and
		 * iio_dma_buffer_enable() will submit it. Otherwise mark it as
		 * done, which means it's ready to be dequeued.
		 */
		if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN) {
			block->state = IIO_BLOCK_STATE_QUEUED;
			list_add_tail(&block->head, &queue->incoming);
		} else {
			block->state = IIO_BLOCK_STATE_DONE;
		}
	}

	return 0;
}
EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_request_update, "IIO_DMA_BUFFER");

static void iio_dma_buffer_fileio_free(struct iio_dma_buffer_queue *queue)
{
	unsigned int i;

	scoped_guard(spinlock_irq, &queue->list_lock) {
		for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
			if (!queue->fileio.blocks[i])
				continue;
			queue->fileio.blocks[i]->state = IIO_BLOCK_STATE_DEAD;
		}
	}

	INIT_LIST_HEAD(&queue->incoming);

	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
		if (!queue->fileio.blocks[i])
			continue;
		iio_buffer_block_put(queue->fileio.blocks[i]);
		queue->fileio.blocks[i] = NULL;
	}
	queue->fileio.active_block = NULL;
}

static void iio_dma_buffer_submit_block(struct iio_dma_buffer_queue *queue,
					struct iio_dma_buffer_block *block)
{
	int ret;

	/*
	 * If the hardware has already been removed we put the block into
	 * limbo. It will neither be on the incoming nor outgoing list, nor will
	 * it ever complete. It will just wait to be freed eventually.
	 */
	if (!queue->ops)
		return;

	block->state = IIO_BLOCK_STATE_ACTIVE;
	iio_buffer_block_get(block);

	ret = queue->ops->submit(queue, block);
	if (ret) {
		if (!block->fileio)
			iio_buffer_signal_dmabuf_done(block->fence, ret);

		/*
		 * This is a bit of a problem and there is not much we can do
		 * other than wait for the buffer to be disabled and re-enabled
		 * and try again. But it should not really happen unless we run
		 * out of memory or something similar.
		 *
		 * TODO: Implement support in the IIO core to allow buffers to
		 * notify consumers that something went wrong and the buffer
		 * should be disabled.
		 */
		iio_buffer_block_put(block);
	}
}

/**
 * iio_dma_buffer_enable() - Enable DMA buffer
 * @buffer: IIO buffer to enable
 * @indio_dev: IIO device the buffer is attached to
 *
 * Needs to be called when the device that the buffer is attached to starts
 * sampling. Typically should be the iio_buffer_access_funcs enable callback.
 *
 * This will allocate the DMA buffers and start the DMA transfers.
 */
int iio_dma_buffer_enable(struct iio_buffer *buffer, struct iio_dev *indio_dev)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
	struct iio_dma_buffer_block *block, *_block;

	guard(mutex)(&queue->lock);
	queue->active = true;
	list_for_each_entry_safe(block, _block, &queue->incoming, head) {
		list_del(&block->head);
		iio_dma_buffer_submit_block(queue, block);
	}

	return 0;
}
EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_enable, "IIO_DMA_BUFFER");

/**
 * iio_dma_buffer_disable() - Disable DMA buffer
 * @buffer: IIO DMA buffer to disable
 * @indio_dev: IIO device the buffer is attached to
 *
 * Needs to be called when the device that the buffer is attached to stops
 * sampling. Typically should be the iio_buffer_access_funcs disable callback.
 */
int iio_dma_buffer_disable(struct iio_buffer *buffer, struct iio_dev *indio_dev)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);

	guard(mutex)(&queue->lock);
	queue->active = false;

	if (queue->ops && queue->ops->abort)
		queue->ops->abort(queue);

	return 0;
}
EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_disable, "IIO_DMA_BUFFER");

static void iio_dma_buffer_enqueue(struct iio_dma_buffer_queue *queue,
				   struct iio_dma_buffer_block *block)
{
	if (block->state == IIO_BLOCK_STATE_DEAD) {
		iio_buffer_block_put(block);
	} else if (queue->active) {
		iio_dma_buffer_submit_block(queue, block);
	} else {
		block->state = IIO_BLOCK_STATE_QUEUED;
		list_add_tail(&block->head, &queue->incoming);
	}
}

static struct iio_dma_buffer_block *
iio_dma_buffer_dequeue(struct iio_dma_buffer_queue *queue)
{
	struct iio_dma_buffer_block *block;
	unsigned int idx;

	guard(spinlock_irq)(&queue->list_lock);

	idx = queue->fileio.next_dequeue;
	block = queue->fileio.blocks[idx];

	if (block->state != IIO_BLOCK_STATE_DONE)
		return NULL;

	idx = (idx + 1) % ARRAY_SIZE(queue->fileio.blocks);
	queue->fileio.next_dequeue = idx;

	return block;
}

static int iio_dma_buffer_io(struct iio_buffer *buffer, size_t n,
			     char __user *user_buffer, bool is_from_user)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
	struct iio_dma_buffer_block *block;
	void *addr;
	int ret;

	if (n < buffer->bytes_per_datum)
		return -EINVAL;

	guard(mutex)(&queue->lock);

	if (!queue->fileio.active_block) {
		block = iio_dma_buffer_dequeue(queue);
		if (!block)
			return 0;

		queue->fileio.pos = 0;
		queue->fileio.active_block = block;
	} else {
		block = queue->fileio.active_block;
	}

	n = rounddown(n, buffer->bytes_per_datum);
	if (n > block->bytes_used - queue->fileio.pos)
		n = block->bytes_used - queue->fileio.pos;
	addr = block->vaddr + queue->fileio.pos;

	if (is_from_user)
		ret = copy_from_user(addr, user_buffer, n);
	else
		ret = copy_to_user(user_buffer, addr, n);
	if (ret)
		return -EFAULT;

	queue->fileio.pos += n;

	if (queue->fileio.pos == block->bytes_used) {
		queue->fileio.active_block = NULL;
		iio_dma_buffer_enqueue(queue, block);
	}

	return n;
}

/**
 * iio_dma_buffer_read() - DMA buffer read callback
 * @buffer: Buffer to read from
 * @n: Number of bytes to read
 * @user_buffer: Userspace buffer to copy the data to
 *
 * Should be used as the read callback for the iio_buffer_access_funcs
 * struct for DMA buffers.
 */
int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
			char __user *user_buffer)
{
	return iio_dma_buffer_io(buffer, n, user_buffer, false);
}
EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_read, "IIO_DMA_BUFFER");

/**
 * iio_dma_buffer_write() - DMA buffer write callback
 * @buffer: Buffer to write to
 * @n: Number of bytes to write
 * @user_buffer: Userspace buffer to copy the data from
 *
 * Should be used as the write callback for the iio_buffer_access_funcs
 * struct for DMA buffers.
 */
int iio_dma_buffer_write(struct iio_buffer *buffer, size_t n,
			 const char __user *user_buffer)
{
	return iio_dma_buffer_io(buffer, n,
				 (__force __user char *)user_buffer, true);
}
EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_write, "IIO_DMA_BUFFER");

/**
 * iio_dma_buffer_usage() - DMA buffer data_available and
 *	space_available callback
 * @buf: Buffer to check for data availability
 *
 * Should be used as the data_available and space_available callbacks for
 * the iio_buffer_access_funcs struct for DMA buffers.
 */
size_t iio_dma_buffer_usage(struct iio_buffer *buf)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buf);
	struct iio_dma_buffer_block *block;
	size_t data_available = 0;
	unsigned int i;

	/*
	 * For counting the available bytes we'll use the size of the block not
	 * the number of actual bytes available in the block. Otherwise it is
	 * possible that we end up with a value that is lower than the watermark
	 * but won't increase since all blocks are in use.
	 */

	guard(mutex)(&queue->lock);
	if (queue->fileio.active_block)
		data_available += queue->fileio.active_block->size;

	guard(spinlock_irq)(&queue->list_lock);

	for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) {
		block = queue->fileio.blocks[i];

		if (block != queue->fileio.active_block &&
		    block->state == IIO_BLOCK_STATE_DONE)
			data_available += block->size;
	}

	return data_available;
}
EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_usage, "IIO_DMA_BUFFER");

struct iio_dma_buffer_block *
iio_dma_buffer_attach_dmabuf(struct iio_buffer *buffer,
			     struct dma_buf_attachment *attach)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
	struct iio_dma_buffer_block *block;

	guard(mutex)(&queue->lock);

	/*
	 * If the buffer is enabled and in fileio mode new blocks can't be
	 * allocated.
	 */
	if (queue->fileio.enabled)
		return ERR_PTR(-EBUSY);

	block = iio_dma_buffer_alloc_block(queue, attach->dmabuf->size, false);
	if (!block)
		return ERR_PTR(-ENOMEM);

	/* Free memory that might be in use for fileio mode */
	iio_dma_buffer_fileio_free(queue);

	return block;
}
EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_attach_dmabuf, "IIO_DMA_BUFFER");

void iio_dma_buffer_detach_dmabuf(struct iio_buffer *buffer,
				  struct iio_dma_buffer_block *block)
{
	block->state = IIO_BLOCK_STATE_DEAD;
	iio_buffer_block_put_atomic(block);
}
EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_detach_dmabuf, "IIO_DMA_BUFFER");

static int iio_dma_can_enqueue_block(struct iio_dma_buffer_block *block)
{
	struct iio_dma_buffer_queue *queue = block->queue;

	/* If in fileio mode buffers can't be enqueued. */
	if (queue->fileio.enabled)
		return -EBUSY;

	switch (block->state) {
	case IIO_BLOCK_STATE_QUEUED:
		return -EPERM;
	case IIO_BLOCK_STATE_ACTIVE:
	case IIO_BLOCK_STATE_DEAD:
		return -EBUSY;
	case IIO_BLOCK_STATE_DONE:
		break;
	}

	return 0;
}

int iio_dma_buffer_enqueue_dmabuf(struct iio_buffer *buffer,
				  struct iio_dma_buffer_block *block,
				  struct dma_fence *fence,
				  struct sg_table *sgt,
				  size_t size, bool cyclic)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);
	bool cookie;
	int ret;

	lockdep_assert_held(&queue->lock);

	cookie = dma_fence_begin_signalling();

	ret = iio_dma_can_enqueue_block(block);
	if (ret < 0)
		goto out_end_signalling;

	block->bytes_used = size;
	block->cyclic = cyclic;
	block->sg_table = sgt;
	block->fence = fence;

	iio_dma_buffer_enqueue(queue, block);

out_end_signalling:
	dma_fence_end_signalling(cookie);

	return ret;
}
EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_enqueue_dmabuf, "IIO_DMA_BUFFER");

struct device *iio_dma_buffer_get_dma_dev(struct iio_buffer *buffer)
{
	return iio_buffer_to_queue(buffer)->dev;
}
EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_get_dma_dev, "IIO_DMA_BUFFER");

void iio_dma_buffer_lock_queue(struct iio_buffer *buffer)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);

	mutex_lock(&queue->lock);
}
EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_lock_queue, "IIO_DMA_BUFFER");

void iio_dma_buffer_unlock_queue(struct iio_buffer *buffer)
{
	struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer);

	mutex_unlock(&queue->lock);
}
EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_unlock_queue, "IIO_DMA_BUFFER");

/**
 * iio_dma_buffer_set_bytes_per_datum() - DMA buffer set_bytes_per_datum callback
 * @buffer: Buffer to set the bytes-per-datum for
 * @bpd: The new bytes-per-datum value
 *
 * Should be used as the set_bytes_per_datum callback for the
 * iio_buffer_access_funcs struct for DMA buffers.
 */
int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd)
{
	buffer->bytes_per_datum = bpd;

	return 0;
}
EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_set_bytes_per_datum, "IIO_DMA_BUFFER");

/**
 * iio_dma_buffer_set_length() - DMA buffer set_length callback
 * @buffer: Buffer to set the length for
 * @length: The new buffer length
 *
 * Should be used as the set_length callback for the iio_buffer_access_funcs
 * struct for DMA buffers.
 */
int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length)
{
	/* Avoid an invalid state */
	if (length < 2)
		length = 2;
	buffer->length = length;
	buffer->watermark = length / 2;

	return 0;
}
EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_set_length, "IIO_DMA_BUFFER");

/**
 * iio_dma_buffer_init() - Initialize DMA buffer queue
 * @queue: Buffer to initialize
 * @dev: DMA device
 * @ops: DMA buffer queue callback operations
 *
 * The DMA device will be used by the queue to do DMA memory allocations. So it
 * should refer to the device that will perform the DMA to ensure that
 * allocations are done from a memory region that can be accessed by the device.
 */
void iio_dma_buffer_init(struct iio_dma_buffer_queue *queue, struct device *dev,
			 const struct iio_dma_buffer_ops *ops)
{
	iio_buffer_init(&queue->buffer);
	queue->buffer.length = PAGE_SIZE;
	queue->buffer.watermark = queue->buffer.length / 2;
	queue->dev = dev;
	queue->ops = ops;

	INIT_LIST_HEAD(&queue->incoming);

	mutex_init(&queue->lock);
	spin_lock_init(&queue->list_lock);
}
EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_init, "IIO_DMA_BUFFER");
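
/*
 * Example: a rough sketch of how a driver typically wires the default
 * callbacks from this module into an iio_buffer_access_funcs struct and
 * initializes the queue at probe time. The foo_* names and the
 * foo_dma_buffer structure are hypothetical (foo_buffer_release() is
 * sketched after iio_dma_buffer_release() below); the buffer is then
 * attached to the IIO device with iio_device_attach_buffer():
 *
 *	static const struct iio_buffer_access_funcs foo_buffer_access_funcs = {
 *		.read = iio_dma_buffer_read,
 *		.write = iio_dma_buffer_write,
 *		.set_bytes_per_datum = iio_dma_buffer_set_bytes_per_datum,
 *		.set_length = iio_dma_buffer_set_length,
 *		.request_update = iio_dma_buffer_request_update,
 *		.enable = iio_dma_buffer_enable,
 *		.disable = iio_dma_buffer_disable,
 *		.data_available = iio_dma_buffer_usage,
 *		.space_available = iio_dma_buffer_usage,
 *		.release = foo_buffer_release,
 *		.modes = INDIO_BUFFER_HARDWARE,
 *	};
 *
 *	// In probe, with "dma_dev" being the device that performs the DMA:
 *	foo = kzalloc(sizeof(*foo), GFP_KERNEL);
 *	if (!foo)
 *		return -ENOMEM;
 *	iio_dma_buffer_init(&foo->queue, dma_dev, &foo_dma_buffer_ops);
 *	foo->queue.buffer.access = &foo_buffer_access_funcs;
 */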

/**
 * iio_dma_buffer_exit() - Cleanup DMA buffer queue
 * @queue: Buffer to cleanup
 *
 * After this function has completed it is safe to free any resources that are
 * associated with the buffer and are accessed inside the callback operations.
 */
void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue)
{
	guard(mutex)(&queue->lock);

	iio_dma_buffer_fileio_free(queue);
	queue->ops = NULL;
}
EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_exit, "IIO_DMA_BUFFER");

/**
 * iio_dma_buffer_release() - Release final buffer resources
 * @queue: Buffer to release
 *
 * Frees resources that can't yet be freed in iio_dma_buffer_exit(). Should be
 * called in the buffer's release callback implementation right before freeing
 * the memory associated with the buffer.
 */
void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue)
{
	mutex_destroy(&queue->lock);
}
EXPORT_SYMBOL_NS_GPL(iio_dma_buffer_release, "IIO_DMA_BUFFER");
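
/*
 * Example: the hypothetical foo_buffer_release() referenced in the
 * iio_buffer_access_funcs sketch above. It runs when the last reference to
 * the buffer is dropped and releases the queue right before freeing the
 * driver structure that embeds it:
 *
 *	static void foo_buffer_release(struct iio_buffer *buffer)
 *	{
 *		struct foo_dma_buffer *foo = container_of(buffer,
 *			struct foo_dma_buffer, queue.buffer);
 *
 *		iio_dma_buffer_release(&foo->queue);
 *		kfree(foo);
 *	}
 */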

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA buffer for the IIO framework");
MODULE_LICENSE("GPL v2");
