Lines Matching full:block

25 * For DMA buffers the storage is sub-divided into so-called blocks. Each block
26 * has its own memory buffer. The size of the block is the granularity at which
28 * basic unit of data exchange from one sample to one block decreases the
31 * sample the overhead will be x for each sample. Whereas when using a block
39 * them with data. Blocks on the outgoing queue have been filled with data and
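For orientation, the descriptor that all of the matched lines operate on is struct iio_dma_buffer_block from include/linux/iio/buffer-dma.h. The sketch below is assembled only from the fields visible in these matches and is not the complete upstream definition (DMABUF-related members such as sg_table, fence and cyclic appear further down but are left out here):

/*
 * Simplified sketch of the block descriptor, based on the fields used in
 * the matches below; the real struct iio_dma_buffer_block has more members.
 */
struct iio_dma_buffer_block {
	struct list_head head;		/* entry on the incoming/outgoing queue */
	size_t bytes_used;		/* valid payload, at most 'size' */

	void *vaddr;			/* CPU address of the block's memory */
	dma_addr_t phys_addr;		/* DMA address handed to the hardware */
	size_t size;			/* allocated size of the block */
	struct iio_dma_buffer_queue *queue;

	enum iio_block_state state;	/* QUEUED / ACTIVE / DONE / DEAD */
	bool fileio;			/* allocated for fileio rather than DMABUF */
	struct kref kref;		/* lifetime, see iio_buffer_block_get/put() */
};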
42 * A block can be in one of the following states:
44 * the block.
47 * * Owned by the DMA controller: The DMA controller is processing the block
52 * Dead: A block that is dead has been marked to be freed. It might still
55 * incoming or outgoing queue the block will be freed.
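The states above correspond to the IIO_BLOCK_STATE_* constants used throughout the matches below. As a quick reference, a sketch of enum iio_block_state (the upstream header is authoritative):

/*
 * Block states as used in the code fragments below:
 *   QUEUED - on the incoming queue, owned by the block queue
 *   ACTIVE - handed to the DMA controller via queue->ops->submit()
 *   DONE   - transfer finished (or block idle), ready for the application
 *   DEAD   - marked to be freed once the last reference is dropped
 */
enum iio_block_state {
	IIO_BLOCK_STATE_QUEUED,
	IIO_BLOCK_STATE_ACTIVE,
	IIO_BLOCK_STATE_DONE,
	IIO_BLOCK_STATE_DEAD,
};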
58 * with both the block structure and the storage memory for the block
59 * will be freed when the last reference to the block is dropped. This means a
60 * block must not be accessed without holding a reference.
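In practice this rule means every asynchronous hand-off of a block is bracketed by a get/put pair, exactly as the core does it further down (iio_buffer_block_get() before submission, the matching put once the block is completed). The helpers are internal to the core; the two functions below are purely hypothetical illustrations of the ownership rule:

/* Hypothetical illustration: keep the block alive across an async hand-off. */
static void example_hand_off_block(struct iio_dma_buffer_block *block)
{
	iio_buffer_block_get(block);	/* block cannot be freed while held */
	/* ... hand block->phys_addr to some other context ... */
}

static void example_hand_back_block(struct iio_dma_buffer_block *block)
{
	/* ... the other context is done with the block ... */
	iio_buffer_block_put(block);	/* may free the block and its memory */
}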
68 * converter to the memory region of the block. Once the DMA transfer has been
70 * block.
72 * Prior to this it must set the bytes_used field of the block so that it contains
74 * size of the block, but if the DMA hardware has certain alignment requirements
77 * datum, i.e. the block must not contain partial samples.
79 * The driver must call iio_dma_buffer_block_done() for each block it has
81 * perform a DMA transfer for the block, e.g. because the buffer was disabled
82 * before the block transfer was started. In this case it should set bytes_used
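Taken together, a dmaengine-based driver usually satisfies these rules from its transfer-completion callback, roughly as sketched below. Only iio_dma_buffer_block_done() and the block fields come from this file; the callback itself, and any bookkeeping of the driver's own in-flight list, are assumptions (the in-tree industrialio-buffer-dmaengine.c is the reference implementation):

#include <linux/dmaengine.h>
#include <linux/iio/buffer-dma.h>

/*
 * Hedged sketch of a driver-side completion callback: report how many bytes
 * were actually transferred, then hand the block back to the core. A real
 * driver would also unlink the block from its private in-flight list here.
 */
static void example_dma_callback(void *data, const struct dmaengine_result *result)
{
	struct iio_dma_buffer_block *block = data;

	/*
	 * bytes_used must be a multiple of the sample size and may be smaller
	 * than block->size if the transfer was cut short; it ends up as 0 if
	 * no data could be transferred at all.
	 */
	block->bytes_used -= result->residue;

	iio_dma_buffer_block_done(block);
}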
99 struct iio_dma_buffer_block *block = container_of(kref,
101 struct iio_dma_buffer_queue *queue = block->queue;
103 WARN_ON(block->fileio && block->state != IIO_BLOCK_STATE_DEAD);
105 if (block->fileio) {
106 dma_free_coherent(queue->dev, PAGE_ALIGN(block->size),
107 block->vaddr, block->phys_addr);
113 kfree(block);
116 static void iio_buffer_block_get(struct iio_dma_buffer_block *block)
118 kref_get(&block->kref);
121 static void iio_buffer_block_put(struct iio_dma_buffer_block *block)
123 kref_put(&block->kref, iio_buffer_block_release);
135 struct iio_dma_buffer_block *block, *_block;
142 list_for_each_entry_safe(block, _block, &block_list, head)
143 iio_buffer_block_release(&block->kref);
149 struct iio_dma_buffer_block *block;
152 block = container_of(kref, struct iio_dma_buffer_block, kref);
155 list_add_tail(&block->head, &iio_dma_buffer_dead_blocks);
164 static void iio_buffer_block_put_atomic(struct iio_dma_buffer_block *block)
166 kref_put(&block->kref, iio_buffer_block_release_atomic);
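The _atomic variant exists because the release path ends in dma_free_coherent(), which must not run in atomic context. Dead blocks are therefore parked on a global list and freed later from a work item. A condensed sketch of that pattern, using the dead-block list and local block_list names visible in the matches above; the worker, work-item and spinlock names are assumptions:

/*
 * Condensed sketch of the deferred-release pattern: a kref_put() from atomic
 * context queues the block on a dead-block list, and a worker later releases
 * it in a context where dma_free_coherent() is allowed.
 */
static LIST_HEAD(iio_dma_buffer_dead_blocks);
static DEFINE_SPINLOCK(iio_dma_buffer_dead_blocks_lock);	/* assumed name */

static void iio_dma_buffer_cleanup_worker(struct work_struct *work)
{
	struct iio_dma_buffer_block *block, *_block;
	LIST_HEAD(block_list);

	spin_lock_irq(&iio_dma_buffer_dead_blocks_lock);
	list_splice_tail_init(&iio_dma_buffer_dead_blocks, &block_list);
	spin_unlock_irq(&iio_dma_buffer_dead_blocks_lock);

	list_for_each_entry_safe(block, _block, &block_list, head)
		iio_buffer_block_release(&block->kref);	/* frees memory, see above */
}
static DECLARE_WORK(iio_dma_buffer_cleanup_work, iio_dma_buffer_cleanup_worker);

static void iio_buffer_block_release_atomic(struct kref *kref)
{
	struct iio_dma_buffer_block *block =
		container_of(kref, struct iio_dma_buffer_block, kref);
	unsigned long flags;

	spin_lock_irqsave(&iio_dma_buffer_dead_blocks_lock, flags);
	list_add_tail(&block->head, &iio_dma_buffer_dead_blocks);
	spin_unlock_irqrestore(&iio_dma_buffer_dead_blocks_lock, flags);

	schedule_work(&iio_dma_buffer_cleanup_work);
}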
177 struct iio_dma_buffer_block *block;
179 block = kzalloc(sizeof(*block), GFP_KERNEL);
180 if (!block)
184 block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size),
185 &block->phys_addr, GFP_KERNEL);
186 if (!block->vaddr) {
187 kfree(block);
192 block->fileio = fileio;
193 block->size = size;
194 block->state = IIO_BLOCK_STATE_DONE;
195 block->queue = queue;
196 INIT_LIST_HEAD(&block->head);
197 kref_init(&block->kref);
204 return block;
207 static void _iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
209 if (block->state != IIO_BLOCK_STATE_DEAD)
210 block->state = IIO_BLOCK_STATE_DONE;
226 * iio_dma_buffer_block_done() - Indicate that a block has been completed
227 * @block: The completed block
229 * Should be called when the DMA controller has finished handling the block to
230 * pass back ownership of the block to the queue.
232 void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block)
234 struct iio_dma_buffer_queue *queue = block->queue;
241 _iio_dma_buffer_block_done(block);
244 if (!block->fileio)
245 iio_buffer_signal_dmabuf_done(block->fence, 0);
247 iio_buffer_block_put_atomic(block);
254 * iio_dma_buffer_block_list_abort() - Indicate that a list of blocks has been
260 * stopped. This will set bytes_used to 0 for each block in the list and then
266 struct iio_dma_buffer_block *block, *_block;
273 list_for_each_entry_safe(block, _block, list, head) {
274 list_del(&block->head);
275 block->bytes_used = 0;
276 _iio_dma_buffer_block_done(block);
278 if (!block->fileio)
279 iio_buffer_signal_dmabuf_done(block->fence, -EINTR);
280 iio_buffer_block_put_atomic(block);
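A driver typically calls iio_dma_buffer_block_list_abort() from its abort() callback after stopping the DMA engine, passing its own list of submitted blocks. A hedged sketch, where struct example_dma_buffer, its 'chan' channel and 'active' list are hypothetical driver-private state (same includes as the earlier callback sketch):

/*
 * Hypothetical driver-private state: embeds the queue and tracks which
 * blocks are currently submitted to the dmaengine channel.
 */
struct example_dma_buffer {
	struct iio_dma_buffer_queue queue;
	struct dma_chan *chan;
	struct list_head active;
};

static struct example_dma_buffer *
example_buffer_to_priv(struct iio_dma_buffer_queue *queue)
{
	return container_of(queue, struct example_dma_buffer, queue);
}

/*
 * Hedged sketch of an abort() callback: stop the engine, then hand every
 * still-submitted block back to the core with bytes_used forced to 0.
 */
static void example_buffer_abort(struct iio_dma_buffer_queue *queue)
{
	struct example_dma_buffer *priv = example_buffer_to_priv(queue);

	dmaengine_terminate_sync(priv->chan);
	iio_dma_buffer_block_list_abort(queue, &priv->active);
}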
292 static bool iio_dma_block_reusable(struct iio_dma_buffer_block *block)
295 * If the core owns the block it can be re-used. This should be the
297 * not support abort and has not given back the block yet.
299 switch (block->state) {
328 struct iio_dma_buffer_block *block;
336 * buffering scheme with usually one block at a time being used by the
359 block = queue->fileio.blocks[i];
362 if (block && (!iio_dma_block_reusable(block) || !try_reuse))
363 block->state = IIO_BLOCK_STATE_DEAD;
377 block = queue->fileio.blocks[i];
378 if (block->state == IIO_BLOCK_STATE_DEAD) {
380 iio_buffer_block_put(block);
381 block = NULL;
383 block->size = size;
386 block = NULL;
389 if (!block) {
390 block = iio_dma_buffer_alloc_block(queue, size, true);
391 if (!block) {
395 queue->fileio.blocks[i] = block;
399 * block->bytes_used may have been modified previously, e.g. by
401 * block's size so that iio_dma_buffer_io() will work.
403 block->bytes_used = block->size;
406 * If it's an input buffer, mark the block as queued, and
411 block->state = IIO_BLOCK_STATE_QUEUED;
412 list_add_tail(&block->head, &queue->incoming);
414 block->state = IIO_BLOCK_STATE_DONE;
449 struct iio_dma_buffer_block *block)
454 * If the hardware has already been removed we put the block into
461 block->state = IIO_BLOCK_STATE_ACTIVE;
462 iio_buffer_block_get(block);
464 ret = queue->ops->submit(queue, block);
466 if (!block->fileio)
467 iio_buffer_signal_dmabuf_done(block->fence, ret);
479 iio_buffer_block_put(block);
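The queue->ops->submit() hook seen above is where the driver actually programs the transfer. A hedged sketch of a dmaengine-backed implementation, reusing the hypothetical example_dma_buffer state and example_dma_callback from the earlier sketches; direction, alignment rounding and error handling are reduced to the essentials:

/*
 * Hedged sketch of a submit() implementation: prepare a single-buffer
 * dmaengine transfer covering the block and issue it. Assumes an input
 * (capture) buffer, hence DMA_DEV_TO_MEM.
 */
static int example_buffer_submit(struct iio_dma_buffer_queue *queue,
	struct iio_dma_buffer_block *block)
{
	struct example_dma_buffer *priv = example_buffer_to_priv(queue);
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	desc = dmaengine_prep_slave_single(priv->chan, block->phys_addr,
					   block->bytes_used, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	desc->callback_result = example_dma_callback;	/* completion sketch above */
	desc->callback_param = block;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return dma_submit_error(cookie);

	/*
	 * Track the block so example_buffer_abort() can find it later; the
	 * completion callback would unlink it again (omitted in that sketch).
	 */
	spin_lock_irq(&queue->list_lock);
	list_add_tail(&block->head, &priv->active);
	spin_unlock_irq(&queue->list_lock);

	dma_async_issue_pending(priv->chan);

	return 0;
}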
497 struct iio_dma_buffer_block *block, *_block;
501 list_for_each_entry_safe(block, _block, &queue->incoming, head) {
502 list_del(&block->head);
503 iio_dma_buffer_submit_block(queue, block);
536 struct iio_dma_buffer_block *block)
538 if (block->state == IIO_BLOCK_STATE_DEAD) {
539 iio_buffer_block_put(block);
541 iio_dma_buffer_submit_block(queue, block);
543 block->state = IIO_BLOCK_STATE_QUEUED;
544 list_add_tail(&block->head, &queue->incoming);
551 struct iio_dma_buffer_block *block;
557 block = queue->fileio.blocks[idx];
559 if (block->state == IIO_BLOCK_STATE_DONE) {
563 block = NULL;
568 return block;
575 struct iio_dma_buffer_block *block;
585 block = iio_dma_buffer_dequeue(queue);
586 if (block == NULL) {
591 queue->fileio.active_block = block;
593 block = queue->fileio.active_block;
597 if (n > block->bytes_used - queue->fileio.pos)
598 n = block->bytes_used - queue->fileio.pos;
599 addr = block->vaddr + queue->fileio.pos;
612 if (queue->fileio.pos == block->bytes_used) {
614 iio_dma_buffer_enqueue(queue, block);
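Everything in this fileio path ultimately backs a plain read() on the buffer character device. A minimal userspace sketch of the consumer side, assuming a device enumerated as /dev/iio:device0 whose buffer has already been configured and enabled through sysfs:

/*
 * Minimal userspace consumer of the fileio path: each read() drains data
 * that the kernel copied out of one or more completed DMA blocks.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char data[4096];
	ssize_t ret;
	int fd;

	fd = open("/dev/iio:device0", O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	ret = read(fd, data, sizeof(data));
	if (ret > 0)
		printf("read %zd bytes of sample data\n", ret);

	close(fd);
	return 0;
}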
669 struct iio_dma_buffer_block *block;
674 * For counting the available bytes we'll use the size of the block, not
675 * the number of actual bytes available in the block. Otherwise it is
687 block = queue->fileio.blocks[i];
689 if (block != queue->fileio.active_block
690 && block->state == IIO_BLOCK_STATE_DONE)
691 data_available += block->size;
706 struct iio_dma_buffer_block *block;
717 block = iio_dma_buffer_alloc_block(queue, attach->dmabuf->size, false);
718 if (!block)
724 return block;
729 struct iio_dma_buffer_block *block)
731 block->state = IIO_BLOCK_STATE_DEAD;
732 iio_buffer_block_put_atomic(block);
736 static int iio_dma_can_enqueue_block(struct iio_dma_buffer_block *block)
738 struct iio_dma_buffer_queue *queue = block->queue;
744 switch (block->state) {
758 struct iio_dma_buffer_block *block,
771 ret = iio_dma_can_enqueue_block(block);
775 block->bytes_used = size;
776 block->cyclic = cyclic;
777 block->sg_table = sgt;
778 block->fence = fence;
780 iio_dma_buffer_enqueue(queue, block);