xref: /linux/drivers/iio/buffer/industrialio-buffer-dmaengine.c (revision ae22a94997b8a03dcb3c922857c203246711f9d4)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2014-2015 Analog Devices Inc.
 *  Author: Lars-Peter Clausen <lars@metafoo.de>
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/module.h>

#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
#include <linux/iio/buffer_impl.h>
#include <linux/iio/buffer-dma.h>
#include <linux/iio/buffer-dmaengine.h>

/*
 * The IIO DMAengine buffer combines the generic IIO DMA buffer infrastructure
 * with the DMAengine framework. The generic IIO DMA buffer infrastructure is
 * used to manage the buffer memory and implement the IIO buffer operations,
 * while the DMAengine framework is used to perform the DMA transfers. Combined,
 * this results in a device-independent, fully functional DMA buffer
 * implementation that can be used by drivers for peripherals connected to a
 * DMA controller that has a DMAengine driver implementation.
 */
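
/*
 * Example usage sketch: a typical converter driver wires this buffer up from
 * its probe function with the devm helper exported at the bottom of this
 * file. The driver, state struct and channel names below ("foo_adc",
 * "foo_adc_state", "rx") are hypothetical.
 *
 *	static int foo_adc_probe(struct platform_device *pdev)
 *	{
 *		struct iio_dev *indio_dev;
 *		int ret;
 *
 *		indio_dev = devm_iio_device_alloc(&pdev->dev,
 *						  sizeof(struct foo_adc_state));
 *		if (!indio_dev)
 *			return -ENOMEM;
 *
 *		ret = devm_iio_dmaengine_buffer_setup(&pdev->dev, indio_dev, "rx");
 *		if (ret)
 *			return ret;
 *
 *		return devm_iio_device_register(&pdev->dev, indio_dev);
 *	}
 */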

struct dmaengine_buffer {
	struct iio_dma_buffer_queue queue;

	struct dma_chan *chan;
	struct list_head active;

	size_t align;
	size_t max_size;
};

static struct dmaengine_buffer *iio_buffer_to_dmaengine_buffer(
		struct iio_buffer *buffer)
{
	return container_of(buffer, struct dmaengine_buffer, queue.buffer);
}

static void iio_dmaengine_buffer_block_done(void *data,
		const struct dmaengine_result *result)
{
	struct iio_dma_buffer_block *block = data;
	unsigned long flags;

	spin_lock_irqsave(&block->queue->list_lock, flags);
	list_del(&block->head);
	spin_unlock_irqrestore(&block->queue->list_lock, flags);
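	/*
	 * result->residue holds the number of bytes that were not transferred.
	 * Subtract it so that bytes_used reflects the amount of valid data in
	 * the block, e.g. when a transfer was terminated early.
	 */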
	block->bytes_used -= result->residue;
	iio_dma_buffer_block_done(block);
}

static int iio_dmaengine_buffer_submit_block(struct iio_dma_buffer_queue *queue,
	struct iio_dma_buffer_block *block)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(&queue->buffer);
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

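	/*
	 * Clamp the transfer to the DMA controller's maximum segment size and
	 * round it down to the required length alignment.
	 */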
	block->bytes_used = min(block->size, dmaengine_buffer->max_size);
	block->bytes_used = round_down(block->bytes_used,
			dmaengine_buffer->align);

	desc = dmaengine_prep_slave_single(dmaengine_buffer->chan,
		block->phys_addr, block->bytes_used, DMA_DEV_TO_MEM,
		DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	desc->callback_result = iio_dmaengine_buffer_block_done;
	desc->callback_param = block;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return dma_submit_error(cookie);

	spin_lock_irq(&dmaengine_buffer->queue.list_lock);
	list_add_tail(&block->head, &dmaengine_buffer->active);
	spin_unlock_irq(&dmaengine_buffer->queue.list_lock);

	dma_async_issue_pending(dmaengine_buffer->chan);

	return 0;
}

static void iio_dmaengine_buffer_abort(struct iio_dma_buffer_queue *queue)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(&queue->buffer);

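	/*
	 * Synchronously stop any in-flight transfer on the channel, then hand
	 * all blocks still on the active list back to the core as aborted.
	 */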
	dmaengine_terminate_sync(dmaengine_buffer->chan);
	iio_dma_buffer_block_list_abort(queue, &dmaengine_buffer->active);
}

static void iio_dmaengine_buffer_release(struct iio_buffer *buf)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(buf);

	iio_dma_buffer_release(&dmaengine_buffer->queue);
	kfree(dmaengine_buffer);
}

static const struct iio_buffer_access_funcs iio_dmaengine_buffer_ops = {
	.read = iio_dma_buffer_read,
	.set_bytes_per_datum = iio_dma_buffer_set_bytes_per_datum,
	.set_length = iio_dma_buffer_set_length,
	.request_update = iio_dma_buffer_request_update,
	.enable = iio_dma_buffer_enable,
	.disable = iio_dma_buffer_disable,
	.data_available = iio_dma_buffer_data_available,
	.release = iio_dmaengine_buffer_release,

	.modes = INDIO_BUFFER_HARDWARE,
	.flags = INDIO_BUFFER_FLAG_FIXED_WATERMARK,
};

static const struct iio_dma_buffer_ops iio_dmaengine_default_ops = {
	.submit = iio_dmaengine_buffer_submit_block,
	.abort = iio_dmaengine_buffer_abort,
};

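/*
 * Read-only "length_align_bytes" sysfs attribute exposing the buffer's length
 * alignment requirement (in bytes) to userspace.
 */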
static ssize_t iio_dmaengine_buffer_get_length_align(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(buffer);

	return sysfs_emit(buf, "%zu\n", dmaengine_buffer->align);
}

static IIO_DEVICE_ATTR(length_align_bytes, 0444,
		       iio_dmaengine_buffer_get_length_align, NULL, 0);

static const struct iio_dev_attr *iio_dmaengine_buffer_attrs[] = {
	&iio_dev_attr_length_align_bytes,
	NULL,
};

/**
 * iio_dmaengine_buffer_alloc() - Allocate new buffer which uses DMAengine
 * @dev: Parent device for the buffer
 * @channel: DMA channel name, typically "rx".
 *
 * This allocates a new IIO buffer which internally uses the DMAengine framework
 * to perform its transfers. The parent device will be used to request the DMA
 * channel.
 *
 * Once done using the buffer, iio_dmaengine_buffer_free() should be used to
 * release it.
 */
struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
	const char *channel)
{
	struct dmaengine_buffer *dmaengine_buffer;
	unsigned int width, src_width, dest_width;
	struct dma_slave_caps caps;
	struct dma_chan *chan;
	int ret;

	dmaengine_buffer = kzalloc(sizeof(*dmaengine_buffer), GFP_KERNEL);
	if (!dmaengine_buffer)
		return ERR_PTR(-ENOMEM);

	chan = dma_request_chan(dev, channel);
	if (IS_ERR(chan)) {
		ret = PTR_ERR(chan);
		goto err_free;
	}

	ret = dma_get_slave_caps(chan, &caps);
	if (ret < 0)
		goto err_release;

	/* Needs to be aligned to the maximum of the minimum supported bus widths */
	if (caps.src_addr_widths)
		src_width = __ffs(caps.src_addr_widths);
	else
		src_width = 1;
	if (caps.dst_addr_widths)
		dest_width = __ffs(caps.dst_addr_widths);
	else
		dest_width = 1;
	width = max(src_width, dest_width);

	INIT_LIST_HEAD(&dmaengine_buffer->active);
	dmaengine_buffer->chan = chan;
	dmaengine_buffer->align = width;
	dmaengine_buffer->max_size = dma_get_max_seg_size(chan->device->dev);

	iio_dma_buffer_init(&dmaengine_buffer->queue, chan->device->dev,
		&iio_dmaengine_default_ops);

	dmaengine_buffer->queue.buffer.attrs = iio_dmaengine_buffer_attrs;
	dmaengine_buffer->queue.buffer.access = &iio_dmaengine_buffer_ops;

	return &dmaengine_buffer->queue.buffer;

err_release:
	dma_release_channel(chan);
err_free:
	kfree(dmaengine_buffer);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_alloc, IIO_DMAENGINE_BUFFER);
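
/*
 * Example usage sketch for drivers that manage the buffer lifetime themselves
 * instead of using the devm helper below. The function name
 * foo_adc_setup_buffer is hypothetical.
 *
 *	static int foo_adc_setup_buffer(struct device *dev, struct iio_dev *indio_dev)
 *	{
 *		struct iio_buffer *buffer;
 *
 *		buffer = iio_dmaengine_buffer_alloc(dev, "rx");
 *		if (IS_ERR(buffer))
 *			return PTR_ERR(buffer);
 *
 *		indio_dev->modes |= INDIO_BUFFER_HARDWARE;
 *		return iio_device_attach_buffer(indio_dev, buffer);
 *	}
 *
 * The buffer is then released in the teardown path with
 * iio_dmaengine_buffer_free(buffer).
 */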

/**
 * iio_dmaengine_buffer_free() - Free dmaengine buffer
 * @buffer: Buffer to free
 *
 * Frees a buffer previously allocated with iio_dmaengine_buffer_alloc().
 */
void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(buffer);

	iio_dma_buffer_exit(&dmaengine_buffer->queue);
	dma_release_channel(dmaengine_buffer->chan);

	iio_buffer_put(buffer);
}
EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_free, IIO_DMAENGINE_BUFFER);

static void __devm_iio_dmaengine_buffer_free(void *buffer)
{
	iio_dmaengine_buffer_free(buffer);
}

/**
 * devm_iio_dmaengine_buffer_alloc() - Resource-managed iio_dmaengine_buffer_alloc()
 * @dev: Parent device for the buffer
 * @channel: DMA channel name, typically "rx".
 *
 * This allocates a new IIO buffer which internally uses the DMAengine framework
 * to perform its transfers. The parent device will be used to request the DMA
 * channel.
 *
 * The buffer will be automatically de-allocated once the device gets destroyed.
 */
static struct iio_buffer *devm_iio_dmaengine_buffer_alloc(struct device *dev,
	const char *channel)
{
	struct iio_buffer *buffer;
	int ret;

	buffer = iio_dmaengine_buffer_alloc(dev, channel);
	if (IS_ERR(buffer))
		return buffer;

	ret = devm_add_action_or_reset(dev, __devm_iio_dmaengine_buffer_free,
				       buffer);
	if (ret)
		return ERR_PTR(ret);

	return buffer;
}

/**
 * devm_iio_dmaengine_buffer_setup() - Setup a DMA buffer for an IIO device
 * @dev: Parent device for the buffer
 * @indio_dev: IIO device to which to attach this buffer.
 * @channel: DMA channel name, typically "rx".
 *
 * This allocates a new IIO buffer with devm_iio_dmaengine_buffer_alloc()
 * and attaches it to an IIO device with iio_device_attach_buffer().
 * It also appends the INDIO_BUFFER_HARDWARE mode to the supported modes of the
 * IIO device.
 */
int devm_iio_dmaengine_buffer_setup(struct device *dev,
				    struct iio_dev *indio_dev,
				    const char *channel)
{
	struct iio_buffer *buffer;

	buffer = devm_iio_dmaengine_buffer_alloc(dev, channel);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	indio_dev->modes |= INDIO_BUFFER_HARDWARE;

	return iio_device_attach_buffer(indio_dev, buffer);
}
EXPORT_SYMBOL_NS_GPL(devm_iio_dmaengine_buffer_setup, IIO_DMAENGINE_BUFFER);

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA buffer for the IIO framework");
MODULE_LICENSE("GPL");