xref: /linux/drivers/iio/buffer/industrialio-buffer-dmaengine.c (revision c5288cda69ee2d8607f5026bd599a5cebf0ee783)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2014-2015 Analog Devices Inc.
 *  Author: Lars-Peter Clausen <lars@metafoo.de>
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/module.h>

#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
#include <linux/iio/buffer_impl.h>
#include <linux/iio/buffer-dma.h>
#include <linux/iio/buffer-dmaengine.h>

/*
 * The IIO DMAengine buffer combines the generic IIO DMA buffer infrastructure
 * with the DMAengine framework. The generic IIO DMA buffer infrastructure is
 * used to manage the buffer memory and to implement the IIO buffer operations,
 * while the DMAengine framework is used to perform the actual DMA transfers.
 * Combined, this results in a device-independent, fully functional DMA buffer
 * implementation that can be used by device drivers for peripherals connected
 * to a DMA controller that has a DMAengine driver implementation.
 */
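
/*
 * Typical usage (a minimal sketch; "foo_adc" and its state struct are
 * hypothetical and for illustration only): a peripheral driver requests the
 * buffer from its probe routine via devm_iio_dmaengine_buffer_setup_ext(),
 * which is exported at the end of this file:
 *
 *	static int foo_adc_probe(struct platform_device *pdev)
 *	{
 *		struct iio_dev *indio_dev;
 *		int ret;
 *
 *		indio_dev = devm_iio_device_alloc(&pdev->dev,
 *						  sizeof(struct foo_adc_state));
 *		if (!indio_dev)
 *			return -ENOMEM;
 *
 *		// ... set name, channels, info callbacks, etc. ...
 *
 *		ret = devm_iio_dmaengine_buffer_setup_ext(&pdev->dev, indio_dev,
 *							  "rx",
 *							  IIO_BUFFER_DIRECTION_IN);
 *		if (ret)
 *			return ret;
 *
 *		return devm_iio_device_register(&pdev->dev, indio_dev);
 *	}
 */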

struct dmaengine_buffer {
	struct iio_dma_buffer_queue queue;

	struct dma_chan *chan;
	struct list_head active;

	size_t align;
	size_t max_size;
};

static struct dmaengine_buffer *iio_buffer_to_dmaengine_buffer(
		struct iio_buffer *buffer)
{
	return container_of(buffer, struct dmaengine_buffer, queue.buffer);
}

/* DMA completion callback, invoked once the transfer for a block has finished. */
static void iio_dmaengine_buffer_block_done(void *data,
		const struct dmaengine_result *result)
{
	struct iio_dma_buffer_block *block = data;
	unsigned long flags;

	spin_lock_irqsave(&block->queue->list_lock, flags);
	list_del(&block->head);
	spin_unlock_irqrestore(&block->queue->list_lock, flags);
	/* The residue is the number of bytes that were not transferred. */
	block->bytes_used -= result->residue;
	iio_dma_buffer_block_done(block);
}

static int iio_dmaengine_buffer_submit_block(struct iio_dma_buffer_queue *queue,
	struct iio_dma_buffer_block *block)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(&queue->buffer);
	struct dma_async_tx_descriptor *desc;
	enum dma_transfer_direction dma_dir;
	size_t max_size;
	dma_cookie_t cookie;

	/* Clamp to the maximum segment size and the required alignment. */
	max_size = min(block->size, dmaengine_buffer->max_size);
	max_size = round_down(max_size, dmaengine_buffer->align);

	if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN) {
		block->bytes_used = max_size;
		dma_dir = DMA_DEV_TO_MEM;
	} else {
		dma_dir = DMA_MEM_TO_DEV;
	}

	if (!block->bytes_used || block->bytes_used > max_size)
		return -EINVAL;

	desc = dmaengine_prep_slave_single(dmaengine_buffer->chan,
		block->phys_addr, block->bytes_used, dma_dir,
		DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	desc->callback_result = iio_dmaengine_buffer_block_done;
	desc->callback_param = block;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return dma_submit_error(cookie);

	spin_lock_irq(&dmaengine_buffer->queue.list_lock);
	list_add_tail(&block->head, &dmaengine_buffer->active);
	spin_unlock_irq(&dmaengine_buffer->queue.list_lock);

	dma_async_issue_pending(dmaengine_buffer->chan);

	return 0;
}

static void iio_dmaengine_buffer_abort(struct iio_dma_buffer_queue *queue)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(&queue->buffer);

	dmaengine_terminate_sync(dmaengine_buffer->chan);
	iio_dma_buffer_block_list_abort(queue, &dmaengine_buffer->active);
}

static void iio_dmaengine_buffer_release(struct iio_buffer *buf)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(buf);

	iio_dma_buffer_release(&dmaengine_buffer->queue);
	kfree(dmaengine_buffer);
}

static const struct iio_buffer_access_funcs iio_dmaengine_buffer_ops = {
	.read = iio_dma_buffer_read,
	.write = iio_dma_buffer_write,
	.set_bytes_per_datum = iio_dma_buffer_set_bytes_per_datum,
	.set_length = iio_dma_buffer_set_length,
	.request_update = iio_dma_buffer_request_update,
	.enable = iio_dma_buffer_enable,
	.disable = iio_dma_buffer_disable,
	.data_available = iio_dma_buffer_usage,
	.space_available = iio_dma_buffer_usage,
	.release = iio_dmaengine_buffer_release,

	.modes = INDIO_BUFFER_HARDWARE,
	.flags = INDIO_BUFFER_FLAG_FIXED_WATERMARK,
};

static const struct iio_dma_buffer_ops iio_dmaengine_default_ops = {
	.submit = iio_dmaengine_buffer_submit_block,
	.abort = iio_dmaengine_buffer_abort,
};

static ssize_t iio_dmaengine_buffer_get_length_align(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(buffer);

	return sysfs_emit(buf, "%zu\n", dmaengine_buffer->align);
}

static IIO_DEVICE_ATTR(length_align_bytes, 0444,
		       iio_dmaengine_buffer_get_length_align, NULL, 0);

static const struct iio_dev_attr *iio_dmaengine_buffer_attrs[] = {
	&iio_dev_attr_length_align_bytes,
	NULL,
};

/**
 * iio_dmaengine_buffer_alloc() - Allocate new buffer which uses DMAengine
 * @dev: Parent device for the buffer
 * @channel: DMA channel name, typically "rx".
 *
 * This allocates a new IIO buffer that internally uses the DMAengine framework
 * to perform its transfers. The parent device will be used to request the DMA
 * channel.
 *
 * Once done using the buffer iio_dmaengine_buffer_free() should be used to
 * release it.
 *
 * Return: Pointer to the newly allocated IIO buffer on success, or an
 * ERR_PTR() on failure.
 */
static struct iio_buffer *iio_dmaengine_buffer_alloc(struct device *dev,
	const char *channel)
{
	struct dmaengine_buffer *dmaengine_buffer;
	unsigned int width, src_width, dest_width;
	struct dma_slave_caps caps;
	struct dma_chan *chan;
	int ret;

	dmaengine_buffer = kzalloc(sizeof(*dmaengine_buffer), GFP_KERNEL);
	if (!dmaengine_buffer)
		return ERR_PTR(-ENOMEM);

	chan = dma_request_chan(dev, channel);
	if (IS_ERR(chan)) {
		ret = PTR_ERR(chan);
		goto err_free;
	}

	ret = dma_get_slave_caps(chan, &caps);
	if (ret < 0)
		goto err_release;

	/* Needs to be aligned to the maximum of the minimums */
	if (caps.src_addr_widths)
		src_width = __ffs(caps.src_addr_widths);
	else
		src_width = 1;
	if (caps.dst_addr_widths)
		dest_width = __ffs(caps.dst_addr_widths);
	else
		dest_width = 1;
	width = max(src_width, dest_width);

	INIT_LIST_HEAD(&dmaengine_buffer->active);
	dmaengine_buffer->chan = chan;
	dmaengine_buffer->align = width;
	dmaengine_buffer->max_size = dma_get_max_seg_size(chan->device->dev);

	iio_dma_buffer_init(&dmaengine_buffer->queue, chan->device->dev,
		&iio_dmaengine_default_ops);

	dmaengine_buffer->queue.buffer.attrs = iio_dmaengine_buffer_attrs;
	dmaengine_buffer->queue.buffer.access = &iio_dmaengine_buffer_ops;

	return &dmaengine_buffer->queue.buffer;

err_release:
	/* Don't leak the requested channel if querying its capabilities fails. */
	dma_release_channel(chan);
err_free:
	kfree(dmaengine_buffer);
	return ERR_PTR(ret);
}

/**
 * iio_dmaengine_buffer_free() - Free dmaengine buffer
 * @buffer: Buffer to free
 *
 * Frees a buffer previously allocated with iio_dmaengine_buffer_alloc().
 */
void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(buffer);

	iio_dma_buffer_exit(&dmaengine_buffer->queue);
	dma_release_channel(dmaengine_buffer->chan);

	iio_buffer_put(buffer);
}
EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_free, IIO_DMAENGINE_BUFFER);

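/**
 * iio_dmaengine_buffer_setup_ext() - Setup a DMA buffer for an IIO device
 * @dev: Parent device for the buffer
 * @indio_dev: IIO device to which to attach this buffer.
 * @channel: DMA channel name, typically "rx".
 * @dir: Direction of buffer (in or out)
 *
 * This allocates a new IIO buffer with iio_dmaengine_buffer_alloc() and
 * attaches it to the IIO device with iio_device_attach_buffer(). It also
 * appends the INDIO_BUFFER_HARDWARE mode to the supported modes of the
 * IIO device.
 *
 * Once done using the buffer iio_dmaengine_buffer_free() should be used to
 * release it.
 *
 * Return: Pointer to the attached IIO buffer on success, or an ERR_PTR()
 * on failure.
 */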
struct iio_buffer *iio_dmaengine_buffer_setup_ext(struct device *dev,
						  struct iio_dev *indio_dev,
						  const char *channel,
						  enum iio_buffer_direction dir)
{
	struct iio_buffer *buffer;
	int ret;

	buffer = iio_dmaengine_buffer_alloc(dev, channel);
	if (IS_ERR(buffer))
		return ERR_CAST(buffer);

	indio_dev->modes |= INDIO_BUFFER_HARDWARE;

	buffer->direction = dir;

	ret = iio_device_attach_buffer(indio_dev, buffer);
	if (ret) {
		iio_dmaengine_buffer_free(buffer);
		return ERR_PTR(ret);
	}

	return buffer;
}
EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_setup_ext, IIO_DMAENGINE_BUFFER);

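/*
 * A minimal sketch of manual (non-devm) usage, with a hypothetical "bar_dac"
 * driver state struct: a buffer obtained from iio_dmaengine_buffer_setup_ext()
 * must be released with iio_dmaengine_buffer_free() once it is no longer
 * needed, typically from the driver's remove path:
 *
 *	st->buffer = iio_dmaengine_buffer_setup_ext(dev, indio_dev, "tx",
 *						    IIO_BUFFER_DIRECTION_OUT);
 *	if (IS_ERR(st->buffer))
 *		return PTR_ERR(st->buffer);
 *
 *	// ... and later, in bar_dac_remove():
 *	iio_dmaengine_buffer_free(st->buffer);
 */
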
static void __devm_iio_dmaengine_buffer_free(void *buffer)
{
	iio_dmaengine_buffer_free(buffer);
}

/**
 * devm_iio_dmaengine_buffer_setup_ext() - Setup a DMA buffer for an IIO device
 * @dev: Parent device for the buffer
 * @indio_dev: IIO device to which to attach this buffer.
 * @channel: DMA channel name, typically "rx".
 * @dir: Direction of buffer (in or out)
 *
 * This allocates a new IIO buffer with iio_dmaengine_buffer_setup_ext()
 * and attaches it to an IIO device with iio_device_attach_buffer().
 * It also appends the INDIO_BUFFER_HARDWARE mode to the supported modes of the
 * IIO device. The buffer is freed automatically when @dev is unbound, so no
 * explicit call to iio_dmaengine_buffer_free() is needed.
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int devm_iio_dmaengine_buffer_setup_ext(struct device *dev,
					struct iio_dev *indio_dev,
					const char *channel,
					enum iio_buffer_direction dir)
{
	struct iio_buffer *buffer;

	buffer = iio_dmaengine_buffer_setup_ext(dev, indio_dev, channel, dir);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	return devm_add_action_or_reset(dev, __devm_iio_dmaengine_buffer_free,
					buffer);
}
EXPORT_SYMBOL_NS_GPL(devm_iio_dmaengine_buffer_setup_ext, IIO_DMAENGINE_BUFFER);

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA buffer for the IIO framework");
MODULE_LICENSE("GPL");