// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2014-2015 Analog Devices Inc.
 *  Author: Lars-Peter Clausen <lars@metafoo.de>
 */

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/cleanup.h>
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/module.h>

#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
#include <linux/iio/buffer_impl.h>
#include <linux/iio/buffer-dma.h>
#include <linux/iio/buffer-dmaengine.h>

/*
 * The IIO DMAengine buffer combines the generic IIO DMA buffer infrastructure
 * with the DMAengine framework. The generic IIO DMA buffer infrastructure is
 * used to manage the buffer memory and implement the IIO buffer operations,
 * while the DMAengine framework is used to perform the DMA transfers. Combined,
 * this results in a device-independent, fully functional DMA buffer
 * implementation that can be used by device drivers for peripherals connected
 * to a DMA controller that has a DMAengine driver implementation.
 */
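/*
 * Typical usage from a peripheral driver is a single devm call at probe time.
 * The sketch below is illustrative only: the foo_* names, the "rx" channel
 * name and the setup_ops callbacks are hypothetical placeholders, not part of
 * this driver.
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct iio_dev *indio_dev;
 *		int ret;
 *
 *		indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(struct foo_state));
 *		if (!indio_dev)
 *			return -ENOMEM;
 *
 *		indio_dev->setup_ops = &foo_buffer_setup_ops;
 *
 *		ret = devm_iio_dmaengine_buffer_setup_ext(&pdev->dev, indio_dev,
 *							  "rx", IIO_BUFFER_DIRECTION_IN);
 *		if (ret)
 *			return ret;
 *
 *		return devm_iio_device_register(&pdev->dev, indio_dev);
 *	}
 */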

/*
 * struct dmaengine_buffer - DMAengine based IIO DMA buffer
 * @queue: Generic IIO DMA buffer queue backing this buffer
 * @chan: DMAengine channel used to perform the transfers
 * @active: List of blocks that have been submitted to the DMA channel
 * @align: Transfer length alignment in bytes, the larger of the channel's
 *	minimum supported source and destination bus widths
 * @max_size: Maximum DMA segment size supported by the channel's device
 */
struct dmaengine_buffer {
	struct iio_dma_buffer_queue queue;

	struct dma_chan *chan;
	struct list_head active;

	size_t align;
	size_t max_size;
};

static struct dmaengine_buffer *iio_buffer_to_dmaengine_buffer(struct iio_buffer *buffer)
{
	return container_of(buffer, struct dmaengine_buffer, queue.buffer);
}

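/*
 * DMAengine completion callback. Removes the block from the list of active
 * transfers and reports it back to the core, with bytes_used reduced by the
 * residue so that partial transfers are accounted for correctly.
 */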
static void iio_dmaengine_buffer_block_done(void *data,
					    const struct dmaengine_result *result)
{
	struct iio_dma_buffer_block *block = data;

	scoped_guard(spinlock_irqsave, &block->queue->list_lock)
		list_del(&block->head);
	block->bytes_used -= result->residue;
	iio_dma_buffer_block_done(block);
}

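/*
 * Submit a block to the DMA channel. Blocks that are backed by a DMA-mapped
 * scatterlist (DMABUF attached buffers) are converted to an array of dma_vec
 * entries and submitted with dmaengine_prep_peripheral_dma_vec(), everything
 * else is submitted as a single contiguous transfer with
 * dmaengine_prep_slave_single().
 */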
static int iio_dmaengine_buffer_submit_block(struct iio_dma_buffer_queue *queue,
					     struct iio_dma_buffer_block *block)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(&queue->buffer);
	struct dma_async_tx_descriptor *desc;
	enum dma_transfer_direction dma_dir;
	struct scatterlist *sgl;
	struct dma_vec *vecs;
	size_t max_size;
	dma_cookie_t cookie;
	size_t len_total;
	unsigned int i;
	int nents;

	max_size = min(block->size, dmaengine_buffer->max_size);
	max_size = round_down(max_size, dmaengine_buffer->align);

	if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN)
		dma_dir = DMA_DEV_TO_MEM;
	else
		dma_dir = DMA_MEM_TO_DEV;

	if (block->sg_table) {
		sgl = block->sg_table->sgl;
		nents = sg_nents_for_len(sgl, block->bytes_used);
		if (nents < 0)
			return nents;

		vecs = kmalloc_array(nents, sizeof(*vecs), GFP_ATOMIC);
		if (!vecs)
			return -ENOMEM;

		len_total = block->bytes_used;

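		/*
		 * Build one dma_vec entry per scatterlist segment. The length
		 * of the final segment is clamped so that the vector as a
		 * whole covers exactly bytes_used bytes.
		 */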
		for (i = 0; i < nents; i++) {
			vecs[i].addr = sg_dma_address(sgl);
			vecs[i].len = min(sg_dma_len(sgl), len_total);
			len_total -= vecs[i].len;

			sgl = sg_next(sgl);
		}

		desc = dmaengine_prep_peripheral_dma_vec(dmaengine_buffer->chan,
							 vecs, nents, dma_dir,
							 DMA_PREP_INTERRUPT);
		kfree(vecs);
	} else {
		if (queue->buffer.direction == IIO_BUFFER_DIRECTION_IN)
			block->bytes_used = max_size;

		if (!block->bytes_used || block->bytes_used > max_size)
			return -EINVAL;

		desc = dmaengine_prep_slave_single(dmaengine_buffer->chan,
						   block->phys_addr,
						   block->bytes_used,
						   dma_dir,
						   DMA_PREP_INTERRUPT);
	}
	if (!desc)
		return -ENOMEM;

	desc->callback_result = iio_dmaengine_buffer_block_done;
	desc->callback_param = block;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return dma_submit_error(cookie);

	scoped_guard(spinlock_irq, &dmaengine_buffer->queue.list_lock)
		list_add_tail(&block->head, &dmaengine_buffer->active);

	dma_async_issue_pending(dmaengine_buffer->chan);

	return 0;
}

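/*
 * Abort all outstanding transfers: terminate the DMA channel synchronously and
 * hand every block still on the active list back to the core.
 */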
static void iio_dmaengine_buffer_abort(struct iio_dma_buffer_queue *queue)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(&queue->buffer);

	dmaengine_terminate_sync(dmaengine_buffer->chan);
	iio_dma_buffer_block_list_abort(queue, &dmaengine_buffer->active);
}

static void iio_dmaengine_buffer_release(struct iio_buffer *buf)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(buf);

	iio_dma_buffer_release(&dmaengine_buffer->queue);
	kfree(dmaengine_buffer);
}

static const struct iio_buffer_access_funcs iio_dmaengine_buffer_ops = {
	.read = iio_dma_buffer_read,
	.write = iio_dma_buffer_write,
	.set_bytes_per_datum = iio_dma_buffer_set_bytes_per_datum,
	.set_length = iio_dma_buffer_set_length,
	.request_update = iio_dma_buffer_request_update,
	.enable = iio_dma_buffer_enable,
	.disable = iio_dma_buffer_disable,
	.data_available = iio_dma_buffer_usage,
	.space_available = iio_dma_buffer_usage,
	.release = iio_dmaengine_buffer_release,

	.enqueue_dmabuf = iio_dma_buffer_enqueue_dmabuf,
	.attach_dmabuf = iio_dma_buffer_attach_dmabuf,
	.detach_dmabuf = iio_dma_buffer_detach_dmabuf,

	.lock_queue = iio_dma_buffer_lock_queue,
	.unlock_queue = iio_dma_buffer_unlock_queue,

	.get_dma_dev = iio_dma_buffer_get_dma_dev,

	.modes = INDIO_BUFFER_HARDWARE,
	.flags = INDIO_BUFFER_FLAG_FIXED_WATERMARK,
};

static const struct iio_dma_buffer_ops iio_dmaengine_default_ops = {
	.submit = iio_dmaengine_buffer_submit_block,
	.abort = iio_dmaengine_buffer_abort,
};

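/*
 * The length_align_bytes buffer attribute exposes the transfer alignment to
 * userspace so that applications can size their transfers to a multiple of it.
 */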
static ssize_t iio_dmaengine_buffer_get_length_align(struct device *dev,
						     struct device_attribute *attr, char *buf)
{
	struct iio_buffer *buffer = to_iio_dev_attr(attr)->buffer;
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(buffer);

	return sysfs_emit(buf, "%zu\n", dmaengine_buffer->align);
}

static IIO_DEVICE_ATTR(length_align_bytes, 0444,
		       iio_dmaengine_buffer_get_length_align, NULL, 0);

static const struct iio_dev_attr *iio_dmaengine_buffer_attrs[] = {
	&iio_dev_attr_length_align_bytes,
	NULL,
};

/**
 * iio_dmaengine_buffer_alloc() - Allocate new buffer which uses DMAengine
 * @chan: DMA channel.
 *
 * This allocates a new IIO buffer which internally uses the DMAengine framework
 * to perform its transfers.
 *
 * Once done using the buffer, iio_dmaengine_buffer_free() should be used to
 * release it.
 */
static struct iio_buffer *iio_dmaengine_buffer_alloc(struct dma_chan *chan)
{
	struct dmaengine_buffer *dmaengine_buffer;
	unsigned int width, src_width, dest_width;
	struct dma_slave_caps caps;
	int ret;

	ret = dma_get_slave_caps(chan, &caps);
	if (ret < 0)
		return ERR_PTR(ret);

	dmaengine_buffer = kzalloc(sizeof(*dmaengine_buffer), GFP_KERNEL);
	if (!dmaengine_buffer)
		return ERR_PTR(-ENOMEM);

	/*
	 * Transfers need to be aligned to the maximum of the minimums, i.e.
	 * the larger of the channel's minimum supported source and destination
	 * bus widths.
	 */
	if (caps.src_addr_widths)
		src_width = __ffs(caps.src_addr_widths);
	else
		src_width = 1;
	if (caps.dst_addr_widths)
		dest_width = __ffs(caps.dst_addr_widths);
	else
		dest_width = 1;
	width = max(src_width, dest_width);

	INIT_LIST_HEAD(&dmaengine_buffer->active);
	dmaengine_buffer->chan = chan;
	dmaengine_buffer->align = width;
	dmaengine_buffer->max_size = dma_get_max_seg_size(chan->device->dev);

	iio_dma_buffer_init(&dmaengine_buffer->queue, chan->device->dev,
			    &iio_dmaengine_default_ops);

	dmaengine_buffer->queue.buffer.attrs = iio_dmaengine_buffer_attrs;
	dmaengine_buffer->queue.buffer.access = &iio_dmaengine_buffer_ops;

	return &dmaengine_buffer->queue.buffer;
}

/**
 * iio_dmaengine_buffer_free() - Free dmaengine buffer
 * @buffer: Buffer to free
 *
 * Frees a buffer previously allocated with iio_dmaengine_buffer_alloc().
 */
static void iio_dmaengine_buffer_free(struct iio_buffer *buffer)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(buffer);

	iio_dma_buffer_exit(&dmaengine_buffer->queue);
	iio_buffer_put(buffer);
}

/**
 * iio_dmaengine_buffer_teardown() - Releases DMA channel and frees buffer
 * @buffer: Buffer to free
 *
 * Releases the DMA channel and frees the buffer previously set up with
 * iio_dmaengine_buffer_setup_ext().
 */
void iio_dmaengine_buffer_teardown(struct iio_buffer *buffer)
{
	struct dmaengine_buffer *dmaengine_buffer =
		iio_buffer_to_dmaengine_buffer(buffer);
	struct dma_chan *chan = dmaengine_buffer->chan;

	iio_dmaengine_buffer_free(buffer);
	dma_release_channel(chan);
}
EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_teardown, "IIO_DMAENGINE_BUFFER");

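/*
 * Common helper for the setup variants below: allocate the DMAengine buffer,
 * flag the IIO device as supporting INDIO_BUFFER_HARDWARE, set the buffer
 * direction and attach the buffer to the device.
 */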
static struct iio_buffer
*__iio_dmaengine_buffer_setup_ext(struct iio_dev *indio_dev,
				  struct dma_chan *chan,
				  enum iio_buffer_direction dir)
{
	struct iio_buffer *buffer;
	int ret;

	buffer = iio_dmaengine_buffer_alloc(chan);
	if (IS_ERR(buffer))
		return ERR_CAST(buffer);

	indio_dev->modes |= INDIO_BUFFER_HARDWARE;

	buffer->direction = dir;

	ret = iio_device_attach_buffer(indio_dev, buffer);
	if (ret) {
		iio_dmaengine_buffer_free(buffer);
		return ERR_PTR(ret);
	}

	return buffer;
}

/**
 * iio_dmaengine_buffer_setup_ext() - Set up a DMA buffer for an IIO device
 * @dev: DMA channel consumer device
 * @indio_dev: IIO device to which to attach this buffer.
 * @channel: DMA channel name, typically "rx".
 * @dir: Direction of buffer (in or out)
 *
 * This requests a DMA channel, allocates a new IIO buffer with
 * iio_dmaengine_buffer_alloc() and attaches it to an IIO device with
 * iio_device_attach_buffer(). It also appends the INDIO_BUFFER_HARDWARE mode
 * to the supported modes of the IIO device.
 *
 * Once done using the buffer, iio_dmaengine_buffer_teardown() should be used
 * to release it.
 */
struct iio_buffer *iio_dmaengine_buffer_setup_ext(struct device *dev,
						  struct iio_dev *indio_dev,
						  const char *channel,
						  enum iio_buffer_direction dir)
{
	struct dma_chan *chan;
	struct iio_buffer *buffer;

	chan = dma_request_chan(dev, channel);
	if (IS_ERR(chan))
		return ERR_CAST(chan);

	buffer = __iio_dmaengine_buffer_setup_ext(indio_dev, chan, dir);
	if (IS_ERR(buffer))
		dma_release_channel(chan);

	return buffer;
}
EXPORT_SYMBOL_NS_GPL(iio_dmaengine_buffer_setup_ext, "IIO_DMAENGINE_BUFFER");
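/*
 * When devm-based teardown is not suitable, a driver can pair the call above
 * with an explicit iio_dmaengine_buffer_teardown(). A minimal sketch, where
 * the st->dma_buffer field is a hypothetical driver state member:
 *
 *	st->dma_buffer = iio_dmaengine_buffer_setup_ext(dev, indio_dev, "rx",
 *							IIO_BUFFER_DIRECTION_IN);
 *	if (IS_ERR(st->dma_buffer))
 *		return PTR_ERR(st->dma_buffer);
 *
 * and later, in the driver's teardown path:
 *
 *	iio_dmaengine_buffer_teardown(st->dma_buffer);
 */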

static void devm_iio_dmaengine_buffer_teardown(void *buffer)
{
	iio_dmaengine_buffer_teardown(buffer);
}

/**
 * devm_iio_dmaengine_buffer_setup_ext() - Set up a DMA buffer for an IIO device
 * @dev: Device for devm ownership and DMA channel consumer device
 * @indio_dev: IIO device to which to attach this buffer.
 * @channel: DMA channel name, typically "rx".
 * @dir: Direction of buffer (in or out)
 *
 * This requests a DMA channel, allocates a new IIO buffer with
 * iio_dmaengine_buffer_alloc() and attaches it to an IIO device with
 * iio_device_attach_buffer(). It also appends the INDIO_BUFFER_HARDWARE mode
 * to the supported modes of the IIO device. The buffer and the DMA channel are
 * automatically released when @dev is unbound.
 */
int devm_iio_dmaengine_buffer_setup_ext(struct device *dev,
					struct iio_dev *indio_dev,
					const char *channel,
					enum iio_buffer_direction dir)
{
	struct iio_buffer *buffer;

	buffer = iio_dmaengine_buffer_setup_ext(dev, indio_dev, channel, dir);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	return devm_add_action_or_reset(dev, devm_iio_dmaengine_buffer_teardown,
					buffer);
}
EXPORT_SYMBOL_NS_GPL(devm_iio_dmaengine_buffer_setup_ext, "IIO_DMAENGINE_BUFFER");

static void devm_iio_dmaengine_buffer_free(void *buffer)
{
	iio_dmaengine_buffer_free(buffer);
}

/**
 * devm_iio_dmaengine_buffer_setup_with_handle() - Set up a DMA buffer for an
 *						   IIO device
 * @dev: Device for devm ownership
 * @indio_dev: IIO device to which to attach this buffer.
 * @chan: DMA channel
 * @dir: Direction of buffer (in or out)
 *
 * This allocates a new IIO buffer with iio_dmaengine_buffer_alloc() and
 * attaches it to an IIO device with iio_device_attach_buffer(). It also
 * appends the INDIO_BUFFER_HARDWARE mode to the supported modes of the
 * IIO device.
 *
 * This is the same as devm_iio_dmaengine_buffer_setup_ext() except that the
 * caller manages requesting and releasing the DMA channel handle.
 */
int devm_iio_dmaengine_buffer_setup_with_handle(struct device *dev,
						struct iio_dev *indio_dev,
						struct dma_chan *chan,
						enum iio_buffer_direction dir)
{
	struct iio_buffer *buffer;

	buffer = __iio_dmaengine_buffer_setup_ext(indio_dev, chan, dir);
	if (IS_ERR(buffer))
		return PTR_ERR(buffer);

	return devm_add_action_or_reset(dev, devm_iio_dmaengine_buffer_free,
					buffer);
}
EXPORT_SYMBOL_NS_GPL(devm_iio_dmaengine_buffer_setup_with_handle,
		     "IIO_DMAENGINE_BUFFER");
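/*
 * Illustrative sketch only: a driver that needs the DMA channel for its own
 * configuration can request the handle itself and hand it over. The "rx"
 * channel name and the foo_configure_dma() helper are hypothetical, and the
 * caller remains responsible for releasing the channel when the device is
 * torn down:
 *
 *	chan = dma_request_chan(dev, "rx");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *
 *	foo_configure_dma(chan);
 *
 *	ret = devm_iio_dmaengine_buffer_setup_with_handle(dev, indio_dev, chan,
 *							  IIO_BUFFER_DIRECTION_IN);
 *	if (ret) {
 *		dma_release_channel(chan);
 *		return ret;
 *	}
 */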

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("DMA buffer for the IIO framework");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS("IIO_DMA_BUFFER");