/*
 * drivers/dma/imx-dma.c
 *
 * This file contains a driver for the Freescale i.MX DMA engine
 * found on i.MX1/21/27
 *
 * Copyright 2010 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
#include <linux/dmaengine.h>

#include <asm/irq.h>
#include <mach/dma-v1.h>
#include <mach/hardware.h>

struct imxdma_channel {
	struct imxdma_engine		*imxdma;
	unsigned int			channel;
	unsigned int			imxdma_channel;

	enum dma_slave_buswidth		word_size;
	dma_addr_t			per_address;
	u32				watermark_level;
	struct dma_chan			chan;
	spinlock_t			lock;
	struct dma_async_tx_descriptor	desc;
	dma_cookie_t			last_completed;
	enum dma_status			status;
	int				dma_request;
	struct scatterlist		*sg_list;
};

#define MAX_DMA_CHANNELS 8

struct imxdma_engine {
	struct device			*dev;
	struct device_dma_parameters	dma_parms;
	struct dma_device		dma_device;
	struct imxdma_channel		channel[MAX_DMA_CHANNELS];
};

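/*
 * Driver design note: this is a thin dmaengine wrapper around the
 * legacy <mach/dma-v1.h> API.  Each channel owns exactly one
 * statically embedded descriptor (imxdma_channel::desc), so only a
 * single transfer per channel can be in flight at any time; the prep
 * functions below return NULL while a transfer is still marked
 * DMA_IN_PROGRESS.
 */
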
static struct imxdma_channel *to_imxdma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct imxdma_channel, chan);
}

static void imxdma_handle(struct imxdma_channel *imxdmac)
{
	/*
	 * Record completion before the callback, so that a client polling
	 * imxdma_tx_status() from its callback sees the final state.
	 */
	imxdmac->last_completed = imxdmac->desc.cookie;
	if (imxdmac->desc.callback)
		imxdmac->desc.callback(imxdmac->desc.callback_param);
}

static void imxdma_irq_handler(int channel, void *data)
{
	struct imxdma_channel *imxdmac = data;

	imxdmac->status = DMA_SUCCESS;
	imxdma_handle(imxdmac);
}

static void imxdma_err_handler(int channel, void *data, int error)
{
	struct imxdma_channel *imxdmac = data;

	imxdmac->status = DMA_ERROR;
	imxdma_handle(imxdmac);
}

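/*
 * Progression handler, registered with the dma-v1 core for cyclic
 * transfers: it is invoked as the hardware advances through the
 * scatterlist, i.e. once per completed period, which is what drives
 * the client's period callback.
 */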
static void imxdma_progression(int channel, void *data,
		struct scatterlist *sg)
{
	struct imxdma_channel *imxdmac = data;

	imxdmac->status = DMA_SUCCESS;
	imxdma_handle(imxdmac);
}

static int imxdma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
		unsigned long arg)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct dma_slave_config *dmaengine_cfg = (void *)arg;
	int ret;
	unsigned int mode = 0;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		imxdmac->status = DMA_ERROR;
		imx_dma_disable(imxdmac->imxdma_channel);
		return 0;
	case DMA_SLAVE_CONFIG:
		if (dmaengine_cfg->direction == DMA_DEV_TO_MEM) {
			imxdmac->per_address = dmaengine_cfg->src_addr;
			imxdmac->watermark_level = dmaengine_cfg->src_maxburst;
			imxdmac->word_size = dmaengine_cfg->src_addr_width;
		} else {
			imxdmac->per_address = dmaengine_cfg->dst_addr;
			imxdmac->watermark_level = dmaengine_cfg->dst_maxburst;
			imxdmac->word_size = dmaengine_cfg->dst_addr_width;
		}

		switch (imxdmac->word_size) {
		case DMA_SLAVE_BUSWIDTH_1_BYTE:
			mode = IMX_DMA_MEMSIZE_8;
			break;
		case DMA_SLAVE_BUSWIDTH_2_BYTES:
			mode = IMX_DMA_MEMSIZE_16;
			break;
		default:
		case DMA_SLAVE_BUSWIDTH_4_BYTES:
			mode = IMX_DMA_MEMSIZE_32;
			break;
		}
		ret = imx_dma_config_channel(imxdmac->imxdma_channel,
				mode | IMX_DMA_TYPE_FIFO,
				IMX_DMA_MEMSIZE_32 | IMX_DMA_TYPE_LINEAR,
				imxdmac->dma_request, 1);

		if (ret)
			return ret;

		imx_dma_config_burstlen(imxdmac->imxdma_channel,
				imxdmac->watermark_level * imxdmac->word_size);

		return 0;
	default:
		return -ENOSYS;
	}

	return -EINVAL;
}

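/*
 * Example (sketch, not from a real client): how a peripheral driver
 * would typically configure one of these channels before preparing a
 * slave transfer.  "fifo_phys" is a hypothetical FIFO address; the
 * width and burst size are illustrative only.
 *
 *	struct dma_slave_config cfg = {
 *		.direction	= DMA_DEV_TO_MEM,
 *		.src_addr	= fifo_phys,
 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,
 *		.src_maxburst	= 8,
 *	};
 *	ret = dmaengine_slave_config(chan, &cfg);
 */
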
static enum dma_status imxdma_tx_status(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *txstate)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	dma_cookie_t last_used;
	enum dma_status ret;

	last_used = chan->cookie;

	ret = dma_async_is_complete(cookie, imxdmac->last_completed, last_used);
	dma_set_tx_state(txstate, imxdmac->last_completed, last_used, 0);

	return ret;
}

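/*
 * dmaengine cookies are positive sequence numbers; zero and negative
 * values are reserved (unsubmitted / error codes), hence the wrap back
 * to 1 on signed overflow below.
 */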
static dma_cookie_t imxdma_assign_cookie(struct imxdma_channel *imxdma)
{
	dma_cookie_t cookie = imxdma->chan.cookie;

	if (++cookie < 0)
		cookie = 1;

	imxdma->chan.cookie = cookie;
	imxdma->desc.cookie = cookie;

	return cookie;
}

static dma_cookie_t imxdma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(tx->chan);
	dma_cookie_t cookie;

	spin_lock_irq(&imxdmac->lock);

	cookie = imxdma_assign_cookie(imxdmac);

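	/*
	 * Note: the hardware is started here at submit time, not from
	 * device_issue_pending() as the dmaengine API normally expects.
	 */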
	imx_dma_enable(imxdmac->imxdma_channel);

	spin_unlock_irq(&imxdmac->lock);

	return cookie;
}

static int imxdma_alloc_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imx_dma_data *data = chan->private;

	imxdmac->dma_request = data->dma_request;

	dma_async_tx_descriptor_init(&imxdmac->desc, chan);
	imxdmac->desc.tx_submit = imxdma_tx_submit;
	/* txd.flags will be overwritten in prep funcs */
	imxdmac->desc.flags = DMA_CTRL_ACK;

	imxdmac->status = DMA_SUCCESS;

	return 0;
}

static void imxdma_free_chan_resources(struct dma_chan *chan)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);

	imx_dma_disable(imxdmac->imxdma_channel);

	/* kfree(NULL) is a no-op, so no need to check first */
	kfree(imxdmac->sg_list);
	imxdmac->sg_list = NULL;
}

static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
		struct dma_chan *chan, struct scatterlist *sgl,
		unsigned int sg_len, enum dma_transfer_direction direction,
		unsigned long flags)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct scatterlist *sg;
	int i, ret, dma_length = 0;
	unsigned int dmamode;

	if (imxdmac->status == DMA_IN_PROGRESS)
		return NULL;

	imxdmac->status = DMA_IN_PROGRESS;

	for_each_sg(sgl, sg, sg_len, i) {
		dma_length += sg->length;
	}

	if (direction == DMA_DEV_TO_MEM)
		dmamode = DMA_MODE_READ;
	else
		dmamode = DMA_MODE_WRITE;

	switch (imxdmac->word_size) {
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		if (sgl->length & 3 || sgl->dma_address & 3)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		if (sgl->length & 1 || sgl->dma_address & 1)
			return NULL;
		break;
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		break;
	default:
		return NULL;
	}

	ret = imx_dma_setup_sg(imxdmac->imxdma_channel, sgl, sg_len,
		 dma_length, imxdmac->per_address, dmamode);
	if (ret)
		return NULL;

	return &imxdmac->desc;
}

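/*
 * Example (sketch): preparing and kicking off a slave transfer from a
 * client, assuming "chan" was obtained through dma_request_channel()
 * with a filter matching this driver, and sgl/sg_len describe an
 * already-mapped scatterlist.  my_done_cb/my_ctx are hypothetical:
 *
 *	desc = chan->device->device_prep_slave_sg(chan, sgl, sg_len,
 *			DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT);
 *	if (desc) {
 *		desc->callback = my_done_cb;
 *		desc->callback_param = my_ctx;
 *		cookie = desc->tx_submit(desc);
 *		dma_async_issue_pending(chan);
 *	}
 */
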
static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
		size_t period_len, enum dma_transfer_direction direction)
{
	struct imxdma_channel *imxdmac = to_imxdma_chan(chan);
	struct imxdma_engine *imxdma = imxdmac->imxdma;
	int i, ret;
	unsigned int periods = buf_len / period_len;
	unsigned int dmamode;

	dev_dbg(imxdma->dev, "%s channel: %d buf_len=%zu period_len=%zu\n",
			__func__, imxdmac->channel, buf_len, period_len);

	if (imxdmac->status == DMA_IN_PROGRESS)
		return NULL;
	imxdmac->status = DMA_IN_PROGRESS;

	ret = imx_dma_setup_progression_handler(imxdmac->imxdma_channel,
			imxdma_progression);
	if (ret) {
		dev_err(imxdma->dev, "Failed to setup the DMA handler\n");
		return NULL;
	}

	/* drop any scatterlist left over from a previous transfer */
	kfree(imxdmac->sg_list);

	imxdmac->sg_list = kcalloc(periods + 1,
			sizeof(struct scatterlist), GFP_KERNEL);
	if (!imxdmac->sg_list)
		return NULL;

	sg_init_table(imxdmac->sg_list, periods);

	for (i = 0; i < periods; i++) {
		imxdmac->sg_list[i].page_link = 0;
		imxdmac->sg_list[i].offset = 0;
		imxdmac->sg_list[i].dma_address = dma_addr;
		imxdmac->sg_list[i].length = period_len;
		dma_addr += period_len;
	}

	/*
	 * Close the loop: the extra entry is marked as a chain link
	 * (bit 0 of page_link set, "end" bit 1 cleared) pointing back at
	 * the first entry, so the hardware cycles through the periods
	 * indefinitely.
	 */
	imxdmac->sg_list[periods].offset = 0;
	imxdmac->sg_list[periods].length = 0;
	imxdmac->sg_list[periods].page_link =
		((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;

	if (direction == DMA_DEV_TO_MEM)
		dmamode = DMA_MODE_READ;
	else
		dmamode = DMA_MODE_WRITE;

	ret = imx_dma_setup_sg(imxdmac->imxdma_channel, imxdmac->sg_list, periods,
		 IMX_DMA_LENGTH_LOOP, imxdmac->per_address, dmamode);
	if (ret)
		return NULL;

	return &imxdmac->desc;
}

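/*
 * Example (sketch): a cyclic transfer as an audio driver might set it
 * up, with a DMA-mapped ring buffer of buf_len bytes split into
 * buf_len / period_len periods; the progression handler then fires the
 * callback once per completed period.  buf_phys is hypothetical:
 *
 *	desc = chan->device->device_prep_dma_cyclic(chan, buf_phys,
 *			buf_len, period_len, DMA_DEV_TO_MEM);
 */
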
static void imxdma_issue_pending(struct dma_chan *chan)
{
	/*
	 * Nothing to do here: each channel has a single, statically
	 * embedded descriptor, and the transfer is already started from
	 * imxdma_tx_submit().
	 */
}

static int __init imxdma_probe(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma;
	int ret, i;

	imxdma = kzalloc(sizeof(*imxdma), GFP_KERNEL);
	if (!imxdma)
		return -ENOMEM;

	INIT_LIST_HEAD(&imxdma->dma_device.channels);

	dma_cap_set(DMA_SLAVE, imxdma->dma_device.cap_mask);
	dma_cap_set(DMA_CYCLIC, imxdma->dma_device.cap_mask);

	/* Initialize channel parameters */
	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];

		imxdmac->imxdma_channel = imx_dma_request_by_prio("dmaengine",
				DMA_PRIO_MEDIUM);
		/* check the dma-v1 handle we just requested, not ->channel */
		if ((int)imxdmac->imxdma_channel < 0) {
			ret = -ENODEV;
			goto err_init;
		}

		imx_dma_setup_handlers(imxdmac->imxdma_channel,
		       imxdma_irq_handler, imxdma_err_handler, imxdmac);

		imxdmac->imxdma = imxdma;
		spin_lock_init(&imxdmac->lock);

		imxdmac->chan.device = &imxdma->dma_device;
		imxdmac->channel = i;

		/* Add the channel to the DMAC list */
		list_add_tail(&imxdmac->chan.device_node, &imxdma->dma_device.channels);
	}

	imxdma->dev = &pdev->dev;
	imxdma->dma_device.dev = &pdev->dev;

	imxdma->dma_device.device_alloc_chan_resources = imxdma_alloc_chan_resources;
	imxdma->dma_device.device_free_chan_resources = imxdma_free_chan_resources;
	imxdma->dma_device.device_tx_status = imxdma_tx_status;
	imxdma->dma_device.device_prep_slave_sg = imxdma_prep_slave_sg;
	imxdma->dma_device.device_prep_dma_cyclic = imxdma_prep_dma_cyclic;
	imxdma->dma_device.device_control = imxdma_control;
	imxdma->dma_device.device_issue_pending = imxdma_issue_pending;

	platform_set_drvdata(pdev, imxdma);

	imxdma->dma_device.dev->dma_parms = &imxdma->dma_parms;
	dma_set_max_seg_size(imxdma->dma_device.dev, 0xffffff);

	ret = dma_async_device_register(&imxdma->dma_device);
	if (ret) {
		dev_err(&pdev->dev, "unable to register\n");
		goto err_init;
	}

	return 0;

err_init:
	while (--i >= 0) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];
		imx_dma_free(imxdmac->imxdma_channel);
	}

	kfree(imxdma);
	return ret;
}

static int __exit imxdma_remove(struct platform_device *pdev)
{
	struct imxdma_engine *imxdma = platform_get_drvdata(pdev);
	int i;

	dma_async_device_unregister(&imxdma->dma_device);

	for (i = 0; i < MAX_DMA_CHANNELS; i++) {
		struct imxdma_channel *imxdmac = &imxdma->channel[i];

		imx_dma_free(imxdmac->imxdma_channel);
	}

	kfree(imxdma);

	return 0;
}

static struct platform_driver imxdma_driver = {
	.driver		= {
		.name	= "imx-dma",
	},
	.remove		= __exit_p(imxdma_remove),
};

static int __init imxdma_module_init(void)
{
	return platform_driver_probe(&imxdma_driver, imxdma_probe);
}
subsys_initcall(imxdma_module_init);

MODULE_AUTHOR("Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>");
MODULE_DESCRIPTION("i.MX DMA driver");
MODULE_LICENSE("GPL");