xref: /linux/drivers/dma/mpc512x_dma.c (revision e5c86679d5e864947a52fb31e45a425dea3e7fa9)
1 /*
2  * Copyright (C) Freescale Semicondutor, Inc. 2007, 2008.
3  * Copyright (C) Semihalf 2009
4  * Copyright (C) Ilya Yanok, Emcraft Systems 2010
5  * Copyright (C) Alexander Popov, Promcontroller 2014
6  * Copyright (C) Mario Six, Guntermann & Drunck GmbH, 2016
7  *
8  * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
9  * (defines, structures and comments) was taken from MPC5121 DMA driver
10  * written by Hongjun Chen <hong-jun.chen@freescale.com>.
11  *
12  * Approved as OSADL project by a majority of OSADL members and funded
13  * by OSADL membership fees in 2009;  for details see www.osadl.org.
14  *
15  * This program is free software; you can redistribute it and/or modify it
16  * under the terms of the GNU General Public License as published by the Free
17  * Software Foundation; either version 2 of the License, or (at your option)
18  * any later version.
19  *
20  * This program is distributed in the hope that it will be useful, but WITHOUT
21  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
22  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
23  * more details.
24  *
25  * The full GNU General Public License is included in this distribution in the
26  * file called COPYING.
27  */
28 
29 /*
30  * MPC512x and MPC8308 DMA driver. It supports memory to memory data transfers
31  * (tested using dmatest module) and data transfers between memory and
32  * peripheral I/O memory by means of slave scatter/gather with these
33  * limitations:
34  *  - chunked transfers (described by s/g lists with more than one item) are
35  *     refused as long as proper support for scatter/gather is missing
36  *  - transfers on MPC8308 always start from software as this SoC does not have
37  *     external request lines for peripheral flow control
38  *  - memory <-> I/O memory transfer chunks of sizes of 1, 2, 4, 16 (for
39  *     MPC512x), and 32 bytes are supported, and, consequently, source
40  *     addresses and destination addresses must be aligned accordingly;
41  *     furthermore, for MPC512x SoCs, the transfer size must be aligned on
42  *     (chunk size * maxburst)
43  */
44 
45 #include <linux/module.h>
46 #include <linux/dmaengine.h>
47 #include <linux/dma-mapping.h>
48 #include <linux/interrupt.h>
49 #include <linux/io.h>
50 #include <linux/slab.h>
51 #include <linux/of_address.h>
52 #include <linux/of_device.h>
53 #include <linux/of_irq.h>
54 #include <linux/of_dma.h>
55 #include <linux/of_platform.h>
56 
57 #include <linux/random.h>
58 
59 #include "dmaengine.h"
60 
/* Number of DMA Transfer descriptors allocated per channel */
#define MPC_DMA_DESCRIPTORS	64

/* Macro definitions */
/* Offset of the hardware TCD array within the controller register block */
#define MPC_DMA_TCD_OFFSET	0x1000

/*
 * Maximum channel counts for individual hardware variants
 * and the maximum channel count over all supported controllers,
 * used for data structure size
 */
#define MPC8308_DMACHAN_MAX	16
#define MPC512x_DMACHAN_MAX	64
#define MPC_DMA_CHANNELS	64

/* Arbitration mode of group and channel (bits in the DMACR register) */
#define MPC_DMA_DMACR_EDCG	(1 << 31)
#define MPC_DMA_DMACR_ERGA	(1 << 3)
#define MPC_DMA_DMACR_ERCA	(1 << 2)

/* Error codes (bits in the DMAES register) */
#define MPC_DMA_DMAES_VLD	(1 << 31)
#define MPC_DMA_DMAES_GPE	(1 << 15)
#define MPC_DMA_DMAES_CPE	(1 << 14)
/* Extract the number of the channel that raised the error */
#define MPC_DMA_DMAES_ERRCHN(err) \
				(((err) >> 8) & 0x3f)
#define MPC_DMA_DMAES_SAE	(1 << 7)
#define MPC_DMA_DMAES_SOE	(1 << 6)
#define MPC_DMA_DMAES_DAE	(1 << 5)
#define MPC_DMA_DMAES_DOE	(1 << 4)
#define MPC_DMA_DMAES_NCE	(1 << 3)
#define MPC_DMA_DMAES_SGE	(1 << 2)
#define MPC_DMA_DMAES_SBE	(1 << 1)
#define MPC_DMA_DMAES_DBE	(1 << 0)

#define MPC_DMA_DMAGPOR_SNOOP_ENABLE	(1 << 6)

/* Log2-encoded transfer sizes for the TCD ssize/dsize fields (not bytes) */
#define MPC_DMA_TSIZE_1		0x00
#define MPC_DMA_TSIZE_2		0x01
#define MPC_DMA_TSIZE_4		0x02
#define MPC_DMA_TSIZE_16	0x04
#define MPC_DMA_TSIZE_32	0x05
103 
/* MPC5121 DMA engine registers */
/* Packed so field offsets match the hardware register map exactly */
struct __attribute__ ((__packed__)) mpc_dma_regs {
	/* 0x00 */
	u32 dmacr;		/* DMA control register */
	u32 dmaes;		/* DMA error status */
	/* 0x08 */
	u32 dmaerqh;		/* DMA enable request high(channels 63~32) */
	u32 dmaerql;		/* DMA enable request low(channels 31~0) */
	u32 dmaeeih;		/* DMA enable error interrupt high(ch63~32) */
	u32 dmaeeil;		/* DMA enable error interrupt low(ch31~0) */
	/* 0x18 */
	u8 dmaserq;		/* DMA set enable request */
	u8 dmacerq;		/* DMA clear enable request */
	u8 dmaseei;		/* DMA set enable error interrupt */
	u8 dmaceei;		/* DMA clear enable error interrupt */
	/* 0x1c */
	u8 dmacint;		/* DMA clear interrupt request */
	u8 dmacerr;		/* DMA clear error */
	u8 dmassrt;		/* DMA set start bit */
	u8 dmacdne;		/* DMA clear DONE status bit */
	/* 0x20 */
	u32 dmainth;		/* DMA interrupt request high(ch63~32) */
	u32 dmaintl;		/* DMA interrupt request low(ch31~0) */
	u32 dmaerrh;		/* DMA error high(ch63~32) */
	u32 dmaerrl;		/* DMA error low(ch31~0) */
	/* 0x30 */
	u32 dmahrsh;		/* DMA hw request status high(ch63~32) */
	u32 dmahrsl;		/* DMA hardware request status low(ch31~0) */
	union {
		u32 dmaihsa;	/* DMA interrupt high select AXE(ch63~32) */
		u32 dmagpor;	/* (General purpose register on MPC8308) */
	};
	u32 dmailsa;		/* DMA interrupt low select AXE(ch31~0) */
	/* 0x40 ~ 0xff */
	u32 reserve0[48];	/* Reserved */
	/* 0x100 */
	u8 dchpri[MPC_DMA_CHANNELS];
	/* DMA channels(0~63) priority */
};
143 
/*
 * Transfer Control Descriptor — in-memory image of the hardware TCD.
 * Packed bitfield layout must match the controller's TCD format exactly;
 * images are copied to the hardware TCD array with memcpy_toio().
 */
struct __attribute__ ((__packed__)) mpc_dma_tcd {
	/* 0x00 */
	u32 saddr;		/* Source address */

	u32 smod:5;		/* Source address modulo */
	u32 ssize:3;		/* Source data transfer size */
	u32 dmod:5;		/* Destination address modulo */
	u32 dsize:3;		/* Destination data transfer size */
	u32 soff:16;		/* Signed source address offset */

	/* 0x08 */
	u32 nbytes;		/* Inner "minor" byte count */
	u32 slast;		/* Last source address adjustment */
	u32 daddr;		/* Destination address */

	/* 0x14 */
	u32 citer_elink:1;	/* Enable channel-to-channel linking on
				 * minor loop complete
				 */
	u32 citer_linkch:6;	/* Link channel for minor loop complete */
	u32 citer:9;		/* Current "major" iteration count */
	u32 doff:16;		/* Signed destination address offset */

	/* 0x18 */
	u32 dlast_sga;		/* Last Destination address adjustment/scatter
				 * gather address
				 */

	/* 0x1c */
	u32 biter_elink:1;	/* Enable channel-to-channel linking on major
				 * loop complete
				 */
	u32 biter_linkch:6;	/* Link channel for major loop complete */
	u32 biter:9;		/* Beginning "major" iteration count */
	u32 bwc:2;		/* Bandwidth control */
	u32 major_linkch:6;	/* Link channel number */
	u32 done:1;		/* Channel done */
	u32 active:1;		/* Channel active */
	u32 major_elink:1;	/* Enable channel-to-channel linking on major
				 * loop complete
				 */
	u32 e_sg:1;		/* Enable scatter/gather processing */
	u32 d_req:1;		/* Disable request */
	u32 int_half:1;		/* Enable an interrupt when major counter is
				 * half complete
				 */
	u32 int_maj:1;		/* Enable an interrupt when major iteration
				 * count completes
				 */
	u32 start:1;		/* Channel start */
};
195 
/* One dmaengine transaction: a software TCD plus bookkeeping */
struct mpc_dma_desc {
	struct dma_async_tx_descriptor	desc;		/* dmaengine cookie/callback state */
	struct mpc_dma_tcd		*tcd;		/* CPU pointer into the channel's TCD pool */
	dma_addr_t			tcd_paddr;	/* bus address of this TCD (for s/g linking) */
	int				error;		/* 0, or -EIO set from the IRQ path */
	struct list_head		node;		/* link in one of the channel's state lists */
	int				will_access_peripheral;	/* nonzero for slave transfers */
};
204 
/*
 * Per-channel driver state. Descriptors migrate between the lists:
 * free -> prepared (prep_*) -> queued (tx_submit) -> active (execute)
 * -> completed (IRQ) -> free (tasklet).
 */
struct mpc_dma_chan {
	struct dma_chan			chan;
	struct list_head		free;		/* unused descriptors */
	struct list_head		prepared;	/* prepared, not yet submitted */
	struct list_head		queued;		/* submitted, waiting for hardware */
	struct list_head		active;		/* currently programmed into hardware */
	struct list_head		completed;	/* finished, callbacks pending */
	struct mpc_dma_tcd		*tcd;		/* coherent TCD pool for this channel */
	dma_addr_t			tcd_paddr;	/* bus address of the TCD pool */

	/* Settings for access to peripheral FIFO */
	dma_addr_t			src_per_paddr;
	u32				src_tcd_nunits;
	u8				swidth;		/* source bus width in bytes */
	dma_addr_t			dst_per_paddr;
	u32				dst_tcd_nunits;
	u8				dwidth;		/* destination bus width in bytes */

	/* Lock for this structure */
	spinlock_t			lock;
};
226 
/* Controller-wide driver state */
struct mpc_dma {
	struct dma_device		dma;
	struct tasklet_struct		tasklet;	/* bottom half: error report + completions */
	struct mpc_dma_chan		channels[MPC_DMA_CHANNELS];
	struct mpc_dma_regs __iomem	*regs;
	struct mpc_dma_tcd __iomem	*tcd;		/* hardware TCD array in register space */
	int				irq;
	int				irq2;		/* second (error) IRQ, MPC8308 only */
	uint				error_status;	/* saved DMAES snapshot, consumed by tasklet */
	int				is_mpc8308;

	/* Lock for error_status field in this structure */
	spinlock_t			error_status_lock;
};
241 
242 #define DRV_NAME	"mpc512x_dma"
243 
/* Convert struct dma_chan to struct mpc_dma_chan */
static inline struct mpc_dma_chan *dma_chan_to_mpc_dma_chan(struct dma_chan *c)
{
	/* 'chan' is embedded in mpc_dma_chan, so container_of is safe */
	return container_of(c, struct mpc_dma_chan, chan);
}
249 
/* Convert struct dma_chan to struct mpc_dma */
static inline struct mpc_dma *dma_chan_to_mpc_dma(struct dma_chan *c)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(c);

	/*
	 * channels[] is indexed by chan_id, so container_of on the
	 * chan_id'th element recovers the enclosing mpc_dma.
	 */
	return container_of(mchan, struct mpc_dma, channels[c->chan_id]);
}
257 
/*
 * Execute all queued DMA descriptors.
 *
 * Following requirements must be met while calling mpc_dma_execute():
 *	a) mchan->lock is acquired,
 *	b) mchan->active list is empty,
 *	c) mchan->queued list contains at least one entry.
 */
static void mpc_dma_execute(struct mpc_dma_chan *mchan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(&mchan->chan);
	struct mpc_dma_desc *first = NULL;
	struct mpc_dma_desc *prev = NULL;
	struct mpc_dma_desc *mdesc;
	int cid = mchan->chan.chan_id;

	while (!list_empty(&mchan->queued)) {
		mdesc = list_first_entry(&mchan->queued,
						struct mpc_dma_desc, node);
		/*
		 * Grab either several mem-to-mem transfer descriptors
		 * or one peripheral transfer descriptor,
		 * don't mix mem-to-mem and peripheral transfer descriptors
		 * within the same 'active' list.
		 */
		if (mdesc->will_access_peripheral) {
			/* Take it only if it would be alone on 'active' */
			if (list_empty(&mchan->active))
				list_move_tail(&mdesc->node, &mchan->active);
			break;
		} else {
			list_move_tail(&mdesc->node, &mchan->active);
		}
	}

	/* Chain descriptors into one transaction */
	list_for_each_entry(mdesc, &mchan->active, node) {
		if (!first)
			first = mdesc;

		if (!prev) {
			prev = mdesc;
			continue;
		}

		/* Link previous TCD to this one via its scatter/gather addr */
		prev->tcd->dlast_sga = mdesc->tcd_paddr;
		prev->tcd->e_sg = 1;
		mdesc->tcd->start = 1;

		prev = mdesc;
	}

	/* Raise the interrupt only when the last descriptor completes */
	prev->tcd->int_maj = 1;

	/* Send first descriptor in chain into hardware */
	memcpy_toio(&mdma->tcd[cid], first->tcd, sizeof(struct mpc_dma_tcd));

	if (first != prev)
		mdma->tcd[cid].e_sg = 1;

	if (mdma->is_mpc8308) {
		/* MPC8308, no request lines, software initiated start */
		out_8(&mdma->regs->dmassrt, cid);
	} else if (first->will_access_peripheral) {
		/* Peripherals involved, start by external request signal */
		out_8(&mdma->regs->dmaserq, cid);
	} else {
		/* Memory to memory transfer, software initiated start */
		out_8(&mdma->regs->dmassrt, cid);
	}
}
328 
/*
 * Handle interrupt on one half of DMA controller (32 channels).
 * @is: interrupt-request bits, @es: error bits, @off: channel number base
 * (0 for the low half, 32 for the high half). Called from hard IRQ context.
 */
static void mpc_dma_irq_process(struct mpc_dma *mdma, u32 is, u32 es, int off)
{
	struct mpc_dma_chan *mchan;
	struct mpc_dma_desc *mdesc;
	u32 status = is | es;
	int ch;

	/* Visit every set bit, highest channel first */
	while ((ch = fls(status) - 1) >= 0) {
		status &= ~(1 << ch);
		mchan = &mdma->channels[ch + off];

		spin_lock(&mchan->lock);

		/* Acknowledge interrupt and error status for this channel */
		out_8(&mdma->regs->dmacint, ch + off);
		out_8(&mdma->regs->dmacerr, ch + off);

		/* Check error status */
		if (es & (1 << ch))
			list_for_each_entry(mdesc, &mchan->active, node)
				mdesc->error = -EIO;

		/* Execute queued descriptors */
		list_splice_tail_init(&mchan->active, &mchan->completed);
		if (!list_empty(&mchan->queued))
			mpc_dma_execute(mchan);

		spin_unlock(&mchan->lock);
	}
}
359 
/* Interrupt handler */
static irqreturn_t mpc_dma_irq(int irq, void *data)
{
	struct mpc_dma *mdma = data;
	uint es;

	/* Save error status register; keep only the first pending error */
	es = in_be32(&mdma->regs->dmaes);
	spin_lock(&mdma->error_status_lock);
	if ((es & MPC_DMA_DMAES_VLD) && mdma->error_status == 0)
		mdma->error_status = es;
	spin_unlock(&mdma->error_status_lock);

	/* Handle interrupt on each channel */
	if (mdma->dma.chancnt > 32) {
		/* High half exists only on 64-channel (MPC512x) parts */
		mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmainth),
					in_be32(&mdma->regs->dmaerrh), 32);
	}
	mpc_dma_irq_process(mdma, in_be32(&mdma->regs->dmaintl),
					in_be32(&mdma->regs->dmaerrl), 0);

	/* Schedule tasklet to report errors and run completion callbacks */
	tasklet_schedule(&mdma->tasklet);

	return IRQ_HANDLED;
}
386 
/*
 * Process completed descriptors: invoke their callbacks, run dependencies,
 * advance the completed cookie and return the descriptors to the free list.
 * Called from the tasklet and from the prep paths when out of descriptors.
 */
static void mpc_dma_process_completed(struct mpc_dma *mdma)
{
	dma_cookie_t last_cookie = 0;
	struct mpc_dma_chan *mchan;
	struct mpc_dma_desc *mdesc;
	struct dma_async_tx_descriptor *desc;
	unsigned long flags;
	LIST_HEAD(list);
	int i;

	for (i = 0; i < mdma->dma.chancnt; i++) {
		mchan = &mdma->channels[i];

		/* Get all completed descriptors */
		spin_lock_irqsave(&mchan->lock, flags);
		if (!list_empty(&mchan->completed))
			list_splice_tail_init(&mchan->completed, &list);
		spin_unlock_irqrestore(&mchan->lock, flags);

		if (list_empty(&list))
			continue;

		/* Execute callbacks and run dependencies (without the lock) */
		list_for_each_entry(mdesc, &list, node) {
			desc = &mdesc->desc;

			dmaengine_desc_get_callback_invoke(desc, NULL);

			last_cookie = desc->cookie;
			dma_run_dependencies(desc);
		}

		/* Free descriptors; this also drains 'list' for the next channel */
		spin_lock_irqsave(&mchan->lock, flags);
		list_splice_tail_init(&list, &mchan->free);
		mchan->chan.completed_cookie = last_cookie;
		spin_unlock_irqrestore(&mchan->lock, flags);
	}
}
427 
428 /* DMA Tasklet */
429 static void mpc_dma_tasklet(unsigned long data)
430 {
431 	struct mpc_dma *mdma = (void *)data;
432 	unsigned long flags;
433 	uint es;
434 
435 	spin_lock_irqsave(&mdma->error_status_lock, flags);
436 	es = mdma->error_status;
437 	mdma->error_status = 0;
438 	spin_unlock_irqrestore(&mdma->error_status_lock, flags);
439 
440 	/* Print nice error report */
441 	if (es) {
442 		dev_err(mdma->dma.dev,
443 			"Hardware reported following error(s) on channel %u:\n",
444 						      MPC_DMA_DMAES_ERRCHN(es));
445 
446 		if (es & MPC_DMA_DMAES_GPE)
447 			dev_err(mdma->dma.dev, "- Group Priority Error\n");
448 		if (es & MPC_DMA_DMAES_CPE)
449 			dev_err(mdma->dma.dev, "- Channel Priority Error\n");
450 		if (es & MPC_DMA_DMAES_SAE)
451 			dev_err(mdma->dma.dev, "- Source Address Error\n");
452 		if (es & MPC_DMA_DMAES_SOE)
453 			dev_err(mdma->dma.dev, "- Source Offset Configuration Error\n");
454 		if (es & MPC_DMA_DMAES_DAE)
455 			dev_err(mdma->dma.dev, "- Destination Address Error\n");
456 		if (es & MPC_DMA_DMAES_DOE)
457 			dev_err(mdma->dma.dev, "- Destination Offset Configuration Error\n");
458 		if (es & MPC_DMA_DMAES_NCE)
459 			dev_err(mdma->dma.dev, "- NBytes/Citter Configuration Error\n");
460 		if (es & MPC_DMA_DMAES_SGE)
461 			dev_err(mdma->dma.dev, "- Scatter/Gather Configuration Error\n");
462 		if (es & MPC_DMA_DMAES_SBE)
463 			dev_err(mdma->dma.dev, "- Source Bus Error\n");
464 		if (es & MPC_DMA_DMAES_DBE)
465 			dev_err(mdma->dma.dev, "- Destination Bus Error\n");
466 	}
467 
468 	mpc_dma_process_completed(mdma);
469 }
470 
/*
 * Submit descriptor to hardware (dmaengine ->tx_submit hook).
 * Moves the prepared descriptor to the queue and, if the channel is
 * idle, programs it into hardware immediately. Returns the cookie.
 */
static dma_cookie_t mpc_dma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(txd->chan);
	struct mpc_dma_desc *mdesc;
	unsigned long flags;
	dma_cookie_t cookie;

	mdesc = container_of(txd, struct mpc_dma_desc, desc);

	spin_lock_irqsave(&mchan->lock, flags);

	/* Move descriptor to queue */
	list_move_tail(&mdesc->node, &mchan->queued);

	/* If channel is idle, execute all queued descriptors */
	if (list_empty(&mchan->active))
		mpc_dma_execute(mchan);

	/* Update cookie */
	cookie = dma_cookie_assign(txd);
	spin_unlock_irqrestore(&mchan->lock, flags);

	return cookie;
}
496 
/*
 * Alloc channel resources: a coherent pool of TCDs plus
 * MPC_DMA_DESCRIPTORS software descriptors pointing into it.
 * Returns 0 on success (even if only some descriptors could be
 * allocated), -ENOMEM if nothing could be allocated.
 */
static int mpc_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc;
	struct mpc_dma_tcd *tcd;
	dma_addr_t tcd_paddr;
	unsigned long flags;
	LIST_HEAD(descs);
	int i;

	/* Alloc DMA memory for Transfer Control Descriptors */
	tcd = dma_alloc_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
							&tcd_paddr, GFP_KERNEL);
	if (!tcd)
		return -ENOMEM;

	/* Alloc descriptors for this channel */
	for (i = 0; i < MPC_DMA_DESCRIPTORS; i++) {
		mdesc = kzalloc(sizeof(struct mpc_dma_desc), GFP_KERNEL);
		if (!mdesc) {
			dev_notice(mdma->dma.dev,
				"Memory allocation error. Allocated only %u descriptors\n", i);
			break;
		}

		dma_async_tx_descriptor_init(&mdesc->desc, chan);
		mdesc->desc.flags = DMA_CTRL_ACK;
		mdesc->desc.tx_submit = mpc_dma_tx_submit;

		/* Each descriptor owns one slot of the coherent TCD pool */
		mdesc->tcd = &tcd[i];
		mdesc->tcd_paddr = tcd_paddr + (i * sizeof(struct mpc_dma_tcd));

		list_add_tail(&mdesc->node, &descs);
	}

	/* Return error only if no descriptors were allocated */
	if (i == 0) {
		dma_free_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
								tcd, tcd_paddr);
		return -ENOMEM;
	}

	spin_lock_irqsave(&mchan->lock, flags);
	mchan->tcd = tcd;
	mchan->tcd_paddr = tcd_paddr;
	list_splice_tail_init(&descs, &mchan->free);
	spin_unlock_irqrestore(&mchan->lock, flags);

	/* Enable Error Interrupt */
	out_8(&mdma->regs->dmaseei, chan->chan_id);

	return 0;
}
554 
/*
 * Free channel resources: descriptors and the coherent TCD pool.
 * The channel must be idle — only the free list may be non-empty.
 */
static void mpc_dma_free_chan_resources(struct dma_chan *chan)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc, *tmp;
	struct mpc_dma_tcd *tcd;
	dma_addr_t tcd_paddr;
	unsigned long flags;
	LIST_HEAD(descs);

	spin_lock_irqsave(&mchan->lock, flags);

	/* Channel must be idle */
	BUG_ON(!list_empty(&mchan->prepared));
	BUG_ON(!list_empty(&mchan->queued));
	BUG_ON(!list_empty(&mchan->active));
	BUG_ON(!list_empty(&mchan->completed));

	/* Move data out of the lock before freeing */
	list_splice_tail_init(&mchan->free, &descs);
	tcd = mchan->tcd;
	tcd_paddr = mchan->tcd_paddr;

	spin_unlock_irqrestore(&mchan->lock, flags);

	/* Free DMA memory used by descriptors */
	dma_free_coherent(mdma->dma.dev,
			MPC_DMA_DESCRIPTORS * sizeof(struct mpc_dma_tcd),
								tcd, tcd_paddr);

	/* Free descriptors */
	list_for_each_entry_safe(mdesc, tmp, &descs, node)
		kfree(mdesc);

	/* Disable Error Interrupt */
	out_8(&mdma->regs->dmaceei, chan->chan_id);
}
593 
/* Send all pending descriptor to hardware */
static void mpc_dma_issue_pending(struct dma_chan *chan)
{
	/*
	 * We are posting descriptors to the hardware as soon as
	 * they are ready (in tx_submit), so this function does nothing.
	 */
}
602 
/* Check request completion status via the generic cookie bookkeeping */
static enum dma_status
mpc_dma_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
	       struct dma_tx_state *txstate)
{
	return dma_cookie_status(chan, cookie, txstate);
}
610 
/*
 * Prepare descriptor for memory to memory copy.
 * Picks the largest transfer chunk (32/16/4/2/1 bytes) that src, dst
 * and len are all aligned to; 16-byte chunks are MPC512x only.
 * Returns NULL when no free descriptor is available.
 */
static struct dma_async_tx_descriptor *
mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
					size_t len, unsigned long flags)
{
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma_desc *mdesc = NULL;
	struct mpc_dma_tcd *tcd;
	unsigned long iflags;

	/* Get free descriptor */
	spin_lock_irqsave(&mchan->lock, iflags);
	if (!list_empty(&mchan->free)) {
		mdesc = list_first_entry(&mchan->free, struct mpc_dma_desc,
									node);
		list_del(&mdesc->node);
	}
	spin_unlock_irqrestore(&mchan->lock, iflags);

	if (!mdesc) {
		/* try to free completed descriptors */
		mpc_dma_process_completed(mdma);
		return NULL;
	}

	mdesc->error = 0;
	mdesc->will_access_peripheral = 0;
	tcd = mdesc->tcd;

	/* Prepare Transfer Control Descriptor for this transaction */
	memset(tcd, 0, sizeof(struct mpc_dma_tcd));

	/* ssize/dsize take encoded sizes; soff/doff advance in bytes */
	if (IS_ALIGNED(src | dst | len, 32)) {
		tcd->ssize = MPC_DMA_TSIZE_32;
		tcd->dsize = MPC_DMA_TSIZE_32;
		tcd->soff = 32;
		tcd->doff = 32;
	} else if (!mdma->is_mpc8308 && IS_ALIGNED(src | dst | len, 16)) {
		/* MPC8308 doesn't support 16 byte transfers */
		tcd->ssize = MPC_DMA_TSIZE_16;
		tcd->dsize = MPC_DMA_TSIZE_16;
		tcd->soff = 16;
		tcd->doff = 16;
	} else if (IS_ALIGNED(src | dst | len, 4)) {
		tcd->ssize = MPC_DMA_TSIZE_4;
		tcd->dsize = MPC_DMA_TSIZE_4;
		tcd->soff = 4;
		tcd->doff = 4;
	} else if (IS_ALIGNED(src | dst | len, 2)) {
		tcd->ssize = MPC_DMA_TSIZE_2;
		tcd->dsize = MPC_DMA_TSIZE_2;
		tcd->soff = 2;
		tcd->doff = 2;
	} else {
		tcd->ssize = MPC_DMA_TSIZE_1;
		tcd->dsize = MPC_DMA_TSIZE_1;
		tcd->soff = 1;
		tcd->doff = 1;
	}

	/* Whole copy is one minor loop; single major iteration */
	tcd->saddr = src;
	tcd->daddr = dst;
	tcd->nbytes = len;
	tcd->biter = 1;
	tcd->citer = 1;

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&mchan->lock, iflags);
	list_add_tail(&mdesc->node, &mchan->prepared);
	spin_unlock_irqrestore(&mchan->lock, iflags);

	return &mdesc->desc;
}
685 
686 inline u8 buswidth_to_dmatsize(u8 buswidth)
687 {
688 	u8 res;
689 
690 	for (res = 0; buswidth > 1; buswidth /= 2)
691 		res++;
692 	return res;
693 }
694 
695 static struct dma_async_tx_descriptor *
696 mpc_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
697 		unsigned int sg_len, enum dma_transfer_direction direction,
698 		unsigned long flags, void *context)
699 {
700 	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
701 	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
702 	struct mpc_dma_desc *mdesc = NULL;
703 	dma_addr_t per_paddr;
704 	u32 tcd_nunits;
705 	struct mpc_dma_tcd *tcd;
706 	unsigned long iflags;
707 	struct scatterlist *sg;
708 	size_t len;
709 	int iter, i;
710 
711 	/* Currently there is no proper support for scatter/gather */
712 	if (sg_len != 1)
713 		return NULL;
714 
715 	if (!is_slave_direction(direction))
716 		return NULL;
717 
718 	for_each_sg(sgl, sg, sg_len, i) {
719 		spin_lock_irqsave(&mchan->lock, iflags);
720 
721 		mdesc = list_first_entry(&mchan->free,
722 						struct mpc_dma_desc, node);
723 		if (!mdesc) {
724 			spin_unlock_irqrestore(&mchan->lock, iflags);
725 			/* Try to free completed descriptors */
726 			mpc_dma_process_completed(mdma);
727 			return NULL;
728 		}
729 
730 		list_del(&mdesc->node);
731 
732 		if (direction == DMA_DEV_TO_MEM) {
733 			per_paddr = mchan->src_per_paddr;
734 			tcd_nunits = mchan->src_tcd_nunits;
735 		} else {
736 			per_paddr = mchan->dst_per_paddr;
737 			tcd_nunits = mchan->dst_tcd_nunits;
738 		}
739 
740 		spin_unlock_irqrestore(&mchan->lock, iflags);
741 
742 		if (per_paddr == 0 || tcd_nunits == 0)
743 			goto err_prep;
744 
745 		mdesc->error = 0;
746 		mdesc->will_access_peripheral = 1;
747 
748 		/* Prepare Transfer Control Descriptor for this transaction */
749 		tcd = mdesc->tcd;
750 
751 		memset(tcd, 0, sizeof(struct mpc_dma_tcd));
752 
753 		if (direction == DMA_DEV_TO_MEM) {
754 			tcd->saddr = per_paddr;
755 			tcd->daddr = sg_dma_address(sg);
756 
757 			if (!IS_ALIGNED(sg_dma_address(sg), mchan->dwidth))
758 				goto err_prep;
759 
760 			tcd->soff = 0;
761 			tcd->doff = mchan->dwidth;
762 		} else {
763 			tcd->saddr = sg_dma_address(sg);
764 			tcd->daddr = per_paddr;
765 
766 			if (!IS_ALIGNED(sg_dma_address(sg), mchan->swidth))
767 				goto err_prep;
768 
769 			tcd->soff = mchan->swidth;
770 			tcd->doff = 0;
771 		}
772 
773 		tcd->ssize = buswidth_to_dmatsize(mchan->swidth);
774 		tcd->dsize = buswidth_to_dmatsize(mchan->dwidth);
775 
776 		if (mdma->is_mpc8308) {
777 			tcd->nbytes = sg_dma_len(sg);
778 			if (!IS_ALIGNED(tcd->nbytes, mchan->swidth))
779 				goto err_prep;
780 
781 			/* No major loops for MPC8303 */
782 			tcd->biter = 1;
783 			tcd->citer = 1;
784 		} else {
785 			len = sg_dma_len(sg);
786 			tcd->nbytes = tcd_nunits * tcd->ssize;
787 			if (!IS_ALIGNED(len, tcd->nbytes))
788 				goto err_prep;
789 
790 			iter = len / tcd->nbytes;
791 			if (iter >= 1 << 15) {
792 				/* len is too big */
793 				goto err_prep;
794 			}
795 			/* citer_linkch contains the high bits of iter */
796 			tcd->biter = iter & 0x1ff;
797 			tcd->biter_linkch = iter >> 9;
798 			tcd->citer = tcd->biter;
799 			tcd->citer_linkch = tcd->biter_linkch;
800 		}
801 
802 		tcd->e_sg = 0;
803 		tcd->d_req = 1;
804 
805 		/* Place descriptor in prepared list */
806 		spin_lock_irqsave(&mchan->lock, iflags);
807 		list_add_tail(&mdesc->node, &mchan->prepared);
808 		spin_unlock_irqrestore(&mchan->lock, iflags);
809 	}
810 
811 	return &mdesc->desc;
812 
813 err_prep:
814 	/* Put the descriptor back */
815 	spin_lock_irqsave(&mchan->lock, iflags);
816 	list_add_tail(&mdesc->node, &mchan->free);
817 	spin_unlock_irqrestore(&mchan->lock, iflags);
818 
819 	return NULL;
820 }
821 
822 inline bool is_buswidth_valid(u8 buswidth, bool is_mpc8308)
823 {
824 	switch (buswidth) {
825 	case 16:
826 		if (is_mpc8308)
827 			return false;
828 	case 1:
829 	case 2:
830 	case 4:
831 	case 32:
832 		break;
833 	default:
834 		return false;
835 	}
836 
837 	return true;
838 }
839 
/*
 * dmaengine ->device_config hook: record peripheral addresses, bus
 * widths and maxburst values for later use by prep_slave_sg.
 * Returns -EINVAL for misaligned addresses or unsupported widths.
 */
static int mpc_dma_device_config(struct dma_chan *chan,
				 struct dma_slave_config *cfg)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(&mchan->chan);
	unsigned long flags;

	/*
	 * Software constraints:
	 *  - only transfers between a peripheral device and memory are
	 *     supported
	 *  - transfer chunk sizes of 1, 2, 4, 16 (for MPC512x), and 32 bytes
	 *     are supported, and, consequently, source addresses and
	 *     destination addresses; must be aligned accordingly; furthermore,
	 *     for MPC512x SoCs, the transfer size must be aligned on (chunk
	 *     size * maxburst)
	 *  - during the transfer, the RAM address is incremented by the size
	 *     of transfer chunk
	 *  - the peripheral port's address is constant during the transfer.
	 */

	if (!IS_ALIGNED(cfg->src_addr, cfg->src_addr_width) ||
	    !IS_ALIGNED(cfg->dst_addr, cfg->dst_addr_width)) {
		return -EINVAL;
	}

	if (!is_buswidth_valid(cfg->src_addr_width, mdma->is_mpc8308) ||
	    !is_buswidth_valid(cfg->dst_addr_width, mdma->is_mpc8308))
		return -EINVAL;

	spin_lock_irqsave(&mchan->lock, flags);

	mchan->src_per_paddr = cfg->src_addr;
	mchan->src_tcd_nunits = cfg->src_maxburst;
	mchan->swidth = cfg->src_addr_width;
	mchan->dst_per_paddr = cfg->dst_addr;
	mchan->dst_tcd_nunits = cfg->dst_maxburst;
	mchan->dwidth = cfg->dst_addr_width;

	/* Apply defaults: a maxburst of 0 means one unit per request */
	if (mchan->src_tcd_nunits == 0)
		mchan->src_tcd_nunits = 1;
	if (mchan->dst_tcd_nunits == 0)
		mchan->dst_tcd_nunits = 1;

	spin_unlock_irqrestore(&mchan->lock, flags);

	return 0;
}
889 
/*
 * dmaengine ->device_terminate_all hook: stop hardware requests for
 * the channel and recycle every outstanding descriptor without
 * invoking completion callbacks.
 */
static int mpc_dma_device_terminate_all(struct dma_chan *chan)
{
	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
	unsigned long flags;

	/* Disable channel requests */
	spin_lock_irqsave(&mchan->lock, flags);

	out_8(&mdma->regs->dmacerq, chan->chan_id);
	list_splice_tail_init(&mchan->prepared, &mchan->free);
	list_splice_tail_init(&mchan->queued, &mchan->free);
	list_splice_tail_init(&mchan->active, &mchan->free);

	spin_unlock_irqrestore(&mchan->lock, flags);

	return 0;
}
908 
/*
 * Probe: map registers and IRQs, initialize the per-channel state,
 * program the controller defaults and register with the dmaengine
 * core and OF DMA helpers.
 */
static int mpc_dma_probe(struct platform_device *op)
{
	struct device_node *dn = op->dev.of_node;
	struct device *dev = &op->dev;
	struct dma_device *dma;
	struct mpc_dma *mdma;
	struct mpc_dma_chan *mchan;
	struct resource res;
	ulong regs_start, regs_size;
	int retval, i;
	u8 chancnt;

	mdma = devm_kzalloc(dev, sizeof(struct mpc_dma), GFP_KERNEL);
	if (!mdma) {
		retval = -ENOMEM;
		goto err;
	}

	mdma->irq = irq_of_parse_and_map(dn, 0);
	if (!mdma->irq) {
		dev_err(dev, "Error mapping IRQ!\n");
		retval = -EINVAL;
		goto err;
	}

	/* MPC8308 signals errors on a second interrupt line */
	if (of_device_is_compatible(dn, "fsl,mpc8308-dma")) {
		mdma->is_mpc8308 = 1;
		mdma->irq2 = irq_of_parse_and_map(dn, 1);
		if (!mdma->irq2) {
			dev_err(dev, "Error mapping IRQ!\n");
			retval = -EINVAL;
			goto err_dispose1;
		}
	}

	retval = of_address_to_resource(dn, 0, &res);
	if (retval) {
		dev_err(dev, "Error parsing memory region!\n");
		goto err_dispose2;
	}

	regs_start = res.start;
	regs_size = resource_size(&res);

	if (!devm_request_mem_region(dev, regs_start, regs_size, DRV_NAME)) {
		dev_err(dev, "Error requesting memory region!\n");
		retval = -EBUSY;
		goto err_dispose2;
	}

	mdma->regs = devm_ioremap(dev, regs_start, regs_size);
	if (!mdma->regs) {
		dev_err(dev, "Error mapping memory region!\n");
		retval = -ENOMEM;
		goto err_dispose2;
	}

	/* The hardware TCD array sits at a fixed offset in register space */
	mdma->tcd = (struct mpc_dma_tcd *)((u8 *)(mdma->regs)
							+ MPC_DMA_TCD_OFFSET);

	retval = request_irq(mdma->irq, &mpc_dma_irq, 0, DRV_NAME, mdma);
	if (retval) {
		dev_err(dev, "Error requesting IRQ!\n");
		retval = -EINVAL;
		goto err_dispose2;
	}

	if (mdma->is_mpc8308) {
		retval = request_irq(mdma->irq2, &mpc_dma_irq, 0,
							DRV_NAME, mdma);
		if (retval) {
			dev_err(dev, "Error requesting IRQ2!\n");
			retval = -EINVAL;
			goto err_free1;
		}
	}

	spin_lock_init(&mdma->error_status_lock);

	/* Hook up the dmaengine operations */
	dma = &mdma->dma;
	dma->dev = dev;
	dma->device_alloc_chan_resources = mpc_dma_alloc_chan_resources;
	dma->device_free_chan_resources = mpc_dma_free_chan_resources;
	dma->device_issue_pending = mpc_dma_issue_pending;
	dma->device_tx_status = mpc_dma_tx_status;
	dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy;
	dma->device_prep_slave_sg = mpc_dma_prep_slave_sg;
	dma->device_config = mpc_dma_device_config;
	dma->device_terminate_all = mpc_dma_device_terminate_all;

	INIT_LIST_HEAD(&dma->channels);
	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
	dma_cap_set(DMA_SLAVE, dma->cap_mask);

	/* Register only the channels this hardware variant actually has */
	if (mdma->is_mpc8308)
		chancnt = MPC8308_DMACHAN_MAX;
	else
		chancnt = MPC512x_DMACHAN_MAX;

	for (i = 0; i < chancnt; i++) {
		mchan = &mdma->channels[i];

		mchan->chan.device = dma;
		dma_cookie_init(&mchan->chan);

		INIT_LIST_HEAD(&mchan->free);
		INIT_LIST_HEAD(&mchan->prepared);
		INIT_LIST_HEAD(&mchan->queued);
		INIT_LIST_HEAD(&mchan->active);
		INIT_LIST_HEAD(&mchan->completed);

		spin_lock_init(&mchan->lock);
		list_add_tail(&mchan->chan.device_node, &dma->channels);
	}

	tasklet_init(&mdma->tasklet, mpc_dma_tasklet, (unsigned long)mdma);

	/*
	 * Configure DMA Engine:
	 * - Dynamic clock,
	 * - Round-robin group arbitration,
	 * - Round-robin channel arbitration.
	 */
	if (mdma->is_mpc8308) {
		/* MPC8308 has 16 channels and lacks some registers */
		out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_ERCA);

		/* enable snooping */
		out_be32(&mdma->regs->dmagpor, MPC_DMA_DMAGPOR_SNOOP_ENABLE);
		/* Disable error interrupts */
		out_be32(&mdma->regs->dmaeeil, 0);

		/* Clear interrupts status */
		out_be32(&mdma->regs->dmaintl, 0xFFFF);
		out_be32(&mdma->regs->dmaerrl, 0xFFFF);
	} else {
		out_be32(&mdma->regs->dmacr, MPC_DMA_DMACR_EDCG |
						MPC_DMA_DMACR_ERGA |
						MPC_DMA_DMACR_ERCA);

		/* Disable hardware DMA requests */
		out_be32(&mdma->regs->dmaerqh, 0);
		out_be32(&mdma->regs->dmaerql, 0);

		/* Disable error interrupts */
		out_be32(&mdma->regs->dmaeeih, 0);
		out_be32(&mdma->regs->dmaeeil, 0);

		/* Clear interrupts status */
		out_be32(&mdma->regs->dmainth, 0xFFFFFFFF);
		out_be32(&mdma->regs->dmaintl, 0xFFFFFFFF);
		out_be32(&mdma->regs->dmaerrh, 0xFFFFFFFF);
		out_be32(&mdma->regs->dmaerrl, 0xFFFFFFFF);

		/* Route interrupts to IPIC */
		out_be32(&mdma->regs->dmaihsa, 0);
		out_be32(&mdma->regs->dmailsa, 0);
	}

	/* Register DMA engine */
	dev_set_drvdata(dev, mdma);
	retval = dma_async_device_register(dma);
	if (retval)
		goto err_free2;

	/* Register with OF helpers for DMA lookups (nonfatal) */
	if (dev->of_node) {
		retval = of_dma_controller_register(dev->of_node,
						of_dma_xlate_by_chan_id, mdma);
		if (retval)
			dev_warn(dev, "Could not register for OF lookup\n");
	}

	return 0;

	/* Unwind in strict reverse order of acquisition */
err_free2:
	if (mdma->is_mpc8308)
		free_irq(mdma->irq2, mdma);
err_free1:
	free_irq(mdma->irq, mdma);
err_dispose2:
	if (mdma->is_mpc8308)
		irq_dispose_mapping(mdma->irq2);
err_dispose1:
	irq_dispose_mapping(mdma->irq);
err:
	return retval;
}
1097 
/* Remove: tear down in reverse order of probe (devm handles regs) */
static int mpc_dma_remove(struct platform_device *op)
{
	struct device *dev = &op->dev;
	struct mpc_dma *mdma = dev_get_drvdata(dev);

	if (dev->of_node)
		of_dma_controller_free(dev->of_node);
	dma_async_device_unregister(&mdma->dma);
	if (mdma->is_mpc8308) {
		free_irq(mdma->irq2, mdma);
		irq_dispose_mapping(mdma->irq2);
	}
	free_irq(mdma->irq, mdma);
	irq_dispose_mapping(mdma->irq);
	/* IRQs are gone, so the tasklet can no longer be rescheduled */
	tasklet_kill(&mdma->tasklet);

	return 0;
}
1116 
/* Device tree match table: MPC5121 and MPC8308 variants */
static const struct of_device_id mpc_dma_match[] = {
	{ .compatible = "fsl,mpc5121-dma", },
	{ .compatible = "fsl,mpc8308-dma", },
	{},
};
MODULE_DEVICE_TABLE(of, mpc_dma_match);
1123 
/* Platform driver glue */
static struct platform_driver mpc_dma_driver = {
	.probe		= mpc_dma_probe,
	.remove		= mpc_dma_remove,
	.driver = {
		.name = DRV_NAME,
		.of_match_table	= mpc_dma_match,
	},
};
1132 
1133 module_platform_driver(mpc_dma_driver);
1134 
1135 MODULE_LICENSE("GPL");
1136 MODULE_AUTHOR("Piotr Ziecik <kosmo@semihalf.com>");
1137