xref: /linux/drivers/dma/fsldma.c (revision 2a2c74b2efcb1a0ca3fdcb5fbb96ad8de6a29177)
1 /*
2  * Freescale MPC85xx, MPC83xx DMA Engine support
3  *
4  * Copyright (C) 2007-2010 Freescale Semiconductor, Inc. All rights reserved.
5  *
6  * Author:
7  *   Zhang Wei <wei.zhang@freescale.com>, Jul 2007
8  *   Ebony Zhu <ebony.zhu@freescale.com>, May 2007
9  *
10  * Description:
11  *   DMA engine driver for the Freescale MPC8540 DMA controller, which
12  *   also covers the MPC8560, MPC8555, MPC8548, MPC8641, and similar parts.
13  *   Support for the MPC8349 DMA controller is included as well.
14  *
15  * This driver instructs the DMA controller to issue the PCI Read Multiple
16  * command for PCI read operations, instead of using the default PCI Read Line
17  * command. Please be aware that this setting may result in read pre-fetching
18  * on some platforms.
19  *
20  * This is free software; you can redistribute it and/or modify
21  * it under the terms of the GNU General Public License as published by
22  * the Free Software Foundation; either version 2 of the License, or
23  * (at your option) any later version.
24  *
25  */
26 
27 #include <linux/init.h>
28 #include <linux/module.h>
29 #include <linux/pci.h>
30 #include <linux/slab.h>
31 #include <linux/interrupt.h>
32 #include <linux/dmaengine.h>
33 #include <linux/delay.h>
34 #include <linux/dma-mapping.h>
35 #include <linux/dmapool.h>
36 #include <linux/of_address.h>
37 #include <linux/of_irq.h>
38 #include <linux/of_platform.h>
39 
40 #include "dmaengine.h"
41 #include "fsldma.h"
42 
43 #define chan_dbg(chan, fmt, arg...)					\
44 	dev_dbg(chan->dev, "%s: " fmt, chan->name, ##arg)
45 #define chan_err(chan, fmt, arg...)					\
46 	dev_err(chan->dev, "%s: " fmt, chan->name, ##arg)
47 
48 static const char msg_ld_oom[] = "No free memory for link descriptor";
49 
50 /*
51  * Register Helpers
52  */
53 
54 static void set_sr(struct fsldma_chan *chan, u32 val)
55 {
56 	DMA_OUT(chan, &chan->regs->sr, val, 32);
57 }
58 
59 static u32 get_sr(struct fsldma_chan *chan)
60 {
61 	return DMA_IN(chan, &chan->regs->sr, 32);
62 }
63 
64 static void set_cdar(struct fsldma_chan *chan, dma_addr_t addr)
65 {
66 	DMA_OUT(chan, &chan->regs->cdar, addr | FSL_DMA_SNEN, 64);
67 }
68 
69 static dma_addr_t get_cdar(struct fsldma_chan *chan)
70 {
71 	return DMA_IN(chan, &chan->regs->cdar, 64) & ~FSL_DMA_SNEN;
72 }
73 
74 static u32 get_bcr(struct fsldma_chan *chan)
75 {
76 	return DMA_IN(chan, &chan->regs->bcr, 32);
77 }
78 
79 /*
80  * Descriptor Helpers
81  */
82 
83 static void set_desc_cnt(struct fsldma_chan *chan,
84 				struct fsl_dma_ld_hw *hw, u32 count)
85 {
86 	hw->count = CPU_TO_DMA(chan, count, 32);
87 }
88 
89 static void set_desc_src(struct fsldma_chan *chan,
90 			 struct fsl_dma_ld_hw *hw, dma_addr_t src)
91 {
92 	u64 snoop_bits;
93 
94 	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
95 		? ((u64)FSL_DMA_SATR_SREADTYPE_SNOOP_READ << 32) : 0;
96 	hw->src_addr = CPU_TO_DMA(chan, snoop_bits | src, 64);
97 }
98 
99 static void set_desc_dst(struct fsldma_chan *chan,
100 			 struct fsl_dma_ld_hw *hw, dma_addr_t dst)
101 {
102 	u64 snoop_bits;
103 
104 	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX)
105 		? ((u64)FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE << 32) : 0;
106 	hw->dst_addr = CPU_TO_DMA(chan, snoop_bits | dst, 64);
107 }
108 
109 static void set_desc_next(struct fsldma_chan *chan,
110 			  struct fsl_dma_ld_hw *hw, dma_addr_t next)
111 {
112 	u64 snoop_bits;
113 
114 	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
115 		? FSL_DMA_SNEN : 0;
116 	hw->next_ln_addr = CPU_TO_DMA(chan, snoop_bits | next, 64);
117 }
118 
119 static void set_ld_eol(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
120 {
121 	u64 snoop_bits;
122 
123 	snoop_bits = ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_83XX)
124 		? FSL_DMA_SNEN : 0;
125 
126 	desc->hw.next_ln_addr = CPU_TO_DMA(chan,
127 		DMA_TO_CPU(chan, desc->hw.next_ln_addr, 64) | FSL_DMA_EOL
128 			| snoop_bits, 64);
129 }
130 
131 /*
132  * DMA Engine Hardware Control Helpers
133  */
134 
135 static void dma_init(struct fsldma_chan *chan)
136 {
137 	/* Reset the channel */
138 	DMA_OUT(chan, &chan->regs->mr, 0, 32);
139 
140 	switch (chan->feature & FSL_DMA_IP_MASK) {
141 	case FSL_DMA_IP_85XX:
142 		/* Configure the channel with the following modes:
143 		 * EIE - Error interrupt enable
144 		 * EOLNIE - End of links interrupt enable
145 		 * BWC - Bandwidth sharing among channels
146 		 */
147 		DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_BWC
148 				| FSL_DMA_MR_EIE | FSL_DMA_MR_EOLNIE, 32);
149 		break;
150 	case FSL_DMA_IP_83XX:
151 		/* Configure the channel with the following modes:
152 		 * EOTIE - End-of-transfer interrupt enable
153 		 * PRC_RM - PCI read multiple
154 		 */
155 		DMA_OUT(chan, &chan->regs->mr, FSL_DMA_MR_EOTIE
156 				| FSL_DMA_MR_PRC_RM, 32);
157 		break;
158 	}
159 }
160 
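/*
 * The channel is considered idle when the channel-busy (CB) status bit is
 * clear or the channel-halted (CH) status bit is set.
 */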
161 static int dma_is_idle(struct fsldma_chan *chan)
162 {
163 	u32 sr = get_sr(chan);
164 	return (!(sr & FSL_DMA_SR_CB)) || (sr & FSL_DMA_SR_CH);
165 }
166 
167 /*
168  * Start the DMA controller
169  *
170  * Preconditions:
171  * - the CDAR register must point to the start descriptor
172  * - the MRn[CS] bit must be cleared
173  */
174 static void dma_start(struct fsldma_chan *chan)
175 {
176 	u32 mode;
177 
178 	mode = DMA_IN(chan, &chan->regs->mr, 32);
179 
180 	if (chan->feature & FSL_DMA_CHAN_PAUSE_EXT) {
181 		DMA_OUT(chan, &chan->regs->bcr, 0, 32);
182 		mode |= FSL_DMA_MR_EMP_EN;
183 	} else {
184 		mode &= ~FSL_DMA_MR_EMP_EN;
185 	}
186 
187 	if (chan->feature & FSL_DMA_CHAN_START_EXT) {
188 		mode |= FSL_DMA_MR_EMS_EN;
189 	} else {
190 		mode &= ~FSL_DMA_MR_EMS_EN;
191 		mode |= FSL_DMA_MR_CS;
192 	}
193 
194 	DMA_OUT(chan, &chan->regs->mr, mode, 32);
195 }
196 
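/*
 * Halt the DMA controller
 *
 * On 85xx, abort the current transfer via MR[CA] first; then clear the
 * channel start bits and poll (up to ~1ms) until the controller is idle.
 */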
197 static void dma_halt(struct fsldma_chan *chan)
198 {
199 	u32 mode;
200 	int i;
201 
202 	/* read the mode register */
203 	mode = DMA_IN(chan, &chan->regs->mr, 32);
204 
205 	/*
206 	 * The 85xx controller supports channel abort, which will stop
207 	 * the current transfer. On 83xx, this bit is the transfer error
208 	 * mask bit, which should not be changed.
209 	 */
210 	if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
211 		mode |= FSL_DMA_MR_CA;
212 		DMA_OUT(chan, &chan->regs->mr, mode, 32);
213 
214 		mode &= ~FSL_DMA_MR_CA;
215 	}
216 
217 	/* stop the DMA controller */
218 	mode &= ~(FSL_DMA_MR_CS | FSL_DMA_MR_EMS_EN);
219 	DMA_OUT(chan, &chan->regs->mr, mode, 32);
220 
221 	/* wait for the DMA controller to become idle */
222 	for (i = 0; i < 100; i++) {
223 		if (dma_is_idle(chan))
224 			return;
225 
226 		udelay(10);
227 	}
228 
229 	if (!dma_is_idle(chan))
230 		chan_err(chan, "DMA halt timeout!\n");
231 }
232 
233 /**
234  * fsl_chan_set_src_loop_size - Set source address hold transfer size
235  * @chan : Freescale DMA channel
236  * @size     : Address loop size, 0 to disable the loop
237  *
238  * Set the source address hold (loop) transfer size. While transferring
239  * data from the source address (SA), the DMA engine repeatedly cycles
240  * through a window of @size bytes: with a loop size of 4, it reads from
241  * SA, SA + 1, SA + 2, SA + 3, then wraps back to SA, SA + 1,
242  * and so on.
243  */
244 static void fsl_chan_set_src_loop_size(struct fsldma_chan *chan, int size)
245 {
246 	u32 mode;
247 
248 	mode = DMA_IN(chan, &chan->regs->mr, 32);
249 
250 	switch (size) {
251 	case 0:
252 		mode &= ~FSL_DMA_MR_SAHE;
253 		break;
254 	case 1:
255 	case 2:
256 	case 4:
257 	case 8:
258 		mode |= FSL_DMA_MR_SAHE | (__ilog2(size) << 14);
259 		break;
260 	}
261 
262 	DMA_OUT(chan, &chan->regs->mr, mode, 32);
263 }
264 
265 /**
266  * fsl_chan_set_dst_loop_size - Set destination address hold transfer size
267  * @chan : Freescale DMA channel
268  * @size     : Address loop size, 0 to disable the loop
269  *
270  * Set the destination address hold (loop) transfer size. While
271  * transferring data to the destination address (TA), the DMA engine
272  * repeatedly cycles through a window of @size bytes: with a loop size of
273  * 4, it writes to TA, TA + 1, TA + 2, TA + 3, then wraps back to TA,
274  * TA + 1, and so on.
275  */
276 static void fsl_chan_set_dst_loop_size(struct fsldma_chan *chan, int size)
277 {
278 	u32 mode;
279 
280 	mode = DMA_IN(chan, &chan->regs->mr, 32);
281 
282 	switch (size) {
283 	case 0:
284 		mode &= ~FSL_DMA_MR_DAHE;
285 		break;
286 	case 1:
287 	case 2:
288 	case 4:
289 	case 8:
290 		mode |= FSL_DMA_MR_DAHE | (__ilog2(size) << 16);
291 		break;
292 	}
293 
294 	DMA_OUT(chan, &chan->regs->mr, mode, 32);
295 }
296 
297 /**
298  * fsl_chan_set_request_count - Set DMA Request Count for external control
299  * @chan : Freescale DMA channel
300  * @size     : Number of bytes to transfer in a single request
301  *
302  * The Freescale DMA channel can be controlled by the external signal DREQ#.
303  * The DMA request count is the number of bytes that may be transferred
304  * before the channel pauses; a new assertion of DREQ# then resumes channel
305  * operation.
306  *
307  * A size of 0 disables external pause control. The maximum size is 1024.
308  */
309 static void fsl_chan_set_request_count(struct fsldma_chan *chan, int size)
310 {
311 	u32 mode;
312 
313 	BUG_ON(size > 1024);
314 
315 	mode = DMA_IN(chan, &chan->regs->mr, 32);
316 	mode |= (__ilog2(size) << 24) & 0x0f000000;
317 
318 	DMA_OUT(chan, &chan->regs->mr, mode, 32);
319 }
320 
321 /**
322  * fsl_chan_toggle_ext_pause - Toggle channel external pause status
323  * @chan : Freescale DMA channel
324  * @enable   : 0 is disabled, 1 is enabled.
325  *
326  * The Freescale DMA channel can be controlled by the external signal DREQ#.
327  * The DMA Request Count feature should be used in addition to this feature
328  * to set the number of bytes to transfer before pausing the channel.
329  */
330 static void fsl_chan_toggle_ext_pause(struct fsldma_chan *chan, int enable)
331 {
332 	if (enable)
333 		chan->feature |= FSL_DMA_CHAN_PAUSE_EXT;
334 	else
335 		chan->feature &= ~FSL_DMA_CHAN_PAUSE_EXT;
336 }
337 
338 /**
339  * fsl_chan_toggle_ext_start - Toggle channel external start status
340  * @chan : Freescale DMA channel
341  * @enable   : 0 is disabled, 1 is enabled.
342  *
343  * When external start is enabled, the channel is started by an
344  * external DMA start pin, so dma_start() does not begin the
345  * transfer immediately; the DMA channel waits until the
346  * control pin is asserted.
347  */
348 static void fsl_chan_toggle_ext_start(struct fsldma_chan *chan, int enable)
349 {
350 	if (enable)
351 		chan->feature |= FSL_DMA_CHAN_START_EXT;
352 	else
353 		chan->feature &= ~FSL_DMA_CHAN_START_EXT;
354 }
355 
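/*
 * Append a software descriptor (and all of its children) to the channel's
 * queue of pending transactions, linking its hardware descriptors onto the
 * end of any chain that is already queued.
 */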
356 static void append_ld_queue(struct fsldma_chan *chan, struct fsl_desc_sw *desc)
357 {
358 	struct fsl_desc_sw *tail = to_fsl_desc(chan->ld_pending.prev);
359 
360 	if (list_empty(&chan->ld_pending))
361 		goto out_splice;
362 
363 	/*
364 	 * Add the hardware descriptor to the chain of hardware descriptors
365 	 * that already exists in memory.
366 	 *
367 	 * This will un-set the EOL bit of the existing transaction, and the
368 	 * last link in this transaction will become the EOL descriptor.
369 	 */
370 	set_desc_next(chan, &tail->hw, desc->async_tx.phys);
371 
372 	/*
373 	 * Add the software descriptor and all children to the list
374 	 * of pending transactions
375 	 */
376 out_splice:
377 	list_splice_tail_init(&desc->tx_list, &chan->ld_pending);
378 }
379 
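/*
 * Assign a cookie to every descriptor in the transaction and append the
 * whole transaction to the channel's pending queue. The transfer itself is
 * started later, from issue_pending or the cleanup tasklet.
 */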
380 static dma_cookie_t fsl_dma_tx_submit(struct dma_async_tx_descriptor *tx)
381 {
382 	struct fsldma_chan *chan = to_fsl_chan(tx->chan);
383 	struct fsl_desc_sw *desc = tx_to_fsl_desc(tx);
384 	struct fsl_desc_sw *child;
385 	unsigned long flags;
386 	dma_cookie_t cookie = -EINVAL;
387 
388 	spin_lock_irqsave(&chan->desc_lock, flags);
389 
390 	/*
391 	 * assign cookies to all of the software descriptors
392 	 * that make up this transaction
393 	 */
394 	list_for_each_entry(child, &desc->tx_list, node) {
395 		cookie = dma_cookie_assign(&child->async_tx);
396 	}
397 
398 	/* put this transaction onto the tail of the pending queue */
399 	append_ld_queue(chan, desc);
400 
401 	spin_unlock_irqrestore(&chan->desc_lock, flags);
402 
403 	return cookie;
404 }
405 
406 /**
407  * fsl_dma_alloc_descriptor - Allocate descriptor from channel's DMA pool.
408  * @chan : Freescale DMA channel
409  *
410  * Return - The allocated descriptor, or NULL on failure.
411  */
412 static struct fsl_desc_sw *fsl_dma_alloc_descriptor(struct fsldma_chan *chan)
413 {
414 	struct fsl_desc_sw *desc;
415 	dma_addr_t pdesc;
416 
417 	desc = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &pdesc);
418 	if (!desc) {
419 		chan_dbg(chan, "out of memory for link descriptor\n");
420 		return NULL;
421 	}
422 
423 	memset(desc, 0, sizeof(*desc));
424 	INIT_LIST_HEAD(&desc->tx_list);
425 	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
426 	desc->async_tx.tx_submit = fsl_dma_tx_submit;
427 	desc->async_tx.phys = pdesc;
428 
429 #ifdef FSL_DMA_LD_DEBUG
430 	chan_dbg(chan, "LD %p allocated\n", desc);
431 #endif
432 
433 	return desc;
434 }
435 
436 /**
437  * fsl_dma_alloc_chan_resources - Allocate resources for DMA channel.
438  * @chan : Freescale DMA channel
439  *
440  * This function will create a dma pool for descriptor allocation.
441  *
442  * Return - The number of descriptors allocated.
443  */
444 static int fsl_dma_alloc_chan_resources(struct dma_chan *dchan)
445 {
446 	struct fsldma_chan *chan = to_fsl_chan(dchan);
447 
448 	/* Has this channel already been allocated? */
449 	if (chan->desc_pool)
450 		return 1;
451 
452 	/*
453 	 * The descriptors must be 32-byte aligned to meet the
454 	 * FSL DMA specification requirement.
455 	 */
456 	chan->desc_pool = dma_pool_create(chan->name, chan->dev,
457 					  sizeof(struct fsl_desc_sw),
458 					  __alignof__(struct fsl_desc_sw), 0);
459 	if (!chan->desc_pool) {
460 		chan_err(chan, "unable to allocate descriptor pool\n");
461 		return -ENOMEM;
462 	}
463 
464 	/* there is at least one descriptor free to be allocated */
465 	return 1;
466 }
467 
468 /**
469  * fsldma_free_desc_list - Free all descriptors in a queue
470  * @chan: Freescale DMA channel
471  * @list: the list to free
472  *
473  * LOCKING: must hold chan->desc_lock
474  */
475 static void fsldma_free_desc_list(struct fsldma_chan *chan,
476 				  struct list_head *list)
477 {
478 	struct fsl_desc_sw *desc, *_desc;
479 
480 	list_for_each_entry_safe(desc, _desc, list, node) {
481 		list_del(&desc->node);
482 #ifdef FSL_DMA_LD_DEBUG
483 		chan_dbg(chan, "LD %p free\n", desc);
484 #endif
485 		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
486 	}
487 }
488 
489 static void fsldma_free_desc_list_reverse(struct fsldma_chan *chan,
490 					  struct list_head *list)
491 {
492 	struct fsl_desc_sw *desc, *_desc;
493 
494 	list_for_each_entry_safe_reverse(desc, _desc, list, node) {
495 		list_del(&desc->node);
496 #ifdef FSL_DMA_LD_DEBUG
497 		chan_dbg(chan, "LD %p free\n", desc);
498 #endif
499 		dma_pool_free(chan->desc_pool, desc, desc->async_tx.phys);
500 	}
501 }
502 
503 /**
504  * fsl_dma_free_chan_resources - Free all resources of the channel.
505  * @chan : Freescale DMA channel
506  */
507 static void fsl_dma_free_chan_resources(struct dma_chan *dchan)
508 {
509 	struct fsldma_chan *chan = to_fsl_chan(dchan);
510 	unsigned long flags;
511 
512 	chan_dbg(chan, "free all channel resources\n");
513 	spin_lock_irqsave(&chan->desc_lock, flags);
514 	fsldma_free_desc_list(chan, &chan->ld_pending);
515 	fsldma_free_desc_list(chan, &chan->ld_running);
516 	spin_unlock_irqrestore(&chan->desc_lock, flags);
517 
518 	dma_pool_destroy(chan->desc_pool);
519 	chan->desc_pool = NULL;
520 }
521 
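/**
 * fsl_dma_prep_interrupt - prepare a descriptor that only raises an interrupt
 * @dchan: DMA channel
 * @flags: DMAEngine flags
 *
 * Builds a single NULL-transfer link descriptor; on this hardware its
 * completion is signalled via a programming-error (PE) interrupt, which is
 * used purely to run the client's callback.
 */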
522 static struct dma_async_tx_descriptor *
523 fsl_dma_prep_interrupt(struct dma_chan *dchan, unsigned long flags)
524 {
525 	struct fsldma_chan *chan;
526 	struct fsl_desc_sw *new;
527 
528 	if (!dchan)
529 		return NULL;
530 
531 	chan = to_fsl_chan(dchan);
532 
533 	new = fsl_dma_alloc_descriptor(chan);
534 	if (!new) {
535 		chan_err(chan, "%s\n", msg_ld_oom);
536 		return NULL;
537 	}
538 
539 	new->async_tx.cookie = -EBUSY;
540 	new->async_tx.flags = flags;
541 
542 	/* Insert the link descriptor into the LD list */
543 	list_add_tail(&new->node, &new->tx_list);
544 
545 	/* Set End-of-link to the last link descriptor of new list */
546 	set_ld_eol(chan, new);
547 
548 	return &new->async_tx;
549 }
550 
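/**
 * fsl_dma_prep_memcpy - prepare descriptors for a memory-to-memory copy
 * @dchan: DMA channel
 * @dma_dst: destination DMA address
 * @dma_src: source DMA address
 * @len: total number of bytes to copy
 * @flags: DMAEngine flags
 *
 * Splits the copy into a chain of link descriptors of at most
 * FSL_DMA_BCR_MAX_CNT bytes each and marks the last one as end-of-list.
 *
 * A rough client-side sketch (not part of this driver; chan, dst, src and
 * len are illustrative only):
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = chan->device->device_prep_dma_memcpy(chan, dst, src, len,
 *						   DMA_PREP_INTERRUPT);
 *	if (tx) {
 *		dmaengine_submit(tx);
 *		dma_async_issue_pending(chan);
 *	}
 */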
551 static struct dma_async_tx_descriptor *
552 fsl_dma_prep_memcpy(struct dma_chan *dchan,
553 	dma_addr_t dma_dst, dma_addr_t dma_src,
554 	size_t len, unsigned long flags)
555 {
556 	struct fsldma_chan *chan;
557 	struct fsl_desc_sw *first = NULL, *prev = NULL, *new;
558 	size_t copy;
559 
560 	if (!dchan)
561 		return NULL;
562 
563 	if (!len)
564 		return NULL;
565 
566 	chan = to_fsl_chan(dchan);
567 
568 	do {
569 
570 		/* Allocate the link descriptor from DMA pool */
571 		new = fsl_dma_alloc_descriptor(chan);
572 		if (!new) {
573 			chan_err(chan, "%s\n", msg_ld_oom);
574 			goto fail;
575 		}
576 
577 		copy = min(len, (size_t)FSL_DMA_BCR_MAX_CNT);
578 
579 		set_desc_cnt(chan, &new->hw, copy);
580 		set_desc_src(chan, &new->hw, dma_src);
581 		set_desc_dst(chan, &new->hw, dma_dst);
582 
583 		if (!first)
584 			first = new;
585 		else
586 			set_desc_next(chan, &prev->hw, new->async_tx.phys);
587 
588 		new->async_tx.cookie = 0;
589 		async_tx_ack(&new->async_tx);
590 
591 		prev = new;
592 		len -= copy;
593 		dma_src += copy;
594 		dma_dst += copy;
595 
596 		/* Insert the link descriptor into the LD list */
597 		list_add_tail(&new->node, &first->tx_list);
598 	} while (len);
599 
600 	new->async_tx.flags = flags; /* client is in control of this ack */
601 	new->async_tx.cookie = -EBUSY;
602 
603 	/* Set End-of-link to the last link descriptor of new list */
604 	set_ld_eol(chan, new);
605 
606 	return &first->async_tx;
607 
608 fail:
609 	if (!first)
610 		return NULL;
611 
612 	fsldma_free_desc_list_reverse(chan, &first->tx_list);
613 	return NULL;
614 }
615 
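/**
 * fsl_dma_prep_sg - prepare descriptors for a scatter/gather copy
 * @dchan: DMA channel
 * @dst_sg: destination scatterlist
 * @dst_nents: number of entries in @dst_sg
 * @src_sg: source scatterlist
 * @src_nents: number of entries in @src_sg
 * @flags: DMAEngine flags
 *
 * Walks both scatterlists in parallel, emitting one link descriptor per
 * contiguous chunk (capped at FSL_DMA_BCR_MAX_CNT bytes), until either
 * list is exhausted.
 */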
616 static struct dma_async_tx_descriptor *fsl_dma_prep_sg(struct dma_chan *dchan,
617 	struct scatterlist *dst_sg, unsigned int dst_nents,
618 	struct scatterlist *src_sg, unsigned int src_nents,
619 	unsigned long flags)
620 {
621 	struct fsl_desc_sw *first = NULL, *prev = NULL, *new = NULL;
622 	struct fsldma_chan *chan = to_fsl_chan(dchan);
623 	size_t dst_avail, src_avail;
624 	dma_addr_t dst, src;
625 	size_t len;
626 
627 	/* basic sanity checks */
628 	if (dst_nents == 0 || src_nents == 0)
629 		return NULL;
630 
631 	if (dst_sg == NULL || src_sg == NULL)
632 		return NULL;
633 
634 	/*
635 	 * TODO: should we check that both scatterlists have the same
636 	 * TODO: number of bytes in total? Is that really an error?
637 	 */
638 
639 	/* get prepared for the loop */
640 	dst_avail = sg_dma_len(dst_sg);
641 	src_avail = sg_dma_len(src_sg);
642 
643 	/* run until we are out of scatterlist entries */
644 	while (true) {
645 
646 		/* create the largest transaction possible */
647 		len = min_t(size_t, src_avail, dst_avail);
648 		len = min_t(size_t, len, FSL_DMA_BCR_MAX_CNT);
649 		if (len == 0)
650 			goto fetch;
651 
652 		dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) - dst_avail;
653 		src = sg_dma_address(src_sg) + sg_dma_len(src_sg) - src_avail;
654 
655 		/* allocate and populate the descriptor */
656 		new = fsl_dma_alloc_descriptor(chan);
657 		if (!new) {
658 			chan_err(chan, "%s\n", msg_ld_oom);
659 			goto fail;
660 		}
661 
662 		set_desc_cnt(chan, &new->hw, len);
663 		set_desc_src(chan, &new->hw, src);
664 		set_desc_dst(chan, &new->hw, dst);
665 
666 		if (!first)
667 			first = new;
668 		else
669 			set_desc_next(chan, &prev->hw, new->async_tx.phys);
670 
671 		new->async_tx.cookie = 0;
672 		async_tx_ack(&new->async_tx);
673 		prev = new;
674 
675 		/* Insert the link descriptor into the LD list */
676 		list_add_tail(&new->node, &first->tx_list);
677 
678 		/* update metadata */
679 		dst_avail -= len;
680 		src_avail -= len;
681 
682 fetch:
683 		/* fetch the next dst scatterlist entry */
684 		if (dst_avail == 0) {
685 
686 			/* no more entries: we're done */
687 			if (dst_nents == 0)
688 				break;
689 
690 			/* fetch the next entry: if there are no more: done */
691 			dst_sg = sg_next(dst_sg);
692 			if (dst_sg == NULL)
693 				break;
694 
695 			dst_nents--;
696 			dst_avail = sg_dma_len(dst_sg);
697 		}
698 
699 		/* fetch the next src scatterlist entry */
700 		if (src_avail == 0) {
701 
702 			/* no more entries: we're done */
703 			if (src_nents == 0)
704 				break;
705 
706 			/* fetch the next entry: if there are no more: done */
707 			src_sg = sg_next(src_sg);
708 			if (src_sg == NULL)
709 				break;
710 
711 			src_nents--;
712 			src_avail = sg_dma_len(src_sg);
713 		}
714 	}
715 
716 	new->async_tx.flags = flags; /* client is in control of this ack */
717 	new->async_tx.cookie = -EBUSY;
718 
719 	/* Set End-of-link to the last link descriptor of new list */
720 	set_ld_eol(chan, new);
721 
722 	return &first->async_tx;
723 
724 fail:
725 	if (!first)
726 		return NULL;
727 
728 	fsldma_free_desc_list_reverse(chan, &first->tx_list);
729 	return NULL;
730 }
731 
732 /**
733  * fsl_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
734  * @chan: DMA channel
735  * @sgl: scatterlist to transfer to/from
736  * @sg_len: number of entries in @sgl
737  * @direction: DMA direction
738  * @flags: DMAEngine flags
739  * @context: transaction context (ignored)
740  *
741  * Prepare a set of descriptors for a DMA_SLAVE transaction. Following the
742  * DMA_SLAVE API, this gets the device-specific information from the
743  * chan->private variable.
744  */
745 static struct dma_async_tx_descriptor *fsl_dma_prep_slave_sg(
746 	struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
747 	enum dma_transfer_direction direction, unsigned long flags,
748 	void *context)
749 {
750 	/*
751 	 * This operation is not supported on the Freescale DMA controller
752 	 *
753 	 * However, we need to provide the function pointer to allow the
754 	 * device_control() method to work.
755 	 */
756 	return NULL;
757 }
758 
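/*
 * Handle the DMAEngine device_control() commands supported by this driver:
 * DMA_TERMINATE_ALL halts the channel and frees all queued descriptors,
 * DMA_SLAVE_CONFIG programs the external request count from the burst
 * configuration, and FSLDMA_EXTERNAL_START toggles external start.
 */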
759 static int fsl_dma_device_control(struct dma_chan *dchan,
760 				  enum dma_ctrl_cmd cmd, unsigned long arg)
761 {
762 	struct dma_slave_config *config;
763 	struct fsldma_chan *chan;
764 	unsigned long flags;
765 	int size;
766 
767 	if (!dchan)
768 		return -EINVAL;
769 
770 	chan = to_fsl_chan(dchan);
771 
772 	switch (cmd) {
773 	case DMA_TERMINATE_ALL:
774 		spin_lock_irqsave(&chan->desc_lock, flags);
775 
776 		/* Halt the DMA engine */
777 		dma_halt(chan);
778 
779 		/* Remove and free all of the descriptors in the LD queue */
780 		fsldma_free_desc_list(chan, &chan->ld_pending);
781 		fsldma_free_desc_list(chan, &chan->ld_running);
782 		chan->idle = true;
783 
784 		spin_unlock_irqrestore(&chan->desc_lock, flags);
785 		return 0;
786 
787 	case DMA_SLAVE_CONFIG:
788 		config = (struct dma_slave_config *)arg;
789 
790 		/* make sure the channel supports setting burst size */
791 		if (!chan->set_request_count)
792 			return -ENXIO;
793 
794 		/* we set the controller burst size depending on direction */
795 		if (config->direction == DMA_MEM_TO_DEV)
796 			size = config->dst_addr_width * config->dst_maxburst;
797 		else
798 			size = config->src_addr_width * config->src_maxburst;
799 
800 		chan->set_request_count(chan, size);
801 		return 0;
802 
803 	case FSLDMA_EXTERNAL_START:
804 
805 		/* make sure the channel supports external start */
806 		if (!chan->toggle_ext_start)
807 			return -ENXIO;
808 
809 		chan->toggle_ext_start(chan, arg);
810 		return 0;
811 
812 	default:
813 		return -ENXIO;
814 	}
815 
816 	return 0;
817 }
818 
819 /**
820  * fsldma_cleanup_descriptor - cleanup and free a single link descriptor
821  * @chan: Freescale DMA channel
822  * @desc: descriptor to cleanup and free
823  *
824  * This function is used on a descriptor which has been executed by the DMA
825  * controller. It will run any callbacks, submit any dependencies, and then
826  * free the descriptor.
827  */
828 static void fsldma_cleanup_descriptor(struct fsldma_chan *chan,
829 				      struct fsl_desc_sw *desc)
830 {
831 	struct dma_async_tx_descriptor *txd = &desc->async_tx;
832 
833 	/* Run the link descriptor callback function */
834 	if (txd->callback) {
835 #ifdef FSL_DMA_LD_DEBUG
836 		chan_dbg(chan, "LD %p callback\n", desc);
837 #endif
838 		txd->callback(txd->callback_param);
839 	}
840 
841 	/* Run any dependencies */
842 	dma_run_dependencies(txd);
843 
844 	dma_descriptor_unmap(txd);
845 #ifdef FSL_DMA_LD_DEBUG
846 	chan_dbg(chan, "LD %p free\n", desc);
847 #endif
848 	dma_pool_free(chan->desc_pool, desc, txd->phys);
849 }
850 
851 /**
852  * fsl_chan_xfer_ld_queue - transfer any pending transactions
853  * @chan : Freescale DMA channel
854  *
855  * HARDWARE STATE: idle
856  * LOCKING: must hold chan->desc_lock
857  */
858 static void fsl_chan_xfer_ld_queue(struct fsldma_chan *chan)
859 {
860 	struct fsl_desc_sw *desc;
861 
862 	/*
863 	 * If the list of pending descriptors is empty, then we
864 	 * don't need to do any work at all
865 	 */
866 	if (list_empty(&chan->ld_pending)) {
867 		chan_dbg(chan, "no pending LDs\n");
868 		return;
869 	}
870 
871 	/*
872 	 * The DMA controller is not idle, which means that the interrupt
873 	 * handler will start any queued transactions when it runs after
874 	 * this transaction finishes
875 	 */
876 	if (!chan->idle) {
877 		chan_dbg(chan, "DMA controller still busy\n");
878 		return;
879 	}
880 
881 	/*
882 	 * If there are some link descriptors which have not been
883 	 * transferred, we need to start the controller
884 	 */
885 
886 	/*
887 	 * Move all elements from the queue of pending transactions
888 	 * onto the list of running transactions
889 	 */
890 	chan_dbg(chan, "idle, starting controller\n");
891 	desc = list_first_entry(&chan->ld_pending, struct fsl_desc_sw, node);
892 	list_splice_tail_init(&chan->ld_pending, &chan->ld_running);
893 
894 	/*
895 	 * The 85xx DMA controller doesn't clear the channel start bit
896 	 * automatically at the end of a transfer. Therefore we must clear
897 	 * it in software before starting the transfer.
898 	 */
899 	if ((chan->feature & FSL_DMA_IP_MASK) == FSL_DMA_IP_85XX) {
900 		u32 mode;
901 
902 		mode = DMA_IN(chan, &chan->regs->mr, 32);
903 		mode &= ~FSL_DMA_MR_CS;
904 		DMA_OUT(chan, &chan->regs->mr, mode, 32);
905 	}
906 
907 	/*
908 	 * Program the descriptor's address into the DMA controller,
909 	 * then start the DMA transaction
910 	 */
911 	set_cdar(chan, desc->async_tx.phys);
912 	get_cdar(chan);
913 
914 	dma_start(chan);
915 	chan->idle = false;
916 }
917 
918 /**
919  * fsl_dma_memcpy_issue_pending - Issue the DMA start command
920  * @chan : Freescale DMA channel
921  */
922 static void fsl_dma_memcpy_issue_pending(struct dma_chan *dchan)
923 {
924 	struct fsldma_chan *chan = to_fsl_chan(dchan);
925 	unsigned long flags;
926 
927 	spin_lock_irqsave(&chan->desc_lock, flags);
928 	fsl_chan_xfer_ld_queue(chan);
929 	spin_unlock_irqrestore(&chan->desc_lock, flags);
930 }
931 
932 /**
933  * fsl_tx_status - Determine the DMA status
934  * @chan : Freescale DMA channel
935  */
936 static enum dma_status fsl_tx_status(struct dma_chan *dchan,
937 					dma_cookie_t cookie,
938 					struct dma_tx_state *txstate)
939 {
940 	return dma_cookie_status(dchan, cookie, txstate);
941 }
942 
943 /*----------------------------------------------------------------------------*/
944 /* Interrupt Handling                                                         */
945 /*----------------------------------------------------------------------------*/
946 
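/*
 * Per-channel interrupt handler: acknowledge the status register, report
 * errors, and schedule the tasklet to clean up completed descriptors and
 * start any pending transactions.
 */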
947 static irqreturn_t fsldma_chan_irq(int irq, void *data)
948 {
949 	struct fsldma_chan *chan = data;
950 	u32 stat;
951 
952 	/* save and clear the status register */
953 	stat = get_sr(chan);
954 	set_sr(chan, stat);
955 	chan_dbg(chan, "irq: stat = 0x%x\n", stat);
956 
957 	/* check that this was really our device */
958 	stat &= ~(FSL_DMA_SR_CB | FSL_DMA_SR_CH);
959 	if (!stat)
960 		return IRQ_NONE;
961 
962 	if (stat & FSL_DMA_SR_TE)
963 		chan_err(chan, "Transfer Error!\n");
964 
965 	/*
966 	 * Programming Error
967 	 * The DMA_INTERRUPT async_tx is a NULL transfer, which will
968 	 * trigger a PE interrupt.
969 	 */
970 	if (stat & FSL_DMA_SR_PE) {
971 		chan_dbg(chan, "irq: Programming Error INT\n");
972 		stat &= ~FSL_DMA_SR_PE;
973 		if (get_bcr(chan) != 0)
974 			chan_err(chan, "Programming Error!\n");
975 	}
976 
977 	/*
978 	 * On the MPC8349, the EOCDI event requires updating the cookie
979 	 * and starting the next transfer if one exists.
980 	 */
981 	if (stat & FSL_DMA_SR_EOCDI) {
982 		chan_dbg(chan, "irq: End-of-Chain link INT\n");
983 		stat &= ~FSL_DMA_SR_EOCDI;
984 	}
985 
986 	/*
987 	 * If the current transfer is the end-of-transfer,
988 	 * we should clear the Channel Start bit to
989 	 * prepare for the next transfer.
990 	 */
991 	if (stat & FSL_DMA_SR_EOLNI) {
992 		chan_dbg(chan, "irq: End-of-link INT\n");
993 		stat &= ~FSL_DMA_SR_EOLNI;
994 	}
995 
996 	/* check that the DMA controller is really idle */
997 	if (!dma_is_idle(chan))
998 		chan_err(chan, "irq: controller not idle!\n");
999 
1000 	/* check that we handled all of the bits */
1001 	if (stat)
1002 		chan_err(chan, "irq: unhandled sr 0x%08x\n", stat);
1003 
1004 	/*
1005 	 * Schedule the tasklet to handle all cleanup of the current
1006 	 * transaction. It will start a new transaction if there is
1007 	 * one pending.
1008 	 */
1009 	tasklet_schedule(&chan->tasklet);
1010 	chan_dbg(chan, "irq: Exit\n");
1011 	return IRQ_HANDLED;
1012 }
1013 
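/*
 * Tasklet: complete the cookie of the last running descriptor, move the
 * running list aside, restart the hardware with any pending descriptors,
 * then run callbacks and free the completed descriptors outside the lock.
 */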
1014 static void dma_do_tasklet(unsigned long data)
1015 {
1016 	struct fsldma_chan *chan = (struct fsldma_chan *)data;
1017 	struct fsl_desc_sw *desc, *_desc;
1018 	LIST_HEAD(ld_cleanup);
1019 	unsigned long flags;
1020 
1021 	chan_dbg(chan, "tasklet entry\n");
1022 
1023 	spin_lock_irqsave(&chan->desc_lock, flags);
1024 
1025 	/* update the cookie if we have some descriptors to cleanup */
1026 	if (!list_empty(&chan->ld_running)) {
1027 		dma_cookie_t cookie;
1028 
1029 		desc = to_fsl_desc(chan->ld_running.prev);
1030 		cookie = desc->async_tx.cookie;
1031 		dma_cookie_complete(&desc->async_tx);
1032 
1033 		chan_dbg(chan, "completed_cookie=%d\n", cookie);
1034 	}
1035 
1036 	/*
1037 	 * move the descriptors to a temporary list so we can drop the lock
1038 	 * during the entire cleanup operation
1039 	 */
1040 	list_splice_tail_init(&chan->ld_running, &ld_cleanup);
1041 
1042 	/* the hardware is now idle and ready for more */
1043 	chan->idle = true;
1044 
1045 	/*
1046 	 * Start any pending transactions automatically
1047 	 *
1048 	 * In the ideal case, we keep the DMA controller busy while we go
1049 	 * ahead and free the descriptors below.
1050 	 */
1051 	fsl_chan_xfer_ld_queue(chan);
1052 	spin_unlock_irqrestore(&chan->desc_lock, flags);
1053 
1054 	/* Run the callback for each descriptor, in order */
1055 	list_for_each_entry_safe(desc, _desc, &ld_cleanup, node) {
1056 
1057 		/* Remove from the list of transactions */
1058 		list_del(&desc->node);
1059 
1060 		/* Run all cleanup for this descriptor */
1061 		fsldma_cleanup_descriptor(chan, desc);
1062 	}
1063 
1064 	chan_dbg(chan, "tasklet exit\n");
1065 }
1066 
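/*
 * Controller-level interrupt handler: read the global status register and
 * dispatch to the per-channel handler for every channel (one status byte
 * per channel) that has an interrupt pending.
 */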
1067 static irqreturn_t fsldma_ctrl_irq(int irq, void *data)
1068 {
1069 	struct fsldma_device *fdev = data;
1070 	struct fsldma_chan *chan;
1071 	unsigned int handled = 0;
1072 	u32 gsr, mask;
1073 	int i;
1074 
1075 	gsr = (fdev->feature & FSL_DMA_BIG_ENDIAN) ? in_be32(fdev->regs)
1076 						   : in_le32(fdev->regs);
1077 	mask = 0xff000000;
1078 	dev_dbg(fdev->dev, "IRQ: gsr 0x%.8x\n", gsr);
1079 
1080 	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
1081 		chan = fdev->chan[i];
1082 		if (!chan)
1083 			continue;
1084 
1085 		if (gsr & mask) {
1086 			dev_dbg(fdev->dev, "IRQ: chan %d\n", chan->id);
1087 			fsldma_chan_irq(irq, chan);
1088 			handled++;
1089 		}
1090 
1091 		gsr &= ~mask;
1092 		mask >>= 8;
1093 	}
1094 
1095 	return IRQ_RETVAL(handled);
1096 }
1097 
1098 static void fsldma_free_irqs(struct fsldma_device *fdev)
1099 {
1100 	struct fsldma_chan *chan;
1101 	int i;
1102 
1103 	if (fdev->irq != NO_IRQ) {
1104 		dev_dbg(fdev->dev, "free per-controller IRQ\n");
1105 		free_irq(fdev->irq, fdev);
1106 		return;
1107 	}
1108 
1109 	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
1110 		chan = fdev->chan[i];
1111 		if (chan && chan->irq != NO_IRQ) {
1112 			chan_dbg(chan, "free per-channel IRQ\n");
1113 			free_irq(chan->irq, chan);
1114 		}
1115 	}
1116 }
1117 
1118 static int fsldma_request_irqs(struct fsldma_device *fdev)
1119 {
1120 	struct fsldma_chan *chan;
1121 	int ret;
1122 	int i;
1123 
1124 	/* if we have a per-controller IRQ, use that */
1125 	if (fdev->irq != NO_IRQ) {
1126 		dev_dbg(fdev->dev, "request per-controller IRQ\n");
1127 		ret = request_irq(fdev->irq, fsldma_ctrl_irq, IRQF_SHARED,
1128 				  "fsldma-controller", fdev);
1129 		return ret;
1130 	}
1131 
1132 	/* no per-controller IRQ, use the per-channel IRQs */
1133 	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
1134 		chan = fdev->chan[i];
1135 		if (!chan)
1136 			continue;
1137 
1138 		if (chan->irq == NO_IRQ) {
1139 			chan_err(chan, "interrupts property missing in device tree\n");
1140 			ret = -ENODEV;
1141 			goto out_unwind;
1142 		}
1143 
1144 		chan_dbg(chan, "request per-channel IRQ\n");
1145 		ret = request_irq(chan->irq, fsldma_chan_irq, IRQF_SHARED,
1146 				  "fsldma-chan", chan);
1147 		if (ret) {
1148 			chan_err(chan, "unable to request per-channel IRQ\n");
1149 			goto out_unwind;
1150 		}
1151 	}
1152 
1153 	return 0;
1154 
1155 out_unwind:
1156 	for (/* none */; i >= 0; i--) {
1157 		chan = fdev->chan[i];
1158 		if (!chan)
1159 			continue;
1160 
1161 		if (chan->irq == NO_IRQ)
1162 			continue;
1163 
1164 		free_irq(chan->irq, chan);
1165 	}
1166 
1167 	return ret;
1168 }
1169 
1170 /*----------------------------------------------------------------------------*/
1171 /* OpenFirmware Subsystem                                                     */
1172 /*----------------------------------------------------------------------------*/
1173 
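/*
 * Probe a single DMA channel node: map its registers, derive the channel
 * id from the register offset, wire up the feature-specific callbacks,
 * and add the channel to the DMA device's channel list.
 */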
1174 static int fsl_dma_chan_probe(struct fsldma_device *fdev,
1175 	struct device_node *node, u32 feature, const char *compatible)
1176 {
1177 	struct fsldma_chan *chan;
1178 	struct resource res;
1179 	int err;
1180 
1181 	/* alloc channel */
1182 	chan = kzalloc(sizeof(*chan), GFP_KERNEL);
1183 	if (!chan) {
1184 		dev_err(fdev->dev, "no free memory for DMA channels!\n");
1185 		err = -ENOMEM;
1186 		goto out_return;
1187 	}
1188 
1189 	/* ioremap registers for use */
1190 	chan->regs = of_iomap(node, 0);
1191 	if (!chan->regs) {
1192 		dev_err(fdev->dev, "unable to ioremap registers\n");
1193 		err = -ENOMEM;
1194 		goto out_free_chan;
1195 	}
1196 
1197 	err = of_address_to_resource(node, 0, &res);
1198 	if (err) {
1199 		dev_err(fdev->dev, "unable to find 'reg' property\n");
1200 		goto out_iounmap_regs;
1201 	}
1202 
1203 	chan->feature = feature;
1204 	if (!fdev->feature)
1205 		fdev->feature = chan->feature;
1206 
1207 	/*
1208 	 * If the DMA device's feature is different than the feature
1209 	 * of its channels, report the bug
1210 	 */
1211 	WARN_ON(fdev->feature != chan->feature);
1212 
1213 	chan->dev = fdev->dev;
1214 	chan->id = (res.start & 0xfff) < 0x300 ?
1215 		   ((res.start - 0x100) & 0xfff) >> 7 :
1216 		   ((res.start - 0x200) & 0xfff) >> 7;
1217 	if (chan->id >= FSL_DMA_MAX_CHANS_PER_DEVICE) {
1218 		dev_err(fdev->dev, "too many channels for device\n");
1219 		err = -EINVAL;
1220 		goto out_iounmap_regs;
1221 	}
1222 
1223 	fdev->chan[chan->id] = chan;
1224 	tasklet_init(&chan->tasklet, dma_do_tasklet, (unsigned long)chan);
1225 	snprintf(chan->name, sizeof(chan->name), "chan%d", chan->id);
1226 
1227 	/* Initialize the channel */
1228 	dma_init(chan);
1229 
1230 	/* Clear cdar registers */
1231 	set_cdar(chan, 0);
1232 
1233 	switch (chan->feature & FSL_DMA_IP_MASK) {
1234 	case FSL_DMA_IP_85XX:
1235 		chan->toggle_ext_pause = fsl_chan_toggle_ext_pause;
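		/* fall through: 85xx channels also support the 83xx operations below */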
1236 	case FSL_DMA_IP_83XX:
1237 		chan->toggle_ext_start = fsl_chan_toggle_ext_start;
1238 		chan->set_src_loop_size = fsl_chan_set_src_loop_size;
1239 		chan->set_dst_loop_size = fsl_chan_set_dst_loop_size;
1240 		chan->set_request_count = fsl_chan_set_request_count;
1241 	}
1242 
1243 	spin_lock_init(&chan->desc_lock);
1244 	INIT_LIST_HEAD(&chan->ld_pending);
1245 	INIT_LIST_HEAD(&chan->ld_running);
1246 	chan->idle = true;
1247 
1248 	chan->common.device = &fdev->common;
1249 	dma_cookie_init(&chan->common);
1250 
1251 	/* find the IRQ line, if it exists in the device tree */
1252 	chan->irq = irq_of_parse_and_map(node, 0);
1253 
1254 	/* Add the channel to DMA device channel list */
1255 	list_add_tail(&chan->common.device_node, &fdev->common.channels);
1256 	fdev->common.chancnt++;
1257 
1258 	dev_info(fdev->dev, "#%d (%s), irq %d\n", chan->id, compatible,
1259 		 chan->irq != NO_IRQ ? chan->irq : fdev->irq);
1260 
1261 	return 0;
1262 
1263 out_iounmap_regs:
1264 	iounmap(chan->regs);
1265 out_free_chan:
1266 	kfree(chan);
1267 out_return:
1268 	return err;
1269 }
1270 
1271 static void fsl_dma_chan_remove(struct fsldma_chan *chan)
1272 {
1273 	irq_dispose_mapping(chan->irq);
1274 	list_del(&chan->common.device_node);
1275 	iounmap(chan->regs);
1276 	kfree(chan);
1277 }
1278 
1279 static int fsldma_of_probe(struct platform_device *op)
1280 {
1281 	struct fsldma_device *fdev;
1282 	struct device_node *child;
1283 	int err;
1284 
1285 	fdev = kzalloc(sizeof(*fdev), GFP_KERNEL);
1286 	if (!fdev) {
1287 		dev_err(&op->dev, "Not enough memory for 'priv'\n");
1288 		err = -ENOMEM;
1289 		goto out_return;
1290 	}
1291 
1292 	fdev->dev = &op->dev;
1293 	INIT_LIST_HEAD(&fdev->common.channels);
1294 
1295 	/* ioremap the registers for use */
1296 	fdev->regs = of_iomap(op->dev.of_node, 0);
1297 	if (!fdev->regs) {
1298 		dev_err(&op->dev, "unable to ioremap registers\n");
1299 		err = -ENOMEM;
1300 		goto out_free_fdev;
1301 	}
1302 
1303 	/* map the channel IRQ if it exists, but don't hook up the handler yet */
1304 	fdev->irq = irq_of_parse_and_map(op->dev.of_node, 0);
1305 
1306 	dma_cap_set(DMA_MEMCPY, fdev->common.cap_mask);
1307 	dma_cap_set(DMA_INTERRUPT, fdev->common.cap_mask);
1308 	dma_cap_set(DMA_SG, fdev->common.cap_mask);
1309 	dma_cap_set(DMA_SLAVE, fdev->common.cap_mask);
1310 	fdev->common.device_alloc_chan_resources = fsl_dma_alloc_chan_resources;
1311 	fdev->common.device_free_chan_resources = fsl_dma_free_chan_resources;
1312 	fdev->common.device_prep_dma_interrupt = fsl_dma_prep_interrupt;
1313 	fdev->common.device_prep_dma_memcpy = fsl_dma_prep_memcpy;
1314 	fdev->common.device_prep_dma_sg = fsl_dma_prep_sg;
1315 	fdev->common.device_tx_status = fsl_tx_status;
1316 	fdev->common.device_issue_pending = fsl_dma_memcpy_issue_pending;
1317 	fdev->common.device_prep_slave_sg = fsl_dma_prep_slave_sg;
1318 	fdev->common.device_control = fsl_dma_device_control;
1319 	fdev->common.dev = &op->dev;
1320 
1321 	dma_set_mask(&(op->dev), DMA_BIT_MASK(36));
1322 
1323 	platform_set_drvdata(op, fdev);
1324 
1325 	/*
1326 	 * We cannot use of_platform_bus_probe() because there is no
1327 	 * of_platform_bus_remove(). Instead, we manually instantiate every DMA
1328 	 * channel object.
1329 	 */
1330 	for_each_child_of_node(op->dev.of_node, child) {
1331 		if (of_device_is_compatible(child, "fsl,eloplus-dma-channel")) {
1332 			fsl_dma_chan_probe(fdev, child,
1333 				FSL_DMA_IP_85XX | FSL_DMA_BIG_ENDIAN,
1334 				"fsl,eloplus-dma-channel");
1335 		}
1336 
1337 		if (of_device_is_compatible(child, "fsl,elo-dma-channel")) {
1338 			fsl_dma_chan_probe(fdev, child,
1339 				FSL_DMA_IP_83XX | FSL_DMA_LITTLE_ENDIAN,
1340 				"fsl,elo-dma-channel");
1341 		}
1342 	}
1343 
1344 	/*
1345 	 * Hook up the IRQ handler(s)
1346 	 *
1347 	 * If we have a per-controller interrupt, we prefer that to the
1348 	 * per-channel interrupts to reduce the number of shared interrupt
1349 	 * handlers on the same IRQ line
1350 	 */
1351 	err = fsldma_request_irqs(fdev);
1352 	if (err) {
1353 		dev_err(fdev->dev, "unable to request IRQs\n");
1354 		goto out_free_fdev;
1355 	}
1356 
1357 	dma_async_device_register(&fdev->common);
1358 	return 0;
1359 
1360 out_free_fdev:
1361 	irq_dispose_mapping(fdev->irq);
1362 	kfree(fdev);
1363 out_return:
1364 	return err;
1365 }
1366 
1367 static int fsldma_of_remove(struct platform_device *op)
1368 {
1369 	struct fsldma_device *fdev;
1370 	unsigned int i;
1371 
1372 	fdev = platform_get_drvdata(op);
1373 	dma_async_device_unregister(&fdev->common);
1374 
1375 	fsldma_free_irqs(fdev);
1376 
1377 	for (i = 0; i < FSL_DMA_MAX_CHANS_PER_DEVICE; i++) {
1378 		if (fdev->chan[i])
1379 			fsl_dma_chan_remove(fdev->chan[i]);
1380 	}
1381 
1382 	iounmap(fdev->regs);
1383 	kfree(fdev);
1384 
1385 	return 0;
1386 }
1387 
1388 static const struct of_device_id fsldma_of_ids[] = {
1389 	{ .compatible = "fsl,elo3-dma", },
1390 	{ .compatible = "fsl,eloplus-dma", },
1391 	{ .compatible = "fsl,elo-dma", },
1392 	{}
1393 };
1394 
1395 static struct platform_driver fsldma_of_driver = {
1396 	.driver = {
1397 		.name = "fsl-elo-dma",
1398 		.owner = THIS_MODULE,
1399 		.of_match_table = fsldma_of_ids,
1400 	},
1401 	.probe = fsldma_of_probe,
1402 	.remove = fsldma_of_remove,
1403 };
1404 
1405 /*----------------------------------------------------------------------------*/
1406 /* Module Init / Exit                                                         */
1407 /*----------------------------------------------------------------------------*/
1408 
1409 static __init int fsldma_init(void)
1410 {
1411 	pr_info("Freescale Elo series DMA driver\n");
1412 	return platform_driver_register(&fsldma_of_driver);
1413 }
1414 
1415 static void __exit fsldma_exit(void)
1416 {
1417 	platform_driver_unregister(&fsldma_of_driver);
1418 }
1419 
1420 subsys_initcall(fsldma_init);
1421 module_exit(fsldma_exit);
1422 
1423 MODULE_DESCRIPTION("Freescale Elo series DMA driver");
1424 MODULE_LICENSE("GPL");
1425