xref: /linux/drivers/dma/ep93xx_dma.c (revision cff4fa8415a3224a5abdd2b1dd7f431e4ea49366)
/*
 * Driver for the Cirrus Logic EP93xx DMA Controller
 *
 * Copyright (C) 2011 Mika Westerberg
 *
 * DMA M2P implementation is based on the original
 * arch/arm/mach-ep93xx/dma-m2p.c which has the following copyrights:
 *
 *   Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
 *   Copyright (C) 2006 Applied Data Systems
 *   Copyright (C) 2009 Ryan Mallon <rmallon@gmail.com>
 *
 * This driver is based on the dw_dmac and amba-pl08x drivers.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/clk.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <mach/dma.h>

/* M2P registers */
#define M2P_CONTROL			0x0000
#define M2P_CONTROL_STALLINT		BIT(0)
#define M2P_CONTROL_NFBINT		BIT(1)
#define M2P_CONTROL_CH_ERROR_INT	BIT(3)
#define M2P_CONTROL_ENABLE		BIT(4)
#define M2P_CONTROL_ICE			BIT(6)

#define M2P_INTERRUPT			0x0004
#define M2P_INTERRUPT_STALL		BIT(0)
#define M2P_INTERRUPT_NFB		BIT(1)
#define M2P_INTERRUPT_ERROR		BIT(3)

#define M2P_PPALLOC			0x0008
#define M2P_STATUS			0x000c

#define M2P_MAXCNT0			0x0020
#define M2P_BASE0			0x0024
#define M2P_MAXCNT1			0x0030
#define M2P_BASE1			0x0034

#define M2P_STATE_IDLE			0
#define M2P_STATE_STALL			1
#define M2P_STATE_ON			2
#define M2P_STATE_NEXT			3

/* M2M registers */
#define M2M_CONTROL			0x0000
#define M2M_CONTROL_DONEINT		BIT(2)
#define M2M_CONTROL_ENABLE		BIT(3)
#define M2M_CONTROL_START		BIT(4)
#define M2M_CONTROL_DAH			BIT(11)
#define M2M_CONTROL_SAH			BIT(12)
#define M2M_CONTROL_PW_SHIFT		9
#define M2M_CONTROL_PW_8		(0 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_16		(1 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_32		(2 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_MASK		(3 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_TM_SHIFT		13
#define M2M_CONTROL_TM_TX		(1 << M2M_CONTROL_TM_SHIFT)
#define M2M_CONTROL_TM_RX		(2 << M2M_CONTROL_TM_SHIFT)
#define M2M_CONTROL_RSS_SHIFT		22
#define M2M_CONTROL_RSS_SSPRX		(1 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_RSS_SSPTX		(2 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_RSS_IDE		(3 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_NO_HDSK		BIT(24)
#define M2M_CONTROL_PWSC_SHIFT		25

#define M2M_INTERRUPT			0x0004
#define M2M_INTERRUPT_DONEINT		BIT(1)

#define M2M_BCR0			0x0010
#define M2M_BCR1			0x0014
#define M2M_SAR_BASE0			0x0018
#define M2M_SAR_BASE1			0x001c
#define M2M_DAR_BASE0			0x002c
#define M2M_DAR_BASE1			0x0030

#define DMA_MAX_CHAN_BYTES		0xffff
#define DMA_MAX_CHAN_DESCRIPTORS	32

struct ep93xx_dma_engine;

/**
 * struct ep93xx_dma_desc - EP93xx specific transaction descriptor
 * @src_addr: source address of the transaction
 * @dst_addr: destination address of the transaction
 * @size: size of the transaction (in bytes)
 * @complete: this descriptor is completed
 * @txd: dmaengine API descriptor
 * @tx_list: list of linked descriptors
 * @node: link used for putting this into a channel queue
 */
struct ep93xx_dma_desc {
	u32				src_addr;
	u32				dst_addr;
	size_t				size;
	bool				complete;
	struct dma_async_tx_descriptor	txd;
	struct list_head		tx_list;
	struct list_head		node;
};

/**
 * struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel
 * @chan: dmaengine API channel
 * @edma: pointer to the engine device
 * @regs: memory mapped registers
 * @irq: interrupt number of the channel
 * @clk: clock used by this channel
 * @tasklet: channel specific tasklet used for callbacks
 * @lock: lock protecting the fields following
 * @flags: flags for the channel
 * @buffer: which buffer to use next (0/1)
 * @last_completed: last completed cookie value
 * @active: flattened chain of descriptors currently being processed
 * @queue: pending descriptors which are handled next
 * @free_list: list of free descriptors which can be used
 * @runtime_addr: physical address currently used as dest/src (M2M only). This
 *                is set via %DMA_SLAVE_CONFIG before the slave operation is
 *                prepared
 * @runtime_ctrl: M2M runtime values for the control register.
 *
 * As the EP93xx DMA controller doesn't support real chained DMA descriptors,
 * we use a slightly different scheme here: @active points to the head of a
 * flattened DMA descriptor chain.
 *
 * @queue holds pending transactions. These are linked through the first
 * descriptor in the chain. When a descriptor is moved to the @active queue,
 * the first and chained descriptors are flattened into a single list.
 *
 * @chan.private holds a pointer to &struct ep93xx_dma_data which contains
 * necessary channel configuration information. For memcpy channels this must
 * be %NULL.
 */
struct ep93xx_dma_chan {
	struct dma_chan			chan;
	const struct ep93xx_dma_engine	*edma;
	void __iomem			*regs;
	int				irq;
	struct clk			*clk;
	struct tasklet_struct		tasklet;
	/* protects the fields following */
	spinlock_t			lock;
	unsigned long			flags;
/* Channel is configured for cyclic transfers */
#define EP93XX_DMA_IS_CYCLIC		0

	int				buffer;
	dma_cookie_t			last_completed;
	struct list_head		active;
	struct list_head		queue;
	struct list_head		free_list;
	u32				runtime_addr;
	u32				runtime_ctrl;
};

/**
 * struct ep93xx_dma_engine - the EP93xx DMA engine instance
 * @dma_dev: holds the dmaengine device
 * @m2m: is this an M2M or M2P device
 * @hw_setup: method which sets the channel up for operation
 * @hw_shutdown: shuts the channel down and flushes whatever is left
 * @hw_submit: pushes active descriptor(s) to the hardware
 * @hw_interrupt: handle the interrupt
 * @num_channels: number of channels for this instance
 * @channels: array of channels
 *
 * There is one instance of this struct for the M2P channels and one for the
 * M2M channels. hw_xxx() methods are used to perform operations which are
 * different on M2M and M2P channels. These methods are called with channel
 * lock held and interrupts disabled so they cannot sleep.
 */
struct ep93xx_dma_engine {
	struct dma_device	dma_dev;
	bool			m2m;
	int			(*hw_setup)(struct ep93xx_dma_chan *);
	void			(*hw_shutdown)(struct ep93xx_dma_chan *);
	void			(*hw_submit)(struct ep93xx_dma_chan *);
	int			(*hw_interrupt)(struct ep93xx_dma_chan *);
#define INTERRUPT_UNKNOWN	0
#define INTERRUPT_DONE		1
#define INTERRUPT_NEXT_BUFFER	2

	size_t			num_channels;
	struct ep93xx_dma_chan	channels[];
};

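/*
 * Small conversion helpers: chan2dev() returns the struct device used for
 * diagnostics, and to_ep93xx_dma_chan() maps a dmaengine channel to the
 * driver-private channel structure.
 */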
static inline struct device *chan2dev(struct ep93xx_dma_chan *edmac)
{
	return &edmac->chan.dev->device;
}

static struct ep93xx_dma_chan *to_ep93xx_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct ep93xx_dma_chan, chan);
}

/**
 * ep93xx_dma_set_active - set new active descriptor chain
 * @edmac: channel
 * @desc: head of the new active descriptor chain
 *
 * Sets @desc to be the head of the new active descriptor chain. This is the
 * chain which is processed next. The active list must be empty before calling
 * this function.
 *
 * Called with @edmac->lock held and interrupts disabled.
 */
static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac,
				  struct ep93xx_dma_desc *desc)
{
	BUG_ON(!list_empty(&edmac->active));

	list_add_tail(&desc->node, &edmac->active);

	/* Flatten the @desc->tx_list chain into @edmac->active list */
	while (!list_empty(&desc->tx_list)) {
		struct ep93xx_dma_desc *d = list_first_entry(&desc->tx_list,
			struct ep93xx_dma_desc, node);

		/*
		 * We copy the callback parameters from the first descriptor
		 * to all the chained descriptors. This way we can call the
		 * callback without having to find out the first descriptor in
		 * the chain. Useful for cyclic transfers.
		 */
		d->txd.callback = desc->txd.callback;
		d->txd.callback_param = desc->txd.callback_param;

		list_move_tail(&d->node, &edmac->active);
	}
}

/* Called with @edmac->lock held and interrupts disabled */
static struct ep93xx_dma_desc *
ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
{
	return list_first_entry(&edmac->active, struct ep93xx_dma_desc, node);
}

/**
 * ep93xx_dma_advance_active - advances to the next active descriptor
 * @edmac: channel
 *
 * Advances the active descriptor to the next one in @edmac->active and
 * returns %true if we still have descriptors in the chain to process.
 * Otherwise returns %false.
 *
 * When the channel is in cyclic mode, this always returns %true.
 *
 * Called with @edmac->lock held and interrupts disabled.
 */
static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac)
{
	list_rotate_left(&edmac->active);

	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
		return true;

	/*
	 * If txd.cookie is set it means that we are back in the first
	 * descriptor in the chain and hence done with it.
	 */
	return !ep93xx_dma_get_active(edmac)->txd.cookie;
}

/*
 * M2P DMA implementation
 */

static void m2p_set_control(struct ep93xx_dma_chan *edmac, u32 control)
{
	writel(control, edmac->regs + M2P_CONTROL);
	/*
	 * EP93xx User's Guide states that we must perform a dummy read after
	 * a write to the control register.
	 */
	readl(edmac->regs + M2P_CONTROL);
}

static int m2p_hw_setup(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control;

	writel(data->port & 0xf, edmac->regs + M2P_PPALLOC);

	control = M2P_CONTROL_CH_ERROR_INT | M2P_CONTROL_ICE
		| M2P_CONTROL_ENABLE;
	m2p_set_control(edmac, control);

	return 0;
}

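/* The channel state machine (M2P_STATE_*) is reported in bits 5:4 of M2P_STATUS */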
static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac)
{
	return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
}

static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
{
	u32 control;

	control = readl(edmac->regs + M2P_CONTROL);
	control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
	m2p_set_control(edmac, control);

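	/* Wait for the channel to leave the ON and NEXT states */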
	while (m2p_channel_state(edmac) >= M2P_STATE_ON)
		cpu_relax();

	m2p_set_control(edmac, 0);

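	/* After disabling the channel, wait for a possible stall condition to clear */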
	while (m2p_channel_state(edmac) == M2P_STATE_STALL)
		cpu_relax();
}

static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);
	u32 bus_addr;

	if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_TO_DEVICE)
		bus_addr = desc->src_addr;
	else
		bus_addr = desc->dst_addr;

	if (edmac->buffer == 0) {
		writel(desc->size, edmac->regs + M2P_MAXCNT0);
		writel(bus_addr, edmac->regs + M2P_BASE0);
	} else {
		writel(desc->size, edmac->regs + M2P_MAXCNT1);
		writel(bus_addr, edmac->regs + M2P_BASE1);
	}

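	/* Toggle to the other hardware buffer for the next descriptor */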
	edmac->buffer ^= 1;
}

static void m2p_hw_submit(struct ep93xx_dma_chan *edmac)
{
	u32 control = readl(edmac->regs + M2P_CONTROL);

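	/* Program the first buffer; the STALL interrupt then signals chain completion */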
	m2p_fill_desc(edmac);
	control |= M2P_CONTROL_STALLINT;

	if (ep93xx_dma_advance_active(edmac)) {
		m2p_fill_desc(edmac);
		control |= M2P_CONTROL_NFBINT;
	}

	m2p_set_control(edmac, control);
}

static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
	u32 irq_status = readl(edmac->regs + M2P_INTERRUPT);
	u32 control;

	if (irq_status & M2P_INTERRUPT_ERROR) {
		struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);

		/* Clear the error interrupt */
		writel(1, edmac->regs + M2P_INTERRUPT);

		/*
		 * It seems that there is no easy way of reporting errors back
		 * to the client so we just report the error here and continue
		 * as usual.
		 *
		 * Revisit this when there is a mechanism to report back the
		 * errors.
		 */
		dev_err(chan2dev(edmac),
			"DMA transfer failed! Details:\n"
			"\tcookie	: %d\n"
			"\tsrc_addr	: 0x%08x\n"
			"\tdst_addr	: 0x%08x\n"
			"\tsize		: %zu\n",
			desc->txd.cookie, desc->src_addr, desc->dst_addr,
			desc->size);
	}

	switch (irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)) {
	case M2P_INTERRUPT_STALL:
		/* Disable interrupts */
		control = readl(edmac->regs + M2P_CONTROL);
		control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
		m2p_set_control(edmac, control);

		return INTERRUPT_DONE;

	case M2P_INTERRUPT_NFB:
		if (ep93xx_dma_advance_active(edmac))
			m2p_fill_desc(edmac);

		return INTERRUPT_NEXT_BUFFER;
	}

	return INTERRUPT_UNKNOWN;
}

/*
 * M2M DMA implementation
 *
 * For the M2M transfers we don't use NFB at all. This is because it simply
 * doesn't work well with memcpy transfers. When both buffers are submitted,
 * it is extremely unlikely that an NFB interrupt is raised; instead, a DONE
 * interrupt is reported once both buffers have already been transferred,
 * which means that we weren't able to update the next buffer.
 *
 * So for now we "simulate" NFB by just submitting buffer after buffer
 * without double buffering.
 */

static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
{
	const struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control = 0;

	if (!data) {
		/* This is a memcpy channel, nothing to configure */
		writel(control, edmac->regs + M2M_CONTROL);
		return 0;
	}

	switch (data->port) {
	case EP93XX_DMA_SSP:
		/*
		 * This was found via experimenting - anything less than 5
		 * causes the channel to perform only a partial transfer which
		 * leads to problems since we then don't get the DONE
		 * interrupt.
		 */
		control = (5 << M2M_CONTROL_PWSC_SHIFT);
		control |= M2M_CONTROL_NO_HDSK;

		if (data->direction == DMA_TO_DEVICE) {
			control |= M2M_CONTROL_DAH;
			control |= M2M_CONTROL_TM_TX;
			control |= M2M_CONTROL_RSS_SSPTX;
		} else {
			control |= M2M_CONTROL_SAH;
			control |= M2M_CONTROL_TM_RX;
			control |= M2M_CONTROL_RSS_SSPRX;
		}
		break;

	case EP93XX_DMA_IDE:
		/*
		 * This IDE part is totally untested. Values below are taken
		 * from the EP93xx User's Guide and might not be correct.
		 */
		control |= M2M_CONTROL_NO_HDSK;
		control |= M2M_CONTROL_RSS_IDE;
		control |= M2M_CONTROL_PW_16;

		if (data->direction == DMA_TO_DEVICE) {
			/* Worst case from the UG */
			control |= (3 << M2M_CONTROL_PWSC_SHIFT);
			control |= M2M_CONTROL_DAH;
			control |= M2M_CONTROL_TM_TX;
		} else {
			control |= (2 << M2M_CONTROL_PWSC_SHIFT);
			control |= M2M_CONTROL_SAH;
			control |= M2M_CONTROL_TM_RX;
		}
		break;

	default:
		return -EINVAL;
	}

	writel(control, edmac->regs + M2M_CONTROL);
	return 0;
}

static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac)
{
	/* Just disable the channel */
	writel(0, edmac->regs + M2M_CONTROL);
}

static void m2m_fill_desc(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);

	if (edmac->buffer == 0) {
		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0);
		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE0);
		writel(desc->size, edmac->regs + M2M_BCR0);
	} else {
		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE1);
		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE1);
		writel(desc->size, edmac->regs + M2M_BCR1);
	}

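	/* Toggle to the other hardware buffer for the next descriptor */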
	edmac->buffer ^= 1;
}

static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control = readl(edmac->regs + M2M_CONTROL);

	/*
	 * Since we allow clients to configure PW (peripheral width) we always
	 * clear the PW bits here and then set them according to what is given
	 * in the runtime configuration.
	 */
	control &= ~M2M_CONTROL_PW_MASK;
	control |= edmac->runtime_ctrl;

	m2m_fill_desc(edmac);
	control |= M2M_CONTROL_DONEINT;

	/*
	 * Now we can finally enable the channel. For M2M channel this must be
	 * done _after_ the BCRx registers are programmed.
	 */
	control |= M2M_CONTROL_ENABLE;
	writel(control, edmac->regs + M2M_CONTROL);

	if (!data) {
		/*
		 * For memcpy channels the software trigger must be asserted
		 * in order to start the memcpy operation.
		 */
		control |= M2M_CONTROL_START;
		writel(control, edmac->regs + M2M_CONTROL);
	}
}

static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
	u32 control;

	if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_DONEINT))
		return INTERRUPT_UNKNOWN;

	/* Clear the DONE bit */
	writel(0, edmac->regs + M2M_INTERRUPT);

	/* Disable interrupts and the channel */
	control = readl(edmac->regs + M2M_CONTROL);
	control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_ENABLE);
	writel(control, edmac->regs + M2M_CONTROL);

	/*
	 * Since we only get the DONE interrupt, we have to find out ourselves
	 * whether there is still something to process. So we try to advance
	 * the chain and see whether it succeeds.
	 */
	if (ep93xx_dma_advance_active(edmac)) {
		edmac->edma->hw_submit(edmac);
		return INTERRUPT_NEXT_BUFFER;
	}

	return INTERRUPT_DONE;
}

/*
 * DMA engine API implementation
 */

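/* Take a free, already acked descriptor from the channel's free list and reset it */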
static struct ep93xx_dma_desc *
ep93xx_dma_desc_get(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc, *_desc;
	struct ep93xx_dma_desc *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	list_for_each_entry_safe(desc, _desc, &edmac->free_list, node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del_init(&desc->node);

			/* Re-initialize the descriptor */
			desc->src_addr = 0;
			desc->dst_addr = 0;
			desc->size = 0;
			desc->complete = false;
			desc->txd.cookie = 0;
			desc->txd.callback = NULL;
			desc->txd.callback_param = NULL;

			ret = desc;
			break;
		}
	}
	spin_unlock_irqrestore(&edmac->lock, flags);
	return ret;
}

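/* Return a descriptor, including any chained descriptors, to the free list */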
static void ep93xx_dma_desc_put(struct ep93xx_dma_chan *edmac,
				struct ep93xx_dma_desc *desc)
{
	if (desc) {
		unsigned long flags;

		spin_lock_irqsave(&edmac->lock, flags);
		list_splice_init(&desc->tx_list, &edmac->free_list);
		list_add(&desc->node, &edmac->free_list);
		spin_unlock_irqrestore(&edmac->lock, flags);
	}
}

/**
 * ep93xx_dma_advance_work - start processing the next pending transaction
 * @edmac: channel
 *
 * If we have pending transactions queued and we are currently idling, this
 * function takes the next queued transaction from the @edmac->queue and
 * pushes it to the hardware for execution.
 */
static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *new;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	if (!list_empty(&edmac->active) || list_empty(&edmac->queue)) {
		spin_unlock_irqrestore(&edmac->lock, flags);
		return;
	}

	/* Take the next descriptor from the pending queue */
	new = list_first_entry(&edmac->queue, struct ep93xx_dma_desc, node);
	list_del_init(&new->node);

	ep93xx_dma_set_active(edmac, new);

	/* Push it to the hardware */
	edmac->edma->hw_submit(edmac);
	spin_unlock_irqrestore(&edmac->lock, flags);
}

static void ep93xx_dma_unmap_buffers(struct ep93xx_dma_desc *desc)
{
	struct device *dev = desc->txd.chan->device->dev;

	if (!(desc->txd.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
		if (desc->txd.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
			dma_unmap_single(dev, desc->src_addr, desc->size,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, desc->src_addr, desc->size,
				       DMA_TO_DEVICE);
	}
	if (!(desc->txd.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
		if (desc->txd.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
			dma_unmap_single(dev, desc->dst_addr, desc->size,
					 DMA_FROM_DEVICE);
		else
			dma_unmap_page(dev, desc->dst_addr, desc->size,
				       DMA_FROM_DEVICE);
	}
}

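/*
 * Tasklet doing the actual completion work: it releases the completed
 * descriptor chain back to the free list, starts the next queued
 * transaction and invokes the client callback with the channel lock
 * released.
 */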
static void ep93xx_dma_tasklet(unsigned long data)
{
	struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;
	struct ep93xx_dma_desc *desc, *d;
	dma_async_tx_callback callback;
	void *callback_param;
	LIST_HEAD(list);

	spin_lock_irq(&edmac->lock);
	desc = ep93xx_dma_get_active(edmac);
	if (desc->complete) {
		edmac->last_completed = desc->txd.cookie;
		list_splice_init(&edmac->active, &list);
	}
	spin_unlock_irq(&edmac->lock);

	/* Pick up the next descriptor from the queue */
	ep93xx_dma_advance_work(edmac);

	callback = desc->txd.callback;
	callback_param = desc->txd.callback_param;

	/* Now we can release all the chained descriptors */
	list_for_each_entry_safe(desc, d, &list, node) {
		/*
		 * For the memcpy channels the API requires us to unmap the
		 * buffers unless requested otherwise.
		 */
		if (!edmac->chan.private)
			ep93xx_dma_unmap_buffers(desc);

		ep93xx_dma_desc_put(edmac, desc);
	}

	if (callback)
		callback(callback_param);
}

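/* Per-channel interrupt handler: acknowledges the hardware event and defers completion handling to the tasklet */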
static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
{
	struct ep93xx_dma_chan *edmac = dev_id;
	irqreturn_t ret = IRQ_HANDLED;

	spin_lock(&edmac->lock);

	switch (edmac->edma->hw_interrupt(edmac)) {
	case INTERRUPT_DONE:
		ep93xx_dma_get_active(edmac)->complete = true;
		tasklet_schedule(&edmac->tasklet);
		break;

	case INTERRUPT_NEXT_BUFFER:
		if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
			tasklet_schedule(&edmac->tasklet);
		break;

	default:
		dev_warn(chan2dev(edmac), "unknown interrupt!\n");
		ret = IRQ_NONE;
		break;
	}

	spin_unlock(&edmac->lock);
	return ret;
}

/**
 * ep93xx_dma_tx_submit - set the prepared descriptor(s) to be executed
 * @tx: descriptor to be executed
 *
 * Function will execute the given descriptor on the hardware or, if the
 * hardware is busy, queue the descriptor to be executed later on. Returns
 * a cookie which can be used to poll the status of the descriptor.
 */
static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(tx->chan);
	struct ep93xx_dma_desc *desc;
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);

	cookie = edmac->chan.cookie;

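	/* Advance the cookie, wrapping past the negative values reserved for errors */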
	if (++cookie < 0)
		cookie = 1;

	desc = container_of(tx, struct ep93xx_dma_desc, txd);

	edmac->chan.cookie = cookie;
	desc->txd.cookie = cookie;

	/*
	 * If nothing is currently processed, we push this descriptor
	 * directly to the hardware. Otherwise we put the descriptor
	 * into the pending queue.
	 */
	if (list_empty(&edmac->active)) {
		ep93xx_dma_set_active(edmac, desc);
		edmac->edma->hw_submit(edmac);
	} else {
		list_add_tail(&desc->node, &edmac->queue);
	}

	spin_unlock_irqrestore(&edmac->lock, flags);
	return cookie;
}

/**
 * ep93xx_dma_alloc_chan_resources - allocate resources for the channel
 * @chan: channel to allocate resources
 *
 * Function allocates the necessary resources for the given DMA channel and
 * returns the number of allocated descriptors for the channel. Negative
 * errno is returned in case of failure.
 */
static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_data *data = chan->private;
	const char *name = dma_chan_name(chan);
	int ret, i;

	/* Sanity check the channel parameters */
	if (!edmac->edma->m2m) {
		if (!data)
			return -EINVAL;
		if (data->port < EP93XX_DMA_I2S1 ||
		    data->port > EP93XX_DMA_IRDA)
			return -EINVAL;
		if (data->direction != ep93xx_dma_chan_direction(chan))
			return -EINVAL;
	} else {
		if (data) {
			switch (data->port) {
			case EP93XX_DMA_SSP:
			case EP93XX_DMA_IDE:
				if (data->direction != DMA_TO_DEVICE &&
				    data->direction != DMA_FROM_DEVICE)
					return -EINVAL;
				break;
			default:
				return -EINVAL;
			}
		}
	}

	if (data && data->name)
		name = data->name;

	ret = clk_enable(edmac->clk);
	if (ret)
		return ret;

	ret = request_irq(edmac->irq, ep93xx_dma_interrupt, 0, name, edmac);
	if (ret)
		goto fail_clk_disable;

	spin_lock_irq(&edmac->lock);
	edmac->last_completed = 1;
	edmac->chan.cookie = 1;
	ret = edmac->edma->hw_setup(edmac);
	spin_unlock_irq(&edmac->lock);

	if (ret)
		goto fail_free_irq;

	for (i = 0; i < DMA_MAX_CHAN_DESCRIPTORS; i++) {
		struct ep93xx_dma_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc) {
			dev_warn(chan2dev(edmac), "not enough descriptors\n");
			break;
		}

		INIT_LIST_HEAD(&desc->tx_list);

		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.tx_submit = ep93xx_dma_tx_submit;

		ep93xx_dma_desc_put(edmac, desc);
	}

	return i;

fail_free_irq:
	free_irq(edmac->irq, edmac);
fail_clk_disable:
	clk_disable(edmac->clk);

	return ret;
}

/**
 * ep93xx_dma_free_chan_resources - release resources for the channel
 * @chan: channel
 *
 * Function releases all the resources allocated for the given channel.
 * The channel must be idle when this is called.
 */
static void ep93xx_dma_free_chan_resources(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *d;
	unsigned long flags;
	LIST_HEAD(list);

	BUG_ON(!list_empty(&edmac->active));
	BUG_ON(!list_empty(&edmac->queue));

	spin_lock_irqsave(&edmac->lock, flags);
	edmac->edma->hw_shutdown(edmac);
	edmac->runtime_addr = 0;
	edmac->runtime_ctrl = 0;
	edmac->buffer = 0;
	list_splice_init(&edmac->free_list, &list);
	spin_unlock_irqrestore(&edmac->lock, flags);

	list_for_each_entry_safe(desc, d, &list, node)
		kfree(desc);

	clk_disable(edmac->clk);
	free_irq(edmac->irq, edmac);
}

/**
 * ep93xx_dma_prep_dma_memcpy - prepare a memcpy DMA operation
 * @chan: channel
 * @dest: destination bus address
 * @src: source bus address
 * @len: size of the transaction
 * @flags: flags for the descriptor
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
			   dma_addr_t src, size_t len, unsigned long flags)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	size_t bytes, offset;

	first = NULL;
	for (offset = 0; offset < len; offset += bytes) {
		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		bytes = min_t(size_t, len - offset, DMA_MAX_CHAN_BYTES);

		desc->src_addr = src + offset;
		desc->dst_addr = dest + offset;
		desc->size = bytes;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;
	first->txd.flags = flags;

	return &first->txd;
fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}

/**
 * ep93xx_dma_prep_slave_sg - prepare a slave DMA operation
 * @chan: channel
 * @sgl: list of buffers to transfer
 * @sg_len: number of entries in @sgl
 * @dir: direction of the DMA transfer
 * @flags: flags for the descriptor
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
			 unsigned int sg_len, enum dma_data_direction dir,
			 unsigned long flags)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	struct scatterlist *sg;
	int i;

	if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
		dev_warn(chan2dev(edmac),
			 "channel was configured with different direction\n");
		return NULL;
	}

	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
		dev_warn(chan2dev(edmac),
			 "channel is already used for cyclic transfers\n");
		return NULL;
	}

	first = NULL;
	for_each_sg(sgl, sg, sg_len, i) {
		size_t len = sg_dma_len(sg);

		if (len > DMA_MAX_CHAN_BYTES) {
			dev_warn(chan2dev(edmac), "too big transfer size %zu\n",
				 len);
			goto fail;
		}

		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		if (dir == DMA_TO_DEVICE) {
			desc->src_addr = sg_dma_address(sg);
			desc->dst_addr = edmac->runtime_addr;
		} else {
			desc->src_addr = edmac->runtime_addr;
			desc->dst_addr = sg_dma_address(sg);
		}
		desc->size = len;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;
	first->txd.flags = flags;

	return &first->txd;

fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}

/**
 * ep93xx_dma_prep_dma_cyclic - prepare a cyclic DMA operation
 * @chan: channel
 * @dma_addr: DMA mapped address of the buffer
 * @buf_len: length of the buffer (in bytes)
 * @period_len: length of a single period
 * @dir: direction of the operation
 *
 * Prepares a descriptor for cyclic DMA operation. This means that once the
 * descriptor is submitted, we will be submitting @period_len sized buffers
 * and calling the callback once a period has elapsed. The transfer
 * terminates only when the client calls dmaengine_terminate_all() for this
 * channel.
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
			   size_t buf_len, size_t period_len,
			   enum dma_data_direction dir)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	size_t offset = 0;

	if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
		dev_warn(chan2dev(edmac),
			 "channel was configured with different direction\n");
		return NULL;
	}

	if (test_and_set_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
		dev_warn(chan2dev(edmac),
			 "channel is already used for cyclic transfers\n");
		return NULL;
	}

	if (period_len > DMA_MAX_CHAN_BYTES) {
		dev_warn(chan2dev(edmac), "too big period length %zu\n",
			 period_len);
		return NULL;
	}

	/* Split the buffer into period size chunks */
	first = NULL;
	for (offset = 0; offset < buf_len; offset += period_len) {
		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		if (dir == DMA_TO_DEVICE) {
			desc->src_addr = dma_addr + offset;
			desc->dst_addr = edmac->runtime_addr;
		} else {
			desc->src_addr = edmac->runtime_addr;
			desc->dst_addr = dma_addr + offset;
		}

		desc->size = period_len;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;

	return &first->txd;

fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}

/**
 * ep93xx_dma_terminate_all - terminate all transactions
 * @edmac: channel
 *
 * Stops all DMA transactions. All descriptors are put back to the
 * @edmac->free_list and callbacks are _not_ called.
 */
static int ep93xx_dma_terminate_all(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc, *_d;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&edmac->lock, flags);
	/* First we disable and flush the DMA channel */
	edmac->edma->hw_shutdown(edmac);
	clear_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags);
	list_splice_init(&edmac->active, &list);
	list_splice_init(&edmac->queue, &list);
	/*
	 * We then re-enable the channel. This way we can continue submitting
	 * the descriptors by just calling ->hw_submit() again.
	 */
	edmac->edma->hw_setup(edmac);
	spin_unlock_irqrestore(&edmac->lock, flags);

	list_for_each_entry_safe(desc, _d, &list, node)
		ep93xx_dma_desc_put(edmac, desc);

	return 0;
}

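/*
 * Stores the runtime slave address and bus width for M2M channels. The
 * address is used when slave descriptors are prepared and the control bits
 * are applied in m2m_hw_submit().
 */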
static int ep93xx_dma_slave_config(struct ep93xx_dma_chan *edmac,
				   struct dma_slave_config *config)
{
	enum dma_slave_buswidth width;
	unsigned long flags;
	u32 addr, ctrl;

	if (!edmac->edma->m2m)
		return -EINVAL;

	switch (config->direction) {
	case DMA_FROM_DEVICE:
		width = config->src_addr_width;
		addr = config->src_addr;
		break;

	case DMA_TO_DEVICE:
		width = config->dst_addr_width;
		addr = config->dst_addr;
		break;

	default:
		return -EINVAL;
	}

	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		ctrl = 0;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		ctrl = M2M_CONTROL_PW_16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		ctrl = M2M_CONTROL_PW_32;
		break;
	default:
		return -EINVAL;
	}

	spin_lock_irqsave(&edmac->lock, flags);
	edmac->runtime_addr = addr;
	edmac->runtime_ctrl = ctrl;
	spin_unlock_irqrestore(&edmac->lock, flags);

	return 0;
}

/**
 * ep93xx_dma_control - manipulate all pending operations on a channel
 * @chan: channel
 * @cmd: control command to perform
 * @arg: optional argument
 *
 * Controls the channel. Function returns %0 in case of success or negative
 * error in case of failure.
 */
static int ep93xx_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			      unsigned long arg)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct dma_slave_config *config;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		return ep93xx_dma_terminate_all(edmac);

	case DMA_SLAVE_CONFIG:
		config = (struct dma_slave_config *)arg;
		return ep93xx_dma_slave_config(edmac, config);

	default:
		break;
	}

	return -ENOSYS;
}

/**
 * ep93xx_dma_tx_status - check if a transaction is completed
 * @chan: channel
 * @cookie: transaction specific cookie
 * @state: state of the transaction is stored here if given
 *
 * This function can be used to query state of a given transaction.
 */
static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *state)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	dma_cookie_t last_used, last_completed;
	enum dma_status ret;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	last_used = chan->cookie;
	last_completed = edmac->last_completed;
	spin_unlock_irqrestore(&edmac->lock, flags);

	ret = dma_async_is_complete(cookie, last_completed, last_used);
	dma_set_tx_state(state, last_completed, last_used, 0);

	return ret;
}

/**
 * ep93xx_dma_issue_pending - push pending transactions to the hardware
 * @chan: channel
 *
 * When this function is called, all pending transactions are pushed to the
 * hardware and executed.
 */
static void ep93xx_dma_issue_pending(struct dma_chan *chan)
{
	ep93xx_dma_advance_work(to_ep93xx_dma_chan(chan));
}

static int __init ep93xx_dma_probe(struct platform_device *pdev)
{
	struct ep93xx_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct ep93xx_dma_engine *edma;
	struct dma_device *dma_dev;
	size_t edma_size;
	int ret, i;

	edma_size = pdata->num_channels * sizeof(struct ep93xx_dma_chan);
	edma = kzalloc(sizeof(*edma) + edma_size, GFP_KERNEL);
	if (!edma)
		return -ENOMEM;

	dma_dev = &edma->dma_dev;
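	/*
	 * driver_data from the id table below selects between the M2M (1)
	 * and M2P (0) flavors of the controller.
	 */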
	edma->m2m = platform_get_device_id(pdev)->driver_data;
	edma->num_channels = pdata->num_channels;

	INIT_LIST_HEAD(&dma_dev->channels);
	for (i = 0; i < pdata->num_channels; i++) {
		const struct ep93xx_dma_chan_data *cdata = &pdata->channels[i];
		struct ep93xx_dma_chan *edmac = &edma->channels[i];

		edmac->chan.device = dma_dev;
		edmac->regs = cdata->base;
		edmac->irq = cdata->irq;
		edmac->edma = edma;

		edmac->clk = clk_get(NULL, cdata->name);
		if (IS_ERR(edmac->clk)) {
			dev_warn(&pdev->dev, "failed to get clock for %s\n",
				 cdata->name);
			continue;
		}

		spin_lock_init(&edmac->lock);
		INIT_LIST_HEAD(&edmac->active);
		INIT_LIST_HEAD(&edmac->queue);
		INIT_LIST_HEAD(&edmac->free_list);
		tasklet_init(&edmac->tasklet, ep93xx_dma_tasklet,
			     (unsigned long)edmac);

		list_add_tail(&edmac->chan.device_node,
			      &dma_dev->channels);
	}

	dma_cap_zero(dma_dev->cap_mask);
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);

	dma_dev->dev = &pdev->dev;
	dma_dev->device_alloc_chan_resources = ep93xx_dma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources;
	dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
	dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
	dma_dev->device_control = ep93xx_dma_control;
	dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
	dma_dev->device_tx_status = ep93xx_dma_tx_status;

	dma_set_max_seg_size(dma_dev->dev, DMA_MAX_CHAN_BYTES);

	if (edma->m2m) {
		dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
		dma_dev->device_prep_dma_memcpy = ep93xx_dma_prep_dma_memcpy;

		edma->hw_setup = m2m_hw_setup;
		edma->hw_shutdown = m2m_hw_shutdown;
		edma->hw_submit = m2m_hw_submit;
		edma->hw_interrupt = m2m_hw_interrupt;
	} else {
		dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);

		edma->hw_setup = m2p_hw_setup;
		edma->hw_shutdown = m2p_hw_shutdown;
		edma->hw_submit = m2p_hw_submit;
		edma->hw_interrupt = m2p_hw_interrupt;
	}

	ret = dma_async_device_register(dma_dev);
	if (unlikely(ret)) {
		for (i = 0; i < edma->num_channels; i++) {
			struct ep93xx_dma_chan *edmac = &edma->channels[i];
			if (!IS_ERR_OR_NULL(edmac->clk))
				clk_put(edmac->clk);
		}
		kfree(edma);
	} else {
		dev_info(dma_dev->dev, "EP93xx M2%s DMA ready\n",
			 edma->m2m ? "M" : "P");
	}

	return ret;
}

static struct platform_device_id ep93xx_dma_driver_ids[] = {
	{ "ep93xx-dma-m2p", 0 },
	{ "ep93xx-dma-m2m", 1 },
	{ },
};

static struct platform_driver ep93xx_dma_driver = {
	.driver		= {
		.name	= "ep93xx-dma",
	},
	.id_table	= ep93xx_dma_driver_ids,
};

static int __init ep93xx_dma_module_init(void)
{
	return platform_driver_probe(&ep93xx_dma_driver, ep93xx_dma_probe);
}
subsys_initcall(ep93xx_dma_module_init);

MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
MODULE_DESCRIPTION("EP93xx DMA driver");
MODULE_LICENSE("GPL");