/*
 * Driver for the Cirrus Logic EP93xx DMA Controller
 *
 * Copyright (C) 2011 Mika Westerberg
 *
 * DMA M2P implementation is based on the original
 * arch/arm/mach-ep93xx/dma-m2p.c which has following copyrights:
 *
 *   Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
 *   Copyright (C) 2006 Applied Data Systems
 *   Copyright (C) 2009 Ryan Mallon <rmallon@gmail.com>
 *
 * This driver is based on dw_dmac and amba-pl08x drivers.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/clk.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <linux/platform_data/dma-ep93xx.h>

#include "dmaengine.h"

/* M2P registers */
#define M2P_CONTROL			0x0000
#define M2P_CONTROL_STALLINT		BIT(0)
#define M2P_CONTROL_NFBINT		BIT(1)
#define M2P_CONTROL_CH_ERROR_INT	BIT(3)
#define M2P_CONTROL_ENABLE		BIT(4)
#define M2P_CONTROL_ICE			BIT(6)

#define M2P_INTERRUPT			0x0004
#define M2P_INTERRUPT_STALL		BIT(0)
#define M2P_INTERRUPT_NFB		BIT(1)
#define M2P_INTERRUPT_ERROR		BIT(3)

#define M2P_PPALLOC			0x0008
#define M2P_STATUS			0x000c

#define M2P_MAXCNT0			0x0020
#define M2P_BASE0			0x0024
#define M2P_MAXCNT1			0x0030
#define M2P_BASE1			0x0034

#define M2P_STATE_IDLE			0
#define M2P_STATE_STALL			1
#define M2P_STATE_ON			2
#define M2P_STATE_NEXT			3
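/*
 * The channel states above are read from bits [5:4] of M2P_STATUS (see
 * m2p_channel_state() below).
 */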

/* M2M registers */
#define M2M_CONTROL			0x0000
#define M2M_CONTROL_DONEINT		BIT(2)
#define M2M_CONTROL_ENABLE		BIT(3)
#define M2M_CONTROL_START		BIT(4)
#define M2M_CONTROL_DAH			BIT(11)
#define M2M_CONTROL_SAH			BIT(12)
#define M2M_CONTROL_PW_SHIFT		9
#define M2M_CONTROL_PW_8		(0 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_16		(1 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_32		(2 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_MASK		(3 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_TM_SHIFT		13
#define M2M_CONTROL_TM_TX		(1 << M2M_CONTROL_TM_SHIFT)
#define M2M_CONTROL_TM_RX		(2 << M2M_CONTROL_TM_SHIFT)
#define M2M_CONTROL_NFBINT		BIT(21)
#define M2M_CONTROL_RSS_SHIFT		22
#define M2M_CONTROL_RSS_SSPRX		(1 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_RSS_SSPTX		(2 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_RSS_IDE		(3 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_NO_HDSK		BIT(24)
#define M2M_CONTROL_PWSC_SHIFT		25

#define M2M_INTERRUPT			0x0004
#define M2M_INTERRUPT_MASK		6
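/* 0x6 selects the DONE and NFB interrupt status bits (bits 1 and 2) */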

#define M2M_STATUS			0x000c
#define M2M_STATUS_CTL_SHIFT		1
#define M2M_STATUS_CTL_IDLE		(0 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_STALL		(1 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_MEMRD		(2 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_MEMWR		(3 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_BWCWAIT		(4 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_MASK		(7 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_BUF_SHIFT		4
#define M2M_STATUS_BUF_NO		(0 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_BUF_ON		(1 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_BUF_NEXT		(2 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_BUF_MASK		(3 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_DONE			BIT(6)

#define M2M_BCR0			0x0010
#define M2M_BCR1			0x0014
#define M2M_SAR_BASE0			0x0018
#define M2M_SAR_BASE1			0x001c
#define M2M_DAR_BASE0			0x002c
#define M2M_DAR_BASE1			0x0030

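/*
 * A single hardware buffer can move at most 0xffff bytes, so longer
 * transfers are split into a chain of descriptors. Up to 32 descriptors
 * are preallocated per channel in ep93xx_dma_alloc_chan_resources().
 */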
#define DMA_MAX_CHAN_BYTES		0xffff
#define DMA_MAX_CHAN_DESCRIPTORS	32

struct ep93xx_dma_engine;

/**
 * struct ep93xx_dma_desc - EP93xx specific transaction descriptor
 * @src_addr: source address of the transaction
 * @dst_addr: destination address of the transaction
 * @size: size of the transaction (in bytes)
 * @complete: this descriptor is completed
 * @txd: dmaengine API descriptor
 * @tx_list: list of linked descriptors
 * @node: link used for putting this into a channel queue
 */
struct ep93xx_dma_desc {
	u32				src_addr;
	u32				dst_addr;
	size_t				size;
	bool				complete;
	struct dma_async_tx_descriptor	txd;
	struct list_head		tx_list;
	struct list_head		node;
};

/**
 * struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel
 * @chan: dmaengine API channel
 * @edma: pointer to the engine device
 * @regs: memory mapped registers
 * @irq: interrupt number of the channel
 * @clk: clock used by this channel
 * @tasklet: channel specific tasklet used for callbacks
 * @lock: lock protecting the fields following
 * @flags: flags for the channel
 * @buffer: which buffer to use next (0/1)
 * @active: flattened chain of descriptors currently being processed
 * @queue: pending descriptors which are handled next
 * @free_list: list of free descriptors which can be used
 * @runtime_addr: physical address currently used as dest/src (M2M only). This
 *                is set via .device_config before slave operation is
 *                prepared
 * @runtime_ctrl: M2M runtime values for the control register.
 *
 * As the EP93xx DMA controller doesn't support real chained DMA descriptors,
 * we use a slightly different scheme here: @active points to the head of a
 * flattened DMA descriptor chain.
 *
 * @queue holds pending transactions. These are linked through the first
 * descriptor in the chain. When a descriptor is moved to the @active queue,
 * the first and chained descriptors are flattened into a single list.
 *
 * @chan.private holds a pointer to &struct ep93xx_dma_data which contains
 * the necessary channel configuration information. For memcpy channels this
 * must be %NULL.
 */
struct ep93xx_dma_chan {
	struct dma_chan			chan;
	const struct ep93xx_dma_engine	*edma;
	void __iomem			*regs;
	int				irq;
	struct clk			*clk;
	struct tasklet_struct		tasklet;
	/* protects the fields following */
	spinlock_t			lock;
	unsigned long			flags;
/* Channel is configured for cyclic transfers */
#define EP93XX_DMA_IS_CYCLIC		0

	int				buffer;
	struct list_head		active;
	struct list_head		queue;
	struct list_head		free_list;
	u32				runtime_addr;
	u32				runtime_ctrl;
};

/**
 * struct ep93xx_dma_engine - the EP93xx DMA engine instance
 * @dma_dev: holds the dmaengine device
 * @m2m: is this an M2M or M2P device
 * @hw_setup: method which sets the channel up for operation
 * @hw_shutdown: shuts the channel down and flushes whatever is left
 * @hw_submit: pushes active descriptor(s) to the hardware
 * @hw_interrupt: handle the interrupt
 * @num_channels: number of channels for this instance
 * @channels: array of channels
 *
 * There is one instance of this struct for the M2P channels and one for the
 * M2M channels. hw_xxx() methods are used to perform operations which are
 * different on M2M and M2P channels. These methods are called with the
 * channel lock held and interrupts disabled so they cannot sleep.
 */
struct ep93xx_dma_engine {
	struct dma_device	dma_dev;
	bool			m2m;
	int			(*hw_setup)(struct ep93xx_dma_chan *);
	void			(*hw_shutdown)(struct ep93xx_dma_chan *);
	void			(*hw_submit)(struct ep93xx_dma_chan *);
	int			(*hw_interrupt)(struct ep93xx_dma_chan *);
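/* Return values for the hw_interrupt() callback */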
#define INTERRUPT_UNKNOWN	0
#define INTERRUPT_DONE		1
#define INTERRUPT_NEXT_BUFFER	2

	size_t			num_channels;
	struct ep93xx_dma_chan	channels[];
};

static inline struct device *chan2dev(struct ep93xx_dma_chan *edmac)
{
	return &edmac->chan.dev->device;
}

static struct ep93xx_dma_chan *to_ep93xx_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct ep93xx_dma_chan, chan);
}

/**
 * ep93xx_dma_set_active - set new active descriptor chain
 * @edmac: channel
 * @desc: head of the new active descriptor chain
 *
 * Sets @desc to be the head of the new active descriptor chain. This is the
 * chain which is processed next. The active list must be empty before calling
 * this function.
 *
 * Called with @edmac->lock held and interrupts disabled.
 */
static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac,
				  struct ep93xx_dma_desc *desc)
{
	BUG_ON(!list_empty(&edmac->active));

	list_add_tail(&desc->node, &edmac->active);

	/* Flatten the @desc->tx_list chain into @edmac->active list */
	while (!list_empty(&desc->tx_list)) {
		struct ep93xx_dma_desc *d = list_first_entry(&desc->tx_list,
			struct ep93xx_dma_desc, node);

		/*
		 * We copy the callback parameters from the first descriptor
		 * to all the chained descriptors. This way we can call the
		 * callback without having to find out the first descriptor in
		 * the chain. Useful for cyclic transfers.
		 */
		d->txd.callback = desc->txd.callback;
		d->txd.callback_param = desc->txd.callback_param;

		list_move_tail(&d->node, &edmac->active);
	}
}

/* Called with @edmac->lock held and interrupts disabled */
static struct ep93xx_dma_desc *
ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
{
	return list_first_entry_or_null(&edmac->active,
					struct ep93xx_dma_desc, node);
}

/**
 * ep93xx_dma_advance_active - advances to the next active descriptor
 * @edmac: channel
 *
 * Function advances the active descriptor to the next one in @edmac->active
 * and returns %true if we still have descriptors in the chain to process.
 * Otherwise returns %false.
 *
 * When the channel is in cyclic mode, this always returns %true.
 *
 * Called with @edmac->lock held and interrupts disabled.
 */
static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;

	list_rotate_left(&edmac->active);

	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
		return true;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc)
		return false;

	/*
	 * If txd.cookie is set it means that we are back in the first
	 * descriptor in the chain and hence done with it.
	 */
	return !desc->txd.cookie;
}

/*
 * M2P DMA implementation
 */

static void m2p_set_control(struct ep93xx_dma_chan *edmac, u32 control)
{
	writel(control, edmac->regs + M2P_CONTROL);
	/*
	 * The EP93xx User's Guide states that we must perform a dummy read
	 * after a write to the control register.
	 */
	readl(edmac->regs + M2P_CONTROL);
}

static int m2p_hw_setup(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control;

	writel(data->port & 0xf, edmac->regs + M2P_PPALLOC);

	control = M2P_CONTROL_CH_ERROR_INT | M2P_CONTROL_ICE
		| M2P_CONTROL_ENABLE;
	m2p_set_control(edmac, control);

	return 0;
}

static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac)
{
	return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
}

static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
{
	u32 control;

	control = readl(edmac->regs + M2P_CONTROL);
	control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
	m2p_set_control(edmac, control);

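	/* Wait for the channel to drop out of the ON/NEXT states */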
	while (m2p_channel_state(edmac) >= M2P_STATE_ON)
		cpu_relax();

	m2p_set_control(edmac, 0);

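	/* With the channel now disabled, wait until it leaves the STALL state */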
	while (m2p_channel_state(edmac) == M2P_STATE_STALL)
		cpu_relax();
}

static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;
	u32 bus_addr;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac), "M2P: empty descriptor list\n");
		return;
	}

	if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_MEM_TO_DEV)
		bus_addr = desc->src_addr;
	else
		bus_addr = desc->dst_addr;

	if (edmac->buffer == 0) {
		writel(desc->size, edmac->regs + M2P_MAXCNT0);
		writel(bus_addr, edmac->regs + M2P_BASE0);
	} else {
		writel(desc->size, edmac->regs + M2P_MAXCNT1);
		writel(bus_addr, edmac->regs + M2P_BASE1);
	}

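	/* Toggle to the other hardware buffer register set for the next fill */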
	edmac->buffer ^= 1;
}

static void m2p_hw_submit(struct ep93xx_dma_chan *edmac)
{
	u32 control = readl(edmac->regs + M2P_CONTROL);

	m2p_fill_desc(edmac);
	control |= M2P_CONTROL_STALLINT;

	if (ep93xx_dma_advance_active(edmac)) {
		m2p_fill_desc(edmac);
		control |= M2P_CONTROL_NFBINT;
	}

	m2p_set_control(edmac, control);
}

static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
	u32 irq_status = readl(edmac->regs + M2P_INTERRUPT);
	u32 control;

	if (irq_status & M2P_INTERRUPT_ERROR) {
		struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);

		/* Clear the error interrupt */
		writel(1, edmac->regs + M2P_INTERRUPT);

		/*
		 * It seems that there is no easy way of reporting errors back
		 * to the client so we just report the error here and continue as
		 * usual.
		 *
		 * Revisit this when there is a mechanism to report back the
		 * errors.
		 */
		dev_err(chan2dev(edmac),
			"DMA transfer failed! Details:\n"
			"\tcookie	: %d\n"
			"\tsrc_addr	: 0x%08x\n"
			"\tdst_addr	: 0x%08x\n"
			"\tsize		: %zu\n",
			desc->txd.cookie, desc->src_addr, desc->dst_addr,
			desc->size);
	}

	/*
	 * Even the latest E2 silicon revision sometimes asserts the STALL
	 * interrupt instead of NFB. Therefore we treat them equally, based on
	 * the amount of data we still have to transfer.
	 */
	if (!(irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)))
		return INTERRUPT_UNKNOWN;

	if (ep93xx_dma_advance_active(edmac)) {
		m2p_fill_desc(edmac);
		return INTERRUPT_NEXT_BUFFER;
	}

	/* Disable interrupts */
	control = readl(edmac->regs + M2P_CONTROL);
	control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
	m2p_set_control(edmac, control);

	return INTERRUPT_DONE;
}

/*
 * M2M DMA implementation
 */

static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
{
	const struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control = 0;

	if (!data) {
		/* This is a memcpy channel, nothing to configure */
		writel(control, edmac->regs + M2M_CONTROL);
		return 0;
	}

	switch (data->port) {
	case EP93XX_DMA_SSP:
		/*
		 * This was found by experimenting: anything less than 5
		 * causes the channel to perform only a partial transfer,
		 * which leads to problems since we then don't get the DONE
		 * interrupt.
		 */
		control = (5 << M2M_CONTROL_PWSC_SHIFT);
		control |= M2M_CONTROL_NO_HDSK;

		if (data->direction == DMA_MEM_TO_DEV) {
			control |= M2M_CONTROL_DAH;
			control |= M2M_CONTROL_TM_TX;
			control |= M2M_CONTROL_RSS_SSPTX;
		} else {
			control |= M2M_CONTROL_SAH;
			control |= M2M_CONTROL_TM_RX;
			control |= M2M_CONTROL_RSS_SSPRX;
		}
		break;

	case EP93XX_DMA_IDE:
		/*
		 * This IDE part is totally untested. Values below are taken
		 * from the EP93xx User's Guide and might not be correct.
		 */
		if (data->direction == DMA_MEM_TO_DEV) {
			/* Worst case from the UG */
			control = (3 << M2M_CONTROL_PWSC_SHIFT);
			control |= M2M_CONTROL_DAH;
			control |= M2M_CONTROL_TM_TX;
		} else {
			control = (2 << M2M_CONTROL_PWSC_SHIFT);
			control |= M2M_CONTROL_SAH;
			control |= M2M_CONTROL_TM_RX;
		}

		control |= M2M_CONTROL_NO_HDSK;
		control |= M2M_CONTROL_RSS_IDE;
		control |= M2M_CONTROL_PW_16;
		break;

	default:
		return -EINVAL;
	}

	writel(control, edmac->regs + M2M_CONTROL);
	return 0;
}

static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac)
{
	/* Just disable the channel */
	writel(0, edmac->regs + M2M_CONTROL);
}

static void m2m_fill_desc(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac), "M2M: empty descriptor list\n");
		return;
	}

	if (edmac->buffer == 0) {
		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0);
		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE0);
		writel(desc->size, edmac->regs + M2M_BCR0);
	} else {
		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE1);
		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE1);
		writel(desc->size, edmac->regs + M2M_BCR1);
	}

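	/* Toggle to the other hardware buffer register set for the next fill */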
	edmac->buffer ^= 1;
}

static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control = readl(edmac->regs + M2M_CONTROL);

	/*
	 * Since we allow clients to configure PW (peripheral width), we
	 * always clear the PW bits here and then set them according to what
	 * is given in the runtime configuration.
	 */
	control &= ~M2M_CONTROL_PW_MASK;
	control |= edmac->runtime_ctrl;

	m2m_fill_desc(edmac);
	control |= M2M_CONTROL_DONEINT;

	if (ep93xx_dma_advance_active(edmac)) {
		m2m_fill_desc(edmac);
		control |= M2M_CONTROL_NFBINT;
	}

	/*
	 * Now we can finally enable the channel. For the M2M channel this must be
	 * done _after_ the BCRx registers are programmed.
	 */
	control |= M2M_CONTROL_ENABLE;
	writel(control, edmac->regs + M2M_CONTROL);

	if (!data) {
		/*
		 * For memcpy channels the software trigger must be asserted
		 * in order to start the memcpy operation.
		 */
		control |= M2M_CONTROL_START;
		writel(control, edmac->regs + M2M_CONTROL);
	}
}

/*
 * According to the EP93xx User's Guide, we should receive a DONE interrupt
 * when all M2M DMA controller transactions complete normally. This is not
 * always the case - sometimes EP93xx M2M DMA asserts DONE interrupt when the
 * DMA channel is still running (channel Buffer FSM in DMA_BUF_ON state, and
 * channel Control FSM in DMA_MEM_RD state, observed at least in IDE-DMA
 * operation). In effect, disabling the channel when only the DONE bit is set
 * could stop a currently running DMA transfer. To avoid this, we use the
 * Buffer FSM and Control FSM to check the current state of the DMA channel.
 */
static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
	u32 status = readl(edmac->regs + M2M_STATUS);
	u32 ctl_fsm = status & M2M_STATUS_CTL_MASK;
	u32 buf_fsm = status & M2M_STATUS_BUF_MASK;
	bool done = status & M2M_STATUS_DONE;
	bool last_done;
	u32 control;
	struct ep93xx_dma_desc *desc;

	/* Accept only DONE and NFB interrupts */
	if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_MASK))
		return INTERRUPT_UNKNOWN;

	if (done) {
		/* Clear the DONE bit */
		writel(0, edmac->regs + M2M_INTERRUPT);
	}

	/*
	 * Check whether we are done with descriptors or not. This, together
	 * with DMA channel state, determines action to take in interrupt.
	 */
	desc = ep93xx_dma_get_active(edmac);
	last_done = !desc || desc->txd.cookie;

	/*
	 * Use M2M DMA Buffer FSM and Control FSM to check current state of
	 * DMA channel. Using DONE and NFB bits from channel status register
	 * or bits from channel interrupt register is not reliable.
	 */
	if (!last_done &&
	    (buf_fsm == M2M_STATUS_BUF_NO ||
	     buf_fsm == M2M_STATUS_BUF_ON)) {
		/*
		 * Two buffers are ready for update when Buffer FSM is in
		 * DMA_NO_BUF state. Only one buffer can be prepared without
		 * disabling the channel or polling the DONE bit.
		 * To simplify things, always prepare only one buffer.
		 */
		if (ep93xx_dma_advance_active(edmac)) {
			m2m_fill_desc(edmac);
			if (done && !edmac->chan.private) {
				/* Software trigger for memcpy channel */
				control = readl(edmac->regs + M2M_CONTROL);
				control |= M2M_CONTROL_START;
				writel(control, edmac->regs + M2M_CONTROL);
			}
			return INTERRUPT_NEXT_BUFFER;
		} else {
			last_done = true;
		}
	}

	/*
	 * Disable the channel only when Buffer FSM is in DMA_NO_BUF state
	 * and Control FSM is in DMA_STALL state.
	 */
	if (last_done &&
	    buf_fsm == M2M_STATUS_BUF_NO &&
	    ctl_fsm == M2M_STATUS_CTL_STALL) {
		/* Disable interrupts and the channel */
		control = readl(edmac->regs + M2M_CONTROL);
		control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_NFBINT
			    | M2M_CONTROL_ENABLE);
		writel(control, edmac->regs + M2M_CONTROL);
		return INTERRUPT_DONE;
	}

	/*
	 * Nothing to do this time.
	 */
	return INTERRUPT_NEXT_BUFFER;
}

/*
 * DMA engine API implementation
 */

static struct ep93xx_dma_desc *
ep93xx_dma_desc_get(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc, *_desc;
	struct ep93xx_dma_desc *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	list_for_each_entry_safe(desc, _desc, &edmac->free_list, node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del_init(&desc->node);

			/* Re-initialize the descriptor */
			desc->src_addr = 0;
			desc->dst_addr = 0;
			desc->size = 0;
			desc->complete = false;
			desc->txd.cookie = 0;
			desc->txd.callback = NULL;
			desc->txd.callback_param = NULL;

			ret = desc;
			break;
		}
	}
	spin_unlock_irqrestore(&edmac->lock, flags);
	return ret;
}

static void ep93xx_dma_desc_put(struct ep93xx_dma_chan *edmac,
				struct ep93xx_dma_desc *desc)
{
	if (desc) {
		unsigned long flags;

		spin_lock_irqsave(&edmac->lock, flags);
		list_splice_init(&desc->tx_list, &edmac->free_list);
		list_add(&desc->node, &edmac->free_list);
		spin_unlock_irqrestore(&edmac->lock, flags);
	}
}

/**
 * ep93xx_dma_advance_work - start processing the next pending transaction
 * @edmac: channel
 *
 * If we have pending transactions queued and we are currently idling, this
 * function takes the next queued transaction from the @edmac->queue and
 * pushes it to the hardware for execution.
 */
static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *new;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	if (!list_empty(&edmac->active) || list_empty(&edmac->queue)) {
		spin_unlock_irqrestore(&edmac->lock, flags);
		return;
	}

	/* Take the next descriptor from the pending queue */
	new = list_first_entry(&edmac->queue, struct ep93xx_dma_desc, node);
	list_del_init(&new->node);

	ep93xx_dma_set_active(edmac, new);

	/* Push it to the hardware */
	edmac->edma->hw_submit(edmac);
	spin_unlock_irqrestore(&edmac->lock, flags);
}

static void ep93xx_dma_tasklet(unsigned long data)
{
	struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;
	struct ep93xx_dma_desc *desc, *d;
	struct dmaengine_desc_callback cb;
	LIST_HEAD(list);

	memset(&cb, 0, sizeof(cb));
	spin_lock_irq(&edmac->lock);
	/*
	 * If dma_terminate_all() was called before we get to run, the active
	 * list has become empty. If that happens we aren't supposed to do
	 * anything more than call ep93xx_dma_advance_work().
	 */
	desc = ep93xx_dma_get_active(edmac);
	if (desc) {
		if (desc->complete) {
			/* Complete the cookie for the non-cyclic case only */
			if (!test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
				dma_cookie_complete(&desc->txd);
			list_splice_init(&edmac->active, &list);
		}
		dmaengine_desc_get_callback(&desc->txd, &cb);
	}
	spin_unlock_irq(&edmac->lock);

	/* Pick up the next descriptor from the queue */
	ep93xx_dma_advance_work(edmac);

	/* Now we can release all the chained descriptors */
	list_for_each_entry_safe(desc, d, &list, node) {
		dma_descriptor_unmap(&desc->txd);
		ep93xx_dma_desc_put(edmac, desc);
	}

	dmaengine_desc_callback_invoke(&cb, NULL);
}

static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
{
	struct ep93xx_dma_chan *edmac = dev_id;
	struct ep93xx_dma_desc *desc;
	irqreturn_t ret = IRQ_HANDLED;

	spin_lock(&edmac->lock);

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac),
			 "got interrupt while active list is empty\n");
		spin_unlock(&edmac->lock);
		return IRQ_NONE;
	}

	switch (edmac->edma->hw_interrupt(edmac)) {
	case INTERRUPT_DONE:
		desc->complete = true;
		tasklet_schedule(&edmac->tasklet);
		break;

	case INTERRUPT_NEXT_BUFFER:
		if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
			tasklet_schedule(&edmac->tasklet);
		break;

	default:
		dev_warn(chan2dev(edmac), "unknown interrupt!\n");
		ret = IRQ_NONE;
		break;
	}

	spin_unlock(&edmac->lock);
	return ret;
}

/**
 * ep93xx_dma_tx_submit - set the prepared descriptor(s) to be executed
 * @tx: descriptor to be executed
 *
 * Function will execute the given descriptor on the hardware or, if the
 * hardware is busy, queue the descriptor to be executed later on. Returns a
 * cookie which can be used to poll the status of the descriptor.
 */
static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(tx->chan);
	struct ep93xx_dma_desc *desc;
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	cookie = dma_cookie_assign(tx);

	desc = container_of(tx, struct ep93xx_dma_desc, txd);

	/*
	 * If nothing is currently being processed, we push this descriptor
	 * directly to the hardware. Otherwise we put the descriptor into
	 * the pending queue.
	 */
	if (list_empty(&edmac->active)) {
		ep93xx_dma_set_active(edmac, desc);
		edmac->edma->hw_submit(edmac);
	} else {
		list_add_tail(&desc->node, &edmac->queue);
	}

	spin_unlock_irqrestore(&edmac->lock, flags);
	return cookie;
}

/**
 * ep93xx_dma_alloc_chan_resources - allocate resources for the channel
 * @chan: channel to allocate resources
 *
 * Function allocates the necessary resources for the given DMA channel and
 * returns the number of allocated descriptors for the channel. A negative
 * errno is returned in case of failure.
 */
static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_data *data = chan->private;
	const char *name = dma_chan_name(chan);
	int ret, i;

	/* Sanity check the channel parameters */
	if (!edmac->edma->m2m) {
		if (!data)
			return -EINVAL;
		if (data->port < EP93XX_DMA_I2S1 ||
		    data->port > EP93XX_DMA_IRDA)
			return -EINVAL;
		if (data->direction != ep93xx_dma_chan_direction(chan))
			return -EINVAL;
	} else {
		if (data) {
			switch (data->port) {
			case EP93XX_DMA_SSP:
			case EP93XX_DMA_IDE:
				if (!is_slave_direction(data->direction))
					return -EINVAL;
				break;
			default:
				return -EINVAL;
			}
		}
	}

	if (data && data->name)
		name = data->name;

	ret = clk_enable(edmac->clk);
	if (ret)
		return ret;

	ret = request_irq(edmac->irq, ep93xx_dma_interrupt, 0, name, edmac);
	if (ret)
		goto fail_clk_disable;

	spin_lock_irq(&edmac->lock);
	dma_cookie_init(&edmac->chan);
	ret = edmac->edma->hw_setup(edmac);
	spin_unlock_irq(&edmac->lock);

	if (ret)
		goto fail_free_irq;

	for (i = 0; i < DMA_MAX_CHAN_DESCRIPTORS; i++) {
		struct ep93xx_dma_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc) {
			dev_warn(chan2dev(edmac), "not enough descriptors\n");
			break;
		}

		INIT_LIST_HEAD(&desc->tx_list);

		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.tx_submit = ep93xx_dma_tx_submit;

		ep93xx_dma_desc_put(edmac, desc);
	}

	return i;

fail_free_irq:
	free_irq(edmac->irq, edmac);
fail_clk_disable:
	clk_disable(edmac->clk);

	return ret;
}

/**
 * ep93xx_dma_free_chan_resources - release resources for the channel
 * @chan: channel
 *
 * Function releases all the resources allocated for the given channel.
 * The channel must be idle when this is called.
 */
static void ep93xx_dma_free_chan_resources(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *d;
	unsigned long flags;
	LIST_HEAD(list);

	BUG_ON(!list_empty(&edmac->active));
	BUG_ON(!list_empty(&edmac->queue));

	spin_lock_irqsave(&edmac->lock, flags);
	edmac->edma->hw_shutdown(edmac);
	edmac->runtime_addr = 0;
	edmac->runtime_ctrl = 0;
	edmac->buffer = 0;
	list_splice_init(&edmac->free_list, &list);
	spin_unlock_irqrestore(&edmac->lock, flags);

	list_for_each_entry_safe(desc, d, &list, node)
		kfree(desc);

	clk_disable(edmac->clk);
	free_irq(edmac->irq, edmac);
}

/**
 * ep93xx_dma_prep_dma_memcpy - prepare a memcpy DMA operation
 * @chan: channel
 * @dest: destination bus address
 * @src: source bus address
 * @len: size of the transaction
 * @flags: flags for the descriptor
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
			   dma_addr_t src, size_t len, unsigned long flags)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	size_t bytes, offset;

	first = NULL;
	for (offset = 0; offset < len; offset += bytes) {
		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		bytes = min_t(size_t, len - offset, DMA_MAX_CHAN_BYTES);

		desc->src_addr = src + offset;
		desc->dst_addr = dest + offset;
		desc->size = bytes;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;
	first->txd.flags = flags;

	return &first->txd;
fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}
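
/*
 * A minimal usage sketch from the client side (hypothetical code, not part
 * of this driver): a transfer longer than DMA_MAX_CHAN_BYTES is split into
 * a descriptor chain by the function above.
 *
 *	struct dma_async_tx_descriptor *tx;
 *
 *	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_CTRL_ACK);
 *	if (tx) {
 *		dmaengine_submit(tx);
 *		dma_async_issue_pending(chan);
 *	}
 */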

/**
 * ep93xx_dma_prep_slave_sg - prepare a slave DMA operation
 * @chan: channel
 * @sgl: list of buffers to transfer
 * @sg_len: number of entries in @sgl
 * @dir: direction of the DMA transfer
 * @flags: flags for the descriptor
 * @context: operation context (ignored)
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
			 unsigned int sg_len, enum dma_transfer_direction dir,
			 unsigned long flags, void *context)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	struct scatterlist *sg;
	int i;

	if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
		dev_warn(chan2dev(edmac),
			 "channel was configured with different direction\n");
		return NULL;
	}

	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
		dev_warn(chan2dev(edmac),
			 "channel is already used for cyclic transfers\n");
		return NULL;
	}

	first = NULL;
	for_each_sg(sgl, sg, sg_len, i) {
		size_t len = sg_dma_len(sg);

		if (len > DMA_MAX_CHAN_BYTES) {
			dev_warn(chan2dev(edmac), "too big transfer size %zu\n",
				 len);
			goto fail;
		}

		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		if (dir == DMA_MEM_TO_DEV) {
			desc->src_addr = sg_dma_address(sg);
			desc->dst_addr = edmac->runtime_addr;
		} else {
			desc->src_addr = edmac->runtime_addr;
			desc->dst_addr = sg_dma_address(sg);
		}
		desc->size = len;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;
	first->txd.flags = flags;

	return &first->txd;

fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}

/**
 * ep93xx_dma_prep_dma_cyclic - prepare a cyclic DMA operation
 * @chan: channel
 * @dma_addr: DMA mapped address of the buffer
 * @buf_len: length of the buffer (in bytes)
 * @period_len: length of a single period
 * @dir: direction of the operation
 * @flags: tx descriptor status flags
 *
 * Prepares a descriptor for cyclic DMA operation. This means that once the
 * descriptor is submitted, we will be submitting @period_len sized buffers
 * and calling the callback once a period has elapsed. The transfer
 * terminates only when the client calls dmaengine_terminate_all() for this
 * channel.
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
			   size_t buf_len, size_t period_len,
			   enum dma_transfer_direction dir, unsigned long flags)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	size_t offset = 0;

	if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
		dev_warn(chan2dev(edmac),
			 "channel was configured with different direction\n");
		return NULL;
	}

	if (test_and_set_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
		dev_warn(chan2dev(edmac),
			 "channel is already used for cyclic transfers\n");
		return NULL;
	}

	if (period_len > DMA_MAX_CHAN_BYTES) {
		dev_warn(chan2dev(edmac), "too big period length %zu\n",
			 period_len);
		return NULL;
	}

	/* Split the buffer into period size chunks */
	first = NULL;
	for (offset = 0; offset < buf_len; offset += period_len) {
		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		if (dir == DMA_MEM_TO_DEV) {
			desc->src_addr = dma_addr + offset;
			desc->dst_addr = edmac->runtime_addr;
		} else {
			desc->src_addr = edmac->runtime_addr;
			desc->dst_addr = dma_addr + offset;
		}

		desc->size = period_len;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;

	return &first->txd;

fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}
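
/*
 * A minimal cyclic usage sketch (hypothetical client code): the buffer is
 * split into @period_len sized chunks, so the driver assumes @buf_len is a
 * multiple of @period_len. The callback set on the returned descriptor runs
 * once per elapsed period until the transfer is terminated.
 *
 *	tx = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
 *				       DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 */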

/**
 * ep93xx_dma_terminate_all - terminate all transactions
 * @chan: channel
 *
 * Stops all DMA transactions. All descriptors are put back to the
 * @edmac->free_list and callbacks are _not_ called.
 */
static int ep93xx_dma_terminate_all(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *_d;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&edmac->lock, flags);
	/* First we disable and flush the DMA channel */
	edmac->edma->hw_shutdown(edmac);
	clear_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags);
	list_splice_init(&edmac->active, &list);
	list_splice_init(&edmac->queue, &list);
	/*
	 * We then re-enable the channel. This way we can continue submitting
	 * the descriptors by just calling ->hw_submit() again.
	 */
	edmac->edma->hw_setup(edmac);
	spin_unlock_irqrestore(&edmac->lock, flags);

	list_for_each_entry_safe(desc, _d, &list, node)
		ep93xx_dma_desc_put(edmac, desc);

	return 0;
}

static int ep93xx_dma_slave_config(struct dma_chan *chan,
				   struct dma_slave_config *config)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	enum dma_slave_buswidth width;
	unsigned long flags;
	u32 addr, ctrl;

	if (!edmac->edma->m2m)
		return -EINVAL;

	switch (config->direction) {
	case DMA_DEV_TO_MEM:
		width = config->src_addr_width;
		addr = config->src_addr;
		break;

	case DMA_MEM_TO_DEV:
		width = config->dst_addr_width;
		addr = config->dst_addr;
		break;

	default:
		return -EINVAL;
	}

	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		ctrl = 0;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		ctrl = M2M_CONTROL_PW_16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		ctrl = M2M_CONTROL_PW_32;
		break;
	default:
		return -EINVAL;
	}

	spin_lock_irqsave(&edmac->lock, flags);
	edmac->runtime_addr = addr;
	edmac->runtime_ctrl = ctrl;
	spin_unlock_irqrestore(&edmac->lock, flags);

	return 0;
}
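
/*
 * A minimal configuration sketch (hypothetical client code; the FIFO address
 * is an assumption): only M2M channels accept a runtime configuration here.
 *
 *	struct dma_slave_config cfg = { };
 *
 *	cfg.direction = DMA_MEM_TO_DEV;
 *	cfg.dst_addr = fifo_phys_addr;
 *	cfg.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES;
 *	dmaengine_slave_config(chan, &cfg);
 */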

/**
 * ep93xx_dma_tx_status - check if a transaction is completed
 * @chan: channel
 * @cookie: transaction specific cookie
 * @state: state of the transaction is stored here if given
 *
 * This function can be used to query state of a given transaction.
 */
static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *state)
{
	return dma_cookie_status(chan, cookie, state);
}

/**
 * ep93xx_dma_issue_pending - push pending transactions to the hardware
 * @chan: channel
 *
 * When this function is called, all pending transactions are pushed to the
 * hardware and executed.
 */
static void ep93xx_dma_issue_pending(struct dma_chan *chan)
{
	ep93xx_dma_advance_work(to_ep93xx_dma_chan(chan));
}

static int __init ep93xx_dma_probe(struct platform_device *pdev)
{
	struct ep93xx_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct ep93xx_dma_engine *edma;
	struct dma_device *dma_dev;
	size_t edma_size;
	int ret, i;

	edma_size = pdata->num_channels * sizeof(struct ep93xx_dma_chan);
	edma = kzalloc(sizeof(*edma) + edma_size, GFP_KERNEL);
	if (!edma)
		return -ENOMEM;

	dma_dev = &edma->dma_dev;
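	/* driver_data from ep93xx_dma_driver_ids: 0 selects M2P, 1 selects M2M */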
	edma->m2m = platform_get_device_id(pdev)->driver_data;
	edma->num_channels = pdata->num_channels;

	INIT_LIST_HEAD(&dma_dev->channels);
	for (i = 0; i < pdata->num_channels; i++) {
		const struct ep93xx_dma_chan_data *cdata = &pdata->channels[i];
		struct ep93xx_dma_chan *edmac = &edma->channels[i];

		edmac->chan.device = dma_dev;
		edmac->regs = cdata->base;
		edmac->irq = cdata->irq;
		edmac->edma = edma;

		edmac->clk = clk_get(NULL, cdata->name);
		if (IS_ERR(edmac->clk)) {
			dev_warn(&pdev->dev, "failed to get clock for %s\n",
				 cdata->name);
			continue;
		}

		spin_lock_init(&edmac->lock);
		INIT_LIST_HEAD(&edmac->active);
		INIT_LIST_HEAD(&edmac->queue);
		INIT_LIST_HEAD(&edmac->free_list);
		tasklet_init(&edmac->tasklet, ep93xx_dma_tasklet,
			     (unsigned long)edmac);

		list_add_tail(&edmac->chan.device_node,
			      &dma_dev->channels);
	}

	dma_cap_zero(dma_dev->cap_mask);
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);

	dma_dev->dev = &pdev->dev;
	dma_dev->device_alloc_chan_resources = ep93xx_dma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources;
	dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
	dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
	dma_dev->device_config = ep93xx_dma_slave_config;
	dma_dev->device_terminate_all = ep93xx_dma_terminate_all;
	dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
	dma_dev->device_tx_status = ep93xx_dma_tx_status;

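	/* Advertise the 0xffff-byte per-buffer hardware limit to clients */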
	dma_set_max_seg_size(dma_dev->dev, DMA_MAX_CHAN_BYTES);

	if (edma->m2m) {
		dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
		dma_dev->device_prep_dma_memcpy = ep93xx_dma_prep_dma_memcpy;

		edma->hw_setup = m2m_hw_setup;
		edma->hw_shutdown = m2m_hw_shutdown;
		edma->hw_submit = m2m_hw_submit;
		edma->hw_interrupt = m2m_hw_interrupt;
	} else {
		dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);

		edma->hw_setup = m2p_hw_setup;
		edma->hw_shutdown = m2p_hw_shutdown;
		edma->hw_submit = m2p_hw_submit;
		edma->hw_interrupt = m2p_hw_interrupt;
	}

	ret = dma_async_device_register(dma_dev);
	if (unlikely(ret)) {
		for (i = 0; i < edma->num_channels; i++) {
			struct ep93xx_dma_chan *edmac = &edma->channels[i];
			if (!IS_ERR_OR_NULL(edmac->clk))
				clk_put(edmac->clk);
		}
		kfree(edma);
	} else {
		dev_info(dma_dev->dev, "EP93xx M2%s DMA ready\n",
			 edma->m2m ? "M" : "P");
	}

	return ret;
}

static const struct platform_device_id ep93xx_dma_driver_ids[] = {
	{ "ep93xx-dma-m2p", 0 },
	{ "ep93xx-dma-m2m", 1 },
	{ },
};

static struct platform_driver ep93xx_dma_driver = {
	.driver		= {
		.name	= "ep93xx-dma",
	},
	.id_table	= ep93xx_dma_driver_ids,
};

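/*
 * Registered at subsys_initcall time so that the DMA channels are already
 * available when client drivers probe.
 */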
static int __init ep93xx_dma_module_init(void)
{
	return platform_driver_probe(&ep93xx_dma_driver, ep93xx_dma_probe);
}
subsys_initcall(ep93xx_dma_module_init);

MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
MODULE_DESCRIPTION("EP93xx DMA driver");
MODULE_LICENSE("GPL");