/*
 * Driver for the Cirrus Logic EP93xx DMA Controller
 *
 * Copyright (C) 2011 Mika Westerberg
 *
 * DMA M2P implementation is based on the original
 * arch/arm/mach-ep93xx/dma-m2p.c which has the following copyrights:
 *
 *   Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
 *   Copyright (C) 2006 Applied Data Systems
 *   Copyright (C) 2009 Ryan Mallon <rmallon@gmail.com>
 *
 * This driver is based on dw_dmac and amba-pl08x drivers.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/clk.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include <mach/dma.h>

/* M2P registers */
#define M2P_CONTROL			0x0000
#define M2P_CONTROL_STALLINT		BIT(0)
#define M2P_CONTROL_NFBINT		BIT(1)
#define M2P_CONTROL_CH_ERROR_INT	BIT(3)
#define M2P_CONTROL_ENABLE		BIT(4)
#define M2P_CONTROL_ICE			BIT(6)

#define M2P_INTERRUPT			0x0004
#define M2P_INTERRUPT_STALL		BIT(0)
#define M2P_INTERRUPT_NFB		BIT(1)
#define M2P_INTERRUPT_ERROR		BIT(3)

#define M2P_PPALLOC			0x0008
#define M2P_STATUS			0x000c

#define M2P_MAXCNT0			0x0020
#define M2P_BASE0			0x0024
#define M2P_MAXCNT1			0x0030
#define M2P_BASE1			0x0034

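/* M2P channel state, as read from bits [5:4] of M2P_STATUS */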
#define M2P_STATE_IDLE			0
#define M2P_STATE_STALL			1
#define M2P_STATE_ON			2
#define M2P_STATE_NEXT			3

/* M2M registers */
#define M2M_CONTROL			0x0000
#define M2M_CONTROL_DONEINT		BIT(2)
#define M2M_CONTROL_ENABLE		BIT(3)
#define M2M_CONTROL_START		BIT(4)
#define M2M_CONTROL_DAH			BIT(11)
#define M2M_CONTROL_SAH			BIT(12)
#define M2M_CONTROL_PW_SHIFT		9
#define M2M_CONTROL_PW_8		(0 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_16		(1 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_32		(2 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_MASK		(3 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_TM_SHIFT		13
#define M2M_CONTROL_TM_TX		(1 << M2M_CONTROL_TM_SHIFT)
#define M2M_CONTROL_TM_RX		(2 << M2M_CONTROL_TM_SHIFT)
#define M2M_CONTROL_RSS_SHIFT		22
#define M2M_CONTROL_RSS_SSPRX		(1 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_RSS_SSPTX		(2 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_RSS_IDE		(3 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_NO_HDSK		BIT(24)
#define M2M_CONTROL_PWSC_SHIFT		25

#define M2M_INTERRUPT			0x0004
#define M2M_INTERRUPT_DONEINT		BIT(1)

#define M2M_BCR0			0x0010
#define M2M_BCR1			0x0014
#define M2M_SAR_BASE0			0x0018
#define M2M_SAR_BASE1			0x001c
#define M2M_DAR_BASE0			0x002c
#define M2M_DAR_BASE1			0x0030

#define DMA_MAX_CHAN_BYTES		0xffff
#define DMA_MAX_CHAN_DESCRIPTORS	32
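
/*
 * A single descriptor can cover at most DMA_MAX_CHAN_BYTES. The memcpy prep
 * function below splits larger requests into a chain of descriptors of at
 * most this size; slave and cyclic transfers must fit the limit per sg
 * entry / period.
 */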

struct ep93xx_dma_engine;

/**
 * struct ep93xx_dma_desc - EP93xx specific transaction descriptor
 * @src_addr: source address of the transaction
 * @dst_addr: destination address of the transaction
 * @size: size of the transaction (in bytes)
 * @complete: this descriptor is completed
 * @txd: dmaengine API descriptor
 * @tx_list: list of linked descriptors
 * @node: link used for putting this into a channel queue
 */
struct ep93xx_dma_desc {
	u32				src_addr;
	u32				dst_addr;
	size_t				size;
	bool				complete;
	struct dma_async_tx_descriptor	txd;
	struct list_head		tx_list;
	struct list_head		node;
};

/**
 * struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel
 * @chan: dmaengine API channel
 * @edma: pointer to the engine device
 * @regs: memory mapped registers
 * @irq: interrupt number of the channel
 * @clk: clock used by this channel
 * @tasklet: channel specific tasklet used for callbacks
 * @lock: lock protecting the fields that follow
 * @flags: flags for the channel
 * @buffer: which buffer to use next (0/1)
 * @last_completed: last completed cookie value
 * @active: flattened chain of descriptors currently being processed
 * @queue: pending descriptors which are handled next
 * @free_list: list of free descriptors which can be used
 * @runtime_addr: physical address currently used as dest/src (M2M only). This
 *                is set via %DMA_SLAVE_CONFIG before the slave operation is
 *                prepared
 * @runtime_ctrl: M2M runtime values for the control register.
 *
 * As the EP93xx DMA controller doesn't support real chained DMA descriptors
 * we use a slightly different scheme here: @active points to the head of a
 * flattened DMA descriptor chain.
 *
 * @queue holds pending transactions. These are linked through the first
 * descriptor in the chain. When a descriptor is moved to the @active queue,
 * the first and chained descriptors are flattened into a single list.
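 *
 * For example, a transaction prepared from three sg entries is stored as
 * the first descriptor plus two entries on its @tx_list; once moved to
 * @active all three descriptors sit directly on @active, and the hardware
 * is fed one descriptor at a time from the head of that list.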
 *
 * @chan.private holds a pointer to &struct ep93xx_dma_data which contains
 * necessary channel configuration information. For memcpy channels this must
 * be %NULL.
 */
struct ep93xx_dma_chan {
	struct dma_chan			chan;
	const struct ep93xx_dma_engine	*edma;
	void __iomem			*regs;
	int				irq;
	struct clk			*clk;
	struct tasklet_struct		tasklet;
	/* protects the fields following */
	spinlock_t			lock;
	unsigned long			flags;
/* Channel is configured for cyclic transfers */
#define EP93XX_DMA_IS_CYCLIC		0

	int				buffer;
	dma_cookie_t			last_completed;
	struct list_head		active;
	struct list_head		queue;
	struct list_head		free_list;
	u32				runtime_addr;
	u32				runtime_ctrl;
};

/**
 * struct ep93xx_dma_engine - the EP93xx DMA engine instance
 * @dma_dev: holds the dmaengine device
 * @m2m: is this an M2M or M2P device
 * @hw_setup: method which sets the channel up for operation
 * @hw_shutdown: shuts the channel down and flushes whatever is left
 * @hw_submit: pushes active descriptor(s) to the hardware
 * @hw_interrupt: handle the interrupt
 * @num_channels: number of channels for this instance
 * @channels: array of channels
 *
 * There is one instance of this struct for the M2P channels and one for the
 * M2M channels. hw_xxx() methods are used to perform operations which are
 * different on M2M and M2P channels. These methods are called with the
 * channel lock held and interrupts disabled so they cannot sleep.
 */
struct ep93xx_dma_engine {
	struct dma_device	dma_dev;
	bool			m2m;
	int			(*hw_setup)(struct ep93xx_dma_chan *);
	void			(*hw_shutdown)(struct ep93xx_dma_chan *);
	void			(*hw_submit)(struct ep93xx_dma_chan *);
	int			(*hw_interrupt)(struct ep93xx_dma_chan *);
#define INTERRUPT_UNKNOWN	0
#define INTERRUPT_DONE		1
#define INTERRUPT_NEXT_BUFFER	2

	size_t			num_channels;
	struct ep93xx_dma_chan	channels[];
};

static inline struct device *chan2dev(struct ep93xx_dma_chan *edmac)
{
	return &edmac->chan.dev->device;
}

static struct ep93xx_dma_chan *to_ep93xx_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct ep93xx_dma_chan, chan);
}

/**
 * ep93xx_dma_set_active - set new active descriptor chain
 * @edmac: channel
 * @desc: head of the new active descriptor chain
 *
 * Sets @desc to be the head of the new active descriptor chain. This is the
 * chain which is processed next. The active list must be empty before calling
 * this function.
 *
 * Called with @edmac->lock held and interrupts disabled.
 */
static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac,
				  struct ep93xx_dma_desc *desc)
{
	BUG_ON(!list_empty(&edmac->active));

	list_add_tail(&desc->node, &edmac->active);

	/* Flatten the @desc->tx_list chain into @edmac->active list */
	while (!list_empty(&desc->tx_list)) {
		struct ep93xx_dma_desc *d = list_first_entry(&desc->tx_list,
			struct ep93xx_dma_desc, node);

		/*
		 * We copy the callback parameters from the first descriptor
		 * to all the chained descriptors. This way we can call the
		 * callback without having to find out the first descriptor in
		 * the chain. Useful for cyclic transfers.
		 */
		d->txd.callback = desc->txd.callback;
		d->txd.callback_param = desc->txd.callback_param;

		list_move_tail(&d->node, &edmac->active);
	}
}

/* Called with @edmac->lock held and interrupts disabled */
static struct ep93xx_dma_desc *
ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
{
	return list_first_entry(&edmac->active, struct ep93xx_dma_desc, node);
}

/**
 * ep93xx_dma_advance_active - advances to the next active descriptor
 * @edmac: channel
 *
 * Advances the active descriptor to the next one in @edmac->active and
 * returns %true if there are still descriptors in the chain to process.
 * Otherwise returns %false.
 *
 * When the channel is in cyclic mode, this always returns %true.
 *
 * Called with @edmac->lock held and interrupts disabled.
 */
static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac)
{
	list_rotate_left(&edmac->active);

	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
		return true;

	/*
	 * If txd.cookie is set it means that we are back at the first
	 * descriptor in the chain and hence done with it.
	 */
	return !ep93xx_dma_get_active(edmac)->txd.cookie;
}

/*
 * M2P DMA implementation
 */

static void m2p_set_control(struct ep93xx_dma_chan *edmac, u32 control)
{
	writel(control, edmac->regs + M2P_CONTROL);
	/*
	 * EP93xx User's Guide states that we must perform a dummy read after
	 * a write to the control register.
	 */
	readl(edmac->regs + M2P_CONTROL);
}

static int m2p_hw_setup(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control;

	writel(data->port & 0xf, edmac->regs + M2P_PPALLOC);

	control = M2P_CONTROL_CH_ERROR_INT | M2P_CONTROL_ICE
		| M2P_CONTROL_ENABLE;
	m2p_set_control(edmac, control);

	return 0;
}

static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac)
{
	return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
}

static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
{
	u32 control;

	control = readl(edmac->regs + M2P_CONTROL);
	control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
	m2p_set_control(edmac, control);

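	/* Wait for the channel to leave the ON/NEXT states before disabling it */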
	while (m2p_channel_state(edmac) >= M2P_STATE_ON)
		cpu_relax();

	m2p_set_control(edmac, 0);

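	/* Now the channel is disabled; wait until it falls out of STALL too */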
	while (m2p_channel_state(edmac) == M2P_STATE_STALL)
		cpu_relax();
}

static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);
	u32 bus_addr;

	if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_TO_DEVICE)
		bus_addr = desc->src_addr;
	else
		bus_addr = desc->dst_addr;

	if (edmac->buffer == 0) {
		writel(desc->size, edmac->regs + M2P_MAXCNT0);
		writel(bus_addr, edmac->regs + M2P_BASE0);
	} else {
		writel(desc->size, edmac->regs + M2P_MAXCNT1);
		writel(bus_addr, edmac->regs + M2P_BASE1);
	}

	edmac->buffer ^= 1;
}

static void m2p_hw_submit(struct ep93xx_dma_chan *edmac)
{
	u32 control = readl(edmac->regs + M2P_CONTROL);

	m2p_fill_desc(edmac);
	control |= M2P_CONTROL_STALLINT;

	if (ep93xx_dma_advance_active(edmac)) {
		m2p_fill_desc(edmac);
		control |= M2P_CONTROL_NFBINT;
	}

	m2p_set_control(edmac, control);
}

static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
	u32 irq_status = readl(edmac->regs + M2P_INTERRUPT);
	u32 control;

	if (irq_status & M2P_INTERRUPT_ERROR) {
		struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);

		/* Clear the error interrupt */
		writel(1, edmac->regs + M2P_INTERRUPT);

		/*
		 * It seems that there is no easy way of reporting errors back
		 * to the client so we just report the error here and continue
		 * as usual.
		 *
		 * Revisit this when there is a mechanism to report back the
		 * errors.
		 */
		dev_err(chan2dev(edmac),
			"DMA transfer failed! Details:\n"
			"\tcookie	: %d\n"
			"\tsrc_addr	: 0x%08x\n"
			"\tdst_addr	: 0x%08x\n"
			"\tsize		: %zu\n",
			desc->txd.cookie, desc->src_addr, desc->dst_addr,
			desc->size);
	}

	switch (irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)) {
	case M2P_INTERRUPT_STALL:
		/* Disable interrupts */
		control = readl(edmac->regs + M2P_CONTROL);
		control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
		m2p_set_control(edmac, control);

		return INTERRUPT_DONE;

	case M2P_INTERRUPT_NFB:
		if (ep93xx_dma_advance_active(edmac))
			m2p_fill_desc(edmac);

		return INTERRUPT_NEXT_BUFFER;
	}

	return INTERRUPT_UNKNOWN;
}

/*
 * M2M DMA implementation
 *
 * For the M2M transfers we don't use NFB at all. This is because it simply
 * doesn't work well with memcpy transfers. When both buffers are submitted,
 * the controller rarely raises an NFB interrupt; instead it reports a DONE
 * interrupt with both buffers already transferred, which means that we were
 * not able to update the next buffer in time.
 *
 * So for now we "simulate" NFB by just submitting buffer after buffer
 * without double buffering.
 */

static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
{
	const struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control = 0;

	if (!data) {
		/* This is a memcpy channel, nothing to configure */
		writel(control, edmac->regs + M2M_CONTROL);
		return 0;
	}

	switch (data->port) {
	case EP93XX_DMA_SSP:
		/*
		 * This was found by experimenting - anything less than 5
		 * causes the channel to perform only a partial transfer which
		 * leads to problems since we don't get the DONE interrupt
		 * then.
		 */
		control = (5 << M2M_CONTROL_PWSC_SHIFT);
		control |= M2M_CONTROL_NO_HDSK;

		if (data->direction == DMA_TO_DEVICE) {
			control |= M2M_CONTROL_DAH;
			control |= M2M_CONTROL_TM_TX;
			control |= M2M_CONTROL_RSS_SSPTX;
		} else {
			control |= M2M_CONTROL_SAH;
			control |= M2M_CONTROL_TM_RX;
			control |= M2M_CONTROL_RSS_SSPRX;
		}
		break;

	case EP93XX_DMA_IDE:
		/*
		 * This IDE part is totally untested. Values below are taken
		 * from the EP93xx User's Guide and might not be correct.
		 */
		control |= M2M_CONTROL_NO_HDSK;
		control |= M2M_CONTROL_RSS_IDE;
		control |= M2M_CONTROL_PW_16;

		if (data->direction == DMA_TO_DEVICE) {
			/* Worst case from the UG */
			control = (3 << M2M_CONTROL_PWSC_SHIFT);
			control |= M2M_CONTROL_DAH;
			control |= M2M_CONTROL_TM_TX;
		} else {
			control = (2 << M2M_CONTROL_PWSC_SHIFT);
			control |= M2M_CONTROL_SAH;
			control |= M2M_CONTROL_TM_RX;
		}
		break;

	default:
		return -EINVAL;
	}

	writel(control, edmac->regs + M2M_CONTROL);
	return 0;
}

static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac)
{
	/* Just disable the channel */
	writel(0, edmac->regs + M2M_CONTROL);
}

static void m2m_fill_desc(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);

	if (edmac->buffer == 0) {
		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0);
		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE0);
		writel(desc->size, edmac->regs + M2M_BCR0);
	} else {
		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE1);
		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE1);
		writel(desc->size, edmac->regs + M2M_BCR1);
	}

	edmac->buffer ^= 1;
}

static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_data *data = edmac->chan.private;
	u32 control = readl(edmac->regs + M2M_CONTROL);

	/*
	 * Since we allow clients to configure PW (peripheral width) we always
	 * clear the PW bits here and then set them according to what is given
	 * in the runtime configuration.
	 */
	control &= ~M2M_CONTROL_PW_MASK;
	control |= edmac->runtime_ctrl;

	m2m_fill_desc(edmac);
	control |= M2M_CONTROL_DONEINT;

	/*
	 * Now we can finally enable the channel. For M2M channels this must
	 * be done _after_ the BCRx registers are programmed.
	 */
	control |= M2M_CONTROL_ENABLE;
	writel(control, edmac->regs + M2M_CONTROL);

	if (!data) {
		/*
		 * For memcpy channels the software trigger must be asserted
		 * in order to start the memcpy operation.
		 */
		control |= M2M_CONTROL_START;
		writel(control, edmac->regs + M2M_CONTROL);
	}
}

static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
	u32 control;

	if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_DONEINT))
		return INTERRUPT_UNKNOWN;

	/* Clear the DONE bit */
	writel(0, edmac->regs + M2M_INTERRUPT);

	/* Disable interrupts and the channel */
	control = readl(edmac->regs + M2M_CONTROL);
	control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_ENABLE);
	writel(control, edmac->regs + M2M_CONTROL);

	/*
	 * Since we only get the DONE interrupt we have to find out ourselves
	 * whether there is still something to process. So we try to advance
	 * the chain and see whether it succeeds.
	 */
	if (ep93xx_dma_advance_active(edmac)) {
		edmac->edma->hw_submit(edmac);
		return INTERRUPT_NEXT_BUFFER;
	}

	return INTERRUPT_DONE;
}

/*
 * DMA engine API implementation
 */

static struct ep93xx_dma_desc *
ep93xx_dma_desc_get(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc, *_desc;
	struct ep93xx_dma_desc *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	list_for_each_entry_safe(desc, _desc, &edmac->free_list, node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del_init(&desc->node);

			/* Re-initialize the descriptor */
			desc->src_addr = 0;
			desc->dst_addr = 0;
			desc->size = 0;
			desc->complete = false;
			desc->txd.cookie = 0;
			desc->txd.callback = NULL;
			desc->txd.callback_param = NULL;

			ret = desc;
			break;
		}
	}
	spin_unlock_irqrestore(&edmac->lock, flags);
	return ret;
}

static void ep93xx_dma_desc_put(struct ep93xx_dma_chan *edmac,
				struct ep93xx_dma_desc *desc)
{
	if (desc) {
		unsigned long flags;

		spin_lock_irqsave(&edmac->lock, flags);
		list_splice_init(&desc->tx_list, &edmac->free_list);
		list_add(&desc->node, &edmac->free_list);
		spin_unlock_irqrestore(&edmac->lock, flags);
	}
}

/**
 * ep93xx_dma_advance_work - start processing the next pending transaction
 * @edmac: channel
 *
 * If we have pending transactions queued and we are currently idling, this
 * function takes the next queued transaction from the @edmac->queue and
 * pushes it to the hardware for execution.
 */
static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *new;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	if (!list_empty(&edmac->active) || list_empty(&edmac->queue)) {
		spin_unlock_irqrestore(&edmac->lock, flags);
		return;
	}

	/* Take the next descriptor from the pending queue */
	new = list_first_entry(&edmac->queue, struct ep93xx_dma_desc, node);
	list_del_init(&new->node);

	ep93xx_dma_set_active(edmac, new);

	/* Push it to the hardware */
	edmac->edma->hw_submit(edmac);
	spin_unlock_irqrestore(&edmac->lock, flags);
}

static void ep93xx_dma_unmap_buffers(struct ep93xx_dma_desc *desc)
{
	struct device *dev = desc->txd.chan->device->dev;

	if (!(desc->txd.flags & DMA_COMPL_SKIP_SRC_UNMAP)) {
		if (desc->txd.flags & DMA_COMPL_SRC_UNMAP_SINGLE)
			dma_unmap_single(dev, desc->src_addr, desc->size,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, desc->src_addr, desc->size,
				       DMA_TO_DEVICE);
	}
	if (!(desc->txd.flags & DMA_COMPL_SKIP_DEST_UNMAP)) {
		if (desc->txd.flags & DMA_COMPL_DEST_UNMAP_SINGLE)
			dma_unmap_single(dev, desc->dst_addr, desc->size,
					 DMA_FROM_DEVICE);
		else
			dma_unmap_page(dev, desc->dst_addr, desc->size,
				       DMA_FROM_DEVICE);
	}
}

static void ep93xx_dma_tasklet(unsigned long data)
{
	struct ep93xx_dma_chan *edmac = (struct ep93xx_dma_chan *)data;
	struct ep93xx_dma_desc *desc, *d;
	dma_async_tx_callback callback;
	void *callback_param;
	LIST_HEAD(list);

	spin_lock_irq(&edmac->lock);
	desc = ep93xx_dma_get_active(edmac);
	if (desc->complete) {
		edmac->last_completed = desc->txd.cookie;
		list_splice_init(&edmac->active, &list);
	}
	spin_unlock_irq(&edmac->lock);

	/* Pick up the next descriptor from the queue */
	ep93xx_dma_advance_work(edmac);

	callback = desc->txd.callback;
	callback_param = desc->txd.callback_param;

	/* Now we can release all the chained descriptors */
	list_for_each_entry_safe(desc, d, &list, node) {
		/*
		 * For the memcpy channels the API requires us to unmap the
		 * buffers unless requested otherwise.
		 */
		if (!edmac->chan.private)
			ep93xx_dma_unmap_buffers(desc);

		ep93xx_dma_desc_put(edmac, desc);
	}

	if (callback)
		callback(callback_param);
}

static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
{
	struct ep93xx_dma_chan *edmac = dev_id;
	irqreturn_t ret = IRQ_HANDLED;

	spin_lock(&edmac->lock);

	switch (edmac->edma->hw_interrupt(edmac)) {
	case INTERRUPT_DONE:
		ep93xx_dma_get_active(edmac)->complete = true;
		tasklet_schedule(&edmac->tasklet);
		break;

	case INTERRUPT_NEXT_BUFFER:
		if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
			tasklet_schedule(&edmac->tasklet);
		break;

	default:
		dev_warn(chan2dev(edmac), "unknown interrupt!\n");
		ret = IRQ_NONE;
		break;
	}

	spin_unlock(&edmac->lock);
	return ret;
}

/**
 * ep93xx_dma_tx_submit - set the prepared descriptor(s) to be executed
 * @tx: descriptor to be executed
 *
 * Function will execute the given descriptor on the hardware or, if the
 * hardware is busy, queue the descriptor to be executed later on. Returns
 * a cookie which can be used to poll the status of the descriptor.
 */
static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(tx->chan);
	struct ep93xx_dma_desc *desc;
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);

	cookie = edmac->chan.cookie;

	if (++cookie < 0)
		cookie = 1;

	desc = container_of(tx, struct ep93xx_dma_desc, txd);

	edmac->chan.cookie = cookie;
	desc->txd.cookie = cookie;

	/*
	 * If nothing is currently being processed, we push this descriptor
	 * directly to the hardware. Otherwise we put the descriptor
	 * to the pending queue.
	 */
	if (list_empty(&edmac->active)) {
		ep93xx_dma_set_active(edmac, desc);
		edmac->edma->hw_submit(edmac);
	} else {
		list_add_tail(&desc->node, &edmac->queue);
	}

	spin_unlock_irqrestore(&edmac->lock, flags);
	return cookie;
}

/**
 * ep93xx_dma_alloc_chan_resources - allocate resources for the channel
 * @chan: channel to allocate resources
 *
 * Function allocates necessary resources for the given DMA channel and
 * returns the number of allocated descriptors for the channel. Negative
 * errno is returned in case of failure.
 */
static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_data *data = chan->private;
	const char *name = dma_chan_name(chan);
	int ret, i;

	/* Sanity check the channel parameters */
	if (!edmac->edma->m2m) {
		if (!data)
			return -EINVAL;
		if (data->port < EP93XX_DMA_I2S1 ||
		    data->port > EP93XX_DMA_IRDA)
			return -EINVAL;
		if (data->direction != ep93xx_dma_chan_direction(chan))
			return -EINVAL;
	} else {
		if (data) {
			switch (data->port) {
			case EP93XX_DMA_SSP:
			case EP93XX_DMA_IDE:
				if (data->direction != DMA_TO_DEVICE &&
				    data->direction != DMA_FROM_DEVICE)
					return -EINVAL;
				break;
			default:
				return -EINVAL;
			}
		}
	}

	if (data && data->name)
		name = data->name;

	ret = clk_enable(edmac->clk);
	if (ret)
		return ret;

	ret = request_irq(edmac->irq, ep93xx_dma_interrupt, 0, name, edmac);
	if (ret)
		goto fail_clk_disable;

	spin_lock_irq(&edmac->lock);
	edmac->last_completed = 1;
	edmac->chan.cookie = 1;
	ret = edmac->edma->hw_setup(edmac);
	spin_unlock_irq(&edmac->lock);

	if (ret)
		goto fail_free_irq;

	for (i = 0; i < DMA_MAX_CHAN_DESCRIPTORS; i++) {
		struct ep93xx_dma_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc) {
			dev_warn(chan2dev(edmac), "not enough descriptors\n");
			break;
		}

		INIT_LIST_HEAD(&desc->tx_list);

		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.tx_submit = ep93xx_dma_tx_submit;

		ep93xx_dma_desc_put(edmac, desc);
	}

	return i;

fail_free_irq:
	free_irq(edmac->irq, edmac);
fail_clk_disable:
	clk_disable(edmac->clk);

	return ret;
}

/**
 * ep93xx_dma_free_chan_resources - release resources for the channel
 * @chan: channel
 *
 * Function releases all the resources allocated for the given channel.
 * The channel must be idle when this is called.
 */
static void ep93xx_dma_free_chan_resources(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *d;
	unsigned long flags;
	LIST_HEAD(list);

	BUG_ON(!list_empty(&edmac->active));
	BUG_ON(!list_empty(&edmac->queue));

	spin_lock_irqsave(&edmac->lock, flags);
	edmac->edma->hw_shutdown(edmac);
	edmac->runtime_addr = 0;
	edmac->runtime_ctrl = 0;
	edmac->buffer = 0;
	list_splice_init(&edmac->free_list, &list);
	spin_unlock_irqrestore(&edmac->lock, flags);

	list_for_each_entry_safe(desc, d, &list, node)
		kfree(desc);

	clk_disable(edmac->clk);
	free_irq(edmac->irq, edmac);
}

/**
 * ep93xx_dma_prep_dma_memcpy - prepare a memcpy DMA operation
 * @chan: channel
 * @dest: destination bus address
 * @src: source bus address
 * @len: size of the transaction
 * @flags: flags for the descriptor
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
			   dma_addr_t src, size_t len, unsigned long flags)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	size_t bytes, offset;

	first = NULL;
	for (offset = 0; offset < len; offset += bytes) {
		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		bytes = min_t(size_t, len - offset, DMA_MAX_CHAN_BYTES);

		desc->src_addr = src + offset;
		desc->dst_addr = dest + offset;
		desc->size = bytes;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;
	first->txd.flags = flags;

	return &first->txd;
fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}

/**
 * ep93xx_dma_prep_slave_sg - prepare a slave DMA operation
 * @chan: channel
 * @sgl: list of buffers to transfer
 * @sg_len: number of entries in @sgl
 * @dir: direction of the DMA transfer
 * @flags: flags for the descriptor
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
			 unsigned int sg_len, enum dma_data_direction dir,
			 unsigned long flags)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	struct scatterlist *sg;
	int i;

	if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
		dev_warn(chan2dev(edmac),
			 "channel was configured with different direction\n");
		return NULL;
	}

	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
		dev_warn(chan2dev(edmac),
			 "channel is already used for cyclic transfers\n");
		return NULL;
	}

	first = NULL;
	for_each_sg(sgl, sg, sg_len, i) {
		size_t len = sg_dma_len(sg);

		if (len > DMA_MAX_CHAN_BYTES) {
			dev_warn(chan2dev(edmac), "too big transfer size %zu\n",
				 len);
			goto fail;
		}

		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		if (dir == DMA_TO_DEVICE) {
			desc->src_addr = sg_dma_address(sg);
			desc->dst_addr = edmac->runtime_addr;
		} else {
			desc->src_addr = edmac->runtime_addr;
			desc->dst_addr = sg_dma_address(sg);
		}
		desc->size = len;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;
	first->txd.flags = flags;

	return &first->txd;

fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}
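
/*
 * Illustrative sketch (not part of the driver): a slave client typically
 * finds a channel with a filter function that stores its
 * &struct ep93xx_dma_data in @chan->private, optionally programs the device
 * side address with dmaengine_slave_config() (honoured only by the M2M
 * channels here; M2P channels take the device side from the port selection),
 * and then submits transfers through the standard dmaengine client API.
 * All my_*() names below are hypothetical placeholders.
 */
#if 0
static bool my_filter(struct dma_chan *chan, void *filter_param)
{
	/* Claim the first channel offered and attach our platform data */
	chan->private = filter_param;
	return true;
}

static int my_start_tx(struct ep93xx_dma_data *data, struct scatterlist *sgl,
		       unsigned int nents, dma_addr_t dev_addr)
{
	struct dma_slave_config config = {
		.direction	= DMA_TO_DEVICE,
		.dst_addr	= dev_addr,
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_2_BYTES,
	};
	struct dma_async_tx_descriptor *txd;
	struct dma_chan *chan;
	dma_cap_mask_t mask;
	int ret;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	chan = dma_request_channel(mask, my_filter, data);
	if (!chan)
		return -ENODEV;

	ret = dmaengine_slave_config(chan, &config);
	if (ret)
		goto out_release;

	/* DMA_CTRL_ACK lets the driver recycle the descriptor on completion */
	txd = chan->device->device_prep_slave_sg(chan, sgl, nents,
						 DMA_TO_DEVICE, DMA_CTRL_ACK);
	if (!txd) {
		ret = -ENOMEM;
		goto out_release;
	}

	dmaengine_submit(txd);
	dma_async_issue_pending(chan);
	return 0;

out_release:
	dma_release_channel(chan);
	return ret;
}
#endif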

/**
 * ep93xx_dma_prep_dma_cyclic - prepare a cyclic DMA operation
 * @chan: channel
 * @dma_addr: DMA mapped address of the buffer
 * @buf_len: length of the buffer (in bytes)
 * @period_len: length of a single period
 * @dir: direction of the operation
 *
 * Prepares a descriptor for cyclic DMA operation. This means that once the
 * descriptor is submitted, we will keep submitting @period_len sized buffers
 * and calling the callback once a period has elapsed. The transfer terminates
 * only when the client calls dmaengine_terminate_all() for this channel.
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
			   size_t buf_len, size_t period_len,
			   enum dma_data_direction dir)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	size_t offset = 0;

	if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
		dev_warn(chan2dev(edmac),
			 "channel was configured with different direction\n");
		return NULL;
	}

	if (test_and_set_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
		dev_warn(chan2dev(edmac),
			 "channel is already used for cyclic transfers\n");
		return NULL;
	}

	if (period_len > DMA_MAX_CHAN_BYTES) {
		dev_warn(chan2dev(edmac), "too big period length %zu\n",
			 period_len);
		return NULL;
	}

	/* Split the buffer into period size chunks */
	first = NULL;
	for (offset = 0; offset < buf_len; offset += period_len) {
		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		if (dir == DMA_TO_DEVICE) {
			desc->src_addr = dma_addr + offset;
			desc->dst_addr = edmac->runtime_addr;
		} else {
			desc->src_addr = edmac->runtime_addr;
			desc->dst_addr = dma_addr + offset;
		}

		desc->size = period_len;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;

	return &first->txd;

fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}
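
/*
 * Illustrative sketch (not part of the driver): cyclic transfers are the
 * typical audio use case. The callback set below is copied to every chained
 * descriptor by ep93xx_dma_set_active(), so it fires once per elapsed period
 * until the client stops the channel with dmaengine_terminate_all().
 * my_start_cyclic() is a hypothetical name.
 */
#if 0
static int my_start_cyclic(struct dma_chan *chan, dma_addr_t buf,
			   size_t buf_len, size_t period_len,
			   dma_async_tx_callback period_cb, void *cb_data)
{
	struct dma_async_tx_descriptor *txd;

	txd = chan->device->device_prep_dma_cyclic(chan, buf, buf_len,
						   period_len, DMA_TO_DEVICE);
	if (!txd)
		return -ENOMEM;

	/* Set the per-period callback before submitting */
	txd->callback = period_cb;
	txd->callback_param = cb_data;

	dmaengine_submit(txd);
	dma_async_issue_pending(chan);
	return 0;
}
#endif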

/**
 * ep93xx_dma_terminate_all - terminate all transactions
 * @edmac: channel
 *
 * Stops all DMA transactions. All descriptors are put back to the
 * @edmac->free_list and callbacks are _not_ called.
 */
static int ep93xx_dma_terminate_all(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc, *_d;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&edmac->lock, flags);
	/* First we disable and flush the DMA channel */
	edmac->edma->hw_shutdown(edmac);
	clear_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags);
	list_splice_init(&edmac->active, &list);
	list_splice_init(&edmac->queue, &list);
	/*
	 * We then re-enable the channel. This way we can continue submitting
	 * the descriptors by just calling ->hw_submit() again.
	 */
	edmac->edma->hw_setup(edmac);
	spin_unlock_irqrestore(&edmac->lock, flags);

	list_for_each_entry_safe(desc, _d, &list, node)
		ep93xx_dma_desc_put(edmac, desc);

	return 0;
}

static int ep93xx_dma_slave_config(struct ep93xx_dma_chan *edmac,
				   struct dma_slave_config *config)
{
	enum dma_slave_buswidth width;
	unsigned long flags;
	u32 addr, ctrl;

	if (!edmac->edma->m2m)
		return -EINVAL;

	switch (config->direction) {
	case DMA_FROM_DEVICE:
		width = config->src_addr_width;
		addr = config->src_addr;
		break;

	case DMA_TO_DEVICE:
		width = config->dst_addr_width;
		addr = config->dst_addr;
		break;

	default:
		return -EINVAL;
	}

	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		ctrl = 0;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		ctrl = M2M_CONTROL_PW_16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		ctrl = M2M_CONTROL_PW_32;
		break;
	default:
		return -EINVAL;
	}

	spin_lock_irqsave(&edmac->lock, flags);
	edmac->runtime_addr = addr;
	edmac->runtime_ctrl = ctrl;
	spin_unlock_irqrestore(&edmac->lock, flags);

	return 0;
}

/**
 * ep93xx_dma_control - manipulate all pending operations on a channel
 * @chan: channel
 * @cmd: control command to perform
 * @arg: optional argument
 *
 * Controls the channel. Function returns %0 in case of success or negative
 * error in case of failure.
 */
static int ep93xx_dma_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
			      unsigned long arg)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct dma_slave_config *config;

	switch (cmd) {
	case DMA_TERMINATE_ALL:
		return ep93xx_dma_terminate_all(edmac);

	case DMA_SLAVE_CONFIG:
		config = (struct dma_slave_config *)arg;
		return ep93xx_dma_slave_config(edmac, config);

	default:
		break;
	}

	return -ENOSYS;
}

/**
 * ep93xx_dma_tx_status - check if a transaction is completed
 * @chan: channel
 * @cookie: transaction specific cookie
 * @state: state of the transaction is stored here if given
 *
 * This function can be used to query the state of a given transaction.
 */
static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *state)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	dma_cookie_t last_used, last_completed;
	enum dma_status ret;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	last_used = chan->cookie;
	last_completed = edmac->last_completed;
	spin_unlock_irqrestore(&edmac->lock, flags);

	ret = dma_async_is_complete(cookie, last_completed, last_used);
	dma_set_tx_state(state, last_completed, last_used, 0);

	return ret;
}

/**
 * ep93xx_dma_issue_pending - push pending transactions to the hardware
 * @chan: channel
 *
 * When this function is called, all pending transactions are pushed to the
 * hardware and executed.
 */
static void ep93xx_dma_issue_pending(struct dma_chan *chan)
{
	ep93xx_dma_advance_work(to_ep93xx_dma_chan(chan));
}

static int __init ep93xx_dma_probe(struct platform_device *pdev)
{
	struct ep93xx_dma_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct ep93xx_dma_engine *edma;
	struct dma_device *dma_dev;
	size_t edma_size;
	int ret, i;

	edma_size = pdata->num_channels * sizeof(struct ep93xx_dma_chan);
	edma = kzalloc(sizeof(*edma) + edma_size, GFP_KERNEL);
	if (!edma)
		return -ENOMEM;

	dma_dev = &edma->dma_dev;
	edma->m2m = platform_get_device_id(pdev)->driver_data;
	edma->num_channels = pdata->num_channels;

	INIT_LIST_HEAD(&dma_dev->channels);
	for (i = 0; i < pdata->num_channels; i++) {
		const struct ep93xx_dma_chan_data *cdata = &pdata->channels[i];
		struct ep93xx_dma_chan *edmac = &edma->channels[i];

		edmac->chan.device = dma_dev;
		edmac->regs = cdata->base;
		edmac->irq = cdata->irq;
		edmac->edma = edma;

		edmac->clk = clk_get(NULL, cdata->name);
		if (IS_ERR(edmac->clk)) {
			dev_warn(&pdev->dev, "failed to get clock for %s\n",
				 cdata->name);
			continue;
		}

		spin_lock_init(&edmac->lock);
		INIT_LIST_HEAD(&edmac->active);
		INIT_LIST_HEAD(&edmac->queue);
		INIT_LIST_HEAD(&edmac->free_list);
		tasklet_init(&edmac->tasklet, ep93xx_dma_tasklet,
			     (unsigned long)edmac);

		list_add_tail(&edmac->chan.device_node,
			      &dma_dev->channels);
	}

	dma_cap_zero(dma_dev->cap_mask);
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);

	dma_dev->dev = &pdev->dev;
	dma_dev->device_alloc_chan_resources = ep93xx_dma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources;
	dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
	dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
	dma_dev->device_control = ep93xx_dma_control;
	dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
	dma_dev->device_tx_status = ep93xx_dma_tx_status;

	dma_set_max_seg_size(dma_dev->dev, DMA_MAX_CHAN_BYTES);

	if (edma->m2m) {
		dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
		dma_dev->device_prep_dma_memcpy = ep93xx_dma_prep_dma_memcpy;

		edma->hw_setup = m2m_hw_setup;
		edma->hw_shutdown = m2m_hw_shutdown;
		edma->hw_submit = m2m_hw_submit;
		edma->hw_interrupt = m2m_hw_interrupt;
	} else {
		dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);

		edma->hw_setup = m2p_hw_setup;
		edma->hw_shutdown = m2p_hw_shutdown;
		edma->hw_submit = m2p_hw_submit;
		edma->hw_interrupt = m2p_hw_interrupt;
	}

	ret = dma_async_device_register(dma_dev);
	if (unlikely(ret)) {
		for (i = 0; i < edma->num_channels; i++) {
			struct ep93xx_dma_chan *edmac = &edma->channels[i];
			if (!IS_ERR_OR_NULL(edmac->clk))
				clk_put(edmac->clk);
		}
		kfree(edma);
	} else {
		dev_info(dma_dev->dev, "EP93xx M2%s DMA ready\n",
			 edma->m2m ? "M" : "P");
	}

	return ret;
}

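/*
 * driver_data in the table below becomes edma->m2m in ep93xx_dma_probe():
 * 0 selects the M2P and 1 the M2M hw_*() implementations.
 */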
static struct platform_device_id ep93xx_dma_driver_ids[] = {
	{ "ep93xx-dma-m2p", 0 },
	{ "ep93xx-dma-m2m", 1 },
	{ },
};

static struct platform_driver ep93xx_dma_driver = {
	.driver		= {
		.name	= "ep93xx-dma",
	},
	.id_table	= ep93xx_dma_driver_ids,
};

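/*
 * Register early (subsys_initcall) so that the DMA channels are available
 * by the time the client drivers start probing.
 */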
static int __init ep93xx_dma_module_init(void)
{
	return platform_driver_probe(&ep93xx_dma_driver, ep93xx_dma_probe);
}
subsys_initcall(ep93xx_dma_module_init);

MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
MODULE_DESCRIPTION("EP93xx DMA driver");
MODULE_LICENSE("GPL");