// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Driver for the Cirrus Logic EP93xx DMA Controller
 *
 * Copyright (C) 2011 Mika Westerberg
 *
 * DMA M2P implementation is based on the original
 * arch/arm/mach-ep93xx/dma-m2p.c which has the following copyrights:
 *
 *   Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
 *   Copyright (C) 2006 Applied Data Systems
 *   Copyright (C) 2009 Ryan Mallon <rmallon@gmail.com>
 *
 * This driver is based on the dw_dmac and amba-pl08x drivers.
 */

#include <linux/clk.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/of_dma.h>
#include <linux/overflow.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#include "dmaengine.h"

/* M2P registers */
#define M2P_CONTROL			0x0000
#define M2P_CONTROL_STALLINT		BIT(0)
#define M2P_CONTROL_NFBINT		BIT(1)
#define M2P_CONTROL_CH_ERROR_INT	BIT(3)
#define M2P_CONTROL_ENABLE		BIT(4)
#define M2P_CONTROL_ICE			BIT(6)

#define M2P_INTERRUPT			0x0004
#define M2P_INTERRUPT_STALL		BIT(0)
#define M2P_INTERRUPT_NFB		BIT(1)
#define M2P_INTERRUPT_ERROR		BIT(3)

#define M2P_PPALLOC			0x0008
#define M2P_STATUS			0x000c

#define M2P_MAXCNT0			0x0020
#define M2P_BASE0			0x0024
#define M2P_MAXCNT1			0x0030
#define M2P_BASE1			0x0034

#define M2P_STATE_IDLE			0
#define M2P_STATE_STALL			1
#define M2P_STATE_ON			2
#define M2P_STATE_NEXT			3

/* M2M registers */
#define M2M_CONTROL			0x0000
#define M2M_CONTROL_DONEINT		BIT(2)
#define M2M_CONTROL_ENABLE		BIT(3)
#define M2M_CONTROL_START		BIT(4)
#define M2M_CONTROL_DAH			BIT(11)
#define M2M_CONTROL_SAH			BIT(12)
#define M2M_CONTROL_PW_SHIFT		9
#define M2M_CONTROL_PW_8		(0 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_16		(1 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_32		(2 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_PW_MASK		(3 << M2M_CONTROL_PW_SHIFT)
#define M2M_CONTROL_TM_SHIFT		13
#define M2M_CONTROL_TM_TX		(1 << M2M_CONTROL_TM_SHIFT)
#define M2M_CONTROL_TM_RX		(2 << M2M_CONTROL_TM_SHIFT)
#define M2M_CONTROL_NFBINT		BIT(21)
#define M2M_CONTROL_RSS_SHIFT		22
#define M2M_CONTROL_RSS_SSPRX		(1 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_RSS_SSPTX		(2 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_RSS_IDE		(3 << M2M_CONTROL_RSS_SHIFT)
#define M2M_CONTROL_NO_HDSK		BIT(24)
#define M2M_CONTROL_PWSC_SHIFT		25

#define M2M_INTERRUPT			0x0004
#define M2M_INTERRUPT_MASK		6

#define M2M_STATUS			0x000c
#define M2M_STATUS_CTL_SHIFT		1
#define M2M_STATUS_CTL_IDLE		(0 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_STALL		(1 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_MEMRD		(2 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_MEMWR		(3 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_BWCWAIT		(4 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_CTL_MASK		(7 << M2M_STATUS_CTL_SHIFT)
#define M2M_STATUS_BUF_SHIFT		4
#define M2M_STATUS_BUF_NO		(0 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_BUF_ON		(1 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_BUF_NEXT		(2 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_BUF_MASK		(3 << M2M_STATUS_BUF_SHIFT)
#define M2M_STATUS_DONE			BIT(6)

#define M2M_BCR0			0x0010
#define M2M_BCR1			0x0014
#define M2M_SAR_BASE0			0x0018
#define M2M_SAR_BASE1			0x001c
#define M2M_DAR_BASE0			0x002c
#define M2M_DAR_BASE1			0x0030

#define DMA_MAX_CHAN_BYTES		0xffff
#define DMA_MAX_CHAN_DESCRIPTORS	32

/*
 * M2P channels.
 *
 * Note that these values are also directly used for setting the PPALLOC
 * register.
 */
#define EP93XX_DMA_I2S1			0
#define EP93XX_DMA_I2S2			1
#define EP93XX_DMA_AAC1			2
#define EP93XX_DMA_AAC2			3
#define EP93XX_DMA_AAC3			4
#define EP93XX_DMA_I2S3			5
#define EP93XX_DMA_UART1		6
#define EP93XX_DMA_UART2		7
#define EP93XX_DMA_UART3		8
#define EP93XX_DMA_IRDA			9
/* M2M channels */
#define EP93XX_DMA_SSP			10
#define EP93XX_DMA_IDE			11

enum ep93xx_dma_type {
	M2P_DMA,
	M2M_DMA,
};

struct ep93xx_dma_engine;
static int ep93xx_dma_slave_config_write(struct dma_chan *chan,
					 enum dma_transfer_direction dir,
					 struct dma_slave_config *config);

/**
 * struct ep93xx_dma_desc - EP93xx specific transaction descriptor
 * @src_addr: source address of the transaction
 * @dst_addr: destination address of the transaction
 * @size: size of the transaction (in bytes)
 * @complete: this descriptor is completed
 * @txd: dmaengine API descriptor
 * @tx_list: list of linked descriptors
 * @node: link used for putting this into a channel queue
 */
struct ep93xx_dma_desc {
	u32				src_addr;
	u32				dst_addr;
	size_t				size;
	bool				complete;
	struct dma_async_tx_descriptor	txd;
	struct list_head		tx_list;
	struct list_head		node;
};

struct ep93xx_dma_chan_cfg {
	u8				port;
	enum dma_transfer_direction	dir;
};

/**
 * struct ep93xx_dma_chan - an EP93xx DMA M2P/M2M channel
 * @chan: dmaengine API channel
 * @edma: pointer to the engine device
 * @regs: memory mapped registers
 * @dma_cfg: channel number, direction
 * @irq: interrupt number of the channel
 * @clk: clock used by this channel
 * @tasklet: channel specific tasklet used for callbacks
 * @lock: lock protecting the fields following
 * @flags: flags for the channel
 * @buffer: which buffer to use next (0/1)
 * @active: flattened chain of descriptors currently being processed
 * @queue: pending descriptors which are handled next
 * @free_list: list of free descriptors which can be used
 * @runtime_addr: physical address currently used as dest/src (M2M only). This
 *                is set via .device_config before slave operation is
 *                prepared
 * @runtime_ctrl: M2M runtime values for the control register.
 * @slave_config: slave configuration
 *
 * As the EP93xx DMA controller doesn't support real chained DMA descriptors,
 * we use a slightly different scheme here: @active points to the head of a
 * flattened DMA descriptor chain.
 *
 * @queue holds pending transactions. These are linked through the first
 * descriptor in the chain. When a descriptor is moved to the @active queue,
 * the first and chained descriptors are flattened into a single list.
 */
struct ep93xx_dma_chan {
	struct dma_chan			chan;
	const struct ep93xx_dma_engine	*edma;
	void __iomem			*regs;
	struct ep93xx_dma_chan_cfg	dma_cfg;
	int				irq;
	struct clk			*clk;
	struct tasklet_struct		tasklet;
	/* protects the fields following */
	spinlock_t			lock;
	unsigned long			flags;
/* Channel is configured for cyclic transfers */
#define EP93XX_DMA_IS_CYCLIC		0

	int				buffer;
	struct list_head		active;
	struct list_head		queue;
	struct list_head		free_list;
	u32				runtime_addr;
	u32				runtime_ctrl;
	struct dma_slave_config		slave_config;
};
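
/*
 * For illustration, a three-part scatter-gather transaction sits in @queue
 * as a single node: the first descriptor d0, with d1 and d2 linked on
 * d0->tx_list. When ep93xx_dma_set_active() moves it to @active, the chain
 * is flattened:
 *
 *	queue:  [d0 (tx_list: d1, d2)]
 *	active: [d0, d1, d2]
 *
 * d0 (the only descriptor with a non-zero txd.cookie) marks the start of
 * the chain, which is how ep93xx_dma_advance_active() detects wrap-around.
 */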

/**
 * struct ep93xx_dma_engine - the EP93xx DMA engine instance
 * @dma_dev: holds the dmaengine device
 * @m2m: is this an M2M or M2P device
 * @hw_setup: method which sets the channel up for operation
 * @hw_synchronize: synchronizes DMA channel termination to current context
 * @hw_shutdown: shuts the channel down and flushes whatever is left
 * @hw_submit: pushes active descriptor(s) to the hardware
 * @hw_interrupt: handle the interrupt
 * @num_channels: number of channels for this instance
 * @channels: array of channels
 *
 * There is one instance of this struct for the M2P channels and one for the
 * M2M channels. hw_xxx() methods are used to perform operations which are
 * different on M2M and M2P channels. These methods are called with channel
 * lock held and interrupts disabled so they cannot sleep.
 */
struct ep93xx_dma_engine {
	struct dma_device	dma_dev;
	bool			m2m;
	int			(*hw_setup)(struct ep93xx_dma_chan *);
	void			(*hw_synchronize)(struct ep93xx_dma_chan *);
	void			(*hw_shutdown)(struct ep93xx_dma_chan *);
	void			(*hw_submit)(struct ep93xx_dma_chan *);
	int			(*hw_interrupt)(struct ep93xx_dma_chan *);
#define INTERRUPT_UNKNOWN	0
#define INTERRUPT_DONE		1
#define INTERRUPT_NEXT_BUFFER	2

	size_t			num_channels;
	struct ep93xx_dma_chan	channels[] __counted_by(num_channels);
};

struct ep93xx_edma_data {
	u32	id;
	size_t	num_channels;
};

static inline struct device *chan2dev(struct ep93xx_dma_chan *edmac)
{
	return &edmac->chan.dev->device;
}

static struct ep93xx_dma_chan *to_ep93xx_dma_chan(struct dma_chan *chan)
{
	return container_of(chan, struct ep93xx_dma_chan, chan);
}

static inline bool ep93xx_dma_chan_is_m2p(struct dma_chan *chan)
{
	if (device_is_compatible(chan->device->dev, "cirrus,ep9301-dma-m2p"))
		return true;

	return !strcmp(dev_name(chan->device->dev), "ep93xx-dma-m2p");
}

/*
 * ep93xx_dma_chan_direction - returns the direction the channel can be used in
 *
 * This function can be used in filter functions to find out whether the
 * channel supports the given DMA direction. Only M2P channels have such a
 * limitation; for M2M channels the direction is configurable.
 */
static inline enum dma_transfer_direction
ep93xx_dma_chan_direction(struct dma_chan *chan)
{
	if (!ep93xx_dma_chan_is_m2p(chan))
		return DMA_TRANS_NONE;

	/* even channels are for TX, odd for RX */
	return (chan->chan_id % 2 == 0) ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
}

/**
 * ep93xx_dma_set_active - set new active descriptor chain
 * @edmac: channel
 * @desc: head of the new active descriptor chain
 *
 * Sets @desc to be the head of the new active descriptor chain. This is the
 * chain which is processed next. The active list must be empty before calling
 * this function.
 *
 * Called with @edmac->lock held and interrupts disabled.
 */
static void ep93xx_dma_set_active(struct ep93xx_dma_chan *edmac,
				  struct ep93xx_dma_desc *desc)
{
	BUG_ON(!list_empty(&edmac->active));

	list_add_tail(&desc->node, &edmac->active);

	/* Flatten the @desc->tx_list chain into @edmac->active list */
	while (!list_empty(&desc->tx_list)) {
		struct ep93xx_dma_desc *d = list_first_entry(&desc->tx_list,
			struct ep93xx_dma_desc, node);

		/*
		 * We copy the callback parameters from the first descriptor
		 * to all the chained descriptors. This way we can call the
		 * callback without having to find out the first descriptor in
		 * the chain. Useful for cyclic transfers.
		 */
		d->txd.callback = desc->txd.callback;
		d->txd.callback_param = desc->txd.callback_param;

		list_move_tail(&d->node, &edmac->active);
	}
}

/* Called with @edmac->lock held and interrupts disabled */
static struct ep93xx_dma_desc *
ep93xx_dma_get_active(struct ep93xx_dma_chan *edmac)
{
	return list_first_entry_or_null(&edmac->active,
					struct ep93xx_dma_desc, node);
}

/**
 * ep93xx_dma_advance_active - advances to the next active descriptor
 * @edmac: channel
 *
 * Advances the active descriptor to the next one in @edmac->active and
 * returns %true if we still have descriptors in the chain to process.
 * Otherwise returns %false.
 *
 * When the channel is in cyclic mode, this always returns %true.
 *
 * Called with @edmac->lock held and interrupts disabled.
 */
static bool ep93xx_dma_advance_active(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;

	list_rotate_left(&edmac->active);

	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
		return true;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc)
		return false;

	/*
	 * If txd.cookie is set it means that we are back in the first
	 * descriptor in the chain and hence done with it.
	 */
	return !desc->txd.cookie;
}

/*
 * M2P DMA implementation
 */

static void m2p_set_control(struct ep93xx_dma_chan *edmac, u32 control)
{
	writel(control, edmac->regs + M2P_CONTROL);
	/*
	 * The EP93xx User's Guide states that we must perform a dummy read
	 * after a write to the control register.
	 */
	readl(edmac->regs + M2P_CONTROL);
}

static int m2p_hw_setup(struct ep93xx_dma_chan *edmac)
{
	u32 control;

	writel(edmac->dma_cfg.port & 0xf, edmac->regs + M2P_PPALLOC);

	control = M2P_CONTROL_CH_ERROR_INT | M2P_CONTROL_ICE
		| M2P_CONTROL_ENABLE;
	m2p_set_control(edmac, control);

	edmac->buffer = 0;

	return 0;
}

static inline u32 m2p_channel_state(struct ep93xx_dma_chan *edmac)
{
	return (readl(edmac->regs + M2P_STATUS) >> 4) & 0x3;
}

static void m2p_hw_synchronize(struct ep93xx_dma_chan *edmac)
{
	unsigned long flags;
	u32 control;

	spin_lock_irqsave(&edmac->lock, flags);
	control = readl(edmac->regs + M2P_CONTROL);
	control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
	m2p_set_control(edmac, control);
	spin_unlock_irqrestore(&edmac->lock, flags);

	while (m2p_channel_state(edmac) >= M2P_STATE_ON)
		schedule();
}

static void m2p_hw_shutdown(struct ep93xx_dma_chan *edmac)
{
	m2p_set_control(edmac, 0);

	while (m2p_channel_state(edmac) != M2P_STATE_IDLE)
		dev_warn(chan2dev(edmac), "M2P: Not yet IDLE\n");
}

static void m2p_fill_desc(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;
	u32 bus_addr;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac), "M2P: empty descriptor list\n");
		return;
	}

	if (ep93xx_dma_chan_direction(&edmac->chan) == DMA_MEM_TO_DEV)
		bus_addr = desc->src_addr;
	else
		bus_addr = desc->dst_addr;

	if (edmac->buffer == 0) {
		writel(desc->size, edmac->regs + M2P_MAXCNT0);
		writel(bus_addr, edmac->regs + M2P_BASE0);
	} else {
		writel(desc->size, edmac->regs + M2P_MAXCNT1);
		writel(bus_addr, edmac->regs + M2P_BASE1);
	}

	edmac->buffer ^= 1;
}

static void m2p_hw_submit(struct ep93xx_dma_chan *edmac)
{
	u32 control = readl(edmac->regs + M2P_CONTROL);

	m2p_fill_desc(edmac);
	control |= M2P_CONTROL_STALLINT;

	if (ep93xx_dma_advance_active(edmac)) {
		m2p_fill_desc(edmac);
		control |= M2P_CONTROL_NFBINT;
	}

	m2p_set_control(edmac, control);
}

static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
	u32 irq_status = readl(edmac->regs + M2P_INTERRUPT);
	u32 control;

	if (irq_status & M2P_INTERRUPT_ERROR) {
		struct ep93xx_dma_desc *desc = ep93xx_dma_get_active(edmac);

		/* Clear the error interrupt */
		writel(1, edmac->regs + M2P_INTERRUPT);

		/*
		 * It seems that there is no easy way of reporting errors back
		 * to the client so we just report the error here and continue
		 * as usual.
		 *
		 * Revisit this when there is a mechanism to report back the
		 * errors.
		 */
		dev_err(chan2dev(edmac),
			"DMA transfer failed! Details:\n"
			"\tcookie	: %d\n"
			"\tsrc_addr	: 0x%08x\n"
			"\tdst_addr	: 0x%08x\n"
			"\tsize		: %zu\n",
			desc->txd.cookie, desc->src_addr, desc->dst_addr,
			desc->size);
	}

	/*
	 * Even the latest E2 silicon revision sometimes asserts the STALL
	 * interrupt instead of NFB. Therefore we treat them equally, based on
	 * the amount of data we still have to transfer.
	 */
	if (!(irq_status & (M2P_INTERRUPT_STALL | M2P_INTERRUPT_NFB)))
		return INTERRUPT_UNKNOWN;

	if (ep93xx_dma_advance_active(edmac)) {
		m2p_fill_desc(edmac);
		return INTERRUPT_NEXT_BUFFER;
	}

	/* Disable interrupts */
	control = readl(edmac->regs + M2P_CONTROL);
	control &= ~(M2P_CONTROL_STALLINT | M2P_CONTROL_NFBINT);
	m2p_set_control(edmac, control);

	return INTERRUPT_DONE;
}

/*
 * M2M DMA implementation
 */

static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
{
	u32 control = 0;

	if (edmac->dma_cfg.dir == DMA_MEM_TO_MEM) {
		/* This is a memcpy channel, nothing to configure */
		writel(control, edmac->regs + M2M_CONTROL);
		return 0;
	}

	switch (edmac->dma_cfg.port) {
	case EP93XX_DMA_SSP:
		/*
		 * This was found by experimenting: anything less than 5
		 * causes the channel to perform only a partial transfer,
		 * which leads to problems since we then don't get the DONE
		 * interrupt.
		 */
		control = (5 << M2M_CONTROL_PWSC_SHIFT);
		control |= M2M_CONTROL_NO_HDSK;

		if (edmac->dma_cfg.dir == DMA_MEM_TO_DEV) {
			control |= M2M_CONTROL_DAH;
			control |= M2M_CONTROL_TM_TX;
			control |= M2M_CONTROL_RSS_SSPTX;
		} else {
			control |= M2M_CONTROL_SAH;
			control |= M2M_CONTROL_TM_RX;
			control |= M2M_CONTROL_RSS_SSPRX;
		}
		break;

	case EP93XX_DMA_IDE:
		/*
		 * This IDE part is totally untested. The values below are
		 * taken from the EP93xx User's Guide and might not be correct.
		 */
		if (edmac->dma_cfg.dir == DMA_MEM_TO_DEV) {
			/* Worst case from the UG */
			control = (3 << M2M_CONTROL_PWSC_SHIFT);
			control |= M2M_CONTROL_DAH;
			control |= M2M_CONTROL_TM_TX;
		} else {
			control = (2 << M2M_CONTROL_PWSC_SHIFT);
			control |= M2M_CONTROL_SAH;
			control |= M2M_CONTROL_TM_RX;
		}

		control |= M2M_CONTROL_NO_HDSK;
		control |= M2M_CONTROL_RSS_IDE;
		control |= M2M_CONTROL_PW_16;
		break;

	default:
		return -EINVAL;
	}

	writel(control, edmac->regs + M2M_CONTROL);
	return 0;
}

static void m2m_hw_shutdown(struct ep93xx_dma_chan *edmac)
{
	/* Just disable the channel */
	writel(0, edmac->regs + M2M_CONTROL);
}

static void m2m_fill_desc(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc;

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac), "M2M: empty descriptor list\n");
		return;
	}

	if (edmac->buffer == 0) {
		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE0);
		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE0);
		writel(desc->size, edmac->regs + M2M_BCR0);
	} else {
		writel(desc->src_addr, edmac->regs + M2M_SAR_BASE1);
		writel(desc->dst_addr, edmac->regs + M2M_DAR_BASE1);
		writel(desc->size, edmac->regs + M2M_BCR1);
	}

	edmac->buffer ^= 1;
}

static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
{
	u32 control = readl(edmac->regs + M2M_CONTROL);

	/*
	 * Since we allow clients to configure PW (peripheral width) we always
	 * clear the PW bits here and then set them according to what is given
	 * in the runtime configuration.
	 */
	control &= ~M2M_CONTROL_PW_MASK;
	control |= edmac->runtime_ctrl;

	m2m_fill_desc(edmac);
	control |= M2M_CONTROL_DONEINT;

	if (ep93xx_dma_advance_active(edmac)) {
		m2m_fill_desc(edmac);
		control |= M2M_CONTROL_NFBINT;
	}

	/*
	 * Now we can finally enable the channel. For M2M channels this must
	 * be done _after_ the BCRx registers are programmed.
	 */
	control |= M2M_CONTROL_ENABLE;
	writel(control, edmac->regs + M2M_CONTROL);

	if (edmac->dma_cfg.dir == DMA_MEM_TO_MEM) {
		/*
		 * For memcpy channels the software trigger must be asserted
		 * in order to start the memcpy operation.
		 */
		control |= M2M_CONTROL_START;
		writel(control, edmac->regs + M2M_CONTROL);
	}
}

/*
 * According to the EP93xx User's Guide, we should receive the DONE interrupt
 * when all M2M DMA controller transactions complete normally. This is not
 * always the case - sometimes the EP93xx M2M DMA asserts DONE when the DMA
 * channel is still running (channel Buffer FSM in DMA_BUF_ON state, and
 * channel Control FSM in DMA_MEM_RD state, observed at least in IDE-DMA
 * operation). In effect, disabling the channel when only the DONE bit is set
 * could stop the currently running DMA transfer. To avoid this, we use the
 * Buffer FSM and Control FSM to check the current state of the DMA channel.
 */
static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac)
{
	u32 status = readl(edmac->regs + M2M_STATUS);
	u32 ctl_fsm = status & M2M_STATUS_CTL_MASK;
	u32 buf_fsm = status & M2M_STATUS_BUF_MASK;
	bool done = status & M2M_STATUS_DONE;
	bool last_done;
	u32 control;
	struct ep93xx_dma_desc *desc;

	/* Accept only DONE and NFB interrupts */
	if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_MASK))
		return INTERRUPT_UNKNOWN;

	if (done) {
		/* Clear the DONE bit */
		writel(0, edmac->regs + M2M_INTERRUPT);
	}

	/*
	 * Check whether we are done with descriptors or not. This, together
	 * with DMA channel state, determines action to take in interrupt.
	 */
	desc = ep93xx_dma_get_active(edmac);
	last_done = !desc || desc->txd.cookie;

	/*
	 * Use M2M DMA Buffer FSM and Control FSM to check current state of
	 * DMA channel. Using DONE and NFB bits from channel status register
	 * or bits from channel interrupt register is not reliable.
	 */
	if (!last_done &&
	    (buf_fsm == M2M_STATUS_BUF_NO ||
	     buf_fsm == M2M_STATUS_BUF_ON)) {
		/*
		 * Two buffers are ready for update when Buffer FSM is in
		 * DMA_NO_BUF state. Only one buffer can be prepared without
		 * disabling the channel or polling the DONE bit.
		 * To simplify things, always prepare only one buffer.
		 */
		if (ep93xx_dma_advance_active(edmac)) {
			m2m_fill_desc(edmac);
			if (done && edmac->dma_cfg.dir == DMA_MEM_TO_MEM) {
				/* Software trigger for memcpy channel */
				control = readl(edmac->regs + M2M_CONTROL);
				control |= M2M_CONTROL_START;
				writel(control, edmac->regs + M2M_CONTROL);
			}
			return INTERRUPT_NEXT_BUFFER;
		} else {
			last_done = true;
		}
	}

	/*
	 * Disable the channel only when Buffer FSM is in DMA_NO_BUF state
	 * and Control FSM is in DMA_STALL state.
	 */
	if (last_done &&
	    buf_fsm == M2M_STATUS_BUF_NO &&
	    ctl_fsm == M2M_STATUS_CTL_STALL) {
		/* Disable interrupts and the channel */
		control = readl(edmac->regs + M2M_CONTROL);
		control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_NFBINT
			    | M2M_CONTROL_ENABLE);
		writel(control, edmac->regs + M2M_CONTROL);
		return INTERRUPT_DONE;
	}

	/*
	 * Nothing to do this time.
	 */
	return INTERRUPT_NEXT_BUFFER;
}

/*
 * DMA engine API implementation
 */

static struct ep93xx_dma_desc *
ep93xx_dma_desc_get(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *desc, *_desc;
	struct ep93xx_dma_desc *ret = NULL;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	list_for_each_entry_safe(desc, _desc, &edmac->free_list, node) {
		if (async_tx_test_ack(&desc->txd)) {
			list_del_init(&desc->node);

			/* Re-initialize the descriptor */
			desc->src_addr = 0;
			desc->dst_addr = 0;
			desc->size = 0;
			desc->complete = false;
			desc->txd.cookie = 0;
			desc->txd.callback = NULL;
			desc->txd.callback_param = NULL;

			ret = desc;
			break;
		}
	}
	spin_unlock_irqrestore(&edmac->lock, flags);
	return ret;
}

static void ep93xx_dma_desc_put(struct ep93xx_dma_chan *edmac,
				struct ep93xx_dma_desc *desc)
{
	if (desc) {
		unsigned long flags;

		spin_lock_irqsave(&edmac->lock, flags);
		list_splice_init(&desc->tx_list, &edmac->free_list);
		list_add(&desc->node, &edmac->free_list);
		spin_unlock_irqrestore(&edmac->lock, flags);
	}
}

/**
 * ep93xx_dma_advance_work - start processing the next pending transaction
 * @edmac: channel
 *
 * If we have pending transactions queued and we are currently idling, this
 * function takes the next queued transaction from the @edmac->queue and
 * pushes it to the hardware for execution.
 */
static void ep93xx_dma_advance_work(struct ep93xx_dma_chan *edmac)
{
	struct ep93xx_dma_desc *new;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	if (!list_empty(&edmac->active) || list_empty(&edmac->queue)) {
		spin_unlock_irqrestore(&edmac->lock, flags);
		return;
	}

	/* Take the next descriptor from the pending queue */
	new = list_first_entry(&edmac->queue, struct ep93xx_dma_desc, node);
	list_del_init(&new->node);

	ep93xx_dma_set_active(edmac, new);

	/* Push it to the hardware */
	edmac->edma->hw_submit(edmac);
	spin_unlock_irqrestore(&edmac->lock, flags);
}

static void ep93xx_dma_tasklet(struct tasklet_struct *t)
{
	struct ep93xx_dma_chan *edmac = from_tasklet(edmac, t, tasklet);
	struct ep93xx_dma_desc *desc, *d;
	struct dmaengine_desc_callback cb;
	LIST_HEAD(list);

	memset(&cb, 0, sizeof(cb));
	spin_lock_irq(&edmac->lock);
	/*
	 * If dmaengine_terminate_all() was called before we got to run, the
	 * active list has become empty. If that happens we aren't supposed to
	 * do anything more than call ep93xx_dma_advance_work().
	 */
	desc = ep93xx_dma_get_active(edmac);
	if (desc) {
		if (desc->complete) {
			/* mark descriptor complete for non cyclic case only */
			if (!test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
				dma_cookie_complete(&desc->txd);
			list_splice_init(&edmac->active, &list);
		}
		dmaengine_desc_get_callback(&desc->txd, &cb);
	}
	spin_unlock_irq(&edmac->lock);

	/* Pick up the next descriptor from the queue */
	ep93xx_dma_advance_work(edmac);

	/* Now we can release all the chained descriptors */
	list_for_each_entry_safe(desc, d, &list, node) {
		dma_descriptor_unmap(&desc->txd);
		ep93xx_dma_desc_put(edmac, desc);
	}

	dmaengine_desc_callback_invoke(&cb, NULL);
}

static irqreturn_t ep93xx_dma_interrupt(int irq, void *dev_id)
{
	struct ep93xx_dma_chan *edmac = dev_id;
	struct ep93xx_dma_desc *desc;
	irqreturn_t ret = IRQ_HANDLED;

	spin_lock(&edmac->lock);

	desc = ep93xx_dma_get_active(edmac);
	if (!desc) {
		dev_warn(chan2dev(edmac),
			 "got interrupt while active list is empty\n");
		spin_unlock(&edmac->lock);
		return IRQ_NONE;
	}

	switch (edmac->edma->hw_interrupt(edmac)) {
	case INTERRUPT_DONE:
		desc->complete = true;
		tasklet_schedule(&edmac->tasklet);
		break;

	case INTERRUPT_NEXT_BUFFER:
		if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags))
			tasklet_schedule(&edmac->tasklet);
		break;

	default:
		dev_warn(chan2dev(edmac), "unknown interrupt!\n");
		ret = IRQ_NONE;
		break;
	}

	spin_unlock(&edmac->lock);
	return ret;
}

/**
 * ep93xx_dma_tx_submit - set the prepared descriptor(s) to be executed
 * @tx: descriptor to be executed
 *
 * Executes the given descriptor on the hardware or, if the hardware is busy,
 * queues the descriptor to be executed later on. Returns a cookie which can
 * be used to poll the status of the descriptor.
 */
static dma_cookie_t ep93xx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(tx->chan);
	struct ep93xx_dma_desc *desc;
	dma_cookie_t cookie;
	unsigned long flags;

	spin_lock_irqsave(&edmac->lock, flags);
	cookie = dma_cookie_assign(tx);

	desc = container_of(tx, struct ep93xx_dma_desc, txd);

	/*
	 * If nothing is currently processed, we push this descriptor
	 * directly to the hardware. Otherwise we put the descriptor
	 * into the pending queue.
	 */
	if (list_empty(&edmac->active)) {
		ep93xx_dma_set_active(edmac, desc);
		edmac->edma->hw_submit(edmac);
	} else {
		list_add_tail(&desc->node, &edmac->queue);
	}

	spin_unlock_irqrestore(&edmac->lock, flags);
	return cookie;
}

/**
 * ep93xx_dma_alloc_chan_resources - allocate resources for the channel
 * @chan: channel to allocate resources
 *
 * Allocates the necessary resources for the given DMA channel and returns
 * the number of allocated descriptors for the channel. A negative errno is
 * returned in case of failure.
 */
static int ep93xx_dma_alloc_chan_resources(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	const char *name = dma_chan_name(chan);
	int ret, i;

	/* Sanity check the channel parameters */
	if (!edmac->edma->m2m) {
		if (edmac->dma_cfg.port > EP93XX_DMA_IRDA)
			return -EINVAL;
		if (edmac->dma_cfg.dir != ep93xx_dma_chan_direction(chan))
			return -EINVAL;
	} else {
		if (edmac->dma_cfg.dir != DMA_MEM_TO_MEM) {
			switch (edmac->dma_cfg.port) {
			case EP93XX_DMA_SSP:
			case EP93XX_DMA_IDE:
				if (!is_slave_direction(edmac->dma_cfg.dir))
					return -EINVAL;
				break;
			default:
				return -EINVAL;
			}
		}
	}

	ret = clk_prepare_enable(edmac->clk);
	if (ret)
		return ret;

	ret = request_irq(edmac->irq, ep93xx_dma_interrupt, 0, name, edmac);
	if (ret)
		goto fail_clk_disable;

	spin_lock_irq(&edmac->lock);
	dma_cookie_init(&edmac->chan);
	ret = edmac->edma->hw_setup(edmac);
	spin_unlock_irq(&edmac->lock);

	if (ret)
		goto fail_free_irq;

	for (i = 0; i < DMA_MAX_CHAN_DESCRIPTORS; i++) {
		struct ep93xx_dma_desc *desc;

		desc = kzalloc(sizeof(*desc), GFP_KERNEL);
		if (!desc) {
			dev_warn(chan2dev(edmac), "not enough descriptors\n");
			break;
		}

		INIT_LIST_HEAD(&desc->tx_list);

		dma_async_tx_descriptor_init(&desc->txd, chan);
		desc->txd.flags = DMA_CTRL_ACK;
		desc->txd.tx_submit = ep93xx_dma_tx_submit;

		ep93xx_dma_desc_put(edmac, desc);
	}

	return i;

fail_free_irq:
	free_irq(edmac->irq, edmac);
fail_clk_disable:
	clk_disable_unprepare(edmac->clk);

	return ret;
}

/**
 * ep93xx_dma_free_chan_resources - release resources for the channel
 * @chan: channel
 *
 * Releases all the resources allocated for the given channel. The channel
 * must be idle when this is called.
 */
static void ep93xx_dma_free_chan_resources(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *d;
	unsigned long flags;
	LIST_HEAD(list);

	BUG_ON(!list_empty(&edmac->active));
	BUG_ON(!list_empty(&edmac->queue));

	spin_lock_irqsave(&edmac->lock, flags);
	edmac->edma->hw_shutdown(edmac);
	edmac->runtime_addr = 0;
	edmac->runtime_ctrl = 0;
	edmac->buffer = 0;
	list_splice_init(&edmac->free_list, &list);
	spin_unlock_irqrestore(&edmac->lock, flags);

	list_for_each_entry_safe(desc, d, &list, node)
		kfree(desc);

	clk_disable_unprepare(edmac->clk);
	free_irq(edmac->irq, edmac);
}

/**
 * ep93xx_dma_prep_dma_memcpy - prepare a memcpy DMA operation
 * @chan: channel
 * @dest: destination bus address
 * @src: source bus address
 * @len: size of the transaction
 * @flags: flags for the descriptor
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
			   dma_addr_t src, size_t len, unsigned long flags)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	size_t bytes, offset;

	first = NULL;
	for (offset = 0; offset < len; offset += bytes) {
		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		bytes = min_t(size_t, len - offset, DMA_MAX_CHAN_BYTES);

		desc->src_addr = src + offset;
		desc->dst_addr = dest + offset;
		desc->size = bytes;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;
	first->txd.flags = flags;

	return &first->txd;
fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}
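
/*
 * A minimal client-usage sketch for the memcpy path (the channel, @dst,
 * @src and @len are assumptions of the example and would come from the
 * client driver):
 *
 *	struct dma_async_tx_descriptor *txd;
 *	dma_cookie_t cookie;
 *
 *	txd = dmaengine_prep_dma_memcpy(chan, dst, src, len,
 *					DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
 *	if (!txd)
 *		return -ENOMEM;
 *	cookie = dmaengine_submit(txd);
 *	dma_async_issue_pending(chan);
 *
 * Transfers longer than DMA_MAX_CHAN_BYTES are split into multiple chained
 * descriptors above, so the client does not need to chunk @len itself.
 */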

/**
 * ep93xx_dma_prep_slave_sg - prepare a slave DMA operation
 * @chan: channel
 * @sgl: list of buffers to transfer
 * @sg_len: number of entries in @sgl
 * @dir: direction of the DMA transfer
 * @flags: flags for the descriptor
 * @context: operation context (ignored)
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
			 unsigned int sg_len, enum dma_transfer_direction dir,
			 unsigned long flags, void *context)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	struct scatterlist *sg;
	int i;

	if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
		dev_warn(chan2dev(edmac),
			 "channel was configured with different direction\n");
		return NULL;
	}

	if (test_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
		dev_warn(chan2dev(edmac),
			 "channel is already used for cyclic transfers\n");
		return NULL;
	}

	ep93xx_dma_slave_config_write(chan, dir, &edmac->slave_config);

	first = NULL;
	for_each_sg(sgl, sg, sg_len, i) {
		size_t len = sg_dma_len(sg);

		if (len > DMA_MAX_CHAN_BYTES) {
			dev_warn(chan2dev(edmac), "too big transfer size %zu\n",
				 len);
			goto fail;
		}

		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		if (dir == DMA_MEM_TO_DEV) {
			desc->src_addr = sg_dma_address(sg);
			desc->dst_addr = edmac->runtime_addr;
		} else {
			desc->src_addr = edmac->runtime_addr;
			desc->dst_addr = sg_dma_address(sg);
		}
		desc->size = len;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;
	first->txd.flags = flags;

	return &first->txd;

fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}
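
/*
 * A minimal client-usage sketch for the slave path (the channel, the mapped
 * scatterlist and fifo_phys_addr are assumptions of the example, not part of
 * this driver):
 *
 *	struct dma_slave_config cfg = {
 *		.dst_addr = fifo_phys_addr,
 *		.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES,
 *	};
 *	struct dma_async_tx_descriptor *txd;
 *
 *	dmaengine_slave_config(chan, &cfg);
 *	txd = dmaengine_prep_slave_sg(chan, sgl, nents, DMA_MEM_TO_DEV,
 *				      DMA_PREP_INTERRUPT);
 *	if (txd) {
 *		dmaengine_submit(txd);
 *		dma_async_issue_pending(chan);
 *	}
 *
 * Note that the configuration is only latched into the hardware via
 * ep93xx_dma_slave_config_write() when the transfer is prepared.
 */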

/**
 * ep93xx_dma_prep_dma_cyclic - prepare a cyclic DMA operation
 * @chan: channel
 * @dma_addr: DMA mapped address of the buffer
 * @buf_len: length of the buffer (in bytes)
 * @period_len: length of a single period
 * @dir: direction of the operation
 * @flags: tx descriptor status flags
 *
 * Prepares a descriptor for a cyclic DMA operation. This means that once the
 * descriptor is submitted, we will be submitting @period_len sized buffers
 * and calling the callback once a period has elapsed. The transfer terminates
 * only when the client calls dmaengine_terminate_all() for this channel.
 *
 * Returns a valid DMA descriptor or %NULL in case of failure.
 */
static struct dma_async_tx_descriptor *
ep93xx_dma_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
			   size_t buf_len, size_t period_len,
			   enum dma_transfer_direction dir, unsigned long flags)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *first;
	size_t offset = 0;

	if (!edmac->edma->m2m && dir != ep93xx_dma_chan_direction(chan)) {
		dev_warn(chan2dev(edmac),
			 "channel was configured with different direction\n");
		return NULL;
	}

	if (test_and_set_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags)) {
		dev_warn(chan2dev(edmac),
			 "channel is already used for cyclic transfers\n");
		return NULL;
	}

	if (period_len > DMA_MAX_CHAN_BYTES) {
		dev_warn(chan2dev(edmac), "too big period length %zu\n",
			 period_len);
		return NULL;
	}

	ep93xx_dma_slave_config_write(chan, dir, &edmac->slave_config);

	/* Split the buffer into period size chunks */
	first = NULL;
	for (offset = 0; offset < buf_len; offset += period_len) {
		desc = ep93xx_dma_desc_get(edmac);
		if (!desc) {
			dev_warn(chan2dev(edmac), "couldn't get descriptor\n");
			goto fail;
		}

		if (dir == DMA_MEM_TO_DEV) {
			desc->src_addr = dma_addr + offset;
			desc->dst_addr = edmac->runtime_addr;
		} else {
			desc->src_addr = edmac->runtime_addr;
			desc->dst_addr = dma_addr + offset;
		}

		desc->size = period_len;

		if (!first)
			first = desc;
		else
			list_add_tail(&desc->node, &first->tx_list);
	}

	first->txd.cookie = -EBUSY;

	return &first->txd;

fail:
	ep93xx_dma_desc_put(edmac, first);
	return NULL;
}
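
/*
 * A minimal client-usage sketch for cyclic (e.g. audio) transfers; @buf,
 * the sizes, period_elapsed and priv are assumptions of the example:
 *
 *	struct dma_async_tx_descriptor *txd;
 *
 *	txd = dmaengine_prep_dma_cyclic(chan, buf, buf_len, period_len,
 *					DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT);
 *	if (txd) {
 *		txd->callback = period_elapsed;	/* invoked once per period */
 *		txd->callback_param = priv;
 *		dmaengine_submit(txd);
 *		dma_async_issue_pending(chan);
 *	}
 *
 * The transfer then runs until the client calls dmaengine_terminate_sync()
 * (or dmaengine_terminate_all()) on the channel.
 */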

/**
 * ep93xx_dma_synchronize - Synchronizes the termination of transfers to the
 * current context.
 * @chan: channel
 *
 * Synchronizes the DMA channel termination to the current context. When this
 * function returns it is guaranteed that all transfers for previously issued
 * descriptors have stopped and it is safe to free the memory associated
 * with them. Furthermore it is guaranteed that all complete callback functions
 * for a previously submitted descriptor have finished running and it is safe
 * to free resources accessed from within the complete callbacks.
 */
static void ep93xx_dma_synchronize(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);

	if (edmac->edma->hw_synchronize)
		edmac->edma->hw_synchronize(edmac);
}

/**
 * ep93xx_dma_terminate_all - terminate all transactions
 * @chan: channel
 *
 * Stops all DMA transactions. All descriptors are put back to the
 * @edmac->free_list and callbacks are _not_ called.
 */
static int ep93xx_dma_terminate_all(struct dma_chan *chan)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_desc *desc, *_d;
	unsigned long flags;
	LIST_HEAD(list);

	spin_lock_irqsave(&edmac->lock, flags);
	/* First we disable and flush the DMA channel */
	edmac->edma->hw_shutdown(edmac);
	clear_bit(EP93XX_DMA_IS_CYCLIC, &edmac->flags);
	list_splice_init(&edmac->active, &list);
	list_splice_init(&edmac->queue, &list);
	/*
	 * We then re-enable the channel. This way we can continue submitting
	 * the descriptors by just calling ->hw_submit() again.
	 */
	edmac->edma->hw_setup(edmac);
	spin_unlock_irqrestore(&edmac->lock, flags);

	list_for_each_entry_safe(desc, _d, &list, node)
		ep93xx_dma_desc_put(edmac, desc);

	return 0;
}

static int ep93xx_dma_slave_config(struct dma_chan *chan,
				   struct dma_slave_config *config)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);

	memcpy(&edmac->slave_config, config, sizeof(*config));

	return 0;
}

static int ep93xx_dma_slave_config_write(struct dma_chan *chan,
					 enum dma_transfer_direction dir,
					 struct dma_slave_config *config)
{
	struct ep93xx_dma_chan *edmac = to_ep93xx_dma_chan(chan);
	enum dma_slave_buswidth width;
	unsigned long flags;
	u32 addr, ctrl;

	if (!edmac->edma->m2m)
		return -EINVAL;

	switch (dir) {
	case DMA_DEV_TO_MEM:
		width = config->src_addr_width;
		addr = config->src_addr;
		break;

	case DMA_MEM_TO_DEV:
		width = config->dst_addr_width;
		addr = config->dst_addr;
		break;

	default:
		return -EINVAL;
	}

	switch (width) {
	case DMA_SLAVE_BUSWIDTH_1_BYTE:
		ctrl = 0;
		break;
	case DMA_SLAVE_BUSWIDTH_2_BYTES:
		ctrl = M2M_CONTROL_PW_16;
		break;
	case DMA_SLAVE_BUSWIDTH_4_BYTES:
		ctrl = M2M_CONTROL_PW_32;
		break;
	default:
		return -EINVAL;
	}

	spin_lock_irqsave(&edmac->lock, flags);
	edmac->runtime_addr = addr;
	edmac->runtime_ctrl = ctrl;
	spin_unlock_irqrestore(&edmac->lock, flags);

	return 0;
}

/**
 * ep93xx_dma_tx_status - check if a transaction is completed
 * @chan: channel
 * @cookie: transaction specific cookie
 * @state: state of the transaction is stored here if given
 *
 * This function can be used to query the state of a given transaction.
 */
static enum dma_status ep93xx_dma_tx_status(struct dma_chan *chan,
					    dma_cookie_t cookie,
					    struct dma_tx_state *state)
{
	return dma_cookie_status(chan, cookie, state);
}

/**
 * ep93xx_dma_issue_pending - push pending transactions to the hardware
 * @chan: channel
 *
 * When this function is called, all pending transactions are pushed to the
 * hardware and executed.
 */
static void ep93xx_dma_issue_pending(struct dma_chan *chan)
{
	ep93xx_dma_advance_work(to_ep93xx_dma_chan(chan));
}

static struct ep93xx_dma_engine *ep93xx_dma_of_probe(struct platform_device *pdev)
{
	const struct ep93xx_edma_data *data;
	struct device *dev = &pdev->dev;
	struct ep93xx_dma_engine *edma;
	struct dma_device *dma_dev;
	char dma_clk_name[5];
	int i;

	data = device_get_match_data(dev);
	if (!data)
		return ERR_PTR(dev_err_probe(dev, -ENODEV, "No device match found\n"));

	edma = devm_kzalloc(dev, struct_size(edma, channels, data->num_channels),
			    GFP_KERNEL);
	if (!edma)
		return ERR_PTR(-ENOMEM);

	edma->m2m = data->id;
	edma->num_channels = data->num_channels;
	dma_dev = &edma->dma_dev;

	INIT_LIST_HEAD(&dma_dev->channels);
	for (i = 0; i < edma->num_channels; i++) {
		struct ep93xx_dma_chan *edmac = &edma->channels[i];
		int len;

		edmac->chan.device = dma_dev;
		edmac->regs = devm_platform_ioremap_resource(pdev, i);
		if (IS_ERR(edmac->regs))
			return ERR_CAST(edmac->regs);

		edmac->irq = fwnode_irq_get(dev_fwnode(dev), i);
		if (edmac->irq < 0)
			return ERR_PTR(edmac->irq);

		edmac->edma = edma;

		if (edma->m2m)
			len = snprintf(dma_clk_name, sizeof(dma_clk_name), "m2m%u", i);
		else
			len = snprintf(dma_clk_name, sizeof(dma_clk_name), "m2p%u", i);
		if (len >= sizeof(dma_clk_name))
			return ERR_PTR(-ENOBUFS);

		edmac->clk = devm_clk_get(dev, dma_clk_name);
		if (IS_ERR(edmac->clk)) {
			dev_err_probe(dev, PTR_ERR(edmac->clk),
				      "no %s clock found\n", dma_clk_name);
			return ERR_CAST(edmac->clk);
		}

		spin_lock_init(&edmac->lock);
		INIT_LIST_HEAD(&edmac->active);
		INIT_LIST_HEAD(&edmac->queue);
		INIT_LIST_HEAD(&edmac->free_list);
		tasklet_setup(&edmac->tasklet, ep93xx_dma_tasklet);

		list_add_tail(&edmac->chan.device_node,
			      &dma_dev->channels);
	}

	return edma;
}

static bool ep93xx_m2p_dma_filter(struct dma_chan *chan, void *filter_param)
{
	struct ep93xx_dma_chan *echan = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_chan_cfg *cfg = filter_param;

	if (cfg->dir != ep93xx_dma_chan_direction(chan))
		return false;

	echan->dma_cfg = *cfg;
	return true;
}

static struct dma_chan *ep93xx_m2p_dma_of_xlate(struct of_phandle_args *dma_spec,
					    struct of_dma *ofdma)
{
	struct ep93xx_dma_engine *edma = ofdma->of_dma_data;
	dma_cap_mask_t mask = edma->dma_dev.cap_mask;
	struct ep93xx_dma_chan_cfg dma_cfg;
	u8 port = dma_spec->args[0];
	u8 direction = dma_spec->args[1];

	if (port > EP93XX_DMA_IRDA)
		return NULL;

	if (!is_slave_direction(direction))
		return NULL;

	dma_cfg.port = port;
	dma_cfg.dir = direction;

	return __dma_request_channel(&mask, ep93xx_m2p_dma_filter, &dma_cfg, ofdma->of_node);
}
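
/*
 * For reference, the device tree specifier decoded above carries two cells:
 * the M2P port number and the dma_transfer_direction value. An illustrative
 * sketch of a consumer node (node and label names are assumptions of the
 * example, not taken from any binding document):
 *
 *	uart1: serial@808c0000 {
 *		...
 *		dmas = <&dma0 6 1>, <&dma0 6 2>;
 *		dma-names = "tx", "rx";
 *	};
 *
 * where 6 is the EP93XX_DMA_UART1 port and 1/2 are DMA_MEM_TO_DEV and
 * DMA_DEV_TO_MEM respectively; the filter then picks an M2P channel whose
 * hardwired direction matches.
 */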

static bool ep93xx_m2m_dma_filter(struct dma_chan *chan, void *filter_param)
{
	struct ep93xx_dma_chan *echan = to_ep93xx_dma_chan(chan);
	struct ep93xx_dma_chan_cfg *cfg = filter_param;

	echan->dma_cfg = *cfg;

	return true;
}

static struct dma_chan *ep93xx_m2m_dma_of_xlate(struct of_phandle_args *dma_spec,
					    struct of_dma *ofdma)
{
	struct ep93xx_dma_engine *edma = ofdma->of_dma_data;
	dma_cap_mask_t mask = edma->dma_dev.cap_mask;
	struct ep93xx_dma_chan_cfg dma_cfg;
	u8 port = dma_spec->args[0];
	u8 direction = dma_spec->args[1];

	if (!is_slave_direction(direction))
		return NULL;

	switch (port) {
	case EP93XX_DMA_SSP:
	case EP93XX_DMA_IDE:
		break;
	default:
		return NULL;
	}

	dma_cfg.port = port;
	dma_cfg.dir = direction;

	return __dma_request_channel(&mask, ep93xx_m2m_dma_filter, &dma_cfg, ofdma->of_node);
}

static int ep93xx_dma_probe(struct platform_device *pdev)
{
	struct ep93xx_dma_engine *edma;
	struct dma_device *dma_dev;
	int ret;

	edma = ep93xx_dma_of_probe(pdev);
	if (IS_ERR(edma))
		return PTR_ERR(edma);

	dma_dev = &edma->dma_dev;

	dma_cap_zero(dma_dev->cap_mask);
	dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);
	dma_cap_set(DMA_CYCLIC, dma_dev->cap_mask);

	dma_dev->dev = &pdev->dev;
	dma_dev->device_alloc_chan_resources = ep93xx_dma_alloc_chan_resources;
	dma_dev->device_free_chan_resources = ep93xx_dma_free_chan_resources;
	dma_dev->device_prep_slave_sg = ep93xx_dma_prep_slave_sg;
	dma_dev->device_prep_dma_cyclic = ep93xx_dma_prep_dma_cyclic;
	dma_dev->device_config = ep93xx_dma_slave_config;
	dma_dev->device_synchronize = ep93xx_dma_synchronize;
	dma_dev->device_terminate_all = ep93xx_dma_terminate_all;
	dma_dev->device_issue_pending = ep93xx_dma_issue_pending;
	dma_dev->device_tx_status = ep93xx_dma_tx_status;

	dma_set_max_seg_size(dma_dev->dev, DMA_MAX_CHAN_BYTES);

	if (edma->m2m) {
		dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
		dma_dev->device_prep_dma_memcpy = ep93xx_dma_prep_dma_memcpy;

		edma->hw_setup = m2m_hw_setup;
		edma->hw_shutdown = m2m_hw_shutdown;
		edma->hw_submit = m2m_hw_submit;
		edma->hw_interrupt = m2m_hw_interrupt;
	} else {
		dma_cap_set(DMA_PRIVATE, dma_dev->cap_mask);

		edma->hw_synchronize = m2p_hw_synchronize;
		edma->hw_setup = m2p_hw_setup;
		edma->hw_shutdown = m2p_hw_shutdown;
		edma->hw_submit = m2p_hw_submit;
		edma->hw_interrupt = m2p_hw_interrupt;
	}

	ret = dma_async_device_register(dma_dev);
	if (ret)
		return ret;

	if (edma->m2m) {
		ret = of_dma_controller_register(pdev->dev.of_node, ep93xx_m2m_dma_of_xlate,
						 edma);
	} else {
		ret = of_dma_controller_register(pdev->dev.of_node, ep93xx_m2p_dma_of_xlate,
						 edma);
	}
	if (ret)
		goto err_dma_unregister;

	dev_info(dma_dev->dev, "EP93xx M2%s DMA ready\n", edma->m2m ? "M" : "P");

	return 0;

err_dma_unregister:
	dma_async_device_unregister(dma_dev);

	return ret;
}

static const struct ep93xx_edma_data edma_m2p = {
	.id = M2P_DMA,
	.num_channels = 10,
};

static const struct ep93xx_edma_data edma_m2m = {
	.id = M2M_DMA,
	.num_channels = 2,
};

static const struct of_device_id ep93xx_dma_of_ids[] = {
	{ .compatible = "cirrus,ep9301-dma-m2p", .data = &edma_m2p },
	{ .compatible = "cirrus,ep9301-dma-m2m", .data = &edma_m2m },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ep93xx_dma_of_ids);

static const struct platform_device_id ep93xx_dma_driver_ids[] = {
	{ "ep93xx-dma-m2p", 0 },
	{ "ep93xx-dma-m2m", 1 },
	{ },
};

static struct platform_driver ep93xx_dma_driver = {
	.driver		= {
		.name	= "ep93xx-dma",
		.of_match_table = ep93xx_dma_of_ids,
	},
	.id_table	= ep93xx_dma_driver_ids,
	.probe		= ep93xx_dma_probe,
};

module_platform_driver(ep93xx_dma_driver);

MODULE_AUTHOR("Mika Westerberg <mika.westerberg@iki.fi>");
MODULE_DESCRIPTION("EP93xx DMA driver");