// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2015 Robert Jarzmik <robert.jarzmik@free.fr>
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/string_choices.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/platform_data/mmp_dma.h>
#include <linux/dmapool.h>
#include <linux/of.h>
#include <linux/of_dma.h>
#include <linux/wait.h>
#include <linux/dma/pxa-dma.h>

#include "dmaengine.h"
#include "virt-dma.h"

#define DCSR(n)		(0x0000 + ((n) << 2))
#define DALGN(n)	0x00a0
#define DINT		0x00f0
#define DDADR(n)	(0x0200 + ((n) << 4))
#define DSADR(n)	(0x0204 + ((n) << 4))
#define DTADR(n)	(0x0208 + ((n) << 4))
#define DCMD(n)		(0x020c + ((n) << 4))

#define PXA_DCSR_RUN		BIT(31)	/* Run Bit (read / write) */
#define PXA_DCSR_NODESC		BIT(30)	/* No-Descriptor Fetch (read / write) */
#define PXA_DCSR_STOPIRQEN	BIT(29)	/* Stop Interrupt Enable (R/W) */
#define PXA_DCSR_REQPEND	BIT(8)	/* Request Pending (read-only) */
#define PXA_DCSR_STOPSTATE	BIT(3)	/* Stop State (read-only) */
#define PXA_DCSR_ENDINTR	BIT(2)	/* End Interrupt (read / write) */
#define PXA_DCSR_STARTINTR	BIT(1)	/* Start Interrupt (read / write) */
#define PXA_DCSR_BUSERR		BIT(0)	/* Bus Error Interrupt (read / write) */

#define PXA_DCSR_EORIRQEN	BIT(28)	/* End of Receive IRQ Enable (R/W) */
#define PXA_DCSR_EORJMPEN	BIT(27)	/* Jump to next descriptor on EOR */
#define PXA_DCSR_EORSTOPEN	BIT(26)	/* STOP on an EOR */
#define PXA_DCSR_SETCMPST	BIT(25)	/* Set Descriptor Compare Status */
#define PXA_DCSR_CLRCMPST	BIT(24)	/* Clear Descriptor Compare Status */
#define PXA_DCSR_CMPST		BIT(10)	/* The Descriptor Compare Status */
#define PXA_DCSR_EORINTR	BIT(9)	/* The end of Receive */

#define DRCMR_MAPVLD	BIT(7)	/* Map Valid (read / write) */
#define DRCMR_CHLNUM	0x1f	/* mask for Channel Number (read / write) */

#define DDADR_DESCADDR	0xfffffff0	/* Address of next descriptor (mask) */
#define DDADR_STOP	BIT(0)	/* Stop (read / write) */

#define PXA_DCMD_INCSRCADDR	BIT(31)	/* Source Address Increment Setting. */
#define PXA_DCMD_INCTRGADDR	BIT(30)	/* Target Address Increment Setting. */
#define PXA_DCMD_FLOWSRC	BIT(29)	/* Flow Control by the source. */
#define PXA_DCMD_FLOWTRG	BIT(28)	/* Flow Control by the target. */
#define PXA_DCMD_STARTIRQEN	BIT(22)	/* Start Interrupt Enable */
#define PXA_DCMD_ENDIRQEN	BIT(21)	/* End Interrupt Enable */
#define PXA_DCMD_ENDIAN		BIT(18)	/* Device Endian-ness. */
#define PXA_DCMD_BURST8		(1 << 16)	/* 8 byte burst */
#define PXA_DCMD_BURST16	(2 << 16)	/* 16 byte burst */
#define PXA_DCMD_BURST32	(3 << 16)	/* 32 byte burst */
#define PXA_DCMD_WIDTH1		(1 << 14)	/* 1 byte width */
#define PXA_DCMD_WIDTH2		(2 << 14)	/* 2 byte width (HalfWord) */
#define PXA_DCMD_WIDTH4		(3 << 14)	/* 4 byte width (Word) */
#define PXA_DCMD_LENGTH		0x01fff		/* length mask (max = 8K - 1) */

#define PDMA_ALIGNMENT		3
#define PDMA_MAX_DESC_BYTES	(PXA_DCMD_LENGTH & ~((1 << PDMA_ALIGNMENT) - 1))
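/*
 * A note on the limits above: PDMA_ALIGNMENT is an order, so "aligned"
 * means 8-byte (1 << 3) aligned.  PXA_DCMD_LENGTH is 0x1fff, i.e. at most
 * 8191 bytes per hardware descriptor; masking off the low three bits gives
 * PDMA_MAX_DESC_BYTES = 0x1ff8 = 8184, the largest 8-byte-aligned chunk a
 * single descriptor can carry.
 */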

struct pxad_desc_hw {
	u32 ddadr;	/* Points to the next descriptor + flags */
	u32 dsadr;	/* DSADR value for the current transfer */
	u32 dtadr;	/* DTADR value for the current transfer */
	u32 dcmd;	/* DCMD value for the current transfer */
} __aligned(16);
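/*
 * The 16-byte alignment is not cosmetic: DDADR_DESCADDR keeps only bits
 * [31:4] as the next-descriptor address, the low bits being flags such as
 * DDADR_STOP.  Hardware descriptors must therefore sit on 16-byte
 * boundaries, which the per-channel dma_pool guarantees by using
 * __alignof__(struct pxad_desc_hw) as its alignment.
 */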

struct pxad_desc_sw {
	struct virt_dma_desc	vd;		/* Virtual descriptor */
	int			nb_desc;	/* Number of hw. descriptors */
	size_t			len;		/* Number of bytes xfered */
	dma_addr_t		first;		/* First descriptor's addr */

	/* At least one descriptor has a src/dst address that is not a multiple of 8 */
	bool			misaligned;
	bool			cyclic;
	struct dma_pool		*desc_pool;	/* Channel's used allocator */

	struct pxad_desc_hw	*hw_desc[] __counted_by(nb_desc);
						/* DMA coherent descriptors */
};

struct pxad_phy {
	int			idx;
	void __iomem		*base;
	struct pxad_chan	*vchan;
};

struct pxad_chan {
	struct virt_dma_chan	vc;		/* Virtual channel */
	u32			drcmr;		/* Requestor of the channel */
	enum pxad_chan_prio	prio;		/* Required priority of phy */
	/*
	 * At least one desc_sw in the submitted or issued transfers on this
	 * channel has an address with addr % 8 != 0. This implies the DALGN
	 * setting on the phy.
	 */
	bool			misaligned;
	struct dma_slave_config	cfg;		/* Runtime config */

	/* protected by vc->lock */
	struct pxad_phy		*phy;
	struct dma_pool		*desc_pool;	/* Descriptors pool */
	dma_cookie_t		bus_error;

	wait_queue_head_t	wq_state;
};

struct pxad_device {
	struct dma_device		slave;
	int				nr_chans;
	int				nr_requestors;
	void __iomem			*base;
	struct pxad_phy			*phys;
	spinlock_t			phy_lock;	/* Phy association */
#ifdef CONFIG_DEBUG_FS
	struct dentry			*dbgfs_root;
	struct dentry			**dbgfs_chan;
#endif
};

#define tx_to_pxad_desc(tx)					\
	container_of(tx, struct pxad_desc_sw, async_tx)
#define to_pxad_chan(dchan)					\
	container_of(dchan, struct pxad_chan, vc.chan)
#define to_pxad_dev(dmadev)					\
	container_of(dmadev, struct pxad_device, slave)
#define to_pxad_sw_desc(_vd)				\
	container_of((_vd), struct pxad_desc_sw, vd)

#define _phy_readl_relaxed(phy, _reg)					\
	readl_relaxed((phy)->base + _reg((phy)->idx))
#define phy_readl_relaxed(phy, _reg)					\
	({								\
		u32 _v;							\
		_v = readl_relaxed((phy)->base + _reg((phy)->idx));	\
		dev_vdbg(&phy->vchan->vc.chan.dev->device,		\
			 "%s(): readl(%s): 0x%08x\n", __func__, #_reg,	\
			  _v);						\
		_v;							\
	})
#define phy_writel(phy, val, _reg)					\
	do {								\
		writel((val), (phy)->base + _reg((phy)->idx));		\
		dev_vdbg(&phy->vchan->vc.chan.dev->device,		\
			 "%s(): writel(0x%08x, %s)\n",			\
			 __func__, (u32)(val), #_reg);			\
	} while (0)
#define phy_writel_relaxed(phy, val, _reg)				\
	do {								\
		writel_relaxed((val), (phy)->base + _reg((phy)->idx));	\
		dev_vdbg(&phy->vchan->vc.chan.dev->device,		\
			 "%s(): writel_relaxed(0x%08x, %s)\n",		\
			 __func__, (u32)(val), #_reg);			\
	} while (0)
static unsigned int pxad_drcmr(unsigned int line)
{
	if (line < 64)
		return 0x100 + line * 4;
	return 0x1000 + line * 4;
}
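/*
 * The DRCMR (request-to-channel mapping) registers live in two banks:
 * requestor lines 0..63 at offset 0x100 + line * 4, higher lines at
 * 0x1000 + line * 4.  E.g. line 2 maps to 0x108 and line 64 to 0x1100.
 */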

static bool pxad_filter_fn(struct dma_chan *chan, void *param);

/*
 * Debug fs
 */
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>

static int requester_chan_show(struct seq_file *s, void *p)
{
	struct pxad_phy *phy = s->private;
	int i;
	u32 drcmr;

	seq_printf(s, "DMA channel %d requester :\n", phy->idx);
	for (i = 0; i < 70; i++) {
		drcmr = readl_relaxed(phy->base + pxad_drcmr(i));
		if ((drcmr & DRCMR_CHLNUM) == phy->idx)
			seq_printf(s, "\tRequester %d (MAPVLD=%d)\n", i,
				   !!(drcmr & DRCMR_MAPVLD));
	}
	return 0;
}

static inline int dbg_burst_from_dcmd(u32 dcmd)
{
	int burst = (dcmd >> 16) & 0x3;

	return burst ? 4 << burst : 0;
}

static int is_phys_valid(unsigned long addr)
{
	return pfn_valid(__phys_to_pfn(addr));
}

#define PXA_DCSR_STR(flag) (dcsr & PXA_DCSR_##flag ? #flag" " : "")
#define PXA_DCMD_STR(flag) (dcmd & PXA_DCMD_##flag ? #flag" " : "")

static int descriptors_show(struct seq_file *s, void *p)
{
	struct pxad_phy *phy = s->private;
	int i, max_show = 20, burst, width;
	u32 dcmd;
	unsigned long phys_desc, ddadr;
	struct pxad_desc_hw *desc;

	phys_desc = ddadr = _phy_readl_relaxed(phy, DDADR);

	seq_printf(s, "DMA channel %d descriptors :\n", phy->idx);
	seq_printf(s, "[%03d] First descriptor unknown\n", 0);
	for (i = 1; i < max_show && is_phys_valid(phys_desc); i++) {
		desc = phys_to_virt(phys_desc);
		dcmd = desc->dcmd;
		burst = dbg_burst_from_dcmd(dcmd);
		width = (1 << ((dcmd >> 14) & 0x3)) >> 1;

		seq_printf(s, "[%03d] Desc at %08lx(virt %p)\n",
			   i, phys_desc, desc);
		seq_printf(s, "\tDDADR = %08x\n", desc->ddadr);
		seq_printf(s, "\tDSADR = %08x\n", desc->dsadr);
		seq_printf(s, "\tDTADR = %08x\n", desc->dtadr);
		seq_printf(s, "\tDCMD  = %08x (%s%s%s%s%s%s%sburst=%d width=%d len=%d)\n",
			   dcmd,
			   PXA_DCMD_STR(INCSRCADDR), PXA_DCMD_STR(INCTRGADDR),
			   PXA_DCMD_STR(FLOWSRC), PXA_DCMD_STR(FLOWTRG),
			   PXA_DCMD_STR(STARTIRQEN), PXA_DCMD_STR(ENDIRQEN),
			   PXA_DCMD_STR(ENDIAN), burst, width,
			   dcmd & PXA_DCMD_LENGTH);
		phys_desc = desc->ddadr;
	}
	if (i == max_show)
		seq_printf(s, "[%03d] Desc at %08lx ... max display reached\n",
			   i, phys_desc);
	else
		seq_printf(s, "[%03d] Desc at %08lx is %s\n",
			   i, phys_desc, phys_desc == DDADR_STOP ?
			   "DDADR_STOP" : "invalid");

	return 0;
}

static int chan_state_show(struct seq_file *s, void *p)
{
	struct pxad_phy *phy = s->private;
	u32 dcsr, dcmd;
	int burst, width;
	static const char * const str_prio[] = {
		"high", "normal", "low", "invalid"
	};

	dcsr = _phy_readl_relaxed(phy, DCSR);
	dcmd = _phy_readl_relaxed(phy, DCMD);
	burst = dbg_burst_from_dcmd(dcmd);
	width = (1 << ((dcmd >> 14) & 0x3)) >> 1;

	seq_printf(s, "DMA channel %d\n", phy->idx);
	seq_printf(s, "\tPriority : %s\n",
			  str_prio[(phy->idx & 0xf) / 4]);
	seq_printf(s, "\tUnaligned transfer bit: %s\n",
			  str_yes_no(_phy_readl_relaxed(phy, DALGN) & BIT(phy->idx)));
	seq_printf(s, "\tDCSR  = %08x (%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)\n",
		   dcsr, PXA_DCSR_STR(RUN), PXA_DCSR_STR(NODESC),
		   PXA_DCSR_STR(STOPIRQEN), PXA_DCSR_STR(EORIRQEN),
		   PXA_DCSR_STR(EORJMPEN), PXA_DCSR_STR(EORSTOPEN),
		   PXA_DCSR_STR(SETCMPST), PXA_DCSR_STR(CLRCMPST),
		   PXA_DCSR_STR(CMPST), PXA_DCSR_STR(EORINTR),
		   PXA_DCSR_STR(REQPEND), PXA_DCSR_STR(STOPSTATE),
		   PXA_DCSR_STR(ENDINTR), PXA_DCSR_STR(STARTINTR),
		   PXA_DCSR_STR(BUSERR));

	seq_printf(s, "\tDCMD  = %08x (%s%s%s%s%s%s%sburst=%d width=%d len=%d)\n",
		   dcmd,
		   PXA_DCMD_STR(INCSRCADDR), PXA_DCMD_STR(INCTRGADDR),
		   PXA_DCMD_STR(FLOWSRC), PXA_DCMD_STR(FLOWTRG),
		   PXA_DCMD_STR(STARTIRQEN), PXA_DCMD_STR(ENDIRQEN),
		   PXA_DCMD_STR(ENDIAN), burst, width, dcmd & PXA_DCMD_LENGTH);
	seq_printf(s, "\tDSADR = %08x\n", _phy_readl_relaxed(phy, DSADR));
	seq_printf(s, "\tDTADR = %08x\n", _phy_readl_relaxed(phy, DTADR));
	seq_printf(s, "\tDDADR = %08x\n", _phy_readl_relaxed(phy, DDADR));

	return 0;
}

static int state_show(struct seq_file *s, void *p)
{
	struct pxad_device *pdev = s->private;

	/* basic device status */
	seq_puts(s, "DMA engine status\n");
	seq_printf(s, "\tChannel number: %d\n", pdev->nr_chans);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(state);
DEFINE_SHOW_ATTRIBUTE(chan_state);
DEFINE_SHOW_ATTRIBUTE(descriptors);
DEFINE_SHOW_ATTRIBUTE(requester_chan);

static struct dentry *pxad_dbg_alloc_chan(struct pxad_device *pdev,
					     int ch, struct dentry *chandir)
{
	char chan_name[11];
	struct dentry *chan;
	void *dt;

	scnprintf(chan_name, sizeof(chan_name), "%d", ch);
	chan = debugfs_create_dir(chan_name, chandir);
	dt = (void *)&pdev->phys[ch];

	debugfs_create_file("state", 0400, chan, dt, &chan_state_fops);
	debugfs_create_file("descriptors", 0400, chan, dt, &descriptors_fops);
	debugfs_create_file("requesters", 0400, chan, dt, &requester_chan_fops);

	return chan;
}

static void pxad_init_debugfs(struct pxad_device *pdev)
{
	int i;
	struct dentry *chandir;

	pdev->dbgfs_chan = kmalloc_array(pdev->nr_chans,
					 sizeof(struct dentry *), GFP_KERNEL);
	if (!pdev->dbgfs_chan)
		return;

	pdev->dbgfs_root = debugfs_create_dir(dev_name(pdev->slave.dev), NULL);

	debugfs_create_file("state", 0400, pdev->dbgfs_root, pdev, &state_fops);

	chandir = debugfs_create_dir("channels", pdev->dbgfs_root);

	for (i = 0; i < pdev->nr_chans; i++)
		pdev->dbgfs_chan[i] = pxad_dbg_alloc_chan(pdev, i, chandir);
}

static void pxad_cleanup_debugfs(struct pxad_device *pdev)
{
	debugfs_remove_recursive(pdev->dbgfs_root);
}
#else
static inline void pxad_init_debugfs(struct pxad_device *pdev) {}
static inline void pxad_cleanup_debugfs(struct pxad_device *pdev) {}
#endif

static struct pxad_phy *lookup_phy(struct pxad_chan *pchan)
{
	int prio, i;
	struct pxad_device *pdev = to_pxad_dev(pchan->vc.chan.device);
	struct pxad_phy *phy, *found = NULL;
	unsigned long flags;

	/*
	 * dma channel priorities
	 * ch 0 - 3,  16 - 19  <--> (0)
	 * ch 4 - 7,  20 - 23  <--> (1)
	 * ch 8 - 11, 24 - 27  <--> (2)
	 * ch 12 - 15, 28 - 31  <--> (3)
	 */

	spin_lock_irqsave(&pdev->phy_lock, flags);
	for (prio = pchan->prio; prio >= PXAD_PRIO_HIGHEST; prio--) {
		for (i = 0; i < pdev->nr_chans; i++) {
			if (prio != (i & 0xf) >> 2)
				continue;
			phy = &pdev->phys[i];
			if (!phy->vchan) {
				phy->vchan = pchan;
				found = phy;
				goto out_unlock;
			}
		}
	}

out_unlock:
	spin_unlock_irqrestore(&pdev->phy_lock, flags);
	dev_dbg(&pchan->vc.chan.dev->device,
		"%s(): phy=%p(%d)\n", __func__, found,
		found ? found->idx : -1);

	return found;
}

static void pxad_free_phy(struct pxad_chan *chan)
{
	struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);
	unsigned long flags;
	u32 reg;

	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): freeing\n", __func__);
	if (!chan->phy)
		return;

	/* clear the channel mapping in DRCMR */
	if (chan->drcmr <= pdev->nr_requestors) {
		reg = pxad_drcmr(chan->drcmr);
		writel_relaxed(0, chan->phy->base + reg);
	}

	spin_lock_irqsave(&pdev->phy_lock, flags);
	chan->phy->vchan = NULL;
	chan->phy = NULL;
	spin_unlock_irqrestore(&pdev->phy_lock, flags);
}

static bool is_chan_running(struct pxad_chan *chan)
{
	u32 dcsr;
	struct pxad_phy *phy = chan->phy;

	if (!phy)
		return false;
	dcsr = phy_readl_relaxed(phy, DCSR);
	return dcsr & PXA_DCSR_RUN;
}

static bool is_running_chan_misaligned(struct pxad_chan *chan)
{
	u32 dalgn;

	BUG_ON(!chan->phy);
	dalgn = phy_readl_relaxed(chan->phy, DALGN);
	return dalgn & (BIT(chan->phy->idx));
}

static void phy_enable(struct pxad_phy *phy, bool misaligned)
{
	struct pxad_device *pdev;
	u32 reg, dalgn;

	if (!phy->vchan)
		return;

	dev_dbg(&phy->vchan->vc.chan.dev->device,
		"%s(); phy=%p(%d) misaligned=%d\n", __func__,
		phy, phy->idx, misaligned);

	pdev = to_pxad_dev(phy->vchan->vc.chan.device);
	if (phy->vchan->drcmr <= pdev->nr_requestors) {
		reg = pxad_drcmr(phy->vchan->drcmr);
		writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg);
	}

	dalgn = phy_readl_relaxed(phy, DALGN);
	if (misaligned)
		dalgn |= BIT(phy->idx);
	else
		dalgn &= ~BIT(phy->idx);
	phy_writel_relaxed(phy, dalgn, DALGN);

	phy_writel(phy, PXA_DCSR_STOPIRQEN | PXA_DCSR_ENDINTR |
		   PXA_DCSR_BUSERR | PXA_DCSR_RUN, DCSR);
}

static void phy_disable(struct pxad_phy *phy)
{
	u32 dcsr;

	if (!phy)
		return;

	dcsr = phy_readl_relaxed(phy, DCSR);
	dev_dbg(&phy->vchan->vc.chan.dev->device,
		"%s(): phy=%p(%d)\n", __func__, phy, phy->idx);
	phy_writel(phy, dcsr & ~PXA_DCSR_RUN & ~PXA_DCSR_STOPIRQEN, DCSR);
}

static void pxad_launch_chan(struct pxad_chan *chan,
				 struct pxad_desc_sw *desc)
{
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): desc=%p\n", __func__, desc);
	if (!chan->phy) {
		chan->phy = lookup_phy(chan);
		if (!chan->phy) {
			dev_dbg(&chan->vc.chan.dev->device,
				"%s(): no free dma channel\n", __func__);
			return;
		}
	}
	chan->bus_error = 0;

	/*
	 * Program the descriptor's address into the DMA controller,
	 * then start the DMA transaction
	 */
	phy_writel(chan->phy, desc->first, DDADR);
	phy_enable(chan->phy, chan->misaligned);
	wake_up(&chan->wq_state);
}

static void set_updater_desc(struct pxad_desc_sw *sw_desc,
			     unsigned long flags)
{
	struct pxad_desc_hw *updater =
		sw_desc->hw_desc[sw_desc->nb_desc - 1];
	dma_addr_t dma = sw_desc->hw_desc[sw_desc->nb_desc - 2]->ddadr;

	updater->ddadr = DDADR_STOP;
	updater->dsadr = dma;
	updater->dtadr = dma + 8;
	updater->dcmd = PXA_DCMD_WIDTH4 | PXA_DCMD_BURST32 |
		(PXA_DCMD_LENGTH & sizeof(u32));
	if (flags & DMA_PREP_INTERRUPT)
		updater->dcmd |= PXA_DCMD_ENDIRQEN;
	if (sw_desc->cyclic)
		sw_desc->hw_desc[sw_desc->nb_desc - 2]->ddadr = sw_desc->first;
}
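/*
 * How completion detection works: the "updater" is an extra hw descriptor
 * appended to every transfer.  Its source address is its own DDADR word
 * (at 'dma') and its target is its own DTADR word (at 'dma' + 8), so once
 * the hardware executes it, the in-memory DTADR field no longer equals
 * DSADR + 8.  is_desc_completed() below relies on exactly that memory
 * visible side effect, without touching any channel register.
 */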

static bool is_desc_completed(struct virt_dma_desc *vd)
{
	struct pxad_desc_sw *sw_desc = to_pxad_sw_desc(vd);
	struct pxad_desc_hw *updater =
		sw_desc->hw_desc[sw_desc->nb_desc - 1];

	return updater->dtadr != (updater->dsadr + 8);
}

static void pxad_desc_chain(struct virt_dma_desc *vd1,
				struct virt_dma_desc *vd2)
{
	struct pxad_desc_sw *desc1 = to_pxad_sw_desc(vd1);
	struct pxad_desc_sw *desc2 = to_pxad_sw_desc(vd2);
	dma_addr_t dma_to_chain;

	dma_to_chain = desc2->first;
	desc1->hw_desc[desc1->nb_desc - 1]->ddadr = dma_to_chain;
}

static bool pxad_try_hotchain(struct virt_dma_chan *vc,
				  struct virt_dma_desc *vd)
{
	struct virt_dma_desc *vd_last_issued = NULL;
	struct pxad_chan *chan = to_pxad_chan(&vc->chan);

	/*
	 * Attempt to hot chain the tx if the phy is still running. This is
	 * considered successful only if either the channel is still running
	 * after the chaining, or if the chained transfer is completed after
	 * having been hot chained.
	 * A change of alignment is not allowed, and forbids hotchaining.
	 */
	if (is_chan_running(chan)) {
		BUG_ON(list_empty(&vc->desc_issued));

		if (!is_running_chan_misaligned(chan) &&
		    to_pxad_sw_desc(vd)->misaligned)
			return false;

		vd_last_issued = list_entry(vc->desc_issued.prev,
					    struct virt_dma_desc, node);
		pxad_desc_chain(vd_last_issued, vd);
		if (is_chan_running(chan) || is_desc_completed(vd))
			return true;
	}

	return false;
}
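/*
 * Note the race handling above: between pxad_desc_chain() and the final
 * check, the channel may have stopped right before the new descriptor's
 * address was patched in.  The chain attempt is therefore only trusted if
 * the channel is observed running afterwards, or if the new transfer has
 * already completed; otherwise the caller falls back to a regular launch.
 */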

static unsigned int clear_chan_irq(struct pxad_phy *phy)
{
	u32 dcsr;
	u32 dint = readl(phy->base + DINT);

	if (!(dint & BIT(phy->idx)))
		return PXA_DCSR_RUN;

	/* clear irq */
	dcsr = phy_readl_relaxed(phy, DCSR);
	phy_writel(phy, dcsr, DCSR);
	if ((dcsr & PXA_DCSR_BUSERR) && (phy->vchan))
		dev_warn(&phy->vchan->vc.chan.dev->device,
			 "%s(chan=%p): PXA_DCSR_BUSERR\n",
			 __func__, &phy->vchan);

	return dcsr & ~PXA_DCSR_RUN;
}

static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
{
	struct pxad_phy *phy = dev_id;
	struct pxad_chan *chan = phy->vchan;
	struct virt_dma_desc *vd, *tmp;
	unsigned int dcsr;
	bool vd_completed;
	dma_cookie_t last_started = 0;

	BUG_ON(!chan);

	dcsr = clear_chan_irq(phy);
	if (dcsr & PXA_DCSR_RUN)
		return IRQ_NONE;

	spin_lock(&chan->vc.lock);
	list_for_each_entry_safe(vd, tmp, &chan->vc.desc_issued, node) {
		vd_completed = is_desc_completed(vd);
		dev_dbg(&chan->vc.chan.dev->device,
			"%s(): checking txd %p[%x]: completed=%d dcsr=0x%x\n",
			__func__, vd, vd->tx.cookie, vd_completed,
			dcsr);
		last_started = vd->tx.cookie;
		if (to_pxad_sw_desc(vd)->cyclic) {
			vchan_cyclic_callback(vd);
			break;
		}
		if (vd_completed) {
			list_del(&vd->node);
			vchan_cookie_complete(vd);
		} else {
			break;
		}
	}

	if (dcsr & PXA_DCSR_BUSERR) {
		chan->bus_error = last_started;
		phy_disable(phy);
	}

	if (!chan->bus_error && dcsr & PXA_DCSR_STOPSTATE) {
		dev_dbg(&chan->vc.chan.dev->device,
		"%s(): channel stopped, submitted_empty=%d issued_empty=%d",
			__func__,
			list_empty(&chan->vc.desc_submitted),
			list_empty(&chan->vc.desc_issued));
		phy_writel_relaxed(phy, dcsr & ~PXA_DCSR_STOPIRQEN, DCSR);

		if (list_empty(&chan->vc.desc_issued)) {
			chan->misaligned =
				!list_empty(&chan->vc.desc_submitted);
		} else {
			vd = list_first_entry(&chan->vc.desc_issued,
					      struct virt_dma_desc, node);
			pxad_launch_chan(chan, to_pxad_sw_desc(vd));
		}
	}
	spin_unlock(&chan->vc.lock);
	wake_up(&chan->wq_state);

	return IRQ_HANDLED;
}

static irqreturn_t pxad_int_handler(int irq, void *dev_id)
{
	struct pxad_device *pdev = dev_id;
	struct pxad_phy *phy;
	u32 dint = readl(pdev->base + DINT);
	int i, ret = IRQ_NONE;

	while (dint) {
		i = __ffs(dint);
		dint &= (dint - 1);
		phy = &pdev->phys[i];
		if (pxad_chan_handler(irq, phy) == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}

	return ret;
}
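/*
 * The demux loop above visits every bit set in DINT: __ffs() finds the
 * lowest pending channel and "dint &= (dint - 1)" clears that lowest set
 * bit, so e.g. dint = 0b10110 dispatches channels 1, 2 and 4 in turn.
 */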

static int pxad_alloc_chan_resources(struct dma_chan *dchan)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);

	if (chan->desc_pool)
		return 1;

	chan->desc_pool = dma_pool_create(dma_chan_name(dchan),
					  pdev->slave.dev,
					  sizeof(struct pxad_desc_hw),
					  __alignof__(struct pxad_desc_hw),
					  0);
	if (!chan->desc_pool) {
		dev_err(&chan->vc.chan.dev->device,
			"%s(): unable to allocate descriptor pool\n",
			__func__);
		return -ENOMEM;
	}

	return 1;
}

static void pxad_free_chan_resources(struct dma_chan *dchan)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);

	vchan_free_chan_resources(&chan->vc);
	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;

	chan->drcmr = U32_MAX;
	chan->prio = PXAD_PRIO_LOWEST;
}

static void pxad_free_desc(struct virt_dma_desc *vd)
{
	int i;
	dma_addr_t dma;
	struct pxad_desc_sw *sw_desc = to_pxad_sw_desc(vd);

	for (i = sw_desc->nb_desc - 1; i >= 0; i--) {
		if (i > 0)
			dma = sw_desc->hw_desc[i - 1]->ddadr;
		else
			dma = sw_desc->first;
		dma_pool_free(sw_desc->desc_pool,
			      sw_desc->hw_desc[i], dma);
	}
	sw_desc->nb_desc = 0;
	kfree(sw_desc);
}
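/*
 * pxad_free_desc() recovers each descriptor's DMA handle from the chain
 * itself: hw_desc[i]'s bus address was stored into hw_desc[i - 1]->ddadr at
 * allocation time (and the very first one into sw_desc->first), which is
 * why the walk runs backwards and why no per-descriptor dma_addr_t array
 * is needed for dma_pool_free().
 */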

static struct pxad_desc_sw *
pxad_alloc_desc(struct pxad_chan *chan, unsigned int nb_hw_desc)
{
	struct pxad_desc_sw *sw_desc;
	dma_addr_t dma;
	void *desc;
	int i;

	sw_desc = kzalloc(struct_size(sw_desc, hw_desc, nb_hw_desc),
			  GFP_NOWAIT);
	if (!sw_desc)
		return NULL;
	sw_desc->desc_pool = chan->desc_pool;

	for (i = 0; i < nb_hw_desc; i++) {
		desc = dma_pool_alloc(sw_desc->desc_pool, GFP_NOWAIT, &dma);
		if (!desc) {
			dev_err(&chan->vc.chan.dev->device,
				"%s(): Couldn't allocate the %dth hw_desc from dma_pool %p\n",
				__func__, i, sw_desc->desc_pool);
			goto err;
		}

		sw_desc->nb_desc++;
		sw_desc->hw_desc[i] = desc;

		if (i == 0)
			sw_desc->first = dma;
		else
			sw_desc->hw_desc[i - 1]->ddadr = dma;
	}

	return sw_desc;
err:
	pxad_free_desc(&sw_desc->vd);
	return NULL;
}

static dma_cookie_t pxad_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct virt_dma_chan *vc = to_virt_chan(tx->chan);
	struct pxad_chan *chan = to_pxad_chan(&vc->chan);
	struct virt_dma_desc *vd_chained = NULL,
		*vd = container_of(tx, struct virt_dma_desc, tx);
	dma_cookie_t cookie;
	unsigned long flags;

	set_updater_desc(to_pxad_sw_desc(vd), tx->flags);

	spin_lock_irqsave(&vc->lock, flags);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&vc->desc_submitted) && pxad_try_hotchain(vc, vd)) {
		list_move_tail(&vd->node, &vc->desc_issued);
		dev_dbg(&chan->vc.chan.dev->device,
			"%s(): txd %p[%x]: submitted (hot linked)\n",
			__func__, vd, cookie);
		goto out;
	}

	/*
	 * Fallback to placing the tx in the submitted queue
	 */
	if (!list_empty(&vc->desc_submitted)) {
		vd_chained = list_entry(vc->desc_submitted.prev,
					struct virt_dma_desc, node);
		/*
		 * Only chain the descriptors if no new misalignment is
		 * introduced. If a new misalignment is chained, let the channel
		 * stop, and be relaunched in misalign mode from the irq
		 * handler.
		 */
		if (chan->misaligned || !to_pxad_sw_desc(vd)->misaligned)
			pxad_desc_chain(vd_chained, vd);
		else
			vd_chained = NULL;
	}
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): txd %p[%x]: submitted (%s linked)\n",
		__func__, vd, cookie, vd_chained ? "cold" : "not");
	list_move_tail(&vd->node, &vc->desc_submitted);
	chan->misaligned |= to_pxad_sw_desc(vd)->misaligned;

out:
	spin_unlock_irqrestore(&vc->lock, flags);
	return cookie;
}

static void pxad_issue_pending(struct dma_chan *dchan)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct virt_dma_desc *vd_first;
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);
	if (list_empty(&chan->vc.desc_submitted))
		goto out;

	vd_first = list_first_entry(&chan->vc.desc_submitted,
				    struct virt_dma_desc, node);
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): txd %p[%x]", __func__, vd_first, vd_first->tx.cookie);

	vchan_issue_pending(&chan->vc);
	if (!pxad_try_hotchain(&chan->vc, vd_first))
		pxad_launch_chan(chan, to_pxad_sw_desc(vd_first));
out:
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}

static inline struct dma_async_tx_descriptor *
pxad_tx_prep(struct virt_dma_chan *vc, struct virt_dma_desc *vd,
		 unsigned long tx_flags)
{
	struct dma_async_tx_descriptor *tx;
	struct pxad_chan *chan = container_of(vc, struct pxad_chan, vc);

	INIT_LIST_HEAD(&vd->node);
	tx = vchan_tx_prep(vc, vd, tx_flags);
	tx->tx_submit = pxad_tx_submit;
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): vc=%p txd=%p[%x] flags=0x%lx\n", __func__,
		vc, vd, vd->tx.cookie,
		tx_flags);

	return tx;
}

static void pxad_get_config(struct pxad_chan *chan,
			    enum dma_transfer_direction dir,
			    u32 *dcmd, u32 *dev_src, u32 *dev_dst)
{
	u32 maxburst = 0, dev_addr = 0;
	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
	struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);

	*dcmd = 0;
	if (dir == DMA_DEV_TO_MEM) {
		maxburst = chan->cfg.src_maxburst;
		width = chan->cfg.src_addr_width;
		dev_addr = chan->cfg.src_addr;
		*dev_src = dev_addr;
		*dcmd |= PXA_DCMD_INCTRGADDR;
		if (chan->drcmr <= pdev->nr_requestors)
			*dcmd |= PXA_DCMD_FLOWSRC;
	}
	if (dir == DMA_MEM_TO_DEV) {
		maxburst = chan->cfg.dst_maxburst;
		width = chan->cfg.dst_addr_width;
		dev_addr = chan->cfg.dst_addr;
		*dev_dst = dev_addr;
		*dcmd |= PXA_DCMD_INCSRCADDR;
		if (chan->drcmr <= pdev->nr_requestors)
			*dcmd |= PXA_DCMD_FLOWTRG;
	}
	if (dir == DMA_MEM_TO_MEM)
		*dcmd |= PXA_DCMD_BURST32 | PXA_DCMD_INCTRGADDR |
			PXA_DCMD_INCSRCADDR;

	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): dev_addr=0x%x maxburst=%d width=%d  dir=%d\n",
		__func__, dev_addr, maxburst, width, dir);

	if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
		*dcmd |= PXA_DCMD_WIDTH1;
	else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
		*dcmd |= PXA_DCMD_WIDTH2;
	else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
		*dcmd |= PXA_DCMD_WIDTH4;

	if (maxburst == 8)
		*dcmd |= PXA_DCMD_BURST8;
	else if (maxburst == 16)
		*dcmd |= PXA_DCMD_BURST16;
	else if (maxburst == 32)
		*dcmd |= PXA_DCMD_BURST32;
}
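/*
 * Worked example of the mapping above: a DMA_DEV_TO_MEM channel configured
 * with src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES and src_maxburst = 32,
 * on a valid requestor line, ends up with
 *   dcmd = PXA_DCMD_INCTRGADDR | PXA_DCMD_FLOWSRC |
 *          PXA_DCMD_WIDTH4 | PXA_DCMD_BURST32;
 * i.e. only the memory side increments, and the device paces the flow.
 */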

static struct dma_async_tx_descriptor *
pxad_prep_memcpy(struct dma_chan *dchan,
		 dma_addr_t dma_dst, dma_addr_t dma_src,
		 size_t len, unsigned long flags)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct pxad_desc_sw *sw_desc;
	struct pxad_desc_hw *hw_desc;
	u32 dcmd;
	unsigned int i, nb_desc = 0;
	size_t copy;

	if (!dchan || !len)
		return NULL;

	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): dma_dst=0x%lx dma_src=0x%lx len=%zu flags=%lx\n",
		__func__, (unsigned long)dma_dst, (unsigned long)dma_src,
		len, flags);
	pxad_get_config(chan, DMA_MEM_TO_MEM, &dcmd, NULL, NULL);

	nb_desc = DIV_ROUND_UP(len, PDMA_MAX_DESC_BYTES);
	sw_desc = pxad_alloc_desc(chan, nb_desc + 1);
	if (!sw_desc)
		return NULL;
	sw_desc->len = len;

	if (!IS_ALIGNED(dma_src, 1 << PDMA_ALIGNMENT) ||
	    !IS_ALIGNED(dma_dst, 1 << PDMA_ALIGNMENT))
		sw_desc->misaligned = true;

	i = 0;
	do {
		hw_desc = sw_desc->hw_desc[i++];
		copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES);
		hw_desc->dcmd = dcmd | (PXA_DCMD_LENGTH & copy);
		hw_desc->dsadr = dma_src;
		hw_desc->dtadr = dma_dst;
		len -= copy;
		dma_src += copy;
		dma_dst += copy;
	} while (len);
	set_updater_desc(sw_desc, flags);

	return pxad_tx_prep(&chan->vc, &sw_desc->vd, flags);
}

static struct dma_async_tx_descriptor *
pxad_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
		   unsigned int sg_len, enum dma_transfer_direction dir,
		   unsigned long flags, void *context)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct pxad_desc_sw *sw_desc;
	size_t len, avail;
	struct scatterlist *sg;
	dma_addr_t dma;
	u32 dcmd, dsadr = 0, dtadr = 0;
	unsigned int nb_desc, i, j = 0;

	if ((sgl == NULL) || (sg_len == 0))
		return NULL;

	pxad_get_config(chan, dir, &dcmd, &dsadr, &dtadr);
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): dir=%d flags=%lx\n", __func__, dir, flags);

	nb_desc = sg_nents_for_dma(sgl, sg_len, PDMA_MAX_DESC_BYTES);
	sw_desc = pxad_alloc_desc(chan, nb_desc + 1);
	if (!sw_desc)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		dma = sg_dma_address(sg);
		avail = sg_dma_len(sg);
		sw_desc->len += avail;

		do {
			len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES);
			if (dma & 0x7)
				sw_desc->misaligned = true;

			sw_desc->hw_desc[j]->dcmd =
				dcmd | (PXA_DCMD_LENGTH & len);
			sw_desc->hw_desc[j]->dsadr = dsadr ? dsadr : dma;
			sw_desc->hw_desc[j++]->dtadr = dtadr ? dtadr : dma;

			dma += len;
			avail -= len;
		} while (avail);
	}
	set_updater_desc(sw_desc, flags);

	return pxad_tx_prep(&chan->vc, &sw_desc->vd, flags);
}

static struct dma_async_tx_descriptor *
pxad_prep_dma_cyclic(struct dma_chan *dchan,
		     dma_addr_t buf_addr, size_t len, size_t period_len,
		     enum dma_transfer_direction dir, unsigned long flags)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct pxad_desc_sw *sw_desc;
	struct pxad_desc_hw **phw_desc;
	dma_addr_t dma;
	u32 dcmd, dsadr = 0, dtadr = 0;
	unsigned int nb_desc = 0;

	if (!dchan || !len || !period_len)
		return NULL;
	if ((dir != DMA_DEV_TO_MEM) && (dir != DMA_MEM_TO_DEV)) {
		dev_err(&chan->vc.chan.dev->device,
			"Unsupported direction for cyclic DMA\n");
		return NULL;
	}
	/* the buffer length must be a multiple of period_len */
	if (len % period_len != 0 || period_len > PDMA_MAX_DESC_BYTES ||
	    !IS_ALIGNED(period_len, 1 << PDMA_ALIGNMENT))
		return NULL;

	pxad_get_config(chan, dir, &dcmd, &dsadr, &dtadr);
	dcmd |= PXA_DCMD_ENDIRQEN | (PXA_DCMD_LENGTH & period_len);
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): buf_addr=0x%lx len=%zu period=%zu dir=%d flags=%lx\n",
		__func__, (unsigned long)buf_addr, len, period_len, dir, flags);

	nb_desc = DIV_ROUND_UP(period_len, PDMA_MAX_DESC_BYTES);
	nb_desc *= DIV_ROUND_UP(len, period_len);
	sw_desc = pxad_alloc_desc(chan, nb_desc + 1);
	if (!sw_desc)
		return NULL;
	sw_desc->cyclic = true;
	sw_desc->len = len;

	phw_desc = sw_desc->hw_desc;
	dma = buf_addr;
	do {
		phw_desc[0]->dsadr = dsadr ? dsadr : dma;
		phw_desc[0]->dtadr = dtadr ? dtadr : dma;
		phw_desc[0]->dcmd = dcmd;
		phw_desc++;
		dma += period_len;
		len -= period_len;
	} while (len);
	set_updater_desc(sw_desc, flags);

	return pxad_tx_prep(&chan->vc, &sw_desc->vd, flags);
}
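/*
 * In the cyclic case every period descriptor carries PXA_DCMD_ENDIRQEN, so
 * an interrupt fires after each period, and set_updater_desc() points the
 * last data descriptor's DDADR back to sw_desc->first.  The chain thus
 * becomes a ring the hardware loops over indefinitely; the trailing updater
 * descriptor is allocated but never reached, which also keeps
 * is_desc_completed() permanently false, as expected for a cyclic transfer.
 */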

static int pxad_config(struct dma_chan *dchan,
		       struct dma_slave_config *cfg)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);

	if (!dchan)
		return -EINVAL;

	chan->cfg = *cfg;
	return 0;
}

static int pxad_terminate_all(struct dma_chan *dchan)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);
	struct virt_dma_desc *vd = NULL;
	unsigned long flags;
	struct pxad_phy *phy;
	LIST_HEAD(head);

	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): vchan %p: terminate all\n", __func__, &chan->vc);

	spin_lock_irqsave(&chan->vc.lock, flags);
	vchan_get_all_descriptors(&chan->vc, &head);

	list_for_each_entry(vd, &head, node) {
		dev_dbg(&chan->vc.chan.dev->device,
			"%s(): cancelling txd %p[%x] (completed=%d)", __func__,
			vd, vd->tx.cookie, is_desc_completed(vd));
	}

	phy = chan->phy;
	if (phy) {
		phy_disable(chan->phy);
		pxad_free_phy(chan);
		chan->phy = NULL;
		spin_lock(&pdev->phy_lock);
		phy->vchan = NULL;
		spin_unlock(&pdev->phy_lock);
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
	vchan_dma_desc_free_list(&chan->vc, &head);

	return 0;
}

static unsigned int pxad_residue(struct pxad_chan *chan,
				 dma_cookie_t cookie)
{
	struct virt_dma_desc *vd = NULL;
	struct pxad_desc_sw *sw_desc = NULL;
	struct pxad_desc_hw *hw_desc = NULL;
	u32 curr, start, len, end, residue = 0;
	unsigned long flags;
	bool passed = false;
	int i;

	/*
	 * If the channel does not have a phy pointer anymore, it has already
	 * been completed. Therefore, its residue is 0.
	 */
	if (!chan->phy)
		return 0;

	spin_lock_irqsave(&chan->vc.lock, flags);

	vd = vchan_find_desc(&chan->vc, cookie);
	if (!vd)
		goto out;

	sw_desc = to_pxad_sw_desc(vd);
	if (sw_desc->hw_desc[0]->dcmd & PXA_DCMD_INCSRCADDR)
		curr = phy_readl_relaxed(chan->phy, DSADR);
	else
		curr = phy_readl_relaxed(chan->phy, DTADR);

	/*
	 * curr has to be actually read before checking descriptor
	 * completion, so that a curr value inside the status updater
	 * descriptor implies the following test returns true; the barrier
	 * prevents reordering of the curr load and the test.
	 */
	rmb();
	if (is_desc_completed(vd))
		goto out;

	for (i = 0; i < sw_desc->nb_desc - 1; i++) {
		hw_desc = sw_desc->hw_desc[i];
		if (sw_desc->hw_desc[0]->dcmd & PXA_DCMD_INCSRCADDR)
			start = hw_desc->dsadr;
		else
			start = hw_desc->dtadr;
		len = hw_desc->dcmd & PXA_DCMD_LENGTH;
		end = start + len;

		/*
		 * 'passed' will be latched once we found the descriptor
		 * which lies inside the boundaries of the curr
		 * pointer. All descriptors that occur in the list
		 * _after_ we found that partially handled descriptor
		 * are still to be processed and are hence added to the
		 * residual bytes counter.
		 */

		if (passed) {
			residue += len;
		} else if (curr >= start && curr <= end) {
			residue += end - curr;
			passed = true;
		}
	}
	if (!passed)
		residue = sw_desc->len;

out:
	spin_unlock_irqrestore(&chan->vc.lock, flags);
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): txd %p[%x] sw_desc=%p: %d\n",
		__func__, vd, cookie, sw_desc, residue);
	return residue;
}
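/*
 * Residue accounting walks only the data descriptors (nb_desc - 1 skips
 * the updater) and compares the channel's current address pointer against
 * each descriptor's [start, end] window.  The rmb() matters: the curr
 * register must be read before is_desc_completed() looks at the updater in
 * memory, otherwise a completion racing the read could yield a stale,
 * non-zero residue for a transfer that has already finished.
 */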

static enum dma_status pxad_tx_status(struct dma_chan *dchan,
				      dma_cookie_t cookie,
				      struct dma_tx_state *txstate)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	enum dma_status ret;

	if (cookie == chan->bus_error)
		return DMA_ERROR;

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (likely(txstate && (ret != DMA_ERROR)))
		dma_set_residue(txstate, pxad_residue(chan, cookie));

	return ret;
}

static void pxad_synchronize(struct dma_chan *dchan)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);

	wait_event(chan->wq_state, !is_chan_running(chan));
	vchan_synchronize(&chan->vc);
}

static void pxad_free_channels(struct dma_device *dmadev)
{
	struct pxad_chan *c, *cn;

	list_for_each_entry_safe(c, cn, &dmadev->channels,
				 vc.chan.device_node) {
		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
	}
}

static void pxad_remove(struct platform_device *op)
{
	struct pxad_device *pdev = platform_get_drvdata(op);

	pxad_cleanup_debugfs(pdev);
	pxad_free_channels(&pdev->slave);
}
static int pxad_init_phys(struct platform_device *op,
			  struct pxad_device *pdev,
			  unsigned int nb_phy_chans)
{
	int irq0, irq, nr_irq = 0, i, ret = 0;
	struct pxad_phy *phy;

	irq0 = platform_get_irq(op, 0);
	if (irq0 < 0)
		return irq0;

	pdev->phys = devm_kcalloc(&op->dev, nb_phy_chans,
				  sizeof(pdev->phys[0]), GFP_KERNEL);
	if (!pdev->phys)
		return -ENOMEM;

	for (i = 0; i < nb_phy_chans; i++)
		if (platform_get_irq_optional(op, i) > 0)
			nr_irq++;

	for (i = 0; i < nb_phy_chans; i++) {
		phy = &pdev->phys[i];
		phy->base = pdev->base;
		phy->idx = i;
		irq = platform_get_irq_optional(op, i);
		if ((nr_irq > 1) && (irq > 0))
			ret = devm_request_irq(&op->dev, irq,
					       pxad_chan_handler,
					       IRQF_SHARED, "pxa-dma", phy);
		if ((nr_irq == 1) && (i == 0))
			ret = devm_request_irq(&op->dev, irq0,
					       pxad_int_handler,
					       IRQF_SHARED, "pxa-dma", pdev);
		if (ret) {
			dev_err(pdev->slave.dev,
				"%s(): can't request irq %d:%d\n", __func__,
				irq, ret);
			return ret;
		}
	}

	return 0;
}
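/*
 * Two interrupt topologies are supported here: either the platform provides
 * one interrupt per physical channel (nr_irq > 1), in which case each phy
 * gets its own pxad_chan_handler, or a single muxed interrupt (nr_irq == 1)
 * is requested once and pxad_int_handler() demultiplexes pending channels
 * through the DINT register.
 */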

static const struct of_device_id pxad_dt_ids[] = {
	{ .compatible = "marvell,pdma-1.0", },
	{}
};
MODULE_DEVICE_TABLE(of, pxad_dt_ids);

static struct dma_chan *pxad_dma_xlate(struct of_phandle_args *dma_spec,
					   struct of_dma *ofdma)
{
	struct pxad_device *d = ofdma->of_dma_data;
	struct dma_chan *chan;

	chan = dma_get_any_slave_channel(&d->slave);
	if (!chan)
		return NULL;

	to_pxad_chan(chan)->drcmr = dma_spec->args[0];
	to_pxad_chan(chan)->prio = dma_spec->args[1];

	return chan;
}

static int pxad_init_dmadev(struct platform_device *op,
			    struct pxad_device *pdev,
			    unsigned int nr_phy_chans,
			    unsigned int nr_requestors)
{
	int ret;
	unsigned int i;
	struct pxad_chan *c;

	pdev->nr_chans = nr_phy_chans;
	pdev->nr_requestors = nr_requestors;
	INIT_LIST_HEAD(&pdev->slave.channels);
	pdev->slave.device_alloc_chan_resources = pxad_alloc_chan_resources;
	pdev->slave.device_free_chan_resources = pxad_free_chan_resources;
	pdev->slave.device_tx_status = pxad_tx_status;
	pdev->slave.device_issue_pending = pxad_issue_pending;
	pdev->slave.device_config = pxad_config;
	pdev->slave.device_synchronize = pxad_synchronize;
	pdev->slave.device_terminate_all = pxad_terminate_all;

	if (op->dev.coherent_dma_mask)
		dma_set_mask(&op->dev, op->dev.coherent_dma_mask);
	else
		dma_set_mask(&op->dev, DMA_BIT_MASK(32));

	ret = pxad_init_phys(op, pdev, nr_phy_chans);
	if (ret)
		return ret;

	for (i = 0; i < nr_phy_chans; i++) {
		c = devm_kzalloc(&op->dev, sizeof(*c), GFP_KERNEL);
		if (!c)
			return -ENOMEM;

		c->drcmr = U32_MAX;
		c->prio = PXAD_PRIO_LOWEST;
		c->vc.desc_free = pxad_free_desc;
		vchan_init(&c->vc, &pdev->slave);
		init_waitqueue_head(&c->wq_state);
	}

	return dmaenginem_async_device_register(&pdev->slave);
}
static int pxad_probe(struct platform_device *op)
{
	struct pxad_device *pdev;
	const struct dma_slave_map *slave_map = NULL;
	struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
	int ret, dma_channels = 0, nb_requestors = 0, slave_map_cnt = 0;
	const enum dma_slave_buswidth widths =
		DMA_SLAVE_BUSWIDTH_1_BYTE   | DMA_SLAVE_BUSWIDTH_2_BYTES |
		DMA_SLAVE_BUSWIDTH_4_BYTES;

	pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
	if (!pdev)
		return -ENOMEM;

	spin_lock_init(&pdev->phy_lock);

	pdev->base = devm_platform_ioremap_resource(op, 0);
	if (IS_ERR(pdev->base))
		return PTR_ERR(pdev->base);

	if (op->dev.of_node) {
		/* Parse new and deprecated dma-channels properties */
		if (of_property_read_u32(op->dev.of_node, "dma-channels",
					 &dma_channels))
			of_property_read_u32(op->dev.of_node, "#dma-channels",
					     &dma_channels);
		/* Parse new and deprecated dma-requests properties */
		ret = of_property_read_u32(op->dev.of_node, "dma-requests",
					   &nb_requestors);
		if (ret)
			ret = of_property_read_u32(op->dev.of_node, "#dma-requests",
						   &nb_requestors);
		if (ret) {
			dev_warn(pdev->slave.dev,
				 "#dma-requests set to default 32 as missing in OF: %d",
				 ret);
			nb_requestors = 32;
		}
	} else if (pdata && pdata->dma_channels) {
		dma_channels = pdata->dma_channels;
		nb_requestors = pdata->nb_requestors;
		slave_map = pdata->slave_map;
		slave_map_cnt = pdata->slave_map_cnt;
	} else {
		dma_channels = 32;	/* default: 32 channels */
	}

	dma_cap_set(DMA_SLAVE, pdev->slave.cap_mask);
	dma_cap_set(DMA_MEMCPY, pdev->slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, pdev->slave.cap_mask);
	dma_cap_set(DMA_PRIVATE, pdev->slave.cap_mask);
	pdev->slave.device_prep_dma_memcpy = pxad_prep_memcpy;
	pdev->slave.device_prep_slave_sg = pxad_prep_slave_sg;
	pdev->slave.device_prep_dma_cyclic = pxad_prep_dma_cyclic;
	pdev->slave.filter.map = slave_map;
	pdev->slave.filter.mapcnt = slave_map_cnt;
	pdev->slave.filter.fn = pxad_filter_fn;

	pdev->slave.copy_align = PDMA_ALIGNMENT;
	pdev->slave.src_addr_widths = widths;
	pdev->slave.dst_addr_widths = widths;
	pdev->slave.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
	pdev->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	pdev->slave.descriptor_reuse = true;

	pdev->slave.dev = &op->dev;
	ret = pxad_init_dmadev(op, pdev, dma_channels, nb_requestors);
	if (ret) {
		dev_err(pdev->slave.dev, "unable to register\n");
		return ret;
	}

	if (op->dev.of_node) {
		/* Device-tree DMA controller registration */
		ret = of_dma_controller_register(op->dev.of_node,
						 pxad_dma_xlate, pdev);
		if (ret < 0) {
			dev_err(pdev->slave.dev,
				"of_dma_controller_register failed\n");
			return ret;
		}
	}

	platform_set_drvdata(op, pdev);
	pxad_init_debugfs(pdev);
	dev_info(pdev->slave.dev, "initialized %d channels on %d requestors\n",
		 dma_channels, nb_requestors);
	return 0;
}

static const struct platform_device_id pxad_id_table[] = {
	{ "pxa-dma", },
	{ },
};

static struct platform_driver pxad_driver = {
	.driver		= {
		.name	= "pxa-dma",
		.of_match_table = pxad_dt_ids,
	},
	.id_table	= pxad_id_table,
	.probe		= pxad_probe,
	.remove		= pxad_remove,
};

static bool pxad_filter_fn(struct dma_chan *chan, void *param)
{
	struct pxad_chan *c = to_pxad_chan(chan);
	struct pxad_param *p = param;

	if (chan->device->dev->driver != &pxad_driver.driver)
		return false;

	c->drcmr = p->drcmr;
	c->prio = p->prio;

	return true;
}

module_platform_driver(pxad_driver);

MODULE_DESCRIPTION("Marvell PXA Peripheral DMA Driver");
MODULE_AUTHOR("Robert Jarzmik <robert.jarzmik@free.fr>");
MODULE_LICENSE("GPL v2");