xref: /linux/drivers/dma/fsl-edma-common.c (revision 002905eca5bedab08bafd9e325bbbb41670c7712)
1 // SPDX-License-Identifier: GPL-2.0+
2 //
3 // Copyright (c) 2013-2014 Freescale Semiconductor, Inc
4 // Copyright (c) 2017 Sysam, Angelo Dureghello  <angelo@sysam.it>
5 
6 #include <linux/dmapool.h>
7 #include <linux/module.h>
8 #include <linux/slab.h>
9 #include <linux/dma-mapping.h>
10 
11 #include "fsl-edma-common.h"
12 
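/*
 * Register offsets: the EDMA_* values follow the 32-channel v1
 * (Vybrid/mpc577x) layout, the EDMA64_* values cover the offsets that
 * differ on the 64-channel v2 (ColdFire mcf5441x) layout; see
 * fsl_edma_setup_regs() at the end of this file.
 */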
13 #define EDMA_CR			0x00
14 #define EDMA_ES			0x04
15 #define EDMA_ERQ		0x0C
16 #define EDMA_EEI		0x14
17 #define EDMA_SERQ		0x1B
18 #define EDMA_CERQ		0x1A
19 #define EDMA_SEEI		0x19
20 #define EDMA_CEEI		0x18
21 #define EDMA_CINT		0x1F
22 #define EDMA_CERR		0x1E
23 #define EDMA_SSRT		0x1D
24 #define EDMA_CDNE		0x1C
25 #define EDMA_INTR		0x24
26 #define EDMA_ERR		0x2C
27 
28 #define EDMA64_ERQH		0x08
29 #define EDMA64_EEIH		0x10
30 #define EDMA64_SERQ		0x18
31 #define EDMA64_CERQ		0x19
32 #define EDMA64_SEEI		0x1a
33 #define EDMA64_CEEI		0x1b
34 #define EDMA64_CINT		0x1c
35 #define EDMA64_CERR		0x1d
36 #define EDMA64_SSRT		0x1e
37 #define EDMA64_CDNE		0x1f
38 #define EDMA64_INTH		0x20
39 #define EDMA64_INTL		0x24
40 #define EDMA64_ERRH		0x28
41 #define EDMA64_ERRL		0x2c
42 
43 #define EDMA_TCD		0x1000
44 
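/*
 * Enable the channel's error interrupt (SEEI) and its hardware service
 * request (SERQ) so the engine starts honouring peripheral requests.
 */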
45 static void fsl_edma_enable_request(struct fsl_edma_chan *fsl_chan)
46 {
47 	struct edma_regs *regs = &fsl_chan->edma->regs;
48 	u32 ch = fsl_chan->vchan.chan.chan_id;
49 
50 	if (fsl_chan->edma->version == v1) {
51 		edma_writeb(fsl_chan->edma, EDMA_SEEI_SEEI(ch), regs->seei);
52 		edma_writeb(fsl_chan->edma, ch, regs->serq);
53 	} else {
54 		/* ColdFire is big-endian and natively accesses
55 		 * big-endian I/O peripherals
56 		 */
57 		iowrite8(EDMA_SEEI_SEEI(ch), regs->seei);
58 		iowrite8(ch, regs->serq);
59 	}
60 }
61 
62 void fsl_edma_disable_request(struct fsl_edma_chan *fsl_chan)
63 {
64 	struct edma_regs *regs = &fsl_chan->edma->regs;
65 	u32 ch = fsl_chan->vchan.chan.chan_id;
66 
67 	if (fsl_chan->edma->version == v1) {
68 		edma_writeb(fsl_chan->edma, ch, regs->cerq);
69 		edma_writeb(fsl_chan->edma, EDMA_CEEI_CEEI(ch), regs->ceei);
70 	} else {
71 		/* ColdFire is big-endian and natively accesses
72 		 * big-endian I/O peripherals
73 		 */
74 		iowrite8(ch, regs->cerq);
75 		iowrite8(EDMA_CEEI_CEEI(ch), regs->ceei);
76 	}
77 }
78 EXPORT_SYMBOL_GPL(fsl_edma_disable_request);
79 
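/*
 * Route a DMAMUX request source ("slot") to this eDMA channel, or disable
 * the routing.  Each DMAMUX instance serves n_chans / DMAMUX_NR channels;
 * on little-endian SoCs the byte-wide CHCFG registers sit in reversed
 * order within each 32-bit word, which the endian_diff[] fix-up of the
 * register offset compensates for.
 */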
80 void fsl_edma_chan_mux(struct fsl_edma_chan *fsl_chan,
81 			unsigned int slot, bool enable)
82 {
83 	u32 ch = fsl_chan->vchan.chan.chan_id;
84 	void __iomem *muxaddr;
85 	unsigned int chans_per_mux, ch_off;
86 	int endian_diff[4] = {3, 1, -1, -3};
87 
88 	chans_per_mux = fsl_chan->edma->n_chans / DMAMUX_NR;
89 	ch_off = fsl_chan->vchan.chan.chan_id % chans_per_mux;
90 
91 	if (!fsl_chan->edma->big_endian)
92 		ch_off += endian_diff[ch_off % 4];
93 
94 	muxaddr = fsl_chan->edma->muxbase[ch / chans_per_mux];
95 	slot = EDMAMUX_CHCFG_SOURCE(slot);
96 
97 	if (enable)
98 		iowrite8(EDMAMUX_CHCFG_ENBL | slot, muxaddr + ch_off);
99 	else
100 		iowrite8(EDMAMUX_CHCFG_DIS, muxaddr + ch_off);
101 }
102 EXPORT_SYMBOL_GPL(fsl_edma_chan_mux);
103 
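/*
 * Translate a dma_slave_buswidth (in bytes) into the TCD ATTR SSIZE/DSIZE
 * encoding; unsupported widths fall back to 32-bit transfers.
 */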
104 static unsigned int fsl_edma_get_tcd_attr(enum dma_slave_buswidth addr_width)
105 {
106 	switch (addr_width) {
107 	case 1:
108 		return EDMA_TCD_ATTR_SSIZE_8BIT | EDMA_TCD_ATTR_DSIZE_8BIT;
109 	case 2:
110 		return EDMA_TCD_ATTR_SSIZE_16BIT | EDMA_TCD_ATTR_DSIZE_16BIT;
111 	case 4:
112 		return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
113 	case 8:
114 		return EDMA_TCD_ATTR_SSIZE_64BIT | EDMA_TCD_ATTR_DSIZE_64BIT;
115 	default:
116 		return EDMA_TCD_ATTR_SSIZE_32BIT | EDMA_TCD_ATTR_DSIZE_32BIT;
117 	}
118 }
119 
120 void fsl_edma_free_desc(struct virt_dma_desc *vdesc)
121 {
122 	struct fsl_edma_desc *fsl_desc;
123 	int i;
124 
125 	fsl_desc = to_fsl_edma_desc(vdesc);
126 	for (i = 0; i < fsl_desc->n_tcds; i++)
127 		dma_pool_free(fsl_desc->echan->tcd_pool, fsl_desc->tcd[i].vtcd,
128 			      fsl_desc->tcd[i].ptcd);
129 	kfree(fsl_desc);
130 }
131 EXPORT_SYMBOL_GPL(fsl_edma_free_desc);
132 
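/*
 * dmaengine device_terminate_all callback: stop the hardware request and
 * free every queued and in-flight descriptor of this channel.
 */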
133 int fsl_edma_terminate_all(struct dma_chan *chan)
134 {
135 	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
136 	unsigned long flags;
137 	LIST_HEAD(head);
138 
139 	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
140 	fsl_edma_disable_request(fsl_chan);
141 	fsl_chan->edesc = NULL;
142 	fsl_chan->idle = true;
143 	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
144 	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
145 	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
146 	return 0;
147 }
148 EXPORT_SYMBOL_GPL(fsl_edma_terminate_all);
149 
150 int fsl_edma_pause(struct dma_chan *chan)
151 {
152 	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
153 	unsigned long flags;
154 
155 	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
156 	if (fsl_chan->edesc) {
157 		fsl_edma_disable_request(fsl_chan);
158 		fsl_chan->status = DMA_PAUSED;
159 		fsl_chan->idle = true;
160 	}
161 	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
162 	return 0;
163 }
164 EXPORT_SYMBOL_GPL(fsl_edma_pause);
165 
166 int fsl_edma_resume(struct dma_chan *chan)
167 {
168 	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
169 	unsigned long flags;
170 
171 	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
172 	if (fsl_chan->edesc) {
173 		fsl_edma_enable_request(fsl_chan);
174 		fsl_chan->status = DMA_IN_PROGRESS;
175 		fsl_chan->idle = false;
176 	}
177 	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
178 	return 0;
179 }
180 EXPORT_SYMBOL_GPL(fsl_edma_resume);
181 
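/*
 * Undo the dma_map_resource() mapping of the peripheral FIFO set up by
 * fsl_edma_prep_slave_dma(), if one exists.
 */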
182 static void fsl_edma_unprep_slave_dma(struct fsl_edma_chan *fsl_chan)
183 {
184 	if (fsl_chan->dma_dir != DMA_NONE)
185 		dma_unmap_resource(fsl_chan->vchan.chan.device->dev,
186 				   fsl_chan->dma_dev_addr,
187 				   fsl_chan->dma_dev_size,
188 				   fsl_chan->dma_dir, 0);
189 	fsl_chan->dma_dir = DMA_NONE;
190 }
191 
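/*
 * Map the peripheral FIFO register with dma_map_resource() so the eDMA
 * engine can address it.  The mapping is cached per channel and is only
 * redone when the direction (and therefore the slave config) changes.
 */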
192 static bool fsl_edma_prep_slave_dma(struct fsl_edma_chan *fsl_chan,
193 				    enum dma_transfer_direction dir)
194 {
195 	struct device *dev = fsl_chan->vchan.chan.device->dev;
196 	enum dma_data_direction dma_dir;
197 	phys_addr_t addr = 0;
198 	u32 size = 0;
199 
200 	switch (dir) {
201 	case DMA_MEM_TO_DEV:
202 		dma_dir = DMA_FROM_DEVICE;
203 		addr = fsl_chan->cfg.dst_addr;
204 		size = fsl_chan->cfg.dst_maxburst;
205 		break;
206 	case DMA_DEV_TO_MEM:
207 		dma_dir = DMA_TO_DEVICE;
208 		addr = fsl_chan->cfg.src_addr;
209 		size = fsl_chan->cfg.src_maxburst;
210 		break;
211 	default:
212 		dma_dir = DMA_NONE;
213 		break;
214 	}
215 
216 	/* Already mapped for this config? */
217 	if (fsl_chan->dma_dir == dma_dir)
218 		return true;
219 
220 	fsl_edma_unprep_slave_dma(fsl_chan);
221 
222 	fsl_chan->dma_dev_addr = dma_map_resource(dev, addr, size, dma_dir, 0);
223 	if (dma_mapping_error(dev, fsl_chan->dma_dev_addr))
224 		return false;
225 	fsl_chan->dma_dev_size = size;
226 	fsl_chan->dma_dir = dma_dir;
227 
228 	return true;
229 }
230 
231 int fsl_edma_slave_config(struct dma_chan *chan,
232 				 struct dma_slave_config *cfg)
233 {
234 	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
235 
236 	memcpy(&fsl_chan->cfg, cfg, sizeof(*cfg));
237 	fsl_edma_unprep_slave_dma(fsl_chan);
238 
239 	return 0;
240 }
241 EXPORT_SYMBOL_GPL(fsl_edma_slave_config);
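/*
 * For reference, a client driver typically fills a dma_slave_config and
 * applies it through dmaengine_slave_config() before preparing transfers.
 * A minimal, purely illustrative sketch (fifo_dma_addr is hypothetical):
 *
 *	struct dma_slave_config cfg = {
 *		.src_addr	= fifo_dma_addr,
 *		.src_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
 *		.src_maxburst	= 4,
 *	};
 *	dmaengine_slave_config(chan, &cfg);
 */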
242 
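/*
 * Compute the number of bytes still to be transferred for a descriptor:
 * sum nbytes * biter over all TCDs and, when the descriptor is currently
 * executing, subtract what the in-flight TCD has already moved by
 * comparing the hardware SADDR/DADDR with the TCD addresses.
 */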
243 static size_t fsl_edma_desc_residue(struct fsl_edma_chan *fsl_chan,
244 		struct virt_dma_desc *vdesc, bool in_progress)
245 {
246 	struct fsl_edma_desc *edesc = fsl_chan->edesc;
247 	struct edma_regs *regs = &fsl_chan->edma->regs;
248 	u32 ch = fsl_chan->vchan.chan.chan_id;
249 	enum dma_transfer_direction dir = edesc->dirn;
250 	dma_addr_t cur_addr, dma_addr;
251 	size_t len, size;
252 	int i;
253 
254 	/* calculate the total size in this desc */
255 	for (len = i = 0; i < fsl_chan->edesc->n_tcds; i++)
256 		len += le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
257 			* le16_to_cpu(edesc->tcd[i].vtcd->biter);
258 
259 	if (!in_progress)
260 		return len;
261 
262 	if (dir == DMA_MEM_TO_DEV)
263 		cur_addr = edma_readl(fsl_chan->edma, &regs->tcd[ch].saddr);
264 	else
265 		cur_addr = edma_readl(fsl_chan->edma, &regs->tcd[ch].daddr);
266 
267 	/* figure out which TCDs have finished and calculate the residue */
268 	for (i = 0; i < fsl_chan->edesc->n_tcds; i++) {
269 		size = le32_to_cpu(edesc->tcd[i].vtcd->nbytes)
270 			* le16_to_cpu(edesc->tcd[i].vtcd->biter);
271 		if (dir == DMA_MEM_TO_DEV)
272 			dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->saddr);
273 		else
274 			dma_addr = le32_to_cpu(edesc->tcd[i].vtcd->daddr);
275 
276 		len -= size;
277 		if (cur_addr >= dma_addr && cur_addr < dma_addr + size) {
278 			len += dma_addr + size - cur_addr;
279 			break;
280 		}
281 	}
282 
283 	return len;
284 }
285 
286 enum dma_status fsl_edma_tx_status(struct dma_chan *chan,
287 		dma_cookie_t cookie, struct dma_tx_state *txstate)
288 {
289 	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
290 	struct virt_dma_desc *vdesc;
291 	enum dma_status status;
292 	unsigned long flags;
293 
294 	status = dma_cookie_status(chan, cookie, txstate);
295 	if (status == DMA_COMPLETE)
296 		return status;
297 
298 	if (!txstate)
299 		return fsl_chan->status;
300 
301 	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
302 	vdesc = vchan_find_desc(&fsl_chan->vchan, cookie);
303 	if (fsl_chan->edesc && cookie == fsl_chan->edesc->vdesc.tx.cookie)
304 		txstate->residue =
305 			fsl_edma_desc_residue(fsl_chan, vdesc, true);
306 	else if (vdesc)
307 		txstate->residue =
308 			fsl_edma_desc_residue(fsl_chan, vdesc, false);
309 	else
310 		txstate->residue = 0;
311 
312 	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
313 
314 	return fsl_chan->status;
315 }
316 EXPORT_SYMBOL_GPL(fsl_edma_tx_status);
317 
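/*
 * Program one hardware TCD from its little-endian in-memory image.  CSR
 * is cleared first and written last, presumably so that START/ESG only
 * take effect once the rest of the TCD has been loaded.
 */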
318 static void fsl_edma_set_tcd_regs(struct fsl_edma_chan *fsl_chan,
319 				  struct fsl_edma_hw_tcd *tcd)
320 {
321 	struct fsl_edma_engine *edma = fsl_chan->edma;
322 	struct edma_regs *regs = &fsl_chan->edma->regs;
323 	u32 ch = fsl_chan->vchan.chan.chan_id;
324 
325 	/*
326 	 * TCD parameters are stored in struct fsl_edma_hw_tcd in little-
327 	 * endian format. However, the TCD registers must be loaded in
328 	 * big- or little-endian format, following the eDMA engine's endianness.
329 	 */
330 	edma_writew(edma, 0,  &regs->tcd[ch].csr);
331 	edma_writel(edma, le32_to_cpu(tcd->saddr), &regs->tcd[ch].saddr);
332 	edma_writel(edma, le32_to_cpu(tcd->daddr), &regs->tcd[ch].daddr);
333 
334 	edma_writew(edma, le16_to_cpu(tcd->attr), &regs->tcd[ch].attr);
335 	edma_writew(edma, le16_to_cpu(tcd->soff), &regs->tcd[ch].soff);
336 
337 	edma_writel(edma, le32_to_cpu(tcd->nbytes), &regs->tcd[ch].nbytes);
338 	edma_writel(edma, le32_to_cpu(tcd->slast), &regs->tcd[ch].slast);
339 
340 	edma_writew(edma, le16_to_cpu(tcd->citer), &regs->tcd[ch].citer);
341 	edma_writew(edma, le16_to_cpu(tcd->biter), &regs->tcd[ch].biter);
342 	edma_writew(edma, le16_to_cpu(tcd->doff), &regs->tcd[ch].doff);
343 
344 	edma_writel(edma, le32_to_cpu(tcd->dlast_sga),
345 			&regs->tcd[ch].dlast_sga);
346 
347 	edma_writew(edma, le16_to_cpu(tcd->csr), &regs->tcd[ch].csr);
348 }
349 
350 static inline
351 void fsl_edma_fill_tcd(struct fsl_edma_hw_tcd *tcd, u32 src, u32 dst,
352 		       u16 attr, u16 soff, u32 nbytes, u32 slast, u16 citer,
353 		       u16 biter, u16 doff, u32 dlast_sga, bool major_int,
354 		       bool disable_req, bool enable_sg)
355 {
356 	u16 csr = 0;
357 
358 	/*
359 	 * eDMA hardware SGs require the TCDs to be stored in little
360 	 * endian format irrespective of the register endian model.
361 	 * So we put the values in little-endian in memory, and let
362 	 * fsl_edma_set_tcd_regs() do any swap when loading the registers.
363 	 */
364 	tcd->saddr = cpu_to_le32(src);
365 	tcd->daddr = cpu_to_le32(dst);
366 
367 	tcd->attr = cpu_to_le16(attr);
368 
369 	tcd->soff = cpu_to_le16(soff);
370 
371 	tcd->nbytes = cpu_to_le32(nbytes);
372 	tcd->slast = cpu_to_le32(slast);
373 
374 	tcd->citer = cpu_to_le16(EDMA_TCD_CITER_CITER(citer));
375 	tcd->doff = cpu_to_le16(doff);
376 
377 	tcd->dlast_sga = cpu_to_le32(dlast_sga);
378 
379 	tcd->biter = cpu_to_le16(EDMA_TCD_BITER_BITER(biter));
380 	if (major_int)
381 		csr |= EDMA_TCD_CSR_INT_MAJOR;
382 
383 	if (disable_req)
384 		csr |= EDMA_TCD_CSR_D_REQ;
385 
386 	if (enable_sg)
387 		csr |= EDMA_TCD_CSR_E_SG;
388 
389 	tcd->csr = cpu_to_le16(csr);
390 }
391 
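/*
 * Allocate a software descriptor carrying sg_len hardware TCDs from the
 * channel's DMA pool; on failure everything allocated so far is returned
 * to the pool.
 */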
392 static struct fsl_edma_desc *fsl_edma_alloc_desc(struct fsl_edma_chan *fsl_chan,
393 		int sg_len)
394 {
395 	struct fsl_edma_desc *fsl_desc;
396 	int i;
397 
398 	fsl_desc = kzalloc(struct_size(fsl_desc, tcd, sg_len), GFP_NOWAIT);
399 	if (!fsl_desc)
400 		return NULL;
401 
402 	fsl_desc->echan = fsl_chan;
403 	fsl_desc->n_tcds = sg_len;
404 	for (i = 0; i < sg_len; i++) {
405 		fsl_desc->tcd[i].vtcd = dma_pool_alloc(fsl_chan->tcd_pool,
406 					GFP_NOWAIT, &fsl_desc->tcd[i].ptcd);
407 		if (!fsl_desc->tcd[i].vtcd)
408 			goto err;
409 	}
410 	return fsl_desc;
411 
412 err:
413 	while (--i >= 0)
414 		dma_pool_free(fsl_chan->tcd_pool, fsl_desc->tcd[i].vtcd,
415 				fsl_desc->tcd[i].ptcd);
416 	kfree(fsl_desc);
417 	return NULL;
418 }
419 
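/*
 * Cyclic transfers use one TCD per period.  Every TCD raises a major-loop
 * interrupt and scatter/gather-links to the next TCD, with the last one
 * linking back to the first, so the ring runs until terminated.
 */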
420 struct dma_async_tx_descriptor *fsl_edma_prep_dma_cyclic(
421 		struct dma_chan *chan, dma_addr_t dma_addr, size_t buf_len,
422 		size_t period_len, enum dma_transfer_direction direction,
423 		unsigned long flags)
424 {
425 	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
426 	struct fsl_edma_desc *fsl_desc;
427 	dma_addr_t dma_buf_next;
428 	int sg_len, i;
429 	u32 src_addr, dst_addr, last_sg, nbytes;
430 	u16 soff, doff, iter;
431 
432 	if (!is_slave_direction(direction))
433 		return NULL;
434 
435 	if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
436 		return NULL;
437 
438 	sg_len = buf_len / period_len;
439 	fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
440 	if (!fsl_desc)
441 		return NULL;
442 	fsl_desc->iscyclic = true;
443 	fsl_desc->dirn = direction;
444 
445 	dma_buf_next = dma_addr;
446 	if (direction == DMA_MEM_TO_DEV) {
447 		fsl_chan->attr =
448 			fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
449 		nbytes = fsl_chan->cfg.dst_addr_width *
450 			fsl_chan->cfg.dst_maxburst;
451 	} else {
452 		fsl_chan->attr =
453 			fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
454 		nbytes = fsl_chan->cfg.src_addr_width *
455 			fsl_chan->cfg.src_maxburst;
456 	}
457 
458 	iter = period_len / nbytes;
459 
460 	for (i = 0; i < sg_len; i++) {
461 		if (dma_buf_next >= dma_addr + buf_len)
462 			dma_buf_next = dma_addr;
463 
464 		/* get the DMA address of the next TCD, for scatter/gather linking */
465 		last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;
466 
467 		if (direction == DMA_MEM_TO_DEV) {
468 			src_addr = dma_buf_next;
469 			dst_addr = fsl_chan->dma_dev_addr;
470 			soff = fsl_chan->cfg.dst_addr_width;
471 			doff = 0;
472 		} else {
473 			src_addr = fsl_chan->dma_dev_addr;
474 			dst_addr = dma_buf_next;
475 			soff = 0;
476 			doff = fsl_chan->cfg.src_addr_width;
477 		}
478 
479 		fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr, dst_addr,
480 				  fsl_chan->attr, soff, nbytes, 0, iter,
481 				  iter, doff, last_sg, true, false, true);
482 		dma_buf_next += period_len;
483 	}
484 
485 	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
486 }
487 EXPORT_SYMBOL_GPL(fsl_edma_prep_dma_cyclic);
488 
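/*
 * Slave scatter/gather: one TCD per SG entry.  Intermediate TCDs link to
 * the next TCD without raising an interrupt; the last TCD raises the
 * major-loop interrupt, sets D_REQ so the hardware request is disabled at
 * completion, and does not link any further.
 */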
489 struct dma_async_tx_descriptor *fsl_edma_prep_slave_sg(
490 		struct dma_chan *chan, struct scatterlist *sgl,
491 		unsigned int sg_len, enum dma_transfer_direction direction,
492 		unsigned long flags, void *context)
493 {
494 	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
495 	struct fsl_edma_desc *fsl_desc;
496 	struct scatterlist *sg;
497 	u32 src_addr, dst_addr, last_sg, nbytes;
498 	u16 soff, doff, iter;
499 	int i;
500 
501 	if (!is_slave_direction(direction))
502 		return NULL;
503 
504 	if (!fsl_edma_prep_slave_dma(fsl_chan, direction))
505 		return NULL;
506 
507 	fsl_desc = fsl_edma_alloc_desc(fsl_chan, sg_len);
508 	if (!fsl_desc)
509 		return NULL;
510 	fsl_desc->iscyclic = false;
511 	fsl_desc->dirn = direction;
512 
513 	if (direction == DMA_MEM_TO_DEV) {
514 		fsl_chan->attr =
515 			fsl_edma_get_tcd_attr(fsl_chan->cfg.dst_addr_width);
516 		nbytes = fsl_chan->cfg.dst_addr_width *
517 			fsl_chan->cfg.dst_maxburst;
518 	} else {
519 		fsl_chan->attr =
520 			fsl_edma_get_tcd_attr(fsl_chan->cfg.src_addr_width);
521 		nbytes = fsl_chan->cfg.src_addr_width *
522 			fsl_chan->cfg.src_maxburst;
523 	}
524 
525 	for_each_sg(sgl, sg, sg_len, i) {
526 		/* get the DMA address of the next TCD, for scatter/gather linking */
527 		last_sg = fsl_desc->tcd[(i + 1) % sg_len].ptcd;
528 
529 		if (direction == DMA_MEM_TO_DEV) {
530 			src_addr = sg_dma_address(sg);
531 			dst_addr = fsl_chan->dma_dev_addr;
532 			soff = fsl_chan->cfg.dst_addr_width;
533 			doff = 0;
534 		} else {
535 			src_addr = fsl_chan->dma_dev_addr;
536 			dst_addr = sg_dma_address(sg);
537 			soff = 0;
538 			doff = fsl_chan->cfg.src_addr_width;
539 		}
540 
541 		iter = sg_dma_len(sg) / nbytes;
542 		if (i < sg_len - 1) {
543 			last_sg = fsl_desc->tcd[(i + 1)].ptcd;
544 			fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
545 					  dst_addr, fsl_chan->attr, soff,
546 					  nbytes, 0, iter, iter, doff, last_sg,
547 					  false, false, true);
548 		} else {
549 			last_sg = 0;
550 			fsl_edma_fill_tcd(fsl_desc->tcd[i].vtcd, src_addr,
551 					  dst_addr, fsl_chan->attr, soff,
552 					  nbytes, 0, iter, iter, doff, last_sg,
553 					  true, true, false);
554 		}
555 	}
556 
557 	return vchan_tx_prep(&fsl_chan->vchan, &fsl_desc->vdesc, flags);
558 }
559 EXPORT_SYMBOL_GPL(fsl_edma_prep_slave_sg);
560 
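/*
 * Start the next queued descriptor, if any: load its first TCD into the
 * hardware and enable the channel's request.  Callers hold the channel's
 * vchan lock.
 */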
561 void fsl_edma_xfer_desc(struct fsl_edma_chan *fsl_chan)
562 {
563 	struct virt_dma_desc *vdesc;
564 
565 	vdesc = vchan_next_desc(&fsl_chan->vchan);
566 	if (!vdesc)
567 		return;
568 	fsl_chan->edesc = to_fsl_edma_desc(vdesc);
569 	fsl_edma_set_tcd_regs(fsl_chan, fsl_chan->edesc->tcd[0].vtcd);
570 	fsl_edma_enable_request(fsl_chan);
571 	fsl_chan->status = DMA_IN_PROGRESS;
572 	fsl_chan->idle = false;
573 }
574 EXPORT_SYMBOL_GPL(fsl_edma_xfer_desc);
575 
576 void fsl_edma_issue_pending(struct dma_chan *chan)
577 {
578 	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
579 	unsigned long flags;
580 
581 	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
582 
583 	if (unlikely(fsl_chan->pm_state != RUNNING)) {
584 		spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
585 		/* cannot submit due to suspend */
586 		return;
587 	}
588 
589 	if (vchan_issue_pending(&fsl_chan->vchan) && !fsl_chan->edesc)
590 		fsl_edma_xfer_desc(fsl_chan);
591 
592 	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
593 }
594 EXPORT_SYMBOL_GPL(fsl_edma_issue_pending);
595 
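/*
 * Create the per-channel dma_pool of hardware TCDs, requested 32-byte
 * aligned to match the alignment the eDMA scatter/gather linking expects.
 */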
596 int fsl_edma_alloc_chan_resources(struct dma_chan *chan)
597 {
598 	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
599 
600 	fsl_chan->tcd_pool = dma_pool_create("tcd_pool", chan->device->dev,
601 				sizeof(struct fsl_edma_hw_tcd),
602 				32, 0);
603 	return 0;
604 }
605 EXPORT_SYMBOL_GPL(fsl_edma_alloc_chan_resources);
606 
607 void fsl_edma_free_chan_resources(struct dma_chan *chan)
608 {
609 	struct fsl_edma_chan *fsl_chan = to_fsl_edma_chan(chan);
610 	unsigned long flags;
611 	LIST_HEAD(head);
612 
613 	spin_lock_irqsave(&fsl_chan->vchan.lock, flags);
614 	fsl_edma_disable_request(fsl_chan);
615 	fsl_edma_chan_mux(fsl_chan, 0, false);
616 	fsl_chan->edesc = NULL;
617 	vchan_get_all_descriptors(&fsl_chan->vchan, &head);
618 	fsl_edma_unprep_slave_dma(fsl_chan);
619 	spin_unlock_irqrestore(&fsl_chan->vchan.lock, flags);
620 
621 	vchan_dma_desc_free_list(&fsl_chan->vchan, &head);
622 	dma_pool_destroy(fsl_chan->tcd_pool);
623 	fsl_chan->tcd_pool = NULL;
624 }
625 EXPORT_SYMBOL_GPL(fsl_edma_free_chan_resources);
626 
627 void fsl_edma_cleanup_vchan(struct dma_device *dmadev)
628 {
629 	struct fsl_edma_chan *chan, *_chan;
630 
631 	list_for_each_entry_safe(chan, _chan,
632 				&dmadev->channels, vchan.chan.device_node) {
633 		list_del(&chan->vchan.chan.device_node);
634 		tasklet_kill(&chan->vchan.task);
635 	}
636 }
637 EXPORT_SYMBOL_GPL(fsl_edma_cleanup_vchan);
638 
639 /*
640  * On the 32-channel Vybrid/mpc577x eDMA version (here called "v1"),
641  * the register offsets differ from those of the 64-channel ColdFire
642  * mcf5441x eDMA (here called "v2").
643  *
644  * This function sets up the register offsets for the declared version,
645  * so it must be called from xxx_edma_probe() just after the edma
646  * "version" and "membase" fields have been set.
647  */
648 void fsl_edma_setup_regs(struct fsl_edma_engine *edma)
649 {
650 	edma->regs.cr = edma->membase + EDMA_CR;
651 	edma->regs.es = edma->membase + EDMA_ES;
652 	edma->regs.erql = edma->membase + EDMA_ERQ;
653 	edma->regs.eeil = edma->membase + EDMA_EEI;
654 
655 	edma->regs.serq = edma->membase + ((edma->version == v1) ?
656 			EDMA_SERQ : EDMA64_SERQ);
657 	edma->regs.cerq = edma->membase + ((edma->version == v1) ?
658 			EDMA_CERQ : EDMA64_CERQ);
659 	edma->regs.seei = edma->membase + ((edma->version == v1) ?
660 			EDMA_SEEI : EDMA64_SEEI);
661 	edma->regs.ceei = edma->membase + ((edma->version == v1) ?
662 			EDMA_CEEI : EDMA64_CEEI);
663 	edma->regs.cint = edma->membase + ((edma->version == v1) ?
664 			EDMA_CINT : EDMA64_CINT);
665 	edma->regs.cerr = edma->membase + ((edma->version == v1) ?
666 			EDMA_CERR : EDMA64_CERR);
667 	edma->regs.ssrt = edma->membase + ((edma->version == v1) ?
668 			EDMA_SSRT : EDMA64_SSRT);
669 	edma->regs.cdne = edma->membase + ((edma->version == v1) ?
670 			EDMA_CDNE : EDMA64_CDNE);
671 	edma->regs.intl = edma->membase + ((edma->version == v1) ?
672 			EDMA_INTR : EDMA64_INTL);
673 	edma->regs.errl = edma->membase + ((edma->version == v1) ?
674 			EDMA_ERR : EDMA64_ERRL);
675 
676 	if (edma->version == v2) {
677 		edma->regs.erqh = edma->membase + EDMA64_ERQH;
678 		edma->regs.eeih = edma->membase + EDMA64_EEIH;
679 		edma->regs.errh = edma->membase + EDMA64_ERRH;
680 		edma->regs.inth = edma->membase + EDMA64_INTH;
681 	}
682 
683 	edma->regs.tcd = edma->membase + EDMA_TCD;
684 }
685 EXPORT_SYMBOL_GPL(fsl_edma_setup_regs);
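/*
 * For reference, a minimal sketch of the expected probe-time sequence
 * (illustrative only, not copied from a specific driver):
 *
 *	fsl_edma->version = v1;			// v2 on mcf5441x
 *	fsl_edma->membase = devm_ioremap_resource(&pdev->dev, res);
 *	if (IS_ERR(fsl_edma->membase))
 *		return PTR_ERR(fsl_edma->membase);
 *	fsl_edma_setup_regs(fsl_edma);
 */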
686 
687 MODULE_LICENSE("GPL v2");
688